Dataset schema (one record per sample below):
  code        string  (2 to 1.05M characters)  - file contents
  repo_name   string  (5 to 104 characters)    - GitHub repository
  path        string  (4 to 251 characters)    - file path within the repository
  language    string  (1 class)                - always Python in this split
  license     string  (15 classes)             - repository license
  size        int32   (2 to 1.05M)             - file size in bytes
import os
import tempfile
import unittest
import logging

from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.node import PipeUnderground

log = logging.getLogger(__name__)


class TestPipeUnderground(unittest.TestCase):

    def setUp(self):
        self.fd, self.path = tempfile.mkstemp()

    def tearDown(self):
        os.remove(self.path)

    def test_create_pipeunderground(self):
        pyidf.validation_level = ValidationLevel.error

        obj = PipeUnderground()
        # alpha
        var_name = "Name"
        obj.name = var_name
        # object-list
        var_construction_name = "object-list|Construction Name"
        obj.construction_name = var_construction_name
        # node
        var_fluid_inlet_node_name = "node|Fluid Inlet Node Name"
        obj.fluid_inlet_node_name = var_fluid_inlet_node_name
        # node
        var_fluid_outlet_node_name = "node|Fluid Outlet Node Name"
        obj.fluid_outlet_node_name = var_fluid_outlet_node_name
        # alpha
        var_sun_exposure = "SunExposed"
        obj.sun_exposure = var_sun_exposure
        # real
        var_pipe_inside_diameter = 0.0001
        obj.pipe_inside_diameter = var_pipe_inside_diameter
        # real
        var_pipe_length = 0.0001
        obj.pipe_length = var_pipe_length
        # alpha
        var_soil_material_name = "Soil Material Name"
        obj.soil_material_name = var_soil_material_name
        # alpha
        var_undisturbed_ground_temperature_model_type = "Site:GroundTemperature:Undisturbed:FiniteDifference"
        obj.undisturbed_ground_temperature_model_type = var_undisturbed_ground_temperature_model_type
        # object-list
        var_undisturbed_ground_temperature_model_name = "object-list|Undisturbed Ground Temperature Model Name"
        obj.undisturbed_ground_temperature_model_name = var_undisturbed_ground_temperature_model_name

        idf = IDF()
        idf.add(obj)
        idf.save(self.path, check=False)

        with open(self.path, mode='r') as f:
            for line in f:
                log.debug(line.strip())

        idf2 = IDF(self.path)
        self.assertEqual(idf2.pipeundergrounds[0].name, var_name)
        self.assertEqual(idf2.pipeundergrounds[0].construction_name, var_construction_name)
        self.assertEqual(idf2.pipeundergrounds[0].fluid_inlet_node_name, var_fluid_inlet_node_name)
        self.assertEqual(idf2.pipeundergrounds[0].fluid_outlet_node_name, var_fluid_outlet_node_name)
        self.assertEqual(idf2.pipeundergrounds[0].sun_exposure, var_sun_exposure)
        self.assertAlmostEqual(idf2.pipeundergrounds[0].pipe_inside_diameter, var_pipe_inside_diameter)
        self.assertAlmostEqual(idf2.pipeundergrounds[0].pipe_length, var_pipe_length)
        self.assertEqual(idf2.pipeundergrounds[0].soil_material_name, var_soil_material_name)
        self.assertEqual(idf2.pipeundergrounds[0].undisturbed_ground_temperature_model_type, var_undisturbed_ground_temperature_model_type)
        self.assertEqual(idf2.pipeundergrounds[0].undisturbed_ground_temperature_model_name, var_undisturbed_ground_temperature_model_name)
rbuffat/pyidf
tests/test_pipeunderground.py
Python
apache-2.0
3,135
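A minimal round-trip with the pyidf API exercised by this test, shown as a hedged sketch: the import paths, ValidationLevel, IDF.add/save, and the pipeundergrounds accessor are all taken from the test itself, while the field values and the model.idf file name are illustrative.

import pyidf
from pyidf import ValidationLevel
from pyidf.idf import IDF
from pyidf.node import PipeUnderground

pyidf.validation_level = ValidationLevel.error  # reject invalid field values

pipe = PipeUnderground()
pipe.name = "Service Pipe"           # illustrative object name
pipe.pipe_length = 15.0              # illustrative length (EnergyPlus uses SI units)

idf = IDF()
idf.add(pipe)
idf.save("model.idf", check=False)   # skip cross-reference checks, as the test does

idf2 = IDF("model.idf")              # re-parse the file we just wrote
print(idf2.pipeundergrounds[0].name) # -> "Service Pipe"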
#!/usr/bin/env python
import wx
import logging
import webbrowser
import ConfigParser
import argparse

import wx.lib.delayedresult as dr
import wx.lib.agw.hyperlink as hl

from os import path, makedirs
from distutils.version import LooseVersion
from github3 import GitHub

from GUI import UploaderAppFrame, SettingsDialog
from appdirs import user_config_dir, user_log_dir
from wx.lib.pubsub import pub

path_to_module = path.dirname(__file__)
app_config = path.join(path_to_module, 'irida-uploader.cfg')
if not path.isfile(app_config):
    app_config = path.join(path_to_module, '..', 'irida-uploader.cfg')

if not path.exists(user_log_dir("iridaUploader")):
    makedirs(user_log_dir("iridaUploader"))

log_format = '%(asctime)s %(levelname)s\t%(filename)s:%(funcName)s:%(lineno)d - %(message)s'
# if any logging gets called before `basicConfig`, our attempts to configure the
# logging here will be clobbered. This removes any existing handlers that might
# have been set up when some other log message was printed, so that we can
# actually configure the logging the way we want.
logging.getLogger().handlers = []
logging.basicConfig(level=logging.DEBUG,
                    filename=path.join(user_log_dir("iridaUploader"), 'irida-uploader.log'),
                    format=log_format,
                    filemode='w')

console = logging.StreamHandler()
console.setLevel(logging.DEBUG)
console.setFormatter(logging.Formatter(log_format))
logging.getLogger().addHandler(console)


class Uploader(wx.App):

    def __init__(self, show_new_ui=False, redirect=False, filename=None):
        wx.App.__init__(self, redirect, filename)
        self.get_app_info()
        self.check_for_update()

        user_config_file = path.join(user_config_dir("iridaUploader"), "config.conf")
        if not path.exists(user_config_file):
            dialog = SettingsDialog(first_run=True)
            dialog.ShowModal()
        self._show_main_app()

    def _show_main_app(self):
        frame = UploaderAppFrame(app_name=self.__app_name__,
                                 app_version=self.__app_version__,
                                 app_url=self.url)
        frame.Show()

    def get_app_info(self):
        config_parser = ConfigParser.ConfigParser()
        config_parser.read(app_config)
        self.__app_version__ = config_parser.get('Application', 'version', None)
        self.__app_name__ = config_parser.get('Application', 'name', None)

    @property
    def url(self):
        return "https://github.com/phac-nml/irida-miseq-uploader"

    def check_for_update(self):

        def find_update():
            logging.debug("Checking remote for new updates.")
            try:
                gh = GitHub()
                repo = gh.repository("phac-nml", "irida-miseq-uploader")
                # get the latest tag from github
                return next(repo.iter_tags(number=1))
            except:
                logging.warn("Couldn't reach github to check for new version.")
                raise

        def handle_update(result):
            latest_tag = result.get()
            logging.debug("Found latest version: [{}]".format(latest_tag))
            release_url = self.url + "/releases/latest"
            if LooseVersion(self.__app_version__) < LooseVersion(latest_tag.name):
                logging.info("Newer version found.")
                dialog = NewVersionMessageDialog(
                    parent=None,
                    id=wx.ID_ANY,
                    message=("A new version of the IRIDA MiSeq "
                             "Uploader tool is available. You can"
                             " download the latest version from "),
                    title="IRIDA MiSeq Uploader update available",
                    download_url=release_url,
                    style=wx.CAPTION | wx.CLOSE_BOX | wx.STAY_ON_TOP)
                dialog.ShowModal()
                dialog.Destroy()
            else:
                logging.debug("No new versions found.")

        dr.startWorker(handle_update, find_update)


class NewVersionMessageDialog(wx.Dialog):

    def __init__(self, parent, id, title, message, download_url,
                 size=wx.DefaultSize, pos=wx.DefaultPosition,
                 style=wx.DEFAULT_DIALOG_STYLE, name='dialog'):
        wx.Dialog.__init__(self, parent, id, title, pos, size, style, name)

        label = wx.StaticText(self, label=message)
        button = wx.Button(self, id=wx.ID_OK, label="Close")
        button.SetDefault()

        line = wx.StaticLine(self, wx.ID_ANY, size=(20, -1), style=wx.LI_HORIZONTAL)
        download_ctrl = hl.HyperLinkCtrl(self, wx.ID_ANY, download_url, URL=download_url)

        sizer = wx.BoxSizer(wx.VERTICAL)
        button_sizer = wx.StdDialogButtonSizer()
        button_sizer.AddButton(button)
        button_sizer.Realize()

        sizer.Add(label, 0, wx.ALIGN_CENTER | wx.ALL, 5)
        sizer.Add(download_ctrl, 0, wx.ALL, 10)
        sizer.Add(line, 0, wx.GROW | wx.ALIGN_CENTER_VERTICAL | wx.RIGHT | wx.TOP, 5)
        sizer.Add(button_sizer, 0, wx.ALIGN_CENTER_VERTICAL | wx.ALL, 5)

        self.SetSizer(sizer)
        sizer.Fit(self)


def main():
    app = Uploader()
    app.MainLoop()


if __name__ == "__main__":
    main()
phac-nml/irida-miseq-uploader
run_IRIDA_Uploader.py
Python
apache-2.0
5,119
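The handler-clearing trick in this launcher is worth isolating: logging.basicConfig is a silent no-op once the root logger already has a handler, so a single early log call would defeat the file configuration. A standalone sketch of the same pattern (the app.log file name is illustrative):

import logging

# Any module-level logging call made before basicConfig() installs a default
# handler, after which basicConfig() silently does nothing. Dropping existing
# handlers first guarantees our configuration actually takes effect.
logging.warning("early message installs a default handler")

logging.getLogger().handlers = []          # discard whatever was set up
logging.basicConfig(level=logging.DEBUG,
                    filename="app.log",    # illustrative file name
                    filemode="w",
                    format="%(asctime)s %(levelname)s - %(message)s")

# Mirror everything to the console, as the uploader does.
console = logging.StreamHandler()
console.setLevel(logging.DEBUG)
console.setFormatter(logging.Formatter("%(asctime)s %(levelname)s - %(message)s"))
logging.getLogger().addHandler(console)

logging.debug("this lands in app.log and on the console")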
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""A test module in a sub-package."""

__author__ = '[email protected] (Robert Schuppenies)'

import unittest


class FooTest(unittest.TestCase):

    def test_pass(self):
        pass

    def test_fail(self):
        self.assertTrue(False)
zenlambda/aeta
testdata/test_modules/sample_package/subpackage/test_ham.py
Python
apache-2.0
825
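Outside of aeta, this sample module also runs under the stock unittest machinery, which is a quick way to confirm that the deliberate failure in test_fail is reported. A sketch, assuming testdata/test_modules is on sys.path so the dotted module name resolves:

import unittest

# Load the sample module by dotted name and run it; test_fail is expected
# to contribute exactly one failure.
suite = unittest.defaultTestLoader.loadTestsFromName(
    "sample_package.subpackage.test_ham")
result = unittest.TextTestRunner(verbosity=2).run(suite)
print("failures:", len(result.failures))  # -> failures: 1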
########
# Copyright (c) 2015 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#        http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
#    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#    * See the License for the specific language governing permissions and
#    * limitations under the License.

from dsl_parser import (constants,
                        models)
from dsl_parser.elements import (imports,
                                 misc,
                                 plugins,
                                 node_types,
                                 node_templates,
                                 relationships,
                                 workflows,
                                 policies,
                                 data_types,
                                 version as _version)
from dsl_parser.framework.elements import Element
from dsl_parser.framework.requirements import Value


class BlueprintVersionExtractor(Element):

    schema = {
        'tosca_definitions_version': _version.ToscaDefinitionsVersion,
        # here so it gets version validated
        'dsl_definitions': misc.DSLDefinitions,
    }
    requires = {
        _version.ToscaDefinitionsVersion: ['version', Value('plan_version')]
    }

    def parse(self, version, plan_version):
        return {
            'version': version,
            'plan_version': plan_version
        }


class BlueprintImporter(Element):

    schema = {
        'imports': imports.ImportsLoader,
    }
    requires = {
        imports.ImportsLoader: ['resource_base']
    }

    def parse(self, resource_base):
        return {
            'merged_blueprint': self.child(imports.ImportsLoader).value,
            'resource_base': resource_base
        }


class Blueprint(Element):

    schema = {
        'tosca_definitions_version': _version.ToscaDefinitionsVersion,
        'description': misc.Description,
        'imports': imports.Imports,
        'dsl_definitions': misc.DSLDefinitions,
        'metadata': misc.Metadata,
        'inputs': misc.Inputs,
        'plugins': plugins.Plugins,
        'node_types': node_types.NodeTypes,
        'relationships': relationships.Relationships,
        'node_templates': node_templates.NodeTemplates,
        'policy_types': policies.PolicyTypes,
        'policy_triggers': policies.PolicyTriggers,
        'groups': policies.Groups,
        'policies': policies.Policies,
        'workflows': workflows.Workflows,
        'outputs': misc.Outputs,
        'data_types': data_types.DataTypes
    }

    requires = {
        node_templates.NodeTemplates: ['deployment_plugins_to_install'],
        workflows.Workflows: ['workflow_plugins_to_install'],
        policies.Policies: ['scaling_groups']
    }

    def parse(self, workflow_plugins_to_install, deployment_plugins_to_install,
              scaling_groups):
        return models.Plan({
            constants.DESCRIPTION: self.child(misc.Description).value,
            constants.METADATA: self.child(misc.Metadata).value,
            constants.NODES: self.child(node_templates.NodeTemplates).value,
            constants.RELATIONSHIPS: self.child(
                relationships.Relationships).value,
            constants.WORKFLOWS: self.child(workflows.Workflows).value,
            constants.POLICY_TYPES: self.child(policies.PolicyTypes).value,
            constants.POLICY_TRIGGERS: self.child(
                policies.PolicyTriggers).value,
            constants.POLICIES: self.child(policies.Policies).value,
            constants.GROUPS: self.child(policies.Groups).value,
            constants.SCALING_GROUPS: scaling_groups or {},
            constants.INPUTS: self.child(misc.Inputs).value,
            constants.OUTPUTS: self.child(misc.Outputs).value,
            constants.DEPLOYMENT_PLUGINS_TO_INSTALL:
                deployment_plugins_to_install,
            constants.WORKFLOW_PLUGINS_TO_INSTALL:
                workflow_plugins_to_install,
            constants.VERSION: self.child(
                _version.ToscaDefinitionsVersion).value
        })
cloudify-cosmo/cloudify-dsl-parser
dsl_parser/elements/blueprint.py
Python
apache-2.0
4,469
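The Element pattern above generalizes: schema names the child elements, requires declares values computed by other elements, and parse() receives those values and assembles the result. Here is a hypothetical minimal element in the same shape; MySummary is illustrative and not part of dsl_parser, while the referenced Description and ToscaDefinitionsVersion elements come from the file above.

from dsl_parser.elements import misc, version as _version
from dsl_parser.framework.elements import Element


class MySummary(Element):  # hypothetical element, for illustration only

    schema = {
        'description': misc.Description,
    }
    requires = {
        # ask the framework for the 'version' value produced elsewhere
        _version.ToscaDefinitionsVersion: ['version'],
    }

    def parse(self, version):
        # child() gives access to a parsed child element's value
        return {
            'description': self.child(misc.Description).value,
            'version': version,
        }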
from setuptools import setup

setup(name='flightPredict',
      version='0.3',
      description='Python library for the Spark MLlib flight-prediction sample application',
      url='git+https://github.com/ibm-watson-data-lab/simple-data-pipe-connector-flightstats.git',
      author='David Taieb',
      author_email='[email protected]',
      license='Apache 2.0',
      packages=['flightPredict'],
      zip_safe=False)
ibm-cds-labs/simple-data-pipe-connector-flightstats
setup.py
Python
apache-2.0
420
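Because the url field points at a git URL, the natural way to bring this package into an environment is pip's VCS support; the importable name then matches the packages entry. A sketch:

# Install straight from the repository named in the url field above:
#   pip install "git+https://github.com/ibm-watson-data-lab/simple-data-pipe-connector-flightstats.git"
# After installation, the package listed under `packages` is importable:
import flightPredict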
# coding: utf-8

"""
    Kubernetes

    No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)  # noqa: E501

    The version of the OpenAPI document: release-1.23
    Generated by: https://openapi-generator.tech
"""


import pprint
import re  # noqa: F401

import six

from kubernetes.client.configuration import Configuration


class V1beta1PolicyRulesWithSubjects(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        'non_resource_rules': 'list[V1beta1NonResourcePolicyRule]',
        'resource_rules': 'list[V1beta1ResourcePolicyRule]',
        'subjects': 'list[V1beta1Subject]'
    }

    attribute_map = {
        'non_resource_rules': 'nonResourceRules',
        'resource_rules': 'resourceRules',
        'subjects': 'subjects'
    }

    def __init__(self, non_resource_rules=None, resource_rules=None, subjects=None, local_vars_configuration=None):  # noqa: E501
        """V1beta1PolicyRulesWithSubjects - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._non_resource_rules = None
        self._resource_rules = None
        self._subjects = None
        self.discriminator = None

        if non_resource_rules is not None:
            self.non_resource_rules = non_resource_rules
        if resource_rules is not None:
            self.resource_rules = resource_rules
        self.subjects = subjects

    @property
    def non_resource_rules(self):
        """Gets the non_resource_rules of this V1beta1PolicyRulesWithSubjects.  # noqa: E501

        `nonResourceRules` is a list of NonResourcePolicyRules that identify matching requests according to their verb and the target non-resource URL.  # noqa: E501

        :return: The non_resource_rules of this V1beta1PolicyRulesWithSubjects.  # noqa: E501
        :rtype: list[V1beta1NonResourcePolicyRule]
        """
        return self._non_resource_rules

    @non_resource_rules.setter
    def non_resource_rules(self, non_resource_rules):
        """Sets the non_resource_rules of this V1beta1PolicyRulesWithSubjects.

        `nonResourceRules` is a list of NonResourcePolicyRules that identify matching requests according to their verb and the target non-resource URL.  # noqa: E501

        :param non_resource_rules: The non_resource_rules of this V1beta1PolicyRulesWithSubjects.  # noqa: E501
        :type: list[V1beta1NonResourcePolicyRule]
        """

        self._non_resource_rules = non_resource_rules

    @property
    def resource_rules(self):
        """Gets the resource_rules of this V1beta1PolicyRulesWithSubjects.  # noqa: E501

        `resourceRules` is a slice of ResourcePolicyRules that identify matching requests according to their verb and the target resource. At least one of `resourceRules` and `nonResourceRules` has to be non-empty.  # noqa: E501

        :return: The resource_rules of this V1beta1PolicyRulesWithSubjects.  # noqa: E501
        :rtype: list[V1beta1ResourcePolicyRule]
        """
        return self._resource_rules

    @resource_rules.setter
    def resource_rules(self, resource_rules):
        """Sets the resource_rules of this V1beta1PolicyRulesWithSubjects.

        `resourceRules` is a slice of ResourcePolicyRules that identify matching requests according to their verb and the target resource. At least one of `resourceRules` and `nonResourceRules` has to be non-empty.  # noqa: E501

        :param resource_rules: The resource_rules of this V1beta1PolicyRulesWithSubjects.  # noqa: E501
        :type: list[V1beta1ResourcePolicyRule]
        """

        self._resource_rules = resource_rules

    @property
    def subjects(self):
        """Gets the subjects of this V1beta1PolicyRulesWithSubjects.  # noqa: E501

        subjects is the list of normal user, serviceaccount, or group that this rule cares about. There must be at least one member in this slice. A slice that includes both the system:authenticated and system:unauthenticated user groups matches every request. Required.  # noqa: E501

        :return: The subjects of this V1beta1PolicyRulesWithSubjects.  # noqa: E501
        :rtype: list[V1beta1Subject]
        """
        return self._subjects

    @subjects.setter
    def subjects(self, subjects):
        """Sets the subjects of this V1beta1PolicyRulesWithSubjects.

        subjects is the list of normal user, serviceaccount, or group that this rule cares about. There must be at least one member in this slice. A slice that includes both the system:authenticated and system:unauthenticated user groups matches every request. Required.  # noqa: E501

        :param subjects: The subjects of this V1beta1PolicyRulesWithSubjects.  # noqa: E501
        :type: list[V1beta1Subject]
        """
        if self.local_vars_configuration.client_side_validation and subjects is None:  # noqa: E501
            raise ValueError("Invalid value for `subjects`, must not be `None`")  # noqa: E501

        self._subjects = subjects

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, V1beta1PolicyRulesWithSubjects):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, V1beta1PolicyRulesWithSubjects):
            return True

        return self.to_dict() != other.to_dict()
kubernetes-client/python
kubernetes/client/models/v1beta1_policy_rules_with_subjects.py
Python
apache-2.0
7,013
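A sketch of building this generated model by hand. Per the setter above, subjects is the only field validated as required; V1beta1Subject, V1beta1GroupSubject, and V1beta1NonResourcePolicyRule are assumed to be the sibling generated models exposed by kubernetes.client in the same release.

from kubernetes.client import (V1beta1GroupSubject,
                               V1beta1NonResourcePolicyRule,
                               V1beta1PolicyRulesWithSubjects,
                               V1beta1Subject)

rules = V1beta1PolicyRulesWithSubjects(
    subjects=[V1beta1Subject(kind="Group",
                             group=V1beta1GroupSubject(name="system:authenticated"))],
    non_resource_rules=[V1beta1NonResourcePolicyRule(verbs=["get"],
                                                     non_resource_urls=["/healthz"])])

# to_dict() keys use the python attribute names; attribute_map is applied
# later by the ApiClient when serializing to camelCase JSON.
print(rules.to_dict())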
class ActionError(RuntimeError):
    """
    Raise when something went wrong when doing an action.
    """


class NoValidSCVError(ActionError):
    """
    Raise when no valid scv can be selected according to defined rules.
    """


class NoValidBuildingLocationError(ActionError):
    """
    Raise when no valid location to build a building is found in the
    current screen.
    """


class NoUnitError(ActionError):
    """
    Raise when a unit or building is not present on screen whereas a
    function is asked to find one.
    """


class NoValidBuildingPointError(ActionError):
    """
    TODO: give a more precise description than the name of the error
    """
Xaxetrov/OSCAR
oscar/meta_action/meta_action_error.py
Python
apache-2.0
667
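Because every class above derives from ActionError, callers can catch the whole family at once or pick out specific cases. A hypothetical consumer (select_scv is an illustrative stand-in for an OSCAR meta-action; the import path follows the record above):

from oscar.meta_action.meta_action_error import ActionError, NoValidSCVError

def select_scv():
    # illustrative failure: no SCV matched the selection rules
    raise NoValidSCVError("no idle SCV matched the selection rules")

try:
    select_scv()
except NoValidSCVError:
    print("handle the specific case: fall back to a busy SCV")
except ActionError:
    # catches every other subclass, since all derive from ActionError,
    # which in turn derives from RuntimeError
    print("handle any other action failure generically")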
#!/usr/bin/env python
# encoding: utf-8
"""
populate_rango.py

Created by Luis C. Berrocal on 2013-10-20.
Copyright (c) 2013 __MyCompanyName__. All rights reserved.
"""
import os
import sys


def populate():
    python_cat = add_cat('Python', 128, 64)

    add_page(cat=python_cat,
             title="Official Python Tutorial",
             url="http://docs.python.org/2/tutorial/")

    add_page(cat=python_cat,
             title="How to Think like a Computer Scientist",
             url="http://www.greenteapress.com/thinkpython/")

    add_page(cat=python_cat,
             title="Learn Python in 10 Minutes",
             url="http://www.korokithakis.net/tutorials/python/")

    django_cat = add_cat("Django", 64, 32)

    add_page(cat=django_cat,
             title="Official Django Tutorial",
             url="https://docs.djangoproject.com/en/1.5/intro/tutorial01/")

    add_page(cat=django_cat,
             title="Django Rocks",
             url="http://www.djangorocks.com/")

    add_page(cat=django_cat,
             title="How to Tango with Django",
             url="http://www.tangowithdjango.com/")

    frame_cat = add_cat("Other Frameworks", 32, 16)

    add_page(cat=frame_cat,
             title="Bottle",
             url="http://bottlepy.org/docs/dev/")

    add_page(cat=frame_cat,
             title="Flask",
             url="http://flask.pocoo.org")

    # Print out what we have added to the user.
    for c in Category.objects.all():
        for p in Page.objects.filter(category=c):
            print "- {0} - {1}".format(str(c), str(p))


def add_page(cat, title, url, views=0):
    p = Page.objects.get_or_create(category=cat, title=title, url=url,
                                   views=views)[0]
    return p


def add_cat(name, views, likes):
    c = Category.objects.get_or_create(name=name, views=views, likes=likes)[0]
    return c


# Start execution here!
if __name__ == '__main__':
    print "Starting Rango population script..."
    os.environ.setdefault('DJANGO_SETTINGS_MODULE',
                          'tango_with_django_project.settings')
    from rango.models import Category, Page
    populate()
luiscberrocal/rango_tutorial
populate_rango.py
Python
apache-2.0
2,007
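The get_or_create calls are what make this script safe to re-run: a second invocation finds the existing rows instead of inserting duplicates. A sketch, assuming a configured Django environment as set up in the script's __main__ block:

from rango.models import Category

# get_or_create returns (object, created) and only inserts when no
# matching row exists, so repeated calls are idempotent.
c1, created1 = Category.objects.get_or_create(name='Python', views=128, likes=64)
c2, created2 = Category.objects.get_or_create(name='Python', views=128, likes=64)
assert c2.pk == c1.pk  # the second call found the same row
assert not created2    # ...and did not create a duplicate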
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """A class to store named variables and a scope operator to manage sharing.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections as collections_lib import copy import enum # pylint: disable=g-bad-import-order import functools import sys import threading import traceback import six from six import iteritems from six.moves import xrange # pylint: disable=redefined-builtin from tensorflow.python import tf2 from tensorflow.python.eager import context from tensorflow.python.eager import monitoring from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.ops import array_ops from tensorflow.python.ops import init_ops from tensorflow.python.ops import resource_variable_ops from tensorflow.python.ops import variables from tensorflow.python.platform import tf_logging as logging from tensorflow.python.util import deprecation from tensorflow.python.util import function_utils from tensorflow.python.util import tf_contextlib from tensorflow.python.util import tf_inspect from tensorflow.python.util.tf_export import tf_export __all__ = [ "AUTO_REUSE", "VariableScope", "get_variable_scope", "get_variable", "get_local_variable", "variable_scope", "variable_op_scope", "no_regularizer", "VariableSynchronization", "VariableAggregation" ] _api_usage_gauge = monitoring.BoolGauge( "/tensorflow/api/resource_variables", "Whether variable_scope.enable_resource_variables() is called.") class _PartitionInfo(object): """Holds partition info used by initializer functions.""" def __init__(self, full_shape, var_offset): """Constructor. Args: full_shape: Tuple or list of `int` indicating the full combined shape of the partitioned variables. var_offset: Tuple or list of `int` specifying offset of this partition with respect to the full variable for each dimension. Raises: TypeError: If `full_shape` or `var_offset` is not a sequence. ValueError: If `full_shape` or `var_offset` differ in length. If `var_offset` exceeds `full_shape` in any dimension. 
""" if not isinstance(full_shape, collections_lib.Sequence) or isinstance( full_shape, six.string_types): raise TypeError( "`full_shape` must be a sequence (like tuple or list) instead of " + type(full_shape).__name__) if not isinstance(var_offset, collections_lib.Sequence) or isinstance( var_offset, six.string_types): raise TypeError( "`var_offset` must be a sequence (like tuple or list) instead of " + type(var_offset).__name__) if len(var_offset) != len(full_shape): raise ValueError( "Expected equal length, but `var_offset` is of length {} while " "full_shape is of length {}.".format( len(var_offset), len(full_shape))) for i in xrange(len(full_shape)): offset = var_offset[i] shape = full_shape[i] if offset < 0 or offset >= shape: raise ValueError( "Expected 0 <= offset < shape but found offset={}, shape={} for " "var_offset={}, full_shape={}".format(offset, shape, var_offset, full_shape)) self._full_shape = full_shape self._var_offset = var_offset @property def full_shape(self): return self._full_shape @property def var_offset(self): return self._var_offset def single_offset(self, shape): """Returns the offset when the variable is partitioned in at most one dim. Args: shape: Tuple or list of `int` indicating the shape of one specific variable partition. Returns: `int` representing the offset in the dimension along which the variable is partitioned. Returns 0 if the variable is not being partitioned. Raises: ValueError: Depending on self.single_slice_dim(). """ single_slice_dim = self.single_slice_dim(shape) # If this variable is not being partitioned at all, single_slice_dim() could # return None. if single_slice_dim is None: return 0 return self.var_offset[single_slice_dim] def single_slice_dim(self, shape): """Returns the slice dim when the variable is partitioned only in one dim. Args: shape: Tuple or list of `int` indicating the shape of one specific variable partition. Returns: `int` representing the dimension that the variable is partitioned in, or `None` if the variable doesn't seem to be partitioned at all. Raises: TypeError: If `shape` is not a sequence. ValueError: If `shape` is not the same length as `self.full_shape`. If the variable is partitioned in more than one dimension. """ if not isinstance(shape, collections_lib.Sequence) or isinstance( shape, six.string_types): raise TypeError( "`shape` must be a sequence (like tuple or list) instead of " + type(shape).__name__) if len(shape) != len(self.full_shape): raise ValueError( "Expected equal length, but received shape={} of length {} while " "self.full_shape={} is of length {}.".format(shape, len(shape), self.full_shape, len(self.full_shape))) for i in xrange(len(shape)): if self.var_offset[i] + shape[i] > self.full_shape[i]: raise ValueError( "With self.var_offset={}, a partition of shape={} would exceed " "self.full_shape={} in dimension {}.".format( self.var_offset, shape, self.full_shape, i)) slice_dim = None for i in xrange(len(shape)): if shape[i] == self.full_shape[i]: continue if slice_dim is not None: raise ValueError( "Cannot use single_slice_dim() with shape={} and " "self.full_shape={} since slice dim could be either dimension {} " "or {}.".format(shape, self.full_shape, i, slice_dim)) slice_dim = i return slice_dim class _ReuseMode(enum.Enum): """Mode for variable access within a variable scope.""" # Indicates that variables are to be fetched if they already exist or # otherwise created. AUTO_REUSE = 1 # TODO(alive): For TensorFlow 2.0, Deprecate True/False/None API in favor of # enum values. 
# REUSE_FALSE = 2 # REUSE_TRUE = 3 # TODO(apassos) remove these forwarding symbols. VariableSynchronization = variables.VariableSynchronization # pylint: disable=invalid-name VariableAggregation = variables.VariableAggregation # pylint: disable=invalid-name AUTO_REUSE = _ReuseMode.AUTO_REUSE tf_export(v1=["AUTO_REUSE"]).export_constant(__name__, "AUTO_REUSE") AUTO_REUSE.__doc__ = """ When passed in as the value for the `reuse` flag, AUTO_REUSE indicates that get_variable() should create the requested variable if it doesn't exist or, if it does exist, simply return it. """ _DEFAULT_USE_RESOURCE = tf2.enabled() @tf_export(v1=["enable_resource_variables"]) def enable_resource_variables(): """Creates resource variables by default. Resource variables are improved versions of TensorFlow variables with a well-defined memory model. Accessing a resource variable reads its value, and all ops which access a specific read value of the variable are guaranteed to see the same value for that tensor. Writes which happen after a read (by having a control or data dependency on the read) are guaranteed not to affect the value of the read tensor, and similarly writes which happen before a read are guaranteed to affect the value. No guarantees are made about unordered read/write pairs. Calling tf.enable_resource_variables() lets you opt-in to this TensorFlow 2.0 feature. """ global _DEFAULT_USE_RESOURCE _DEFAULT_USE_RESOURCE = True _api_usage_gauge.get_cell().set(True) @tf_export(v1=["resource_variables_enabled"]) def resource_variables_enabled(): """Returns `True` if resource variables are enabled. Resource variables are improved versions of TensorFlow variables with a well-defined memory model. Accessing a resource variable reads its value, and all ops which access a specific read value of the variable are guaranteed to see the same value for that tensor. Writes which happen after a read (by having a control or data dependency on the read) are guaranteed not to affect the value of the read tensor, and similarly writes which happen before a read are guaranteed to affect the value. No guarantees are made about unordered read/write pairs. Calling tf.enable_resource_variables() lets you opt-in to this TensorFlow 2.0 feature. """ global _DEFAULT_USE_RESOURCE return _DEFAULT_USE_RESOURCE @deprecation.deprecated( None, "non-resource variables are not supported in the long term") @tf_export(v1=["disable_resource_variables"]) def disable_resource_variables(): """Opts out of resource variables. If your code needs tf.disable_resource_variables() to be called to work properly please file a bug. """ global _DEFAULT_USE_RESOURCE _DEFAULT_USE_RESOURCE = False _api_usage_gauge.get_cell().set(False) class _VariableStore(object): """Variable store that carries a number of named Variables. New variable names and new variables can be created; all stored variables are initialized with the initializer passed to __init__. Attributes: vars: a dictionary with string names (same as passed in GetVar) as keys and the corresponding TensorFlow Variables as values. """ def __init__(self): """Create a variable store.""" self._vars = {} # A dictionary of the stored TensorFlow variables. self._partitioned_vars = {} # A dict of the stored PartitionedVariables. 
self._store_eager_variables = False def get_variable(self, name, shape=None, dtype=dtypes.float32, initializer=None, regularizer=None, reuse=None, trainable=None, collections=None, caching_device=None, partitioner=None, validate_shape=True, use_resource=None, custom_getter=None, constraint=None, synchronization=VariableSynchronization.AUTO, aggregation=VariableAggregation.NONE): """Gets an existing variable with these parameters or create a new one. If a variable with the given name is already stored, we return the stored variable. Otherwise, we create a new one. Set `reuse` to `True` when you only want to reuse existing Variables. Set `reuse` to `False` when you only want to create new Variables. Set `reuse` to None (the default) or tf.compat.v1.AUTO_REUSE when you want variables to be created if they don't exist or returned if they do. If initializer is `None` (the default), the default initializer passed in the constructor is used. If that one is `None` too, we use a new `glorot_uniform_initializer`. If initializer is a Tensor, we use it as a value and derive the shape from the initializer. If a partitioner is provided, a `PartitionedVariable` is returned. Accessing this object as a `Tensor` returns the shards concatenated along the partition axis. Some useful partitioners are available. See, e.g., `variable_axis_size_partitioner` and `min_max_variable_partitioner`. Args: name: The name of the new or existing variable. shape: Shape of the new or existing variable. dtype: Type of the new or existing variable (defaults to `DT_FLOAT`). initializer: Initializer for the variable. regularizer: A (Tensor -> Tensor or None) function; the result of applying it on a newly created variable will be added to the collection GraphKeys.REGULARIZATION_LOSSES and can be used for regularization. reuse: a Boolean, None, or tf.AUTO_REUSE. Controls reuse or creation of variables. When eager execution is enabled this argument is always forced to be False. trainable: If `True` also add the variable to the graph collection `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`). `trainable` defaults to `True` unless `synchronization` is set to `ON_READ`. collections: List of graph collections keys to add the `Variable` to. Defaults to `[GraphKeys.GLOBAL_VARIABLES]` (see `tf.Variable`). caching_device: Optional device string or function describing where the Variable should be cached for reading. Defaults to the Variable's device. If not `None`, caches on another device. Typical use is to cache on the device where the Ops using the `Variable` reside, to deduplicate copying through `Switch` and other conditional statements. partitioner: Optional callable that accepts a fully defined `TensorShape` and dtype of the `Variable` to be created, and returns a list of partitions for each axis (currently only one axis can be partitioned). validate_shape: If False, allows the variable to be initialized with a value of unknown shape. If True, the default, the shape of initial_value must be known. use_resource: If False, creates a regular Variable. If True, creates instead an experimental ResourceVariable which has well-defined semantics. Defaults to False (will later change to True). When eager execution is enabled this argument is always forced to be true. custom_getter: Callable that takes as a first argument the true getter, and allows overwriting the internal get_variable method. 
The signature of `custom_getter` should match that of this method, but the most future-proof version will allow for changes: `def custom_getter(getter, *args, **kwargs)`. Direct access to all `get_variable` parameters is also allowed: `def custom_getter(getter, name, *args, **kwargs)`. A simple identity custom getter that simply creates variables with modified names is: ```python def custom_getter(getter, name, *args, **kwargs): return getter(name + '_suffix', *args, **kwargs) ``` constraint: An optional projection function to be applied to the variable after being updated by an `Optimizer` (e.g. used to implement norm constraints or value constraints for layer weights). The function must take as input the unprojected Tensor representing the value of the variable and return the Tensor for the projected value (which must have the same shape). Constraints are not safe to use when doing asynchronous distributed training. synchronization: Indicates when a distributed a variable will be aggregated. Accepted values are constants defined in the class `tf.VariableSynchronization`. By default the synchronization is set to `AUTO` and the current `DistributionStrategy` chooses when to synchronize. If `synchronization` is set to `ON_READ`, `trainable` must not be set to `True`. aggregation: Indicates how a distributed variable will be aggregated. Accepted values are constants defined in the class `tf.VariableAggregation`. Returns: The created or existing `Variable` (or `PartitionedVariable`, if a partitioner was used). Raises: ValueError: when creating a new variable and shape is not declared, when reusing a variable and specifying a conflicting shape, or when violating reuse during variable creation. RuntimeError: when eager execution is enabled and not called from an EagerVariableStore. """ if custom_getter is not None and not callable(custom_getter): raise ValueError("Passed a custom_getter which is not callable: %s" % custom_getter) with ops.init_scope(): if context.executing_eagerly(): # Variable creation and initialization takes place in `init_scope`s; # as such, if an `init_scope` lifts us into the eager context, then we # need to use `ResourceVariable`s. use_resource = True # Note that it's fine to reuse eager variables whose initialization was # lifted from a function-building graph into the eager context (that's why # the following clause is not wrapped in an `init_scope`); lifted variables # are tracked by the graph's `VariableStore`. if context.executing_eagerly(): if not self._store_eager_variables and reuse: raise RuntimeError( "When eager execution is enabled variable reuse is only supported" " when an EagerVariableStore is active. See the documentation on" " EagerVariableStore for example usage.") if self._store_eager_variables: reuse = AUTO_REUSE # If a *_ref type is passed in an error would be triggered further down the # stack. We prevent this using base_dtype to get a non-ref version of the # type, before doing anything else. When _ref types are removed in favor of # resources, this line can be removed. try: dtype = dtype.base_dtype except AttributeError: # .base_dtype not existing means that we will try and use the raw dtype # which was passed in - this might be a NumPy type which is valid. pass # This is the main logic of get_variable. However, custom_getter # may override this logic. So we save it as a callable and pass # it to custom_getter. # Note: the parameters of _true_getter, and their documentation, match # *exactly* item-for-item with the docstring of this method. 
def _true_getter( # pylint: disable=missing-docstring name, shape=None, dtype=dtypes.float32, initializer=None, regularizer=None, reuse=None, trainable=None, collections=None, caching_device=None, partitioner=None, validate_shape=True, use_resource=None, constraint=None, synchronization=VariableSynchronization.AUTO, aggregation=VariableAggregation.NONE): is_scalar = ( shape is not None and isinstance(shape, collections_lib.Sequence) and not shape) # Partitioned variable case if partitioner is not None and not is_scalar: if not callable(partitioner): raise ValueError("Partitioner must be callable, but received: %s" % partitioner) with ops.name_scope(None): return self._get_partitioned_variable( name=name, shape=shape, dtype=dtype, initializer=initializer, regularizer=regularizer, reuse=reuse, trainable=trainable, collections=collections, caching_device=caching_device, partitioner=partitioner, validate_shape=validate_shape, use_resource=use_resource, constraint=constraint, synchronization=synchronization, aggregation=aggregation) # Special case for partitioned variable to allow reuse without having to # specify partitioner. if (reuse is True and partitioner is None and name in self._partitioned_vars): return self._get_partitioned_variable( name=name, shape=shape, dtype=dtype, initializer=initializer, regularizer=regularizer, reuse=reuse, trainable=trainable, collections=collections, caching_device=caching_device, partitioner=None, validate_shape=validate_shape, use_resource=use_resource, constraint=constraint, synchronization=synchronization, aggregation=aggregation) # Single variable case if "%s/part_0" % name in self._vars: raise ValueError( "No partitioner was provided, but a partitioned version of the " "variable was found: %s/part_0. Perhaps a variable of the same " "name was already created with partitioning?" % name) return self._get_single_variable( name=name, shape=shape, dtype=dtype, initializer=initializer, regularizer=regularizer, reuse=reuse, trainable=trainable, collections=collections, caching_device=caching_device, validate_shape=validate_shape, use_resource=use_resource, constraint=constraint, synchronization=synchronization, aggregation=aggregation) synchronization, aggregation, trainable = ( variables.validate_synchronization_aggregation_trainable( synchronization, aggregation, trainable, name)) if custom_getter is not None: # Handle backwards compatibility with getter arguments that were added # to the API after users started writing custom getters. custom_getter_kwargs = { "getter": _true_getter, "name": name, "shape": shape, "dtype": dtype, "initializer": initializer, "regularizer": regularizer, "reuse": reuse, "trainable": trainable, "collections": collections, "caching_device": caching_device, "partitioner": partitioner, "validate_shape": validate_shape, "use_resource": use_resource, "synchronization": synchronization, "aggregation": aggregation, } # `fn_args` and `has_kwargs` can handle functions, `functools.partial`, # `lambda`. 
if ("constraint" in function_utils.fn_args(custom_getter) or function_utils.has_kwargs(custom_getter)): custom_getter_kwargs["constraint"] = constraint return custom_getter(**custom_getter_kwargs) else: return _true_getter( name, shape=shape, dtype=dtype, initializer=initializer, regularizer=regularizer, reuse=reuse, trainable=trainable, collections=collections, caching_device=caching_device, partitioner=partitioner, validate_shape=validate_shape, use_resource=use_resource, constraint=constraint, synchronization=synchronization, aggregation=aggregation) def _get_partitioned_variable(self, name, partitioner, shape=None, dtype=dtypes.float32, initializer=None, regularizer=None, reuse=None, trainable=None, collections=None, caching_device=None, validate_shape=True, use_resource=None, constraint=None, synchronization=VariableSynchronization.AUTO, aggregation=VariableAggregation.NONE): """Gets or creates a sharded variable list with these parameters. The `partitioner` must be a callable that accepts a fully defined `TensorShape` and returns a sequence of integers (the `partitions`). These integers describe how to partition the given sharded `Variable` along the given dimension. That is, `partitions[1] = 3` means split the `Variable` into 3 shards along dimension 1. Currently, sharding along only one axis is supported. If the list of variables with the given name (prefix) is already stored, we return the stored variables. Otherwise, we create a new one. Set `reuse` to `True` when you only want to reuse existing Variables. Set `reuse` to `False` when you only want to create new Variables. Set `reuse` to None (the default) or tf.compat.v1.AUTO_REUSE when you want variables to be created if they don't exist or returned if they do. If initializer is `None` (the default), the default initializer passed in the constructor is used. If that one is `None` too, we use a new `glorot_uniform_initializer`. If initializer is a Tensor, we use it as a value and derive the shape from the initializer. If the initializer is a callable, then it will be called for each shard. Otherwise the initializer should match the shape of the entire sharded Variable, and it will be sliced accordingly for each shard. Some useful partitioners are available. See, e.g., `variable_axis_size_partitioner` and `min_max_variable_partitioner`. Args: name: the name of the new or existing sharded variable. partitioner: Optional callable that accepts a fully defined `TensorShape` and `dtype` of the Variable to be created, and returns a list of partitions for each axis (currently only one axis can be partitioned). shape: shape of the new or existing sharded variable. dtype: type of the new or existing sharded variable (defaults to `DT_FLOAT`). initializer: initializer for the sharded variable. regularizer: a (Tensor -> Tensor or None) function; the result of applying it on a newly created variable will be added to the collection GraphKeys.REGULARIZATION_LOSSES and can be used for regularization. reuse: a Boolean, None, or tf.AUTO_REUSE. Controls reuse or creation of variables. trainable: If `True` also add the variable to the graph collection `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`). collections: List of graph collections keys to add the Variable to. Defaults to `[GraphKeys.GLOBAL_VARIABLES]` (see `tf.Variable`). caching_device: Optional device string or function describing where the Variable should be cached for reading. Defaults to the Variable's device. If not `None`, caches on another device. 
Typical use is to cache on the device where the Ops using the Variable reside, to deduplicate copying through `Switch` and other conditional statements. validate_shape: If False, allows the variable to be initialized with a value of unknown shape. If True, the default, the shape of initial_value must be known. use_resource: If False, creates a regular Variable. If True, creates an experimental ResourceVariable which has well-defined semantics. Defaults to False (will later change to True). constraint: An optional projection function to be applied to the variable after being updated by an `Optimizer` (e.g. used to implement norm constraints or value constraints for layer weights). The function must take as input the unprojected Tensor representing the value of the variable and return the Tensor for the projected value (which must have the same shape). Constraints are not safe to use when doing asynchronous distributed training. synchronization: Indicates when a distributed a variable will be aggregated. Accepted values are constants defined in the class `tf.VariableSynchronization`. By default the synchronization is set to `AUTO` and the current `DistributionStrategy` chooses when to synchronize. If `synchronization` is set to `ON_READ`, `trainable` must not be set to `True`. aggregation: Indicates how a distributed variable will be aggregated. Accepted values are constants defined in the class `tf.VariableAggregation`. Returns: A `PartitionedVariable` object. Raises: ValueError: when creating a new variable and shape is not declared, when reusing a variable and specifying a conflicting shape, when violating reuse during variable creation, or if an existing sharded variable exists for the given name but with different sharding. """ initializing_from_value = initializer is not None and isinstance( initializer, ops.Tensor) if name in self._vars: raise ValueError( "A partitioner was provided, but an unpartitioned version of the " "variable was found: %s. Perhaps a variable of the same name was " "already created without partitioning?" % name) shape = tensor_shape.as_shape(shape) if initializing_from_value: shape = shape.merge_with(initializer.get_shape()) partitions = None if not reuse or partitioner: partitions = _call_partitioner(partitioner, shape, dtype) if name in self._partitioned_vars: if reuse is False: raise ValueError( "Partitioned variable with name %s already exists. Did you mean to " "set reuse=True or reuse=tf.AUTO_REUSE in VarScope?" % name) existing_var = self._partitioned_vars[name] if not shape.is_compatible_with(existing_var.get_shape()): raise ValueError( "Trying to reuse partitioned variable %s, but specified shape %s " "and found shape %s." % (name, shape, existing_var.get_shape())) if not dtype.is_compatible_with(existing_var.dtype): raise ValueError( "Trying to reuse partitioned variable %s, but specified dtype %s " "and found dtype %s." % (name, dtype.name, existing_var.dtype.name)) # pylint: disable=protected-access if (partitions is not None and existing_var._get_partitions() != partitions): raise ValueError( "Trying to reuse partitioned variable %s, but specified partitions " "%s and found partitions %s." % (name, partitions, existing_var._get_partitions())) # pylint: enable=protected-access return existing_var if reuse is True: raise ValueError("PartitionedVariable %s does not exist, or was not " "created with tf.get_variable(). Did you mean to set " "reuse=False or reuse=tf.AUTO_REUSE in VarScope?" 
% name) slice_dim, num_slices = _get_slice_dim_and_num_slices(partitions) if "%s/part_0" % name in self._vars: if "%s/part_%d" % (name, num_slices - 1) not in self._vars: raise ValueError( "Partitioner returned a different partitioning than what was " "already found. Partitioner returned %d shards, and shard " "%s/part_0 was found, but %s/part_%d was not." % (num_slices, name, name, num_slices - 1)) if "%s/part_%d" % (name, num_slices) in self._vars: raise ValueError( "Partitioner returned a different partitioning than what was " "already found. Partitioner returned %d shards, and shard " "%s/part_0 was found, but so was the extra shard %s/part_%d." % (num_slices, name, name, num_slices)) vs = [] for i, (var_offset, var_shape) in enumerate( _iter_slices(shape.as_list(), num_slices, slice_dim)): partition_info = _PartitionInfo( full_shape=shape.as_list(), var_offset=var_offset) var_full_name = "%s/part_%d" % (name, i) with ops.name_scope(var_full_name + "/PartitionedInitializer"): # Create the tensor to initialize the variable with default value. if initializer is None: init, initializing_from_value = self._get_default_initializer( name=name, shape=shape, dtype=dtype) if initializing_from_value: init_shape = None else: init_shape = var_shape elif callable(initializer): init = initializer init_shape = var_shape elif isinstance(initializer, ops.Tensor): init = array_ops.slice(initializer, var_offset, var_shape) # Use the dtype of the given tensor. dtype = init.dtype.base_dtype init_shape = None else: init = ops.convert_to_tensor(initializer, dtype=dtype) init = array_ops.slice(init, var_offset, var_shape) init_shape = None with ops.name_scope(None): var = self._get_single_variable( name=var_full_name, shape=init_shape, dtype=dtype, initializer=init, partition_info=partition_info, regularizer=regularizer, reuse=reuse, trainable=trainable, collections=collections, caching_device=caching_device, validate_shape=validate_shape, use_resource=use_resource, constraint=constraint, synchronization=synchronization, aggregation=aggregation) # pylint: disable=protected-access var._set_save_slice_info( variables.Variable.SaveSliceInfo(name, shape.as_list(), var_offset, var_shape)) vs.append(var) # pylint: enable=protected-access partitioned_var = variables.PartitionedVariable( name=name, shape=shape, dtype=dtype, variable_list=vs, partitions=partitions) if not context.executing_eagerly() or self._store_eager_variables: self._partitioned_vars[name] = partitioned_var return partitioned_var def _get_single_variable(self, name, shape=None, dtype=dtypes.float32, initializer=None, regularizer=None, partition_info=None, reuse=None, trainable=None, collections=None, caching_device=None, validate_shape=True, use_resource=None, constraint=None, synchronization=VariableSynchronization.AUTO, aggregation=VariableAggregation.NONE): """Get or create a single Variable (e.g. a shard or entire variable). See the documentation of get_variable above (ignore partitioning components) for details. Args: name: see get_variable. shape: see get_variable. dtype: see get_variable. initializer: see get_variable. regularizer: see get_variable. partition_info: _PartitionInfo object. reuse: see get_variable. trainable: see get_variable. collections: see get_variable. caching_device: see get_variable. validate_shape: see get_variable. use_resource: see get_variable. constraint: see get_variable. synchronization: see get_variable. aggregation: see get_variable. Returns: A Variable. See documentation of get_variable above. 
Raises: ValueError: See documentation of get_variable above. """ # Set to true if initializer is a constant. initializing_from_value = False if initializer is not None and not callable(initializer): initializing_from_value = True if shape is not None and initializing_from_value: raise ValueError("If initializer is a constant, do not specify shape.") dtype = dtypes.as_dtype(dtype) shape = tensor_shape.as_shape(shape) if name in self._vars: # Here we handle the case when returning an existing variable. if reuse is False: var = self._vars[name] err_msg = ("Variable %s already exists, disallowed." " Did you mean to set reuse=True or " "reuse=tf.AUTO_REUSE in VarScope?" % name) # ResourceVariables don't have an op associated with so no traceback if isinstance(var, resource_variable_ops.ResourceVariable): raise ValueError(err_msg) tb = var.op.traceback[::-1] # Throw away internal tf entries and only take a few lines. In some # cases the traceback can be longer (e.g. if someone uses factory # functions to create variables) so we take more than needed in the # default case. tb = [x for x in tb if "tensorflow/python" not in x[0]][:5] raise ValueError("%s Originally defined at:\n\n%s" % (err_msg, "".join(traceback.format_list(tb)))) found_var = self._vars[name] if not shape.is_compatible_with(found_var.get_shape()): raise ValueError("Trying to share variable %s, but specified shape %s" " and found shape %s." % (name, shape, found_var.get_shape())) if not dtype.is_compatible_with(found_var.dtype): dtype_str = dtype.name found_type_str = found_var.dtype.name raise ValueError("Trying to share variable %s, but specified dtype %s" " and found dtype %s." % (name, dtype_str, found_type_str)) return found_var # The code below handles only the case of creating a new variable. if reuse is True: raise ValueError("Variable %s does not exist, or was not created with " "tf.get_variable(). Did you mean to set " "reuse=tf.AUTO_REUSE in VarScope?" % name) # Create the tensor to initialize the variable with default value. if initializer is None: initializer, initializing_from_value = self._get_default_initializer( name=name, shape=shape, dtype=dtype) # Enter an init scope when creating the initializer. with ops.init_scope(): if initializing_from_value: init_val = initializer variable_dtype = None else: # Instantiate initializer if provided initializer is a type object. if tf_inspect.isclass(initializer): initializer = initializer() if shape is not None and shape.is_fully_defined(): init_val = lambda: initializer( # pylint: disable=g-long-lambda shape.as_list(), dtype=dtype, partition_info=partition_info) variable_dtype = dtype.base_dtype elif len(tf_inspect.getargspec(initializer).args) == len( tf_inspect.getargspec(initializer).defaults or []): init_val = initializer variable_dtype = None else: raise ValueError("The initializer passed is not valid. It should " "be a callable with no arguments and the " "shape should not be provided or an instance of " "`tf.keras.initializers.*' and `shape` should be " "fully defined.") # Create the variable. if use_resource is None: # Set the default value if unspecified. 
use_resource = _DEFAULT_USE_RESOURCE v = variables.VariableV1( initial_value=init_val, name=name, trainable=trainable, collections=collections, caching_device=caching_device, dtype=variable_dtype, validate_shape=validate_shape, constraint=constraint, use_resource=use_resource, synchronization=synchronization, aggregation=aggregation) if context.executing_eagerly() and self._store_eager_variables: if collections: ops.add_to_collections(collections, v) else: ops.add_to_collection(ops.GraphKeys.GLOBAL_VARIABLES, v) if trainable: ops.add_to_collection(ops.GraphKeys.TRAINABLE_VARIABLES, v) if not context.executing_eagerly() or self._store_eager_variables: # In eager mode we do not want to keep default references to Variable # objects as this will prevent their memory from being released. self._vars[name] = v logging.vlog(1, "Created variable %s with shape %s and init %s", v.name, format(shape), initializer) # Run the regularizer if requested and save the resulting loss. if regularizer: with ops.colocate_with(v): with ops.name_scope(name + "/Regularizer/"): with ops.init_scope(): loss = regularizer(v) if loss is not None: if context.executing_eagerly(): v_name = "v_%s" % type(v) loss_name = "loss_%s" % type(loss) else: v_name = v.name loss_name = loss.name logging.vlog( 1, "Applied regularizer to %s and added the result %s " "to REGULARIZATION_LOSSES.", v_name, loss_name) ops.add_to_collection(ops.GraphKeys.REGULARIZATION_LOSSES, loss) return v # Initialize variable when no initializer provided def _get_default_initializer(self, name, shape=None, dtype=dtypes.float32): """Provide a default initializer and a corresponding value. Args: name: see get_variable. shape: see get_variable. dtype: see get_variable. Returns: initializer and initializing_from_value. See get_variable above. Raises: ValueError: When giving unsupported dtype. """ del shape # If dtype is DT_FLOAT, provide a uniform unit scaling initializer if dtype.is_floating: initializer = init_ops.glorot_uniform_initializer() initializing_from_value = False # If dtype is DT_INT/DT_UINT, provide a default value `zero` # If dtype is DT_BOOL, provide a default value `FALSE` elif (dtype.is_integer or dtype.is_unsigned or dtype.is_bool or dtype == dtypes.string): initializer = init_ops.zeros_initializer() initializing_from_value = False # NOTES:Do we need to support for handling DT_STRING and DT_COMPLEX here? else: raise ValueError("An initializer for variable %s of %s is required" % (name, dtype.base_dtype)) return initializer, initializing_from_value # To stop regularization, use this regularizer @tf_export(v1=["no_regularizer"]) def no_regularizer(_): """Use this function to prevent regularization of variables.""" return None # TODO(alive): support caching devices and partitioned variables in Eager mode. @tf_export(v1=["VariableScope"]) class VariableScope(object): """Variable scope object to carry defaults to provide to `get_variable`. Many of the arguments we need for `get_variable` in a variable store are most easily handled with a context. This object is used for the defaults. Attributes: name: name of the current scope, used as prefix in get_variable. initializer: default initializer passed to get_variable. regularizer: default regularizer passed to get_variable. reuse: Boolean, None, or tf.compat.v1.AUTO_REUSE, setting the reuse in get_variable. When eager execution is enabled this argument is always forced to be False. caching_device: string, callable, or None: the caching device passed to get_variable. 
partitioner: callable or `None`: the partitioner passed to `get_variable`. custom_getter: default custom getter passed to get_variable. name_scope: The name passed to `tf.name_scope`. dtype: default type passed to get_variable (defaults to DT_FLOAT). use_resource: if False, create a normal Variable; if True create an experimental ResourceVariable with well-defined semantics. Defaults to False (will later change to True). When eager execution is enabled this argument is always forced to be True. constraint: An optional projection function to be applied to the variable after being updated by an `Optimizer` (e.g. used to implement norm constraints or value constraints for layer weights). The function must take as input the unprojected Tensor representing the value of the variable and return the Tensor for the projected value (which must have the same shape). Constraints are not safe to use when doing asynchronous distributed training. """ def __init__(self, reuse, name="", initializer=None, regularizer=None, caching_device=None, partitioner=None, custom_getter=None, name_scope="", dtype=dtypes.float32, use_resource=None, constraint=None): """Creates a new VariableScope with the given properties.""" self._name = name self._initializer = initializer self._regularizer = regularizer self._reuse = reuse self._caching_device = caching_device self._partitioner = partitioner self._custom_getter = custom_getter self._name_scope = name_scope self._dtype = dtype self._use_resource = use_resource self._constraint = constraint if context.executing_eagerly(): if self._caching_device is not None: raise NotImplementedError("Caching devices is not yet supported " "when eager execution is enabled.") self._reuse = AUTO_REUSE self._use_resource = True @property def name(self): return self._name @property def original_name_scope(self): return self._name_scope @property def reuse(self): return self._reuse @property def initializer(self): return self._initializer @property def dtype(self): return self._dtype @property def use_resource(self): return self._use_resource @property def regularizer(self): return self._regularizer @property def caching_device(self): return self._caching_device @property def partitioner(self): return self._partitioner @property def custom_getter(self): return self._custom_getter @property def constraint(self): return self._constraint def reuse_variables(self): """Reuse variables in this scope.""" self._reuse = True def set_initializer(self, initializer): """Set initializer for this scope.""" self._initializer = initializer def set_dtype(self, dtype): """Set data type for this scope.""" self._dtype = dtype def set_use_resource(self, use_resource): """Sets whether to use ResourceVariables for this scope.""" if context.executing_eagerly() and not use_resource: raise ValueError("When eager execution is enabled, " "use_resource cannot be set to false.") self._use_resource = use_resource def set_regularizer(self, regularizer): """Set regularizer for this scope.""" self._regularizer = regularizer def set_caching_device(self, caching_device): """Set caching_device for this scope.""" if context.executing_eagerly(): raise NotImplementedError("Caching devices are not yet supported " "when eager execution is enabled.") self._caching_device = caching_device def set_partitioner(self, partitioner): """Set partitioner for this scope.""" self._partitioner = partitioner def set_custom_getter(self, custom_getter): """Set custom getter for this scope.""" self._custom_getter = custom_getter def 
get_collection(self, name):
    """Get this scope's variables."""
    scope = self._name + "/" if self._name else ""
    return ops.get_collection(name, scope)

  def trainable_variables(self):
    """Get this scope's trainable variables."""
    return self.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)

  def global_variables(self):
    """Get this scope's global variables."""
    return self.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)

  def local_variables(self):
    """Get this scope's local variables."""
    return self.get_collection(ops.GraphKeys.LOCAL_VARIABLES)

  def get_variable(self,
                   var_store,
                   name,
                   shape=None,
                   dtype=None,
                   initializer=None,
                   regularizer=None,
                   reuse=None,
                   trainable=None,
                   collections=None,
                   caching_device=None,
                   partitioner=None,
                   validate_shape=True,
                   use_resource=None,
                   custom_getter=None,
                   constraint=None,
                   synchronization=VariableSynchronization.AUTO,
                   aggregation=VariableAggregation.NONE):
    """Gets an existing variable with this name or creates a new one."""
    if regularizer is None:
      regularizer = self._regularizer
    if caching_device is None:
      caching_device = self._caching_device
    if partitioner is None:
      partitioner = self._partitioner
    if custom_getter is None:
      custom_getter = self._custom_getter
    if context.executing_eagerly():
      reuse = False
      use_resource = True
    else:
      if reuse is None:
        reuse = self._reuse
      if use_resource is None:
        use_resource = self._use_resource

    full_name = self.name + "/" + name if self.name else name
    # Variable names only depend on variable_scope (full_name here),
    # not name_scope, so we reset it below for the time of variable creation.
    with ops.name_scope(None):
      # Check that `initializer` dtype and `dtype` are consistent before
      # replacing them with defaults.
      if (dtype is not None and initializer is not None and
          not callable(initializer)):
        init_dtype = ops.convert_to_tensor(initializer).dtype.base_dtype
        if init_dtype != dtype:
          raise ValueError("Initializer type '%s' and explicit dtype '%s' "
                           "don't match." % (init_dtype, dtype))
      if initializer is None:
        initializer = self._initializer
      if constraint is None:
        constraint = self._constraint
      if dtype is None:
        dtype = self._dtype
      return var_store.get_variable(
          full_name,
          shape=shape,
          dtype=dtype,
          initializer=initializer,
          regularizer=regularizer,
          reuse=reuse,
          trainable=trainable,
          collections=collections,
          caching_device=caching_device,
          partitioner=partitioner,
          validate_shape=validate_shape,
          use_resource=use_resource,
          custom_getter=custom_getter,
          constraint=constraint,
          synchronization=synchronization,
          aggregation=aggregation)

  def _get_partitioned_variable(self,
                                var_store,
                                name,
                                shape=None,
                                dtype=None,
                                initializer=None,
                                regularizer=None,
                                trainable=None,
                                collections=None,
                                caching_device=None,
                                partitioner=None,
                                validate_shape=True,
                                use_resource=None,
                                constraint=None,
                                synchronization=VariableSynchronization.AUTO,
                                aggregation=VariableAggregation.NONE):
    """Gets an existing variable with this name or creates a new one."""
    if initializer is None:
      initializer = self._initializer
    if regularizer is None:
      regularizer = self._regularizer
    if constraint is None:
      constraint = self._constraint
    if caching_device is None:
      caching_device = self._caching_device
    if partitioner is None:
      partitioner = self._partitioner
    if dtype is None:
      dtype = self._dtype
    if use_resource is None:
      use_resource = self._use_resource

    if self._custom_getter is not None:
      raise ValueError(
          "Private access to _get_partitioned_variable is not allowed when "
          "a custom getter is set. Current custom getter: %s. "
          "It is likely that you're using create_partitioned_variables. "
          "If so, consider instead using get_variable with a non-empty "
          "partitioner parameter." % self._custom_getter)

    if partitioner is None:
      raise ValueError("No partitioner was specified")

    # This allows the variable scope name to be used as the variable name if
    # this function is invoked with an empty name arg, for backward
    # compatibility with create_partitioned_variables().
    full_name_list = []
    if self.name:
      full_name_list.append(self.name)
    if name:
      full_name_list.append(name)
    full_name = "/".join(full_name_list)

    # Variable names only depend on variable_scope (full_name here),
    # not name_scope, so we reset it below for the time of variable creation.
    with ops.name_scope(None):
      # pylint: disable=protected-access
      return var_store._get_partitioned_variable(
          full_name,
          shape=shape,
          dtype=dtype,
          initializer=initializer,
          regularizer=regularizer,
          reuse=self.reuse,
          trainable=trainable,
          collections=collections,
          caching_device=caching_device,
          partitioner=partitioner,
          validate_shape=validate_shape,
          use_resource=use_resource,
          constraint=constraint,
          synchronization=synchronization,
          aggregation=aggregation)
      # pylint: enable=protected-access


_VARSTORE_KEY = ("__variable_store",)
_VARSCOPESTORE_KEY = ("__varscope",)


class _VariableScopeStore(threading.local):
  """A thread local store for the current variable scope and scope counts."""

  def __init__(self):
    super(_VariableScopeStore, self).__init__()
    self.current_scope = VariableScope(False)
    self.variable_scopes_count = {}

  def open_variable_scope(self, scope_name):
    if scope_name in self.variable_scopes_count:
      self.variable_scopes_count[scope_name] += 1
    else:
      self.variable_scopes_count[scope_name] = 1

  def close_variable_subscopes(self, scope_name):
    for k in list(self.variable_scopes_count.keys()):
      if scope_name is None or k.startswith(scope_name + "/"):
        self.variable_scopes_count[k] = 0

  def variable_scope_count(self, scope_name):
    return self.variable_scopes_count.get(scope_name, 0)


def get_variable_scope_store():
  """Returns the variable scope store for current thread."""
  scope_store = ops.get_collection(_VARSCOPESTORE_KEY)

  if not scope_store:
    scope_store = _VariableScopeStore()
    ops.add_to_collection(_VARSCOPESTORE_KEY, scope_store)
  else:
    scope_store = scope_store[0]

  return scope_store


@tf_export(v1=["get_variable_scope"])
def get_variable_scope():
  """Returns the current variable scope."""
  return get_variable_scope_store().current_scope


def _get_default_variable_store():
  store = ops.get_collection(_VARSTORE_KEY)
  if store:
    return store[0]
  store = _VariableStore()
  ops.add_to_collection(_VARSTORE_KEY, store)
  return store


@tf_contextlib.contextmanager
def with_variable_store(store):
  store_collection = ops.get_collection_ref(_VARSTORE_KEY)
  old = list(store_collection)
  store_collection[:] = [store]
  try:
    yield
  finally:
    store_collection[:] = old


class EagerVariableStore(object):
  """Wrapper allowing functional layers to be used with eager execution.

  When eager execution is enabled Variables get deleted when they go out of
  scope, and are not stored in global collections by default. A lot of code
  (mostly the functional layers in tf.layers) assumes that variables are kept
  in a global list.

  EagerVariableStore can be used in conjunction with this code to make it
  eager-friendly. For example, to create a dense layer, use:

  ```
    container = tfe.EagerVariableStore()
    for input in dataset_iterator:
      with container.as_default():
        x = tf.compat.v1.layers.dense(input, name="l1")
    print(container.variables)  # Should print the variables used in the layer.
``` """ def __init__(self, store=None): if store is not None: if not store._store_eager_variables: # pylint: disable=protected-access raise ValueError("Cannot construct EagerVariableStore from a " "VariableStore object that does not hold eager " "variables.") self._store = store else: self._store = _VariableStore() self._store._store_eager_variables = True # pylint: disable=protected-access def as_default(self): return with_variable_store(self._store) def variables(self): return sorted(self._store._vars.values(), key=lambda x: x.name) # pylint: disable=protected-access def trainable_variables(self): # pylint: disable=protected-access return sorted([x for x in self._store._vars.values() if x.trainable], key=lambda x: x.name) # pylint: enable=protected-access def non_trainable_variables(self): # pylint: disable=protected-access return sorted([x for x in self._store._vars.values() if not x.trainable], key=lambda x: x.name) # pylint: enable=protected-access def copy(self): """Copy this variable store and all of its contents. Variables contained in this store will be copied over to the new variable store, meaning that they can be modified without affecting the variables in this store. Returns: A new EagerVariableStore instance containing copied variables. """ # pylint: disable=protected-access new_store = EagerVariableStore() for key, var in iteritems(self._store._vars): # Strip device out of variable name. try: index = var.name.index(":") except ValueError: stripped_var_name = var.name else: stripped_var_name = var.name[:index] # Create new variable with same value, name, and "trainable" flag. new_var = resource_variable_ops.ResourceVariable( var.read_value(), name=stripped_var_name, trainable=var.trainable) new_store._store._vars[key] = new_var return new_store # pylint: enable=protected-access # The argument list for get_variable must match arguments to get_local_variable. # So, if you are updating the arguments, also update arguments to # get_local_variable below. @tf_export(v1=["get_variable"]) def get_variable(name, shape=None, dtype=None, initializer=None, regularizer=None, trainable=None, collections=None, caching_device=None, partitioner=None, validate_shape=True, use_resource=None, custom_getter=None, constraint=None, synchronization=VariableSynchronization.AUTO, aggregation=VariableAggregation.NONE): return get_variable_scope().get_variable( _get_default_variable_store(), name, shape=shape, dtype=dtype, initializer=initializer, regularizer=regularizer, trainable=trainable, collections=collections, caching_device=caching_device, partitioner=partitioner, validate_shape=validate_shape, use_resource=use_resource, custom_getter=custom_getter, constraint=constraint, synchronization=synchronization, aggregation=aggregation) get_variable_or_local_docstring = ("""%s %sThis function prefixes the name with the current variable scope and performs reuse checks. See the [Variable Scope How To](https://tensorflow.org/guide/variables) for an extensive description of how reusing works. Here is a basic example: ```python def foo(): with tf.variable_scope("foo", reuse=tf.AUTO_REUSE): v = tf.get_variable("v", [1]) return v v1 = foo() # Creates v. v2 = foo() # Gets the same, existing v. assert v1 == v2 ``` If initializer is `None` (the default), the default initializer passed in the variable scope will be used. If that one is `None` too, a `glorot_uniform_initializer` will be used. The initializer can also be a Tensor, in which case the variable is initialized to this value and shape. 
Similarly, if the regularizer is `None` (the default), the default regularizer
passed in the variable scope will be used (if that is `None` too, then by
default no regularization is performed).

If a partitioner is provided, a `PartitionedVariable` is returned.
Accessing this object as a `Tensor` returns the shards concatenated along
the partition axis.

Some useful partitioners are available. See, e.g.,
`variable_axis_size_partitioner` and `min_max_variable_partitioner`.

Args:
  name: The name of the new or existing variable.
  shape: Shape of the new or existing variable.
  dtype: Type of the new or existing variable (defaults to `DT_FLOAT`).
  initializer: Initializer for the variable if one is created. Can either be
    an initializer object or a Tensor. If it's a Tensor, its shape must be
    known unless validate_shape is False.
  regularizer: A (Tensor -> Tensor or None) function; the result of applying
    it on a newly created variable will be added to the collection
    `tf.GraphKeys.REGULARIZATION_LOSSES` and can be used for regularization.
  %scollections: List of graph collections keys to add the Variable to.
    Defaults to `[%s]` (see `tf.Variable`).
  caching_device: Optional device string or function describing where the
    Variable should be cached for reading. Defaults to the Variable's device.
    If not `None`, caches on another device. Typical use is to cache on the
    device where the Ops using the Variable reside, to deduplicate copying
    through `Switch` and other conditional statements.
  partitioner: Optional callable that accepts a fully defined `TensorShape`
    and `dtype` of the Variable to be created, and returns a list of
    partitions for each axis (currently only one axis can be partitioned).
  validate_shape: If False, allows the variable to be initialized with a
    value of unknown shape. If True, the default, the shape of initial_value
    must be known. For this to be used the initializer must be a Tensor and
    not an initializer object.
  use_resource: If False, creates a regular Variable. If True, creates an
    experimental ResourceVariable instead with well-defined semantics.
    Defaults to False (will later change to True). When eager execution is
    enabled this argument is always forced to be True.
  custom_getter: Callable that takes as a first argument the true getter,
    and allows overwriting the internal get_variable method. The signature
    of `custom_getter` should match that of this method, but the most
    future-proof version will allow for changes:
    `def custom_getter(getter, *args, **kwargs)`. Direct access to all
    `get_variable` parameters is also allowed:
    `def custom_getter(getter, name, *args, **kwargs)`. A simple identity
    custom getter that simply creates variables with modified names is:

    ```python
    def custom_getter(getter, name, *args, **kwargs):
      return getter(name + '_suffix', *args, **kwargs)
    ```
  constraint: An optional projection function to be applied to the variable
    after being updated by an `Optimizer` (e.g. used to implement norm
    constraints or value constraints for layer weights). The function must
    take as input the unprojected Tensor representing the value of the
    variable and return the Tensor for the projected value (which must have
    the same shape). Constraints are not safe to use when doing asynchronous
    distributed training.
  synchronization: Indicates when a distributed variable will be aggregated.
    Accepted values are constants defined in the class
    `tf.VariableSynchronization`. By default the synchronization is set to
    `AUTO` and the current `DistributionStrategy` chooses when to synchronize.
If `synchronization` is set to `ON_READ`, `trainable` must not be set to `True`. aggregation: Indicates how a distributed variable will be aggregated. Accepted values are constants defined in the class `tf.VariableAggregation`. Returns: The created or existing `Variable` (or `PartitionedVariable`, if a partitioner was used). Raises: ValueError: when creating a new variable and shape is not declared, when violating reuse during variable creation, or when `initializer` dtype and `dtype` don't match. Reuse is set inside `variable_scope`. """) get_variable.__doc__ = get_variable_or_local_docstring % ( "Gets an existing variable with these parameters or create a new one.", "", "trainable: If `True` also add the variable to the graph collection\n" " `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).\n ", "GraphKeys.GLOBAL_VARIABLES") # The argument list for get_local_variable must match arguments to get_variable. # So, if you are updating the arguments, also update arguments to get_variable. @tf_export(v1=["get_local_variable"]) def get_local_variable( # pylint: disable=missing-docstring name, shape=None, dtype=None, initializer=None, regularizer=None, trainable=False, # pylint: disable=unused-argument collections=None, caching_device=None, partitioner=None, validate_shape=True, use_resource=None, custom_getter=None, constraint=None, synchronization=VariableSynchronization.AUTO, aggregation=VariableAggregation.NONE): if collections: collections += [ops.GraphKeys.LOCAL_VARIABLES] else: collections = [ops.GraphKeys.LOCAL_VARIABLES] return get_variable( name, shape=shape, dtype=dtype, initializer=initializer, regularizer=regularizer, trainable=False, collections=collections, caching_device=caching_device, partitioner=partitioner, validate_shape=validate_shape, use_resource=use_resource, synchronization=synchronization, aggregation=aggregation, custom_getter=custom_getter, constraint=constraint) get_local_variable.__doc__ = get_variable_or_local_docstring % ( "Gets an existing *local* variable or creates a new one.", "Behavior is the same as in `get_variable`, except that variables are\n" "added to the `LOCAL_VARIABLES` collection and `trainable` is set to\n" "`False`.\n", "", "GraphKeys.LOCAL_VARIABLES") def _get_partitioned_variable(name, shape=None, dtype=None, initializer=None, regularizer=None, trainable=True, collections=None, caching_device=None, partitioner=None, validate_shape=True, use_resource=None, constraint=None, synchronization=VariableSynchronization.AUTO, aggregation=VariableAggregation.NONE): """Gets or creates a sharded variable list with these parameters. The `partitioner` must be a callable that accepts a fully defined `TensorShape` and returns a sequence of integers (the `partitions`). These integers describe how to partition the given sharded `Variable` along the given dimension. That is, `partitions[1] = 3` means split the `Variable` into 3 shards along dimension 1. Currently, sharding along only one axis is supported. If the list of variables with the given name (prefix) is already stored, we return the stored variables. Otherwise, we create a new one. If initializer is `None` (the default), the default initializer passed in the constructor is used. If that one is `None` too, we use a new `glorot_uniform_initializer`. If initializer is a Tensor, we use it as a value and derive the shape from the initializer. If the initializer is a callable, then it will be called for each shard. 
Otherwise the initializer should match the shape of the entire sharded
Variable, and it will be sliced accordingly for each shard.

Some useful partitioners are available. See, e.g.,
`variable_axis_size_partitioner` and `min_max_variable_partitioner`.

  Args:
    name: The name of the new or existing variable.
    shape: Shape of the new or existing variable.
    dtype: Type of the new or existing variable (defaults to `DT_FLOAT`).
    initializer: Initializer for the variable if one is created.
    regularizer: A (Tensor -> Tensor or None) function; the result of applying
      it on a newly created variable will be added to the collection
      GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.
    trainable: If `True` also add the variable to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
    collections: List of graph collections keys to add the Variable to.
      Defaults to `[GraphKeys.GLOBAL_VARIABLES]` (see `tf.Variable`).
    caching_device: Optional device string or function describing where the
      Variable should be cached for reading. Defaults to the Variable's
      device. If not `None`, caches on another device. Typical use is to
      cache on the device where the Ops using the Variable reside, to
      deduplicate copying through `Switch` and other conditional statements.
    partitioner: Optional callable that accepts a fully defined `TensorShape`
      and `dtype` of the Variable to be created, and returns a list of
      partitions for each axis (currently only one axis can be partitioned).
    validate_shape: If False, allows the variable to be initialized with a
      value of unknown shape. If True, the default, the shape of
      initial_value must be known.
    use_resource: If False, creates a regular Variable. If True, creates an
      experimental ResourceVariable instead which has well-defined semantics.
      Defaults to False (will later change to True).
    constraint: An optional projection function to be applied to the variable
      after being updated by an `Optimizer` (e.g. used to implement norm
      constraints or value constraints for layer weights). The function must
      take as input the unprojected Tensor representing the value of the
      variable and return the Tensor for the projected value (which must have
      the same shape). Constraints are not safe to use when doing
      asynchronous distributed training.
    synchronization: Indicates when a distributed variable will be aggregated.
      Accepted values are constants defined in the class
      `tf.VariableSynchronization`. By default the synchronization is set to
      `AUTO` and the current `DistributionStrategy` chooses when to
      synchronize. If `synchronization` is set to `ON_READ`, `trainable` must
      not be set to `True`.
    aggregation: Indicates how a distributed variable will be aggregated.
      Accepted values are constants defined in the class
      `tf.VariableAggregation`.

  Returns:
    A tuple `(shards, partitions)` where `shards` is the list of `Variable`
    shards and `partitions` is the output of the partitioner on the input
    shape.

  Raises:
    ValueError: when creating a new variable and shape is not declared, or
      when violating reuse during variable creation. Reuse is set inside
      `variable_scope`.
  """
  # pylint: disable=protected-access
  scope = get_variable_scope()
  if scope.custom_getter is not None:
    raise ValueError(
        "Private access to _get_partitioned_variable is not allowed when "
        "a custom getter is set. Current custom getter: %s. "
        "It is likely that you're using create_partitioned_variables. "
        "If so, consider instead using get_variable with a non-empty "
        "partitioner parameter." % scope.custom_getter)
  return scope._get_partitioned_variable(
      _get_default_variable_store(),
      name,
      shape=shape,
      dtype=dtype,
      initializer=initializer,
      regularizer=regularizer,
      trainable=trainable,
      collections=collections,
      caching_device=caching_device,
      partitioner=partitioner,
      validate_shape=validate_shape,
      use_resource=use_resource,
      constraint=constraint,
      synchronization=synchronization,
      aggregation=aggregation)
  # pylint: enable=protected-access


# Named like a function for compatibility with the previous
# @tf_contextlib.contextmanager definition.
class _pure_variable_scope(object):  # pylint: disable=invalid-name
  """A context for the variable_scope, see `variable_scope` for docs."""

  def __init__(self,
               name_or_scope,
               reuse=None,
               initializer=None,
               regularizer=None,
               caching_device=None,
               partitioner=None,
               custom_getter=None,
               old_name_scope=None,
               dtype=dtypes.float32,
               use_resource=None,
               constraint=None):
    """Creates a context for the variable_scope, see `variable_scope` for docs.

    Note: this does not create a name scope.

    Args:
      name_or_scope: `string` or `VariableScope`: the scope to open.
      reuse: `True` or None, or tf.compat.v1.AUTO_REUSE; if `None`, we inherit
        the parent scope's reuse flag.
      initializer: default initializer for variables within this scope.
      regularizer: default regularizer for variables within this scope.
      caching_device: default caching device for variables within this scope.
      partitioner: default partitioner for variables within this scope.
      custom_getter: default custom getter for variables within this scope.
      old_name_scope: the original name scope when re-entering a variable
        scope.
      dtype: type of the variables within this scope (defaults to `DT_FLOAT`).
      use_resource: If False, variables in this scope will be regular
        Variables. If True, experimental ResourceVariables will be created
        instead, with well-defined semantics. Defaults to False (will later
        change to True).
      constraint: An optional projection function to be applied to the
        variable after being updated by an `Optimizer` (e.g. used to
        implement norm constraints or value constraints for layer weights).
        The function must take as input the unprojected Tensor representing
        the value of the variable and return the Tensor for the projected
        value (which must have the same shape). Constraints are not safe to
        use when doing asynchronous distributed training.
    """
    self._name_or_scope = name_or_scope
    self._reuse = reuse
    self._initializer = initializer
    self._regularizer = regularizer
    self._caching_device = caching_device
    self._partitioner = partitioner
    self._custom_getter = custom_getter
    self._old_name_scope = old_name_scope
    self._dtype = dtype
    self._use_resource = use_resource
    self._constraint = constraint
    self._var_store = _get_default_variable_store()
    self._var_scope_store = get_variable_scope_store()
    self._last_variable_scope_object = None
    if isinstance(self._name_or_scope, VariableScope):
      self._new_name = self._name_or_scope.name
      name_scope = self._name_or_scope._name_scope  # pylint: disable=protected-access
      # Handler for the case when we jump to a shared scope. We create a new
      # VariableScope (self._var_scope_object) that contains a copy of the
      # provided shared scope, possibly with changed reuse and initializer, if
      # the user requested this.
variable_scope_object = VariableScope( self._name_or_scope.reuse if not self._reuse else self._reuse, name=self._new_name, initializer=self._name_or_scope.initializer, regularizer=self._name_or_scope.regularizer, caching_device=self._name_or_scope.caching_device, partitioner=self._name_or_scope.partitioner, dtype=self._name_or_scope.dtype, custom_getter=self._name_or_scope.custom_getter, name_scope=name_scope, use_resource=self._name_or_scope.use_resource, constraint=self._constraint) if self._initializer is not None: variable_scope_object.set_initializer(self._initializer) if self._regularizer is not None: variable_scope_object.set_regularizer(self._regularizer) if self._caching_device is not None: variable_scope_object.set_caching_device(self._caching_device) if self._partitioner is not None: variable_scope_object.set_partitioner(self._partitioner) if self._custom_getter is not None: variable_scope_object.set_custom_getter( _maybe_wrap_custom_getter(self._custom_getter, self._name_or_scope.custom_getter)) if self._dtype is not None: variable_scope_object.set_dtype(self._dtype) if self._use_resource is not None: variable_scope_object.set_use_resource(self._use_resource) self._cached_variable_scope_object = variable_scope_object def __enter__(self): """Begins the scope block. Returns: A VariableScope. Raises: ValueError: when trying to reuse within a create scope, or create within a reuse scope, or if reuse is not `None` or `True`. TypeError: when the types of some arguments are not appropriate. """ self._old = self._var_scope_store.current_scope if isinstance(self._name_or_scope, VariableScope): self._var_scope_store.open_variable_scope(self._new_name) self._old_subscopes = copy.copy( self._var_scope_store.variable_scopes_count) variable_scope_object = self._cached_variable_scope_object else: # Handler for the case when we just prolong current variable scope. # VariableScope with name extended by the provided one, and inherited # reuse and initializer (except if the user provided values to set). self._new_name = ( self._old.name + "/" + self._name_or_scope if self._old.name else self._name_or_scope) self._reuse = (self._reuse or self._old.reuse) # Re-using is inherited by sub-scopes. 
if self._old_name_scope is None: name_scope = self._name_or_scope else: name_scope = self._old_name_scope variable_scope_object = VariableScope( self._reuse, name=self._new_name, initializer=self._old.initializer, regularizer=self._old.regularizer, caching_device=self._old.caching_device, partitioner=self._old.partitioner, dtype=self._old.dtype, use_resource=self._old.use_resource, custom_getter=self._old.custom_getter, name_scope=name_scope, constraint=self._constraint) if self._initializer is not None: variable_scope_object.set_initializer(self._initializer) if self._regularizer is not None: variable_scope_object.set_regularizer(self._regularizer) if self._caching_device is not None: variable_scope_object.set_caching_device(self._caching_device) if self._partitioner is not None: variable_scope_object.set_partitioner(self._partitioner) if self._custom_getter is not None: variable_scope_object.set_custom_getter( _maybe_wrap_custom_getter(self._custom_getter, self._old.custom_getter)) if self._dtype is not None: variable_scope_object.set_dtype(self._dtype) if self._use_resource is not None: variable_scope_object.set_use_resource(self._use_resource) self._var_scope_store.open_variable_scope(self._new_name) self._var_scope_store.current_scope = variable_scope_object self._last_variable_scope_object = variable_scope_object return variable_scope_object def __exit__(self, type_arg, value_arg, traceback_arg): if (self._var_scope_store.current_scope is not self._last_variable_scope_object): raise RuntimeError("Improper nesting of variable_scope.") # If jumping out from a non-prolonged scope, restore counts. if isinstance(self._name_or_scope, VariableScope): self._var_scope_store.variable_scopes_count = self._old_subscopes else: self._var_scope_store.close_variable_subscopes(self._new_name) self._var_scope_store.current_scope = self._old def _maybe_wrap_custom_getter(custom_getter, old_getter): """Wrap a call to a custom_getter to use the old_getter internally.""" if old_getter is None: return custom_getter # The new custom_getter should call the old one def wrapped_custom_getter(getter, *args, **kwargs): # Call: # custom_getter( # lambda: old_getter(true_getter, ...), *args, **kwargs) # which means custom_getter will call old_getter, which # will call the true_getter, perform any intermediate # processing, and return the results to the current # getter, which will also perform additional processing. return custom_getter(functools.partial(old_getter, getter), *args, **kwargs) return wrapped_custom_getter def _get_unique_variable_scope(prefix): """Get a name with the given prefix unique in the current variable scope.""" var_scope_store = get_variable_scope_store() current_scope = get_variable_scope() name = current_scope.name + "/" + prefix if current_scope.name else prefix if var_scope_store.variable_scope_count(name) == 0: return prefix idx = 1 while var_scope_store.variable_scope_count(name + ("_%d" % idx)) > 0: idx += 1 return prefix + ("_%d" % idx) # Named like a function for backwards compatibility with the # @tf_contextlib.contextmanager version, which was switched to a class to avoid # some object creation overhead. @tf_export(v1=["variable_scope"]) # pylint: disable=invalid-name class variable_scope(object): """A context manager for defining ops that creates variables (layers). This context manager validates that the (optional) `values` are from the same graph, ensures that graph is the default graph, and pushes a name scope and a variable scope. 
  If `name_or_scope` is not None, it is used as is. If `name_or_scope` is None,
  then `default_name` is used. In that case, if the same name has been
  previously used in the same scope, it will be made unique by appending `_N`
  to it.

  Variable scope allows you to create new variables and to share already
  created ones while providing checks so that variables are not created or
  shared by accident. For details, see the
  [Variable Scope How To](https://tensorflow.org/guide/variables); here we
  present only a few basic examples.

  Simple example of how to create a new variable:

  ```python
  with tf.compat.v1.variable_scope("foo"):
    with tf.compat.v1.variable_scope("bar"):
      v = tf.compat.v1.get_variable("v", [1])
      assert v.name == "foo/bar/v:0"
  ```

  Simple example of how to reenter a premade variable scope safely:

  ```python
  with tf.compat.v1.variable_scope("foo") as vs:
    pass

  # Re-enter the variable scope.
  with tf.compat.v1.variable_scope(vs, auxiliary_name_scope=False) as vs1:
    # Restore the original name_scope.
    with tf.name_scope(vs1.original_name_scope):
      v = tf.compat.v1.get_variable("v", [1])
      assert v.name == "foo/v:0"
      c = tf.constant([1], name="c")
      assert c.name == "foo/c:0"
  ```

  Basic example of sharing a variable with AUTO_REUSE:

  ```python
  def foo():
    with tf.compat.v1.variable_scope("foo", reuse=tf.compat.v1.AUTO_REUSE):
      v = tf.compat.v1.get_variable("v", [1])
    return v

  v1 = foo()  # Creates v.
  v2 = foo()  # Gets the same, existing v.
  assert v1 == v2
  ```

  Basic example of sharing a variable with reuse=True:

  ```python
  with tf.compat.v1.variable_scope("foo"):
    v = tf.compat.v1.get_variable("v", [1])
  with tf.compat.v1.variable_scope("foo", reuse=True):
    v1 = tf.compat.v1.get_variable("v", [1])
  assert v1 == v
  ```

  Sharing a variable by capturing a scope and setting reuse:

  ```python
  with tf.compat.v1.variable_scope("foo") as scope:
    v = tf.compat.v1.get_variable("v", [1])
    scope.reuse_variables()
    v1 = tf.compat.v1.get_variable("v", [1])
  assert v1 == v
  ```

  To prevent accidental sharing of variables, we raise an exception when
  getting an existing variable in a non-reusing scope.

  ```python
  with tf.compat.v1.variable_scope("foo"):
    v = tf.compat.v1.get_variable("v", [1])
    v1 = tf.compat.v1.get_variable("v", [1])
    #  Raises ValueError("... v already exists ...").
  ```

  Similarly, we raise an exception when trying to get a variable that does not
  exist in reuse mode.

  ```python
  with tf.compat.v1.variable_scope("foo", reuse=True):
    v = tf.compat.v1.get_variable("v", [1])
    #  Raises ValueError("... v does not exist ...").
  ```

  Note that the `reuse` flag is inherited: if we open a reusing scope, then
  all its sub-scopes become reusing as well.

  A note about name scoping: Setting `reuse` does not impact the naming of
  other ops such as multiplication. See related discussion on
  [github#6189](https://github.com/tensorflow/tensorflow/issues/6189)

  Note that up to and including version 1.0, it was allowed (though explicitly
  discouraged) to pass False to the reuse argument, yielding undocumented
  behaviour slightly different from None. Starting at 1.1.0 passing None and
  False as reuse has exactly the same effect.

  A note about using variable scopes in a multi-threaded environment: Variable
  scopes are thread local, so one thread will not see another thread's current
  scope. Also, when using `default_name`, unique scope names are also
  generated only on a per-thread basis. If the same name was used within a
  different thread, that doesn't prevent a new thread from creating the same
  scope. However, the underlying variable store is shared across threads
  (within the same graph). As such, if another thread tries to create a new
  variable with the same name as a variable created by a previous thread, it
  will fail unless reuse is True.

  Further, each thread starts with an empty variable scope. So if you wish to
  preserve name prefixes from a scope from the main thread, you should capture
  the main thread's scope and re-enter it in each thread. For example:

  ```
  main_thread_scope = variable_scope.get_variable_scope()

  # Thread's target function:
  def thread_target_fn(captured_scope):
    with variable_scope.variable_scope(captured_scope):
      # .... regular code for this thread


  thread = threading.Thread(target=thread_target_fn,
                            args=(main_thread_scope,))
  ```
  """

  def __init__(self,
               name_or_scope,
               default_name=None,
               values=None,
               initializer=None,
               regularizer=None,
               caching_device=None,
               partitioner=None,
               custom_getter=None,
               reuse=None,
               dtype=None,
               use_resource=None,
               constraint=None,
               auxiliary_name_scope=True):
    """Initialize the context manager.

    Args:
      name_or_scope: `string` or `VariableScope`: the scope to open.
      default_name: The default name to use if the `name_or_scope` argument is
        `None`; this name will be uniquified. If name_or_scope is provided it
        won't be used and therefore it is not required and can be None.
      values: The list of `Tensor` arguments that are passed to the op
        function.
      initializer: default initializer for variables within this scope.
      regularizer: default regularizer for variables within this scope.
      caching_device: default caching device for variables within this scope.
      partitioner: default partitioner for variables within this scope.
      custom_getter: default custom getter for variables within this scope.
      reuse: `True`, None, or tf.compat.v1.AUTO_REUSE; if `True`, we go into
        reuse mode for this scope as well as all sub-scopes; if
        tf.compat.v1.AUTO_REUSE, we create variables if they do not exist, and
        return them otherwise; if None, we inherit the parent scope's reuse
        flag. When eager execution is enabled, new variables are always
        created unless an EagerVariableStore or template is currently active.
      dtype: type of variables created in this scope (defaults to the type in
        the passed scope, or inherited from parent scope).
      use_resource: If False, all variables will be regular Variables. If
        True, experimental ResourceVariables with well-defined semantics will
        be used instead. Defaults to False (will later change to True). When
        eager execution is enabled this argument is always forced to be True.
      constraint: An optional projection function to be applied to the
        variable after being updated by an `Optimizer` (e.g. used to
        implement norm constraints or value constraints for layer weights).
        The function must take as input the unprojected Tensor representing
        the value of the variable and return the Tensor for the projected
        value (which must have the same shape). Constraints are not safe to
        use when doing asynchronous distributed training.
      auxiliary_name_scope: If `True`, we create an auxiliary name scope with
        the scope. If `False`, we don't create it. Note that the argument is
        not inherited, and it only takes effect once, when creating. You
        should only use it for re-entering a premade variable scope.

    Returns:
      A scope that can be captured and reused.

    Raises:
      ValueError: when trying to reuse within a create scope, or create within
        a reuse scope.
      TypeError: when the types of some arguments are not appropriate.
    """
    self._name_or_scope = name_or_scope
    self._default_name = default_name
    self._values = values
    self._initializer = initializer
    self._regularizer = regularizer
    self._caching_device = caching_device
    self._partitioner = partitioner
    self._custom_getter = custom_getter
    self._reuse = reuse
    self._dtype = dtype
    self._use_resource = use_resource
    self._constraint = constraint
    if self._default_name is None and self._name_or_scope is None:
      raise TypeError("If default_name is None then name_or_scope is required")
    if self._reuse is False:
      # We don't allow non-inheriting scopes, False = None here.
      self._reuse = None
    if not (self._reuse is True
            or self._reuse is None
            or self._reuse is AUTO_REUSE):
      raise ValueError("The reuse parameter must be True or None or "
                       "tf.AUTO_REUSE.")
    if self._values is None:
      self._values = []
    self._in_graph_mode = not context.executing_eagerly()
    if self._in_graph_mode:
      self._graph = ops._get_graph_from_inputs(self._values)  # pylint: disable=protected-access
    self._cached_pure_variable_scope = None
    self._current_name_scope = None
    if not isinstance(auxiliary_name_scope, bool):
      raise TypeError("The auxiliary_name_scope must be `True` or `False`, "
                      "but got {}".format(auxiliary_name_scope))
    self._auxiliary_name_scope = auxiliary_name_scope

  def __enter__(self):
    # If the default graph is building a function, then we should not replace
    # it with the cached graph.
    if ops.get_default_graph().building_function:
      self._building_function = True
    else:
      self._building_function = False
    if self._in_graph_mode and not self._building_function:
      self._graph_context_manager = self._graph.as_default()
      self._graph_context_manager.__enter__()
    if self._cached_pure_variable_scope is not None:
      # Fast path for re-entering variable_scopes. We've held on to the pure
      # variable scope from a previous successful __enter__, so we avoid some
      # overhead by re-using that object.
      if self._current_name_scope is not None:
        self._current_name_scope.__enter__()
      return self._cached_pure_variable_scope.__enter__()

    try:
      return self._enter_scope_uncached()
    except:
      # Only tear down the graph context on failure; on success it must stay
      # entered until __exit__.
      if (self._in_graph_mode and not self._building_function and
          self._graph_context_manager is not None):
        self._graph_context_manager.__exit__(*sys.exc_info())
      raise

  def _enter_scope_uncached(self):
    """Enters the context manager when there is no cached scope yet.

    Returns:
      The entered variable scope.

    Raises:
      TypeError: A wrong type is passed as `scope` at __init__().
      ValueError: `reuse` is incorrectly set at __init__().
    """
    if self._auxiliary_name_scope:
      # Create a new name scope later
      current_name_scope = None
    else:
      # Reenter the current name scope
      name_scope = ops.get_name_scope()
      if name_scope:
        # Hack to reenter
        name_scope += "/"
        current_name_scope = ops.name_scope(name_scope)
      else:
        # Root scope
        current_name_scope = ops.name_scope(name_scope)

    # IMPORTANT: Only assign to self._cached_pure_variable_scope and
    # self._current_name_scope after successful __enter__() calls.
if self._name_or_scope is not None: if not isinstance(self._name_or_scope, (VariableScope,) + six.string_types): raise TypeError("VariableScope: name_or_scope must be a string or " "VariableScope.") if isinstance(self._name_or_scope, six.string_types): name_scope = self._name_or_scope else: name_scope = self._name_or_scope.name.split("/")[-1] if name_scope or current_name_scope: current_name_scope = current_name_scope or ops.name_scope(name_scope) try: current_name_scope_name = current_name_scope.__enter__() except: current_name_scope.__exit__(*sys.exc_info()) raise self._current_name_scope = current_name_scope if isinstance(self._name_or_scope, six.string_types): old_name_scope = current_name_scope_name else: old_name_scope = self._name_or_scope.original_name_scope pure_variable_scope = _pure_variable_scope( self._name_or_scope, reuse=self._reuse, initializer=self._initializer, regularizer=self._regularizer, caching_device=self._caching_device, partitioner=self._partitioner, custom_getter=self._custom_getter, old_name_scope=old_name_scope, dtype=self._dtype, use_resource=self._use_resource, constraint=self._constraint) try: entered_pure_variable_scope = pure_variable_scope.__enter__() except: pure_variable_scope.__exit__(*sys.exc_info()) raise self._cached_pure_variable_scope = pure_variable_scope return entered_pure_variable_scope else: self._current_name_scope = None # This can only happen if someone is entering the root variable scope. pure_variable_scope = _pure_variable_scope( self._name_or_scope, reuse=self._reuse, initializer=self._initializer, regularizer=self._regularizer, caching_device=self._caching_device, partitioner=self._partitioner, custom_getter=self._custom_getter, dtype=self._dtype, use_resource=self._use_resource, constraint=self._constraint) try: entered_pure_variable_scope = pure_variable_scope.__enter__() except: pure_variable_scope.__exit__(*sys.exc_info()) raise self._cached_pure_variable_scope = pure_variable_scope return entered_pure_variable_scope else: # Here name_or_scope is None. Using default name, but made unique. 
      if self._reuse:
        raise ValueError("reuse=True cannot be used without a name_or_scope")
      current_name_scope = current_name_scope or ops.name_scope(
          self._default_name)
      try:
        current_name_scope_name = current_name_scope.__enter__()
      except:
        current_name_scope.__exit__(*sys.exc_info())
        raise
      self._current_name_scope = current_name_scope
      unique_default_name = _get_unique_variable_scope(self._default_name)
      pure_variable_scope = _pure_variable_scope(
          unique_default_name,
          initializer=self._initializer,
          regularizer=self._regularizer,
          caching_device=self._caching_device,
          partitioner=self._partitioner,
          custom_getter=self._custom_getter,
          old_name_scope=current_name_scope_name,
          dtype=self._dtype,
          use_resource=self._use_resource,
          constraint=self._constraint)
      try:
        entered_pure_variable_scope = pure_variable_scope.__enter__()
      except:
        pure_variable_scope.__exit__(*sys.exc_info())
        raise
      self._cached_pure_variable_scope = pure_variable_scope
      return entered_pure_variable_scope

  def __exit__(self, type_arg, value_arg, traceback_arg):
    self._cached_pure_variable_scope.__exit__(type_arg, value_arg,
                                              traceback_arg)
    if self._current_name_scope:
      self._current_name_scope.__exit__(type_arg, value_arg, traceback_arg)
    if self._in_graph_mode and not self._building_function:
      self._graph_context_manager.__exit__(type_arg, value_arg, traceback_arg)


# pylint: disable=g-doc-return-or-yield
@tf_export(v1=["variable_op_scope"])
@tf_contextlib.contextmanager
def variable_op_scope(values,
                      name_or_scope,
                      default_name=None,
                      initializer=None,
                      regularizer=None,
                      caching_device=None,
                      partitioner=None,
                      custom_getter=None,
                      reuse=None,
                      dtype=None,
                      use_resource=None,
                      constraint=None):
  """Deprecated: context manager for defining an op that creates variables."""
  logging.warn("tf.variable_op_scope(values, name, default_name) is deprecated,"
               " use tf.variable_scope(name, default_name, values)")
  with variable_scope(
      name_or_scope,
      default_name=default_name,
      values=values,
      initializer=initializer,
      regularizer=regularizer,
      caching_device=caching_device,
      partitioner=partitioner,
      custom_getter=custom_getter,
      reuse=reuse,
      dtype=dtype,
      use_resource=use_resource,
      constraint=constraint) as scope:
    yield scope


def _call_partitioner(partitioner, shape, dtype):
  """Call partitioner validating its inputs/output.

  Args:
    partitioner: a function mapping `Tensor` shape and dtype to a list of
      partitions.
    shape: shape of the `Tensor` to partition, must have at least one
      dimension (the code below checks `shape.ndims < 1`).
    dtype: dtype of the elements in the `Tensor`.

  Returns:
    A list of integers >= 1, with at most one element > 1. The index of that
    element (if any) corresponds to the partitioning axis.
  """
  if not shape.is_fully_defined():
    raise ValueError("Shape of a new partitioned variable must be "
                     "fully defined, but instead was %s." % (shape,))
  if shape.ndims < 1:
    raise ValueError("A partitioned Variable must have rank at least 1, "
                     "shape: %s" % shape)

  slicing = partitioner(shape=shape, dtype=dtype)
  if not isinstance(slicing, collections_lib.Sequence):
    raise ValueError("Partitioner must return a sequence, but saw: %s" %
                     slicing)
  if len(slicing) != shape.ndims:
    raise ValueError(
        "Partitioner returned a partition list that does not match the "
        "Variable's rank: %s vs. %s" % (slicing, shape))
  if any(p < 1 for p in slicing):
    raise ValueError("Partitioner returned zero partitions for some axes: %s" %
                     slicing)
  if sum(p > 1 for p in slicing) > 1:
    raise ValueError("Can only slice a variable along one dimension: "
                     "shape: %s, partitioning: %s" % (shape, slicing))
  return slicing


# TODO(slebedev): could be inlined, but
# `_VariableStore._get_partitioned_variable` is too complex even
# without this logic.
def _get_slice_dim_and_num_slices(slicing):
  """Get slicing dimension and number of slices from the partitioner output."""
  for slice_dim, num_slices in enumerate(slicing):
    if num_slices > 1:
      break
  else:
    # Degenerate case: no partitioning applied.
    slice_dim = 0
    num_slices = 1
  return slice_dim, num_slices


def _iter_slices(full_shape, num_slices, slice_dim):
  """Slices a given shape along the specified dimension."""
  num_slices_with_excess = full_shape[slice_dim] % num_slices
  offset = [0] * len(full_shape)
  min_slice_len = full_shape[slice_dim] // num_slices
  for i in xrange(num_slices):
    shape = full_shape[:]
    shape[slice_dim] = min_slice_len + bool(i < num_slices_with_excess)
    yield offset[:], shape
    offset[slice_dim] += shape[slice_dim]


def default_variable_creator(next_creator=None, **kwargs):
  """Default variable creator."""
  assert next_creator is None
  initial_value = kwargs.get("initial_value", None)
  trainable = kwargs.get("trainable", None)
  collections = kwargs.get("collections", None)
  validate_shape = kwargs.get("validate_shape", True)
  caching_device = kwargs.get("caching_device", None)
  name = kwargs.get("name", None)
  variable_def = kwargs.get("variable_def", None)
  dtype = kwargs.get("dtype", None)
  expected_shape = kwargs.get("expected_shape", None)
  import_scope = kwargs.get("import_scope", None)
  constraint = kwargs.get("constraint", None)
  use_resource = kwargs.get("use_resource", None)
  synchronization = kwargs.get("synchronization", None)
  aggregation = kwargs.get("aggregation", None)
  shape = kwargs.get("shape", None)

  if use_resource is None:
    use_resource = get_variable_scope().use_resource
  if use_resource is None:
    use_resource = _DEFAULT_USE_RESOURCE
  use_resource = use_resource or context.executing_eagerly()
  if use_resource:
    distribute_strategy = kwargs.get("distribute_strategy", None)
    return resource_variable_ops.ResourceVariable(
        initial_value=initial_value,
        trainable=trainable,
        collections=collections,
        validate_shape=validate_shape,
        caching_device=caching_device,
        name=name,
        dtype=dtype,
        constraint=constraint,
        variable_def=variable_def,
        import_scope=import_scope,
        distribute_strategy=distribute_strategy,
        synchronization=synchronization,
        aggregation=aggregation,
        shape=shape)
  else:
    return variables.RefVariable(
        initial_value=initial_value,
        trainable=trainable,
        collections=collections,
        validate_shape=validate_shape,
        caching_device=caching_device,
        name=name,
        dtype=dtype,
        constraint=constraint,
        variable_def=variable_def,
        expected_shape=expected_shape,
        import_scope=import_scope,
        synchronization=synchronization,
        aggregation=aggregation,
        shape=shape)


def default_variable_creator_v2(next_creator=None, **kwargs):
  """Default variable creator."""
  assert next_creator is None
  initial_value = kwargs.get("initial_value", None)
  trainable = kwargs.get("trainable", None)
  validate_shape = kwargs.get("validate_shape", True)
  caching_device = kwargs.get("caching_device", None)
  name = kwargs.get("name", None)
  variable_def = kwargs.get("variable_def", None)
  dtype = kwargs.get("dtype", None)
  import_scope = kwargs.get("import_scope", None)
  constraint = kwargs.get("constraint", None)
distribute_strategy = kwargs.get("distribute_strategy", None) synchronization = kwargs.get("synchronization", None) aggregation = kwargs.get("aggregation", None) shape = kwargs.get("shape", None) return resource_variable_ops.ResourceVariable( initial_value=initial_value, trainable=trainable, validate_shape=validate_shape, caching_device=caching_device, name=name, dtype=dtype, constraint=constraint, variable_def=variable_def, import_scope=import_scope, distribute_strategy=distribute_strategy, synchronization=synchronization, aggregation=aggregation, shape=shape) variables.default_variable_creator = default_variable_creator variables.default_variable_creator_v2 = default_variable_creator_v2 def _make_getter(captured_getter, captured_previous): """Gets around capturing loop variables in python being broken.""" return lambda **kwargs: captured_getter(captured_previous, **kwargs) # TODO(apassos) remove forwarding symbol variable = variables.VariableV1 @tf_export(v1=["variable_creator_scope"]) @tf_contextlib.contextmanager def variable_creator_scope_v1(variable_creator): """Scope which defines a variable creation function to be used by variable(). variable_creator is expected to be a function with the following signature: ``` def variable_creator(next_creator, **kwargs) ``` The creator is supposed to eventually call the next_creator to create a variable if it does want to create a variable and not call Variable or ResourceVariable directly. This helps make creators composable. A creator may choose to create multiple variables, return already existing variables, or simply register that a variable was created and defer to the next creators in line. Creators can also modify the keyword arguments seen by the next creators. Custom getters in the variable scope will eventually resolve down to these custom creators when they do create variables. The valid keyword arguments in kwds are: initial_value: A `Tensor`, or Python object convertible to a `Tensor`, which is the initial value for the Variable. The initial value must have a shape specified unless `validate_shape` is set to False. Can also be a callable with no argument that returns the initial value when called. In that case, `dtype` must be specified. (Note that initializer functions from init_ops.py must first be bound to a shape before being used here.) trainable: If `True`, the default, also adds the variable to the graph collection `GraphKeys.TRAINABLE_VARIABLES`. This collection is used as the default list of variables to use by the `Optimizer` classes. `trainable` defaults to `True` unless `synchronization` is set to `ON_READ`. collections: List of graph collections keys. The new variable is added to these collections. Defaults to `[GraphKeys.GLOBAL_VARIABLES]`. validate_shape: If `False`, allows the variable to be initialized with a value of unknown shape. If `True`, the default, the shape of `initial_value` must be known. caching_device: Optional device string describing where the Variable should be cached for reading. Defaults to the Variable's device. If not `None`, caches on another device. Typical use is to cache on the device where the Ops using the Variable reside, to deduplicate copying through `Switch` and other conditional statements. name: Optional name for the variable. Defaults to `'Variable'` and gets uniquified automatically. dtype: If set, initial_value will be converted to the given type. If `None`, either the datatype will be kept (if `initial_value` is a Tensor), or `convert_to_tensor` will decide. 
    constraint: A constraint function to be applied to the variable after
      updates by some algorithms.
    use_resource: If True, a ResourceVariable is always created.
    synchronization: Indicates when a distributed variable will be
      aggregated. Accepted values are constants defined in the class
      `tf.VariableSynchronization`. By default the synchronization is set to
      `AUTO` and the current `DistributionStrategy` chooses when to
      synchronize. If `synchronization` is set to `ON_READ`, `trainable`
      must not be set to `True`.
    aggregation: Indicates how a distributed variable will be aggregated.
      Accepted values are constants defined in the class
      `tf.VariableAggregation`.

  This set may grow over time, so it's important that the signature of
  creators is as mentioned above.

  Args:
    variable_creator: the passed creator

  Yields:
    A scope in which the creator is active
  """
  with ops.get_default_graph()._variable_creator_scope(variable_creator):  # pylint: disable=protected-access
    yield


# Note: only the docstrings differ between this and v1.
@tf_export("variable_creator_scope", v1=[])
@tf_contextlib.contextmanager
def variable_creator_scope(variable_creator):
  """Scope which defines a variable creation function to be used by variable().

  variable_creator is expected to be a function with the following signature:

  ```
    def variable_creator(next_creator, **kwargs)
  ```

  The creator is supposed to eventually call the next_creator to create a
  variable if it does want to create a variable and not call Variable or
  ResourceVariable directly. This helps make creators composable. A creator
  may choose to create multiple variables, return already existing variables,
  or simply register that a variable was created and defer to the next
  creators in line. Creators can also modify the keyword arguments seen by
  the next creators.

  Custom getters in the variable scope will eventually resolve down to these
  custom creators when they do create variables.

  The valid keyword arguments in kwds are:
    initial_value: A `Tensor`, or Python object convertible to a `Tensor`,
      which is the initial value for the Variable. The initial value must
      have a shape specified unless `validate_shape` is set to False. Can
      also be a callable with no argument that returns the initial value
      when called. In that case, `dtype` must be specified. (Note that
      initializer functions from init_ops.py must first be bound to a shape
      before being used here.)
    trainable: If `True`, the default, GradientTapes automatically watch
      uses of this Variable.
    validate_shape: If `False`, allows the variable to be initialized with a
      value of unknown shape. If `True`, the default, the shape of
      `initial_value` must be known.
    caching_device: Optional device string describing where the Variable
      should be cached for reading. Defaults to the Variable's device. If
      not `None`, caches on another device. Typical use is to cache on the
      device where the Ops using the Variable reside, to deduplicate copying
      through `Switch` and other conditional statements.
    name: Optional name for the variable. Defaults to `'Variable'` and gets
      uniquified automatically.
    dtype: If set, initial_value will be converted to the given type. If
      `None`, either the datatype will be kept (if `initial_value` is a
      Tensor), or `convert_to_tensor` will decide.
    constraint: A constraint function to be applied to the variable after
      updates by some algorithms.
    synchronization: Indicates when a distributed variable will be
      aggregated. Accepted values are constants defined in the class
      `tf.VariableSynchronization`.
By default the synchronization is set to `AUTO` and the current `DistributionStrategy` chooses when to synchronize. If `synchronization` is set to `ON_READ`, `trainable` must not be set to `True`. aggregation: Indicates how a distributed variable will be aggregated. Accepted values are constants defined in the class `tf.VariableAggregation`. This set may grow over time, so it's important the signature of creators is as mentioned above. Args: variable_creator: the passed creator Yields: A scope in which the creator is active """ with ops.get_default_graph()._variable_creator_scope(variable_creator): # pylint: disable=protected-access yield
ghchinoy/tensorflow
tensorflow/python/ops/variable_scope.py
Python
apache-2.0
113,647
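# --- Illustrative sketch (editor's addition, not part of the corpus file above) ---
# A minimal example of composing a custom creator with the
# variable_creator_scope defined in the tensorflow file above. The creator
# name `logging_creator` is hypothetical; `tf.variable_creator_scope` is the
# public TF 2.x entry point for the function shown above.
import tensorflow as tf


def logging_creator(next_creator, **kwargs):
    # Inspect (or rewrite) the kwargs, then defer to the next creator in
    # line, which keeps creators composable as the docstring describes.
    print("creating variable:", kwargs.get("name"))
    return next_creator(**kwargs)


with tf.variable_creator_scope(logging_creator):
    v = tf.Variable(1.0, name="my_var")  # routed through logging_creator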
from __future__ import division, print_function, absolute_import, unicode_literals

import os

from mog_commons.case_class import CaseClass
from mog_commons.functional import omap
from javactl.util import normalize_path


class AppSetting(CaseClass):
    def __init__(self, name=None, home=None, jar=None, entry_point=None, command=None, pid_file=None):
        # constraints
        assert name is not None, 'app.name is required'
        assert home is not None, 'app.home is required'
        assert os.path.isabs(home), 'app.home must be an absolute path'
        assert (jar is None) != (command is None), 'Either app.jar or app.command but not both must be given'
        assert jar is not None or entry_point is None, 'app.entry_point must be used with app.jar'

        normalize = lambda p: normalize_path(p, home)

        CaseClass.__init__(
            self,
            ('name', name),
            ('home', home),
            ('jar', omap(normalize, jar)),
            ('entry_point', entry_point),
            ('command', omap(normalize, command)),
            ('pid_file', omap(normalize, pid_file))
        )

    def is_duplicate_allowed(self):
        return self.pid_file is not None

    def get_args(self, java_args):
        if self.jar is not None:
            if self.entry_point is not None:
                return java_args + ['-cp', self.jar, self.entry_point]
            else:
                return java_args + ['-jar', self.jar]
        else:
            return [self.command]
mogproject/javactl
src/javactl/setting/app_setting.py
Python
apache-2.0
1,507
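# --- Illustrative sketch (editor's addition, not part of the corpus file above) ---
# Exercising AppSetting from the javactl file above. All values are
# hypothetical, and it is assumed that normalize_path resolves relative
# paths against `home`; requires javactl and mog_commons to be importable.
from javactl.setting.app_setting import AppSetting

app = AppSetting(name='myapp', home='/opt/myapp', jar='lib/myapp.jar',
                 entry_point='com.example.Main', pid_file='run/myapp.pid')

# With the assumption above, the jar resolves to /opt/myapp/lib/myapp.jar,
# so the command line becomes:
# ['-Xmx1g', '-cp', '/opt/myapp/lib/myapp.jar', 'com.example.Main']
print(app.get_args(['-Xmx1g']))
print(app.is_duplicate_allowed())  # True, because a pid_file is configured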
# -*- coding: utf-8 -*-
# Copyright 2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import

from six.moves import range

from kafka_utils.kafka_cluster_manager.cluster_info.broker import Broker
from kafka_utils.kafka_cluster_manager.cluster_info.partition import Partition


def create_broker(broker_id, partitions):
    b = Broker(broker_id, partitions=set(partitions))
    for p in partitions:
        p.add_replica(b)
    return b


def create_and_attach_partition(topic, partition_id):
    partition = Partition(topic, partition_id)
    topic.add_partition(partition)
    return partition


def broker_range(n):
    """Return a dict of brokers keyed by broker id, for ids 0 to n-1."""
    return {str(x): {"host": "host%s" % x} for x in range(n)}
Yelp/kafka-utils
tests/kafka_cluster_manager/helper.py
Python
apache-2.0
1,295
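# --- Illustrative sketch (editor's addition, not part of the corpus file above) ---
# Wiring the kafka-utils test helpers above together. The Topic import path
# and its (id, replication_factor) signature are assumptions based on the
# same cluster_info package; the helpers themselves are assumed in scope.
from kafka_utils.kafka_cluster_manager.cluster_info.topic import Topic

topic = Topic('t1', replication_factor=1)
p0 = create_and_attach_partition(topic, 0)
p1 = create_and_attach_partition(topic, 1)
broker = create_broker('0', [p0, p1])

# create_broker registered the broker as a replica of both partitions.
assert broker in p0.replicas and broker in p1.replicas
print(broker_range(3))  # {'0': {'host': 'host0'}, '1': {'host': 'host1'}, '2': {'host': 'host2'}}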
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Face functions for image classification."""

from . import _local
from . import _cloud


def preprocess_async(train_dataset, output_dir, eval_dataset=None, checkpoint=None, cloud=None):
  """Preprocess data. Produce output that can be used by training efficiently.

  Args:
    train_dataset: training data source to preprocess. Can be CsvDataset or
        BigQueryDataSet. If eval_dataset is None, the pipeline will randomly split
        train_dataset into train/eval set with 7:3 ratio.
    output_dir: The output directory to use. Preprocessing will create a sub directory
        under it for each run, and also update a "latest" file which points to the latest
        preprocessed directory. Users are responsible for cleanup. Can be local or GCS path.
    eval_dataset: evaluation data source to preprocess. Can be CsvDataset or
        BigQueryDataSet. If specified, it will be used for evaluation during training, and
        train_dataset will be completely used for training.
    checkpoint: the Inception checkpoint to use. If None, a default checkpoint is used.
    cloud: a DataFlow pipeline option dictionary such as {'num_workers': 3}. If not None,
        it will run in cloud. Otherwise, it runs locally.

  Returns:
    A google.datalab.utils.Job object that can be used to query state from or wait.
  """
  if cloud is None:
    return _local.Local.preprocess(train_dataset, output_dir, eval_dataset, checkpoint)

  if not isinstance(cloud, dict):
    cloud = {}
  return _cloud.Cloud.preprocess(train_dataset, output_dir, eval_dataset, checkpoint, cloud)


def preprocess(train_dataset, output_dir, eval_dataset=None, checkpoint=None, cloud=None):
  """Blocking version of preprocess_async(). The only difference is that it blocks the caller
  until the job finishes, and it does not have a return value.
  """
  job = preprocess_async(train_dataset, output_dir, eval_dataset, checkpoint, cloud)
  job.wait()
  print(job.state)


def train_async(input_dir, batch_size, max_steps, output_dir, checkpoint=None, cloud=None):
  """Train model. The output can be used for batch prediction or online deployment.

  Args:
    input_dir: A directory path containing preprocessed results. Can be local or GCS path.
    batch_size: size of batch used for training.
    max_steps: number of steps to train.
    output_dir: The output directory to use. Can be local or GCS path.
    checkpoint: the Inception checkpoint to use. If None, a default checkpoint is used.
    cloud: a google.datalab.ml.CloudTrainingConfig object to let it run in cloud.
        If None, it runs locally.

  Returns:
    A google.datalab.utils.Job object that can be used to query state from or wait.
  """
  if cloud is None:
    return _local.Local.train(input_dir, batch_size, max_steps, output_dir, checkpoint)
  return _cloud.Cloud.train(input_dir, batch_size, max_steps, output_dir, checkpoint, cloud)


def train(input_dir, batch_size, max_steps, output_dir, checkpoint=None, cloud=None):
  """Blocking version of train_async(). The only difference is that it blocks the caller
  until the job finishes, and it does not have a return value.
  """
  job = train_async(input_dir, batch_size, max_steps, output_dir, checkpoint, cloud)
  job.wait()
  print(job.state)


def predict(model, image_files, resize=False, show_image=True, cloud=None):
  """Predict using a model in a local or GCS directory (offline), or a deployed model (online).

  Args:
    model: if cloud is None, a local or GCS directory of a trained model. Otherwise, it
        specifies a deployed model identified by model.version, such as "imagemodel.v1".
    image_files: The paths to the image files to predict labels. Can be local or GCS paths.
    resize: Whether to resize the image to a reasonable size (300x300) before prediction.
    show_image: Whether to show images in the results.
    cloud: if None, predicts with offline model locally. Otherwise, predict with a
        deployed online model.

  Returns:
    A pandas DataFrame including the prediction results.
  """
  print('Predicting...')
  if cloud is None:
    results = _local.Local.predict(model, image_files, resize, show_image)
  else:
    results = _cloud.Cloud.predict(model, image_files, resize, show_image)
  return results


def batch_predict_async(dataset, model_dir, output_csv=None, output_bq_table=None, cloud=None):
  """Batch prediction with an offline model.

  Args:
    dataset: CsvDataSet or BigQueryDataSet for batch prediction input. Can contain either
        one column 'image_url', or two columns with another being 'label'.
    model_dir: The directory of a trained inception model. Can be local or GCS paths.
    output_csv: The output csv file for prediction results. If specified, it will also
        output a csv schema file with the name output_csv + '.schema.json'.
    output_bq_table: if specified, the output BigQuery table for prediction results.
        output_csv and output_bq_table can both be set.
    cloud: a DataFlow pipeline option dictionary such as {'num_workers': 3}. If not None,
        it will run in cloud. Otherwise, it runs locally. If specified, it must include
        'temp_location' with value being a GCS path, because cloud run requires a staging
        GCS directory.

  Raises:
    ValueError if both output_csv and output_bq_table are None, or if cloud is not None
        but it does not include 'temp_location'.

  Returns:
    A google.datalab.utils.Job object that can be used to query state from or wait.
  """
  if cloud is None:
    return _local.Local.batch_predict(dataset, model_dir, output_csv, output_bq_table)

  if not isinstance(cloud, dict):
    cloud = {}
  return _cloud.Cloud.batch_predict(dataset, model_dir, output_csv, output_bq_table, cloud)


def batch_predict(dataset, model_dir, output_csv=None, output_bq_table=None, cloud=None):
  """Blocking version of batch_predict_async(). The only difference is that it blocks the
  caller until the job finishes, and it does not have a return value.
  """
  job = batch_predict_async(dataset, model_dir, output_csv, output_bq_table, cloud)
  job.wait()
  print(job.state)
jdanbrown/pydatalab
solutionbox/image_classification/mltoolbox/image/classification/_api.py
Python
apache-2.0
6,780
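# --- Illustrative sketch (editor's addition, not part of the corpus file above) ---
# A local (cloud=None) end-to-end flow through the mltoolbox API above. The
# CsvDataSet constructor signature, the `mltoolbox.image.classification`
# package path re-exporting these functions, and all paths are assumptions.
from google.datalab.ml import CsvDataSet
from mltoolbox.image import classification

train_set = CsvDataSet('gs://my-bucket/train.csv',
                       schema='image_url:STRING,label:STRING')

# preprocess() blocks; with eval_dataset=None it splits train/eval 7:3.
classification.preprocess(train_set, output_dir='/tmp/preprocessed')
classification.train('/tmp/preprocessed', batch_size=32, max_steps=1000,
                     output_dir='/tmp/model')

# predict() returns a pandas DataFrame of results.
df = classification.predict('/tmp/model', ['/tmp/images/rose.jpg'],
                            show_image=False)
print(df)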
# Copyright 2011, OpenStack Foundation
# Copyright 2012, Red Hat, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import abc

import glance_store
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from oslo_utils import encodeutils
from oslo_utils import excutils
import six
import webob

from glance.common import exception
from glance.common import timeutils
from glance.domain import proxy as domain_proxy
from glance.i18n import _, _LE


notifier_opts = [
    cfg.StrOpt('default_publisher_id',
               default="image.localhost",
               help=_("""
Default publisher_id for outgoing Glance notifications.

This is the value that the notification driver will use to identify
messages for events originating from the Glance service. Typically,
this is the hostname of the instance that generated the message.

Possible values:
    * Any reasonable instance identifier, for example: image.host1

Related options:
    * None

""")),

    cfg.ListOpt('disabled_notifications',
                default=[],
                help=_("""
List of notifications to be disabled.

Specify a list of notifications that should not be emitted. A
notification can be given either as a notification type to disable a
single event notification, or as a notification group prefix to disable
all event notifications within a group.

Possible values:
    A comma-separated list of individual notification types or
    notification groups to be disabled. Currently supported groups:
        * image
        * image.member
        * task
        * metadef_namespace
        * metadef_object
        * metadef_property
        * metadef_resource_type
        * metadef_tag
    For a complete listing and description of each event refer to:
    http://docs.openstack.org/developer/glance/notifications.html

    The values must be specified as: <group_name>.<event_name>
    For example: image.create,task.success,metadef_tag

Related options:
    * None

""")),
]

CONF = cfg.CONF
CONF.register_opts(notifier_opts)

LOG = logging.getLogger(__name__)


def set_defaults(control_exchange='glance'):
    oslo_messaging.set_transport_defaults(control_exchange)


def get_transport():
    return oslo_messaging.get_notification_transport(CONF)


class Notifier(object):
    """Uses a notification strategy to send out messages about events."""

    def __init__(self):
        publisher_id = CONF.default_publisher_id
        self._transport = get_transport()
        self._notifier = oslo_messaging.Notifier(self._transport,
                                                 publisher_id=publisher_id)

    def warn(self, event_type, payload):
        self._notifier.warn({}, event_type, payload)

    def info(self, event_type, payload):
        self._notifier.info({}, event_type, payload)

    def error(self, event_type, payload):
        self._notifier.error({}, event_type, payload)


def _get_notification_group(notification):
    return notification.split('.', 1)[0]


def _is_notification_enabled(notification):
    disabled_notifications = CONF.disabled_notifications
    notification_group = _get_notification_group(notification)

    notifications = (notification, notification_group)
    for disabled_notification in disabled_notifications:
        if disabled_notification in notifications:
            return False

    return True


def _send_notification(notify, notification_type, payload):
    if _is_notification_enabled(notification_type):
        notify(notification_type, payload)


def format_image_notification(image):
    """
    Given a glance.domain.Image object, return a dictionary of relevant
    notification information. We purposely do not include 'location'
    as it may contain credentials.
    """
    return {
        'id': image.image_id,
        'name': image.name,
        'status': image.status,
        'created_at': timeutils.isotime(image.created_at),
        'updated_at': timeutils.isotime(image.updated_at),
        'min_disk': image.min_disk,
        'min_ram': image.min_ram,
        'protected': image.protected,
        'checksum': image.checksum,
        'owner': image.owner,
        'disk_format': image.disk_format,
        'container_format': image.container_format,
        'size': image.size,
        'virtual_size': image.virtual_size,
        'is_public': image.visibility == 'public',
        'properties': dict(image.extra_properties),
        'tags': list(image.tags),
        'deleted': False,
        'deleted_at': None,
    }


def format_image_member_notification(image_member):
    """Given a glance.domain.ImageMember object, return a dictionary of
    relevant notification information.
    """
    return {
        'image_id': image_member.image_id,
        'member_id': image_member.member_id,
        'status': image_member.status,
        'created_at': timeutils.isotime(image_member.created_at),
        'updated_at': timeutils.isotime(image_member.updated_at),
        'deleted': False,
        'deleted_at': None,
    }


def format_task_notification(task):
    # NOTE(nikhil): input is not passed to the notifier payload as it may
    # contain sensitive info.
    return {
        'id': task.task_id,
        'type': task.type,
        'status': task.status,
        'result': None,
        'owner': task.owner,
        'message': None,
        'expires_at': timeutils.isotime(task.expires_at),
        'created_at': timeutils.isotime(task.created_at),
        'updated_at': timeutils.isotime(task.updated_at),
        'deleted': False,
        'deleted_at': None,
    }


def format_metadef_namespace_notification(metadef_namespace):
    return {
        'namespace': metadef_namespace.namespace,
        'namespace_old': metadef_namespace.namespace,
        'display_name': metadef_namespace.display_name,
        'protected': metadef_namespace.protected,
        'visibility': metadef_namespace.visibility,
        'owner': metadef_namespace.owner,
        'description': metadef_namespace.description,
        'created_at': timeutils.isotime(metadef_namespace.created_at),
        'updated_at': timeutils.isotime(metadef_namespace.updated_at),
        'deleted': False,
        'deleted_at': None,
    }


def format_metadef_object_notification(metadef_object):
    object_properties = metadef_object.properties or {}
    properties = []
    for name, prop in six.iteritems(object_properties):
        object_property = _format_metadef_object_property(name, prop)
        properties.append(object_property)

    return {
        'namespace': metadef_object.namespace,
        'name': metadef_object.name,
        'name_old': metadef_object.name,
        'properties': properties,
        'required': metadef_object.required,
        'description': metadef_object.description,
        'created_at': timeutils.isotime(metadef_object.created_at),
        'updated_at': timeutils.isotime(metadef_object.updated_at),
        'deleted': False,
        'deleted_at': None,
    }


def _format_metadef_object_property(name, metadef_property):
    return {
        'name': name,
        'type': metadef_property.type or None,
        'title': metadef_property.title or None,
        'description': metadef_property.description or None,
        'default': metadef_property.default or None,
        'minimum': metadef_property.minimum or None,
        'maximum': metadef_property.maximum or None,
        'enum': metadef_property.enum or None,
        'pattern': metadef_property.pattern or None,
        'minLength': metadef_property.minLength or None,
        'maxLength': metadef_property.maxLength or None,
        'confidential': metadef_property.confidential or None,
        'items': metadef_property.items or None,
        'uniqueItems': metadef_property.uniqueItems or None,
        'minItems': metadef_property.minItems or None,
        'maxItems': metadef_property.maxItems or None,
        'additionalItems': metadef_property.additionalItems or None,
    }


def format_metadef_property_notification(metadef_property):
    schema = metadef_property.schema

    return {
        'namespace': metadef_property.namespace,
        'name': metadef_property.name,
        'name_old': metadef_property.name,
        'type': schema.get('type'),
        'title': schema.get('title'),
        'description': schema.get('description'),
        'default': schema.get('default'),
        'minimum': schema.get('minimum'),
        'maximum': schema.get('maximum'),
        'enum': schema.get('enum'),
        'pattern': schema.get('pattern'),
        'minLength': schema.get('minLength'),
        'maxLength': schema.get('maxLength'),
        'confidential': schema.get('confidential'),
        'items': schema.get('items'),
        'uniqueItems': schema.get('uniqueItems'),
        'minItems': schema.get('minItems'),
        'maxItems': schema.get('maxItems'),
        'additionalItems': schema.get('additionalItems'),
        'deleted': False,
        'deleted_at': None,
    }


def format_metadef_resource_type_notification(metadef_resource_type):
    return {
        'namespace': metadef_resource_type.namespace,
        'name': metadef_resource_type.name,
        'name_old': metadef_resource_type.name,
        'prefix': metadef_resource_type.prefix,
        'properties_target': metadef_resource_type.properties_target,
        'created_at': timeutils.isotime(metadef_resource_type.created_at),
        'updated_at': timeutils.isotime(metadef_resource_type.updated_at),
        'deleted': False,
        'deleted_at': None,
    }


def format_metadef_tag_notification(metadef_tag):
    return {
        'namespace': metadef_tag.namespace,
        'name': metadef_tag.name,
        'name_old': metadef_tag.name,
        'created_at': timeutils.isotime(metadef_tag.created_at),
        'updated_at': timeutils.isotime(metadef_tag.updated_at),
        'deleted': False,
        'deleted_at': None,
    }


class NotificationBase(object):
    def get_payload(self, obj):
        return {}

    def send_notification(self, notification_id, obj, extra_payload=None):
        payload = self.get_payload(obj)
        if extra_payload is not None:
            payload.update(extra_payload)

        _send_notification(self.notifier.info, notification_id, payload)


@six.add_metaclass(abc.ABCMeta)
class NotificationProxy(NotificationBase):
    def __init__(self, repo, context, notifier):
        self.repo = repo
        self.context = context
        self.notifier = notifier

        super_class = self.get_super_class()
        super_class.__init__(self, repo)

    @abc.abstractmethod
    def get_super_class(self):
        pass


@six.add_metaclass(abc.ABCMeta)
class NotificationRepoProxy(NotificationBase):
    def __init__(self, repo, context, notifier):
        self.repo = repo
        self.context = context
        self.notifier = notifier
        proxy_kwargs = {'context': self.context, 'notifier': self.notifier}

        proxy_class = self.get_proxy_class()
        super_class = self.get_super_class()
        super_class.__init__(self, repo, proxy_class, proxy_kwargs)

    @abc.abstractmethod
    def get_super_class(self):
        pass

    @abc.abstractmethod
    def get_proxy_class(self):
        pass


@six.add_metaclass(abc.ABCMeta)
class NotificationFactoryProxy(object):
    def __init__(self, factory, context, notifier):
        kwargs = {'context': context, 'notifier': notifier}

        proxy_class = self.get_proxy_class()
        super_class = self.get_super_class()
        super_class.__init__(self, factory, proxy_class, kwargs)

    @abc.abstractmethod
    def get_super_class(self):
        pass

    @abc.abstractmethod
    def get_proxy_class(self):
        pass


class ImageProxy(NotificationProxy, domain_proxy.Image):
    def get_super_class(self):
        return domain_proxy.Image

    def get_payload(self, obj):
        return format_image_notification(obj)

    def _format_image_send(self, bytes_sent):
        return {
            'bytes_sent': bytes_sent,
            'image_id': self.repo.image_id,
            'owner_id': self.repo.owner,
            'receiver_tenant_id': self.context.tenant,
            'receiver_user_id': self.context.user,
        }

    def _get_chunk_data_iterator(self, data, chunk_size=None):
        sent = 0
        for chunk in data:
            yield chunk
            sent += len(chunk)

        if sent != (chunk_size or self.repo.size):
            notify = self.notifier.error
        else:
            notify = self.notifier.info

        try:
            _send_notification(notify, 'image.send',
                               self._format_image_send(sent))
        except Exception as err:
            msg = (_LE("An error occurred during image.send"
                       " notification: %(err)s") % {'err': err})
            LOG.error(msg)

    def get_data(self, offset=0, chunk_size=None):
        # Due to the need of evaluating subsequent proxies, this one
        # should return a generator, the call should be done before
        # generator creation
        data = self.repo.get_data(offset=offset, chunk_size=chunk_size)
        return self._get_chunk_data_iterator(data, chunk_size=chunk_size)

    def set_data(self, data, size=None):
        self.send_notification('image.prepare', self.repo)

        notify_error = self.notifier.error
        try:
            self.repo.set_data(data, size)
        except glance_store.StorageFull as e:
            msg = (_("Image storage media is full: %s")
                   % encodeutils.exception_to_unicode(e))
            _send_notification(notify_error, 'image.upload', msg)
            raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg)
        except glance_store.StorageWriteDenied as e:
            msg = (_("Insufficient permissions on image storage media: %s")
                   % encodeutils.exception_to_unicode(e))
            _send_notification(notify_error, 'image.upload', msg)
            raise webob.exc.HTTPServiceUnavailable(explanation=msg)
        except ValueError as e:
            msg = (_("Cannot save data for image %(image_id)s: %(error)s")
                   % {'image_id': self.repo.image_id,
                      'error': encodeutils.exception_to_unicode(e)})
            _send_notification(notify_error, 'image.upload', msg)
            raise webob.exc.HTTPBadRequest(
                explanation=encodeutils.exception_to_unicode(e))
        except exception.Duplicate as e:
            msg = (_("Unable to upload duplicate image data for image "
                     "%(image_id)s: %(error)s")
                   % {'image_id': self.repo.image_id,
                      'error': encodeutils.exception_to_unicode(e)})
            _send_notification(notify_error, 'image.upload', msg)
            raise webob.exc.HTTPConflict(explanation=msg)
        except exception.Forbidden as e:
            msg = (_("Not allowed to upload image data for image %(image_id)s:"
                     " %(error)s")
                   % {'image_id': self.repo.image_id,
                      'error': encodeutils.exception_to_unicode(e)})
            _send_notification(notify_error, 'image.upload', msg)
            raise webob.exc.HTTPForbidden(explanation=msg)
        except exception.NotFound as e:
            exc_str = encodeutils.exception_to_unicode(e)
            msg = (_("Image %(image_id)s could not be found after upload."
                     " The image may have been deleted during the upload:"
                     " %(error)s")
                   % {'image_id': self.repo.image_id, 'error': exc_str})
            _send_notification(notify_error, 'image.upload', msg)
            raise webob.exc.HTTPNotFound(explanation=exc_str)
        except webob.exc.HTTPError as e:
            with excutils.save_and_reraise_exception():
                msg = (_("Failed to upload image data for image %(image_id)s"
                         " due to HTTP error: %(error)s")
                       % {'image_id': self.repo.image_id,
                          'error': encodeutils.exception_to_unicode(e)})
                _send_notification(notify_error, 'image.upload', msg)
        except Exception as e:
            with excutils.save_and_reraise_exception():
                msg = (_("Failed to upload image data for image %(image_id)s "
                         "due to internal error: %(error)s")
                       % {'image_id': self.repo.image_id,
                          'error': encodeutils.exception_to_unicode(e)})
                _send_notification(notify_error, 'image.upload', msg)
        else:
            self.send_notification('image.upload', self.repo)
            self.send_notification('image.activate', self.repo)


class ImageMemberProxy(NotificationProxy, domain_proxy.ImageMember):
    def get_super_class(self):
        return domain_proxy.ImageMember


class ImageFactoryProxy(NotificationFactoryProxy, domain_proxy.ImageFactory):
    def get_super_class(self):
        return domain_proxy.ImageFactory

    def get_proxy_class(self):
        return ImageProxy


class ImageRepoProxy(NotificationRepoProxy, domain_proxy.Repo):
    def get_super_class(self):
        return domain_proxy.Repo

    def get_proxy_class(self):
        return ImageProxy

    def get_payload(self, obj):
        return format_image_notification(obj)

    def save(self, image, from_state=None):
        super(ImageRepoProxy, self).save(image, from_state=from_state)
        self.send_notification('image.update', image)

    def add(self, image):
        super(ImageRepoProxy, self).add(image)
        self.send_notification('image.create', image)

    def remove(self, image):
        super(ImageRepoProxy, self).remove(image)
        self.send_notification('image.delete', image, extra_payload={
            'deleted': True, 'deleted_at': timeutils.isotime()
        })


class ImageMemberRepoProxy(NotificationBase, domain_proxy.MemberRepo):
    def __init__(self, repo, image, context, notifier):
        self.repo = repo
        self.image = image
        self.context = context
        self.notifier = notifier
        proxy_kwargs = {'context': self.context, 'notifier': self.notifier}

        proxy_class = self.get_proxy_class()
        super_class = self.get_super_class()
        super_class.__init__(self, image, repo, proxy_class, proxy_kwargs)

    def get_super_class(self):
        return domain_proxy.MemberRepo

    def get_proxy_class(self):
        return ImageMemberProxy

    def get_payload(self, obj):
        return format_image_member_notification(obj)

    def save(self, member, from_state=None):
        super(ImageMemberRepoProxy, self).save(member, from_state=from_state)
        self.send_notification('image.member.update', member)

    def add(self, member):
        super(ImageMemberRepoProxy, self).add(member)
        self.send_notification('image.member.create', member)

    def remove(self, member):
        super(ImageMemberRepoProxy, self).remove(member)
        self.send_notification('image.member.delete', member, extra_payload={
            'deleted': True, 'deleted_at': timeutils.isotime()
        })


class TaskProxy(NotificationProxy, domain_proxy.Task):
    def get_super_class(self):
        return domain_proxy.Task

    def get_payload(self, obj):
        return format_task_notification(obj)

    def begin_processing(self):
        super(TaskProxy, self).begin_processing()
        self.send_notification('task.processing', self.repo)

    def succeed(self, result):
        super(TaskProxy, self).succeed(result)
        self.send_notification('task.success', self.repo)

    def fail(self, message):
        super(TaskProxy, self).fail(message)
        self.send_notification('task.failure', self.repo)

    def run(self, executor):
        super(TaskProxy, self).run(executor)
        self.send_notification('task.run', self.repo)


class TaskFactoryProxy(NotificationFactoryProxy, domain_proxy.TaskFactory):
    def get_super_class(self):
        return domain_proxy.TaskFactory

    def get_proxy_class(self):
        return TaskProxy


class TaskRepoProxy(NotificationRepoProxy, domain_proxy.TaskRepo):
    def get_super_class(self):
        return domain_proxy.TaskRepo

    def get_proxy_class(self):
        return TaskProxy

    def get_payload(self, obj):
        return format_task_notification(obj)

    def add(self, task):
        result = super(TaskRepoProxy, self).add(task)
        self.send_notification('task.create', task)
        return result

    def remove(self, task):
        result = super(TaskRepoProxy, self).remove(task)
        self.send_notification('task.delete', task, extra_payload={
            'deleted': True, 'deleted_at': timeutils.isotime()
        })
        return result


class TaskStubProxy(NotificationProxy, domain_proxy.TaskStub):
    def get_super_class(self):
        return domain_proxy.TaskStub


class TaskStubRepoProxy(NotificationRepoProxy, domain_proxy.TaskStubRepo):
    def get_super_class(self):
        return domain_proxy.TaskStubRepo

    def get_proxy_class(self):
        return TaskStubProxy


class MetadefNamespaceProxy(NotificationProxy, domain_proxy.MetadefNamespace):
    def get_super_class(self):
        return domain_proxy.MetadefNamespace


class MetadefNamespaceFactoryProxy(NotificationFactoryProxy,
                                   domain_proxy.MetadefNamespaceFactory):
    def get_super_class(self):
        return domain_proxy.MetadefNamespaceFactory

    def get_proxy_class(self):
        return MetadefNamespaceProxy


class MetadefNamespaceRepoProxy(NotificationRepoProxy,
                                domain_proxy.MetadefNamespaceRepo):
    def get_super_class(self):
        return domain_proxy.MetadefNamespaceRepo

    def get_proxy_class(self):
        return MetadefNamespaceProxy

    def get_payload(self, obj):
        return format_metadef_namespace_notification(obj)

    def save(self, metadef_namespace):
        name = getattr(metadef_namespace, '_old_namespace',
                       metadef_namespace.namespace)
        result = super(MetadefNamespaceRepoProxy, self).save(metadef_namespace)
        self.send_notification(
            'metadef_namespace.update', metadef_namespace,
            extra_payload={
                'namespace_old': name,
            })
        return result

    def add(self, metadef_namespace):
        result = super(MetadefNamespaceRepoProxy, self).add(metadef_namespace)
        self.send_notification('metadef_namespace.create', metadef_namespace)
        return result

    def remove(self, metadef_namespace):
        result = super(MetadefNamespaceRepoProxy, self).remove(
            metadef_namespace)
        self.send_notification(
            'metadef_namespace.delete', metadef_namespace,
            extra_payload={'deleted': True,
                           'deleted_at': timeutils.isotime()}
        )
        return result

    def remove_objects(self, metadef_namespace):
        result = super(MetadefNamespaceRepoProxy, self).remove_objects(
            metadef_namespace)
        self.send_notification('metadef_namespace.delete_objects',
                               metadef_namespace)
        return result

    def remove_properties(self, metadef_namespace):
        result = super(MetadefNamespaceRepoProxy, self).remove_properties(
            metadef_namespace)
        self.send_notification('metadef_namespace.delete_properties',
                               metadef_namespace)
        return result

    def remove_tags(self, metadef_namespace):
        result = super(MetadefNamespaceRepoProxy, self).remove_tags(
            metadef_namespace)
        self.send_notification('metadef_namespace.delete_tags',
                               metadef_namespace)
        return result


class MetadefObjectProxy(NotificationProxy, domain_proxy.MetadefObject):
    def get_super_class(self):
        return domain_proxy.MetadefObject


class MetadefObjectFactoryProxy(NotificationFactoryProxy,
                                domain_proxy.MetadefObjectFactory):
    def get_super_class(self):
        return domain_proxy.MetadefObjectFactory

    def get_proxy_class(self):
        return MetadefObjectProxy


class MetadefObjectRepoProxy(NotificationRepoProxy,
                             domain_proxy.MetadefObjectRepo):
    def get_super_class(self):
        return domain_proxy.MetadefObjectRepo

    def get_proxy_class(self):
        return MetadefObjectProxy

    def get_payload(self, obj):
        return format_metadef_object_notification(obj)

    def save(self, metadef_object):
        name = getattr(metadef_object, '_old_name', metadef_object.name)
        result = super(MetadefObjectRepoProxy, self).save(metadef_object)
        self.send_notification(
            'metadef_object.update', metadef_object,
            extra_payload={
                'namespace': metadef_object.namespace.namespace,
                'name_old': name,
            })
        return result

    def add(self, metadef_object):
        result = super(MetadefObjectRepoProxy, self).add(metadef_object)
        self.send_notification('metadef_object.create', metadef_object)
        return result

    def remove(self, metadef_object):
        result = super(MetadefObjectRepoProxy, self).remove(metadef_object)
        self.send_notification(
            'metadef_object.delete', metadef_object,
            extra_payload={
                'deleted': True,
                'deleted_at': timeutils.isotime(),
                'namespace': metadef_object.namespace.namespace
            }
        )
        return result


class MetadefPropertyProxy(NotificationProxy, domain_proxy.MetadefProperty):
    def get_super_class(self):
        return domain_proxy.MetadefProperty


class MetadefPropertyFactoryProxy(NotificationFactoryProxy,
                                  domain_proxy.MetadefPropertyFactory):
    def get_super_class(self):
        return domain_proxy.MetadefPropertyFactory

    def get_proxy_class(self):
        return MetadefPropertyProxy


class MetadefPropertyRepoProxy(NotificationRepoProxy,
                               domain_proxy.MetadefPropertyRepo):
    def get_super_class(self):
        return domain_proxy.MetadefPropertyRepo

    def get_proxy_class(self):
        return MetadefPropertyProxy

    def get_payload(self, obj):
        return format_metadef_property_notification(obj)

    def save(self, metadef_property):
        name = getattr(metadef_property, '_old_name', metadef_property.name)
        result = super(MetadefPropertyRepoProxy, self).save(metadef_property)
        self.send_notification(
            'metadef_property.update', metadef_property,
            extra_payload={
                'namespace': metadef_property.namespace.namespace,
                'name_old': name,
            })
        return result

    def add(self, metadef_property):
        result = super(MetadefPropertyRepoProxy, self).add(metadef_property)
        self.send_notification('metadef_property.create', metadef_property)
        return result

    def remove(self, metadef_property):
        result = super(MetadefPropertyRepoProxy, self).remove(metadef_property)
        self.send_notification(
            'metadef_property.delete', metadef_property,
            extra_payload={
                'deleted': True,
                'deleted_at': timeutils.isotime(),
                'namespace': metadef_property.namespace.namespace
            }
        )
        return result


class MetadefResourceTypeProxy(NotificationProxy,
                               domain_proxy.MetadefResourceType):
    def get_super_class(self):
        return domain_proxy.MetadefResourceType


class MetadefResourceTypeFactoryProxy(NotificationFactoryProxy,
                                      domain_proxy.MetadefResourceTypeFactory):
    def get_super_class(self):
        return domain_proxy.MetadefResourceTypeFactory

    def get_proxy_class(self):
        return MetadefResourceTypeProxy


class MetadefResourceTypeRepoProxy(NotificationRepoProxy,
                                   domain_proxy.MetadefResourceTypeRepo):
    def get_super_class(self):
        return domain_proxy.MetadefResourceTypeRepo

    def get_proxy_class(self):
        return MetadefResourceTypeProxy

    def get_payload(self, obj):
        return format_metadef_resource_type_notification(obj)

    def add(self, md_resource_type):
        result = super(MetadefResourceTypeRepoProxy, self).add(
            md_resource_type)
        self.send_notification('metadef_resource_type.create',
                               md_resource_type)
        return result

    def remove(self, md_resource_type):
        result = super(MetadefResourceTypeRepoProxy, self).remove(
            md_resource_type)
        self.send_notification(
            'metadef_resource_type.delete', md_resource_type,
            extra_payload={
                'deleted': True,
                'deleted_at': timeutils.isotime(),
                'namespace': md_resource_type.namespace.namespace
            }
        )
        return result


class MetadefTagProxy(NotificationProxy, domain_proxy.MetadefTag):
    def get_super_class(self):
        return domain_proxy.MetadefTag


class MetadefTagFactoryProxy(NotificationFactoryProxy,
                             domain_proxy.MetadefTagFactory):
    def get_super_class(self):
        return domain_proxy.MetadefTagFactory

    def get_proxy_class(self):
        return MetadefTagProxy


class MetadefTagRepoProxy(NotificationRepoProxy, domain_proxy.MetadefTagRepo):
    def get_super_class(self):
        return domain_proxy.MetadefTagRepo

    def get_proxy_class(self):
        return MetadefTagProxy

    def get_payload(self, obj):
        return format_metadef_tag_notification(obj)

    def save(self, metadef_tag):
        name = getattr(metadef_tag, '_old_name', metadef_tag.name)
        result = super(MetadefTagRepoProxy, self).save(metadef_tag)
        self.send_notification(
            'metadef_tag.update', metadef_tag,
            extra_payload={
                'namespace': metadef_tag.namespace.namespace,
                'name_old': name,
            })
        return result

    def add(self, metadef_tag):
        result = super(MetadefTagRepoProxy, self).add(metadef_tag)
        self.send_notification('metadef_tag.create', metadef_tag)
        return result

    def add_tags(self, metadef_tags):
        result = super(MetadefTagRepoProxy, self).add_tags(metadef_tags)
        for metadef_tag in metadef_tags:
            self.send_notification('metadef_tag.create', metadef_tag)
        return result

    def remove(self, metadef_tag):
        result = super(MetadefTagRepoProxy, self).remove(metadef_tag)
        self.send_notification(
            'metadef_tag.delete', metadef_tag,
            extra_payload={
                'deleted': True,
                'deleted_at': timeutils.isotime(),
                'namespace': metadef_tag.namespace.namespace
            }
        )
        return result
stevelle/glance
glance/notifier.py
Python
apache-2.0
31,619
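# --- Illustrative sketch (editor's addition, not part of the corpus file above) ---
# The matching rule behind _is_notification_enabled in the glance file
# above, restated without oslo.config so it runs standalone. A notification
# is suppressed if either its full type or its group (the text before the
# first '.') appears in disabled_notifications.
def is_enabled(notification, disabled):
    group = notification.split('.', 1)[0]
    return notification not in disabled and group not in disabled


disabled = ['task', 'image.delete']
print(is_enabled('image.create', disabled))  # True
print(is_enabled('image.delete', disabled))  # False: exact type disabled
print(is_enabled('task.success', disabled))  # False: whole 'task' group disabled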
#!/usr/bin/env python

#===============================================================================
# Copyright 2015 Geoscience Australia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================

"""
IngestDBWrapper: provides low-level database commands for the ingest process.

This class (based on ConnectionWrapper) provides low-level database
commands used by the ingest process. This is where the SQL queries go.

The methods in this class should be context free, so all context
information should be passed in as parameters and passed out as return
values. To put it another way, the database connection should be the
*only* data attribute.

If you feel you need to cache the result of database queries or track
context, please do it in the calling class, not here. This is intended
as a very clean and simple interface to the database, to replace big
chunks of SQL with meaningfully named method calls.
"""

from __future__ import absolute_import

import logging
import datetime

from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
from psycopg2.extensions import ISOLATION_LEVEL_READ_COMMITTED

import agdc.dbutil as dbutil
import pytz
from EOtools.utils import log_multiline

# Set up logger.
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(logging.INFO)

#
# Module level constants
#

ONE_HOUR = datetime.timedelta(0, 3600)

#
# Symbolic names for tile classes
#

TC_PENDING = 0
TC_SINGLE_SCENE = 1
TC_DELETED = 2
TC_SUPERSEDED = 3
TC_MOSAIC = 4


# pylint: disable=too-many-public-methods
class IngestDBWrapper(dbutil.ConnectionWrapper):
    """IngestDBWrapper: low-level database commands for the ingest process.
    """

    #
    # Constants
    #

    # This is the +- percentage to match within for fuzzy datetime matches.
    FUZZY_MATCH_PERCENTAGE = 0

    #
    # Utility Functions
    #

    def execute_sql_single(self, sql, params):
        """Executes an sql query returning (at most) a single row.

        This creates a cursor, executes the sql query or command specified
        by the operation string 'sql' and parameters 'params', and returns
        the first row of the result, or None if there is no result."""

        cur = self.conn.cursor()
        self.log_sql(cur.mogrify(sql, params))
        cur.execute(sql, params)
        result = cur.fetchone()

        return result

    def execute_sql_multi(self, sql, params):
        """Executes an sql query returning multiple rows.

        This creates a cursor, executes the sql query or command specified
        by the operation string 'sql' and parameters 'params', and returns
        a list of results, or an empty list if there are no results."""

        cur = self.conn.cursor()
        self.log_sql(cur.mogrify(sql, params))
        cur.execute(sql, params)
        result = cur.fetchall()

        return result

    @staticmethod
    def log_sql(sql_query_string):
        """Logs an sql query to the logger at debug level.

        This uses the log_multiline utility function from EOtools.utils.
        sql_query_string is as returned from cursor.mogrify."""

        log_multiline(LOGGER.debug, sql_query_string,
                      title='SQL', prefix='\t')

    #
    # Queries and Commands
    #

    def turn_off_autocommit(self):
        """Turns autocommit off for the database connection.

        Returns the old commit mode in a form suitable for passing to
        the restore_commit_mode method. Note that changing commit mode
        must be done outside a transaction."""

        old_commit_mode = (self.conn.autocommit, self.conn.isolation_level)

        self.conn.autocommit = False
        self.conn.set_isolation_level(ISOLATION_LEVEL_READ_COMMITTED)

        return old_commit_mode

    def turn_on_autocommit(self):
        """Turns autocommit on for the database connection.

        Returns the old commit mode in a form suitable for passing to
        the restore_commit_mode method. Note that changing commit mode
        must be done outside a transaction."""

        old_commit_mode = (self.conn.autocommit, self.conn.isolation_level)

        self.conn.autocommit = True
        self.conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)

        return old_commit_mode

    def restore_commit_mode(self, commit_mode):
        """Restores the commit mode of the database connection.

        The commit mode passed in should have come from either
        the turn_off_autocommit or turn_on_autocommit method. This
        method will then restore the connection commit mode to what
        it was before."""

        (autocommit, isolation_level) = commit_mode

        self.conn.autocommit = autocommit
        self.conn.set_isolation_level(isolation_level)

    def get_satellite_id(self, satellite_tag):
        """Finds a satellite_id in the database.

        This method returns a satellite_id found by matching the
        satellite_tag in the database, or None if it cannot be found."""

        sql = ("SELECT satellite_id FROM satellite\n" +
               "WHERE satellite_tag = %s;")
        params = (satellite_tag,)
        result = self.execute_sql_single(sql, params)
        satellite_id = result[0] if result else None

        return satellite_id

    def get_sensor_id(self, satellite_id, sensor_name):
        """Finds a sensor_id in the database.

        This method returns a sensor_id found by matching the
        satellite_id, sensor_name pair in the database, or None if
        such a pair cannot be found."""

        sql = ("SELECT sensor_id FROM sensor\n" +
               "WHERE satellite_id = %s AND\n" +
               "    sensor_name = %s;")
        params = (satellite_id, sensor_name)
        result = self.execute_sql_single(sql, params)
        sensor_id = result[0] if result else None

        return sensor_id

    def get_level_id(self, level_name):
        """Finds a (processing) level_id in the database.

        This method returns a level_id found by matching the level_name
        in the database, or None if it cannot be found."""

        sql = ("SELECT level_id FROM processing_level\n" +
               "WHERE level_name = %s;")
        params = (level_name,)
        result = self.execute_sql_single(sql, params)
        level_id = result[0] if result else None

        return level_id

    def get_acquisition_id_exact(self, acquisition_dict):
        """Finds the id of an acquisition record in the database.

        Returns an acquisition_id if a record matching the key fields in
        acquisition_dict is found, None otherwise. The key fields are:
        satellite_id, sensor_id, x_ref, y_ref, start_datetime, and
        end_datetime. The acquisition_dict must contain values for all
        of these.

        This query requires an exact match for the start and end datetimes.
        """

        sql = ("SELECT acquisition_id FROM acquisition\n" +
               "WHERE satellite_id = %(satellite_id)s AND\n" +
               "    sensor_id = %(sensor_id)s AND\n" +
               ("    x_ref = %(x_ref)s AND\n"
                if acquisition_dict['x_ref'] is not None
                else "    x_ref is null AND\n") +
               ("    y_ref = %(y_ref)s AND\n"
                if acquisition_dict['y_ref'] is not None
                else "    y_ref is null AND\n") +
               "    start_datetime = %(start_datetime)s AND\n" +
               "    end_datetime = %(end_datetime)s;")
        result = self.execute_sql_single(sql, acquisition_dict)
        acquisition_id = result[0] if result else None

        return acquisition_id

    def get_acquisition_id_fuzzy(self, acquisition_dict):
        """Finds the id of an acquisition record in the database.

        Returns an acquisition_id if a record matching the key fields in
        acquisition_dict is found, None otherwise. The key fields are:
        satellite_id, sensor_id, x_ref, y_ref, start_datetime, and
        end_datetime. The acquisition_dict must contain values for all
        of these.

        This query uses an approximate match for the start and end datetimes.
        """

        aq_length = (acquisition_dict['end_datetime'] -
                     acquisition_dict['start_datetime'])
        delta = (aq_length*self.FUZZY_MATCH_PERCENTAGE)/100

        params = dict(acquisition_dict)
        params['delta'] = delta

        sql = ("SELECT acquisition_id FROM acquisition\n" +
               "WHERE satellite_id = %(satellite_id)s AND\n" +
               "    sensor_id = %(sensor_id)s AND\n" +
               ("    x_ref = %(x_ref)s AND\n"
                if params['x_ref'] is not None
                else "    x_ref is null AND\n") +
               ("    y_ref = %(y_ref)s AND\n"
                if params['y_ref'] is not None
                else "    y_ref is null AND\n") +
               "    start_datetime BETWEEN\n" +
               "        %(start_datetime)s - %(delta)s AND\n" +
               "        %(start_datetime)s + %(delta)s AND\n" +
               "    end_datetime BETWEEN\n" +
               "        %(end_datetime)s - %(delta)s AND\n" +
               "        %(end_datetime)s + %(delta)s;")
        result = self.execute_sql_single(sql, params)
        acquisition_id = result[0] if result else None

        return acquisition_id

    def insert_acquisition_record(self, acquisition_dict):
        """Creates a new acquisition record in the database.

        The values of the fields in the new record are taken from
        acquisition_dict. Returns the acquisition_id of the new record."""

        # Columns to be inserted. If gcp_count or mtl_text are empty, we
        # exclude them from the list, so they pick up the defaults instead.
        column_list = ['acquisition_id',
                       'satellite_id',
                       'sensor_id',
                       'x_ref',
                       'y_ref',
                       'start_datetime',
                       'end_datetime',
                       'll_lon',
                       'll_lat',
                       'lr_lon',
                       'lr_lat',
                       'ul_lon',
                       'ul_lat',
                       'ur_lon',
                       'ur_lat'
                       ]
        if acquisition_dict['gcp_count'] is not None:
            column_list.append('gcp_count')
        if acquisition_dict['mtl_text'] is not None:
            column_list.append('mtl_text')
        columns = "(" + ",\n".join(column_list) + ")"

        # Values are taken from the acquisition_dict, with keys the same
        # as the column name, except for acquisition_id, which is the next
        # value in the acquisition_id_seq sequence.
        value_list = []
        for column in column_list:
            if column == 'acquisition_id':
                value_list.append("nextval('acquisition_id_seq')")
            else:
                value_list.append("%(" + column + ")s")
        values = "(" + ",\n".join(value_list) + ")"

        sql = ("INSERT INTO acquisition " + columns + "\n" +
               "VALUES " + values + "\n" +
               "RETURNING acquisition_id;")

        result = self.execute_sql_single(sql, acquisition_dict)
        acquisition_id = result[0]

        return acquisition_id

    def get_dataset_id(self, dataset_dict):
        """Finds the id of a dataset record in the database.

        Returns a dataset_id if a record matching the key fields in
        dataset_dict is found, None otherwise. The key fields are:
        acquisition_id and level_id. The dataset_dict must contain
        values for both of these."""

        sql = ("SELECT dataset_id FROM dataset\n" +
               "WHERE acquisition_id = %(acquisition_id)s AND\n" +
               "    level_id = %(level_id)s;")
        result = self.execute_sql_single(sql, dataset_dict)
        dataset_id = result[0] if result else None

        return dataset_id

    def dataset_older_than_database(self,
                                    dataset_id,
                                    disk_datetime_processed,
                                    tile_class_filter=None):
        """Compares the datetime_processed of the dataset on disk with that
        on the database.

        The database time is the earliest of either the datetime_processed
        field from the dataset table or the earliest tile.ctime field for
        the dataset's tiles. Tiles considered are restricted to those with
        tile_class_ids listed in tile_class_filter if it is non-empty.

        Returns tuple (disk_datetime_processed, database_datetime_processed,
        tile_ingested_datetime) if no ingestion is required, or None if
        ingestion is required.
        """

        sql_dtp = ("SELECT datetime_processed FROM dataset\n" +
                   "WHERE dataset_id = %s;")
        result = self.execute_sql_single(sql_dtp, (dataset_id,))
        database_datetime_processed = result[0]
        if database_datetime_processed < disk_datetime_processed:
            return None

        # The database's dataset record is newer than what is on disk.
        # Consider whether the tile records are older than the dataset
        # on disk. Make the dataset's datetime_processed timezone-aware.
        utc = pytz.timezone("UTC")
        disk_datetime_processed = utc.localize(disk_datetime_processed)

        sql_ctime = ("SELECT MIN(ctime) FROM tile\n" +
                     "WHERE dataset_id = %(dataset_id)s\n" +
                     ("AND tile_class_id IN %(tile_class_filter)s\n"
                      if tile_class_filter else "") +
                     ";"
                     )
        params = {'dataset_id': dataset_id,
                  'tile_class_filter': tuple(tile_class_filter or ())
                  }
        result = self.execute_sql_single(sql_ctime, params)
        min_ctime = result[0]
        if min_ctime is None:
            return None
        if min_ctime < disk_datetime_processed:
            return None

        # Everything in the database is at least as recent as the dataset
        # on disk, so no re-ingestion is required. Return a tuple
        # containing the relevant times.
        return (disk_datetime_processed,
                utc.localize(database_datetime_processed),
                min_ctime)

    def insert_dataset_record(self, dataset_dict):
        """Creates a new dataset record in the database.

        The values of the fields in the new record are taken from
        dataset_dict. Returns the dataset_id of the new record."""

        # Columns to be inserted.
        column_list = ['dataset_id',
                       'acquisition_id',
                       'dataset_path',
                       'level_id',
                       'datetime_processed',
                       'dataset_size',
                       'crs',
                       'll_x',
                       'll_y',
                       'lr_x',
                       'lr_y',
                       'ul_x',
                       'ul_y',
                       'ur_x',
                       'ur_y',
                       'x_pixels',
                       'y_pixels',
                       'xml_text']
        columns = "(" + ",\n".join(column_list) + ")"

        # Values are taken from the dataset_dict, with keys the same
        # as the column name, except for dataset_id, which is the next
        # value in the dataset_id_seq sequence.
        value_list = []
        for column in column_list:
            if column == 'dataset_id':
                value_list.append("nextval('dataset_id_seq')")
            else:
                value_list.append("%(" + column + ")s")
        values = "(" + ",\n".join(value_list) + ")"

        sql = ("INSERT INTO dataset " + columns + "\n" +
               "VALUES " + values + "\n" +
               "RETURNING dataset_id;")

        result = self.execute_sql_single(sql, dataset_dict)
        dataset_id = result[0]

        return dataset_id

    def update_dataset_record(self, dataset_dict):
        """Updates an existing dataset record in the database.

        The record to update is identified by dataset_id, which must be
        present in dataset_dict. Its non-key fields are updated to match
        the values in dataset_dict.
        """

        # Columns to be updated
        column_list = ['dataset_path',
                       'datetime_processed',
                       'dataset_size',
                       'crs',
                       'll_x',
                       'll_y',
                       'lr_x',
                       'lr_y',
                       'ul_x',
                       'ul_y',
                       'ur_x',
                       'ur_y',
                       'x_pixels',
                       'y_pixels',
                       'xml_text']
        assign_list = [(col + " = %(" + col + ")s") for col in column_list]
        assignments = ",\n".join(assign_list)

        sql = ("UPDATE dataset\n" +
               "SET " + assignments + "\n" +
               "WHERE dataset_id = %(dataset_id)s" + "\n" +
               "RETURNING dataset_id;")

        self.execute_sql_single(sql, dataset_dict)

    def get_dataset_tile_ids(self, dataset_id, tile_class_filter=()):
        """Returns a list of tile_ids associated with a dataset.

        If tile_class_filter is not an empty tuple then the tile_ids
        returned are restricted to those with tile_class_ids that match
        the tile_class_filter. Otherwise all tile_ids for the dataset
        are returned."""

        sql = ("SELECT tile_id FROM tile\n" +
               "WHERE dataset_id = %(dataset_id)s\n" +
               ("AND tile_class_id IN %(tile_class_filter)s\n"
                if tile_class_filter else "") +
               "ORDER BY tile_id;"
               )
        params = {'dataset_id': dataset_id,
                  'tile_class_filter': tuple(tile_class_filter)
                  }
        result = self.execute_sql_multi(sql, params)
        tile_id_list = [tup[0] for tup in result]

        return tile_id_list

    def get_tile_pathname(self, tile_id):
        """Returns the pathname for a tile."""

        sql = ("SELECT tile_pathname FROM tile\n" +
               "WHERE tile_id = %s;")
        result = self.execute_sql_single(sql, (tile_id,))
        tile_pathname = result[0]

        return tile_pathname

    def remove_tile_record(self, tile_id):
        """Removes a tile record from the database."""

        sql = "DELETE FROM tile WHERE tile_id = %s RETURNING tile_id;"
        self.execute_sql_single(sql, (tile_id,))

    def get_tile_id(self, tile_dict):
        """Finds the id of a tile record in the database.

        Returns a tile_id if a record matching the key fields in tile_dict
        is found, None otherwise. The key fields are: dataset_id, x_index,
        y_index, and tile_type_id. The tile_dict must contain values for
        all of these."""

        sql = ("SELECT tile_id FROM tile\n" +
               "WHERE dataset_id = %(dataset_id)s AND\n" +
               "    x_index = %(x_index)s AND\n" +
               "    y_index = %(y_index)s AND\n" +
               "    tile_type_id = %(tile_type_id)s;")
        result = self.execute_sql_single(sql, tile_dict)
        tile_id = result[0] if result else None

        return tile_id

    def tile_footprint_exists(self, tile_dict):
        """Check the tile footprint table for an existing entry.

        The table is checked for an existing entry with the combination
        (x_index, y_index, tile_type_id). Returns True if such an entry
        exists and False otherwise.
        """

        sql = ("SELECT 1 FROM tile_footprint\n" +
               "WHERE x_index = %(x_index)s AND\n" +
               "    y_index = %(y_index)s AND\n" +
               "    tile_type_id = %(tile_type_id)s;")
        result = self.execute_sql_single(sql, tile_dict)
        footprint_exists = True if result else False

        return footprint_exists

    def insert_tile_footprint(self, footprint_dict):
        """Inserts an entry into the tile_footprint table of the database.

        TODO: describe how bbox generated.
        """
        # TODO Use Alex's code in email to generate bbox

        # Columns to be updated
        column_list = ['x_index',
                       'y_index',
                       'tile_type_id',
                       'x_min',
                       'y_min',
                       'x_max',
                       'y_max',
                       'bbox']
        columns = "(" + ",\n".join(column_list) + ")"

        value_list = []
        for column in column_list:
            if column == 'bbox':
                value_list.append('NULL')
            else:
                value_list.append("%(" + column + ")s")
        values = "(" + ",\n".join(value_list) + ")"

        sql = ("INSERT INTO tile_footprint " + columns + "\n" +
               "VALUES " + values + "\n" +
               "RETURNING x_index;")

        self.execute_sql_single(sql, footprint_dict)

    def insert_tile_record(self, tile_dict):
        """Creates a new tile record in the database.

        The values of the fields in the new record are taken from
        tile_dict. Returns the tile_id of the new record."""

        column_list = ['tile_id',
                       'x_index',
                       'y_index',
                       'tile_type_id',
                       'dataset_id',
                       'tile_pathname',
                       'tile_class_id',
                       'tile_size',
                       'ctime']
        columns = "(" + ",\n".join(column_list) + ")"

        # Values are taken from the tile_dict, with keys the same
        # as the column name, except for tile_id, which is the next
        # value in the tile_id_seq sequence.
        value_list = []
        for column in column_list:
            if column == 'tile_id':
                value_list.append("nextval('tile_id_seq')")
            elif column == 'ctime':
                value_list.append('now()')
            else:
                value_list.append("%(" + column + ")s")
        values = "(" + ",\n".join(value_list) + ")"

        sql = ("INSERT INTO tile " + columns + "\n" +
               "VALUES " + values + "\n" +
               "RETURNING tile_id;")

        result = self.execute_sql_single(sql, tile_dict)
        tile_id = result[0]

        return tile_id

    def get_overlapping_dataset_ids(self,
                                    dataset_id,
                                    delta_t=ONE_HOUR,
                                    tile_class_filter=(1, 3)):
        """Return dataset ids for overlapping datasets (including this
        dataset).

        Given an original dataset specified by 'dataset_id', return the
        list of dataset_ids for datasets that overlap this one. An overlap
        occurs when a tile belonging to a target dataset overlaps in space
        and time with one from the original dataset.

        'delta_t' sets the tolerance for detecting time overlaps. It should
        be a python datetime.timedelta object (obtainable by constructor or
        by subtracting two datetimes).

        Only tiles of a class present in the tuple 'tile_class_filter' are
        considered. Note that if the original dataset has no tiles of the
        relevant types an empty list will be returned. Otherwise the list
        will contain at least the original dataset id.
        """

        sql = ("SELECT DISTINCT od.dataset_id\n" +
               "FROM dataset d\n" +
               "INNER JOIN tile t USING (dataset_id)\n" +
               "INNER JOIN acquisition a USING (acquisition_id)\n" +
               "INNER JOIN tile o ON\n" +
               "    o.x_index = t.x_index AND\n" +
               "    o.y_index = t.y_index AND\n" +
               "    o.tile_type_id = t.tile_type_id\n" +
               "INNER JOIN dataset od ON\n" +
               "    od.dataset_id = o.dataset_id AND\n" +
               "    od.level_id = d.level_id\n" +
               "INNER JOIN acquisition oa ON\n" +
               "    oa.acquisition_id = od.acquisition_id AND\n" +
               "    oa.satellite_id = a.satellite_id\n" +
               "WHERE\n" +
               "    d.dataset_id = %(dataset_id)s\n" +
               ("    AND t.tile_class_id IN %(tile_class_filter)s\n"
                if tile_class_filter else "") +
               ("    AND o.tile_class_id IN %(tile_class_filter)s\n"
                if tile_class_filter else "") +
               "    AND (\n" +
               "        (oa.start_datetime BETWEEN\n" +
               "            a.start_datetime - %(delta_t)s AND\n" +
               "            a.end_datetime + %(delta_t)s)\n" +
               "        OR\n" +
               "        (oa.end_datetime BETWEEN\n" +
               "            a.start_datetime - %(delta_t)s AND\n" +
               "            a.end_datetime + %(delta_t)s)\n" +
               "    )\n" +
               "ORDER BY od.dataset_id;")
        params = {'dataset_id': dataset_id,
                  'delta_t': delta_t,
                  'tile_class_filter': tuple(tile_class_filter or ())
                  }
        result = self.execute_sql_multi(sql, params)
        dataset_id_list = [tup[0] for tup in result]

        return dataset_id_list

    def get_overlapping_tiles_for_dataset(self,
                                          dataset_id,
                                          delta_t=ONE_HOUR,
                                          input_tile_class_filter=None,
                                          output_tile_class_filter=None,
                                          dataset_filter=None):
        """Return a nested dictionary for the tiles overlapping a dataset.

        The top level dictionary is keyed by tile footprint
        (x_index, y_index, tile_type_id). Each entry is a list of tile
        records. Each tile record is a dictionary with entries for
        tile_id, dataset_id, tile_class, tile_pathname, and ctime.

        Arguments:
            dataset_id: id of the dataset to act as the base for the query.
                The input tiles are the ones associated with this dataset.
            delta_t: The tolerance used to detect overlaps in time. This
                should be a python timedelta object (from the datetime
                module).
            input_tile_class_filter: A tuple of tile_class_ids to restrict
                the input tiles. If non-empty, input tiles not matching
                these will be ignored.
            output_tile_class_filter: A tuple of tile_class_ids to restrict
                the output tiles. If non-empty, output tiles not matching
                these will be ignored.
            dataset_filter: A tuple of dataset_ids to restrict the datasets
                that the output tiles belong to. If non-empty, output tiles
                not from these datasets will be ignored. Used to avoid
                operating on tiles belonging to non-locked datasets.
        """

        sql = ("SELECT DISTINCT o.tile_id, o.x_index, o.y_index,\n" +
               "    o.tile_type_id, o.dataset_id, o.tile_pathname,\n" +
               "    o.tile_class_id, o.tile_size, o.ctime,\n" +
               "    oa.start_datetime\n" +
               "FROM tile t\n" +
               "INNER JOIN dataset d USING (dataset_id)\n" +
               "INNER JOIN acquisition a USING (acquisition_id)\n" +
               "INNER JOIN tile o ON\n" +
               "    o.x_index = t.x_index AND\n" +
               "    o.y_index = t.y_index AND\n" +
               "    o.tile_type_id = t.tile_type_id\n" +
               "INNER JOIN dataset od ON\n" +
               "    od.dataset_id = o.dataset_id AND\n" +
               "    od.level_id = d.level_id\n" +
               "INNER JOIN acquisition oa ON\n" +
               "    oa.acquisition_id = od.acquisition_id AND\n" +
               "    oa.satellite_id = a.satellite_id\n" +
               "WHERE\n" +
               "    d.dataset_id = %(dataset_id)s\n" +
               ("    AND od.dataset_id IN %(dataset_filter)s\n"
                if dataset_filter else "") +
               ("    AND t.tile_class_id IN %(input_tile_class_filter)s\n"
                if input_tile_class_filter else "") +
               ("    AND o.tile_class_id IN %(output_tile_class_filter)s\n"
                if output_tile_class_filter else "") +
               "    AND (\n" +
               "        (oa.start_datetime BETWEEN\n" +
               "            a.start_datetime - %(delta_t)s AND\n" +
               "            a.end_datetime + %(delta_t)s)\n" +
               "        OR\n" +
               "        (oa.end_datetime BETWEEN\n" +
               "            a.start_datetime - %(delta_t)s AND\n" +
               "            a.end_datetime + %(delta_t)s)\n" +
               "    )\n" +
               "ORDER BY oa.start_datetime;"
               )
        params = {'dataset_id': dataset_id,
                  'delta_t': delta_t,
                  'input_tile_class_filter':
                      tuple(input_tile_class_filter or ()),
                  'output_tile_class_filter':
                      tuple(output_tile_class_filter or ()),
                  'dataset_filter': tuple(dataset_filter or ())
                  }
        result = self.execute_sql_multi(sql, params)

        overlap_dict = {}
        for record in result:
            tile_footprint = tuple(record[1:4])
            tile_record = {'tile_id': record[0],
                           'x_index': record[1],
                           'y_index': record[2],
                           'tile_type_id': record[3],
                           'dataset_id': record[4],
                           'tile_pathname': record[5],
                           'tile_class_id': record[6],
                           'tile_size': record[7],
                           'ctime': record[8]
                           }
            if tile_footprint not in overlap_dict:
                overlap_dict[tile_footprint] = []
            overlap_dict[tile_footprint].append(tile_record)

        return overlap_dict

    def update_tile_class(self, tile_id, new_tile_class_id):
        """Update the tile_class_id of a tile to a new value."""

        sql = ("UPDATE tile\n" +
               "SET tile_class_id = %(new_tile_class_id)s\n" +
               "WHERE tile_id = %(tile_id)s\n" +
               "RETURNING tile_id;"
               )
        params = {'tile_id': tile_id,
                  'new_tile_class_id': new_tile_class_id
                  }
        self.execute_sql_single(sql, params)
ama-jharrison/agdc
agdc/agdc/abstract_ingester/ingest_db_wrapper.py
Python
apache-2.0
30,969
# Copyright 2014: Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from rally import exceptions from rally.plugins.openstack.context.keystone import roles from tests.unit import fakes from tests.unit import test CTX = "rally.plugins.openstack.context.keystone.roles" class RoleGeneratorTestCase(test.TestCase): def create_default_roles_and_patch_add_remove_functions(self, fc): fc.keystone().roles.add_user_role = mock.MagicMock() fc.keystone().roles.remove_user_role = mock.MagicMock() fc.keystone().roles.create("r1", "test_role1") fc.keystone().roles.create("r2", "test_role2") self.assertEqual(2, len(fc.keystone().roles.list())) @property def context(self): return { "config": { "roles": [ "test_role1", "test_role2" ] }, "admin": {"credential": mock.MagicMock()}, "task": mock.MagicMock() } @mock.patch("%s.osclients" % CTX) def test_add_role(self, mock_osclients): fc = fakes.FakeClients() mock_osclients.Clients.return_value = fc self.create_default_roles_and_patch_add_remove_functions(fc) ctx = roles.RoleGenerator(self.context) ctx.context["users"] = [{"id": "u1", "tenant_id": "t1"}, {"id": "u2", "tenant_id": "t2"}] result = ctx._add_role(mock.MagicMock(), self.context["config"]["roles"][0]) expected = {"id": "r1", "name": "test_role1"} self.assertEqual(expected, result) @mock.patch("%s.osclients" % CTX) def test_add_role_which_does_not_exist(self, mock_osclients): fc = fakes.FakeClients() mock_osclients.Clients.return_value = fc self.create_default_roles_and_patch_add_remove_functions(fc) ctx = roles.RoleGenerator(self.context) ctx.context["users"] = [{"id": "u1", "tenant_id": "t1"}, {"id": "u2", "tenant_id": "t2"}] ex = self.assertRaises(exceptions.NoSuchRole, ctx._add_role, mock.MagicMock(), "unknown_role") expected = "There is no role with name `unknown_role`." 
        self.assertEqual(expected, str(ex))

    @mock.patch("%s.osclients" % CTX)
    def test_remove_role(self, mock_osclients):
        role = mock.MagicMock()
        fc = fakes.FakeClients()
        mock_osclients.Clients.return_value = fc
        self.create_default_roles_and_patch_add_remove_functions(fc)

        ctx = roles.RoleGenerator(self.context)
        ctx.context["users"] = [{"id": "u1", "tenant_id": "t1"},
                                {"id": "u2", "tenant_id": "t2"}]
        ctx._remove_role(mock.MagicMock(), role)

        calls = [
            mock.call("u1", role["id"], tenant="t1"),
            mock.call("u2", role["id"], tenant="t2"),
        ]
        mock_keystone = mock_osclients.Clients().keystone()
        mock_keystone.roles.remove_user_role.assert_has_calls(calls)

    @mock.patch("%s.osclients" % CTX)
    def test_setup_and_cleanup(self, mock_osclients):
        fc = fakes.FakeClients()
        mock_osclients.Clients.return_value = fc
        self.create_default_roles_and_patch_add_remove_functions(fc)

        with roles.RoleGenerator(self.context) as ctx:
            ctx.context["users"] = [{"id": "u1", "tenant_id": "t1"},
                                    {"id": "u2", "tenant_id": "t2"}]

            ctx.setup()

            calls = [
                mock.call("u1", "r1", tenant="t1"),
                mock.call("u2", "r1", tenant="t2"),
                mock.call("u1", "r2", tenant="t1"),
                mock.call("u2", "r2", tenant="t2")
            ]
            fc.keystone().roles.add_user_role.assert_has_calls(calls)

            self.assertEqual(
                4, fc.keystone().roles.add_user_role.call_count)
            self.assertEqual(
                0, fc.keystone().roles.remove_user_role.call_count)

            self.assertEqual(2, len(ctx.context["roles"]))
            self.assertEqual(2, len(fc.keystone().roles.list()))

        # Cleanup (called by the context manager on exit)
        self.assertEqual(2, len(fc.keystone().roles.list()))
        self.assertEqual(4, fc.keystone().roles.add_user_role.call_count)
        self.assertEqual(4, fc.keystone().roles.remove_user_role.call_count)
        calls = [
            mock.call("u1", "r1", tenant="t1"),
            mock.call("u2", "r1", tenant="t2"),
            mock.call("u1", "r2", tenant="t1"),
            mock.call("u2", "r2", tenant="t2")
        ]
        fc.keystone().roles.remove_user_role.assert_has_calls(calls)
amit0701/rally
tests/unit/plugins/openstack/context/keystone/test_roles.py
Python
apache-2.0
5,275
# python3 # pylint: disable=g-bad-file-header # Copyright 2019 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Analysis for catch.""" from typing import Optional, Sequence from bsuite.experiments.catch import sweep from bsuite.utils import plotting import pandas as pd import plotnine as gg NUM_EPISODES = sweep.NUM_EPISODES BASE_REGRET = 1.6 TAGS = sweep.TAGS def score(df: pd.DataFrame) -> float: """Output a single score for catch.""" return plotting.ave_regret_score( df, baseline_regret=BASE_REGRET, episode=sweep.NUM_EPISODES) def plot_learning(df: pd.DataFrame, sweep_vars: Optional[Sequence[str]] = None) -> gg.ggplot: """Simple learning curves for catch.""" p = plotting.plot_regret_learning( df, sweep_vars=sweep_vars, max_episode=sweep.NUM_EPISODES) p += gg.geom_hline( gg.aes(yintercept=BASE_REGRET), linetype='dashed', alpha=0.4, size=1.75) return p def plot_seeds(df_in: pd.DataFrame, sweep_vars: Optional[Sequence[str]] = None, colour_var: Optional[str] = None) -> gg.ggplot: """Plot the returns through time individually by run.""" df = df_in.copy() df['average_return'] = 1.0 - (df.total_regret.diff() / df.episode.diff()) p = plotting.plot_individual_returns( df_in=df, max_episode=NUM_EPISODES, return_column='average_return', colour_var=colour_var, yintercept=1., sweep_vars=sweep_vars, ) return p + gg.ylab('average episodic return')
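
# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module). It assumes a bsuite
# results DataFrame with at least the 'episode' and 'total_regret' columns
# that plot_seeds() manipulates above; the exact column requirements of the
# plotting helpers are an assumption here, not confirmed by this file.
#
# import pandas as pd
# df = pd.DataFrame({'episode': range(1, NUM_EPISODES + 1),
#                    'total_regret': [0.5 * e for e in range(1, NUM_EPISODES + 1)]})
# print(score(df))       # single scalar summary for the catch experiment
# p = plot_learning(df)  # regret learning curve with the BASE_REGRET baseline
# ---------------------------------------------------------------------------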
deepmind/bsuite
bsuite/experiments/catch/analysis.py
Python
apache-2.0
2,122
#!/usr/bin/env python3
import argparse
import logging
import os
import sys

########################################
# add the script folder to the python path (before the pylib imports below,
# so that they can be resolved), allowing the script to run from any folder
# location without the need to mess with the PYTHONPATH env variable
script_path = os.path.realpath(__file__)
sys.path.insert(0, os.path.dirname(script_path))
# print('\n'.join(sys.path))
#########################################

from pylib.common.utils.misc_utils import MiscUtils
from pylib.pygitlab import Gitlab

# Author: Adam Richards
# find group id using: bin/gitlab_groups.py --list
# export call: bin/gitlab_group_vars.py --id 793 --export --file ~/domino_vars.json
# import call: bin/gitlab_group_vars.py --id 1659 --import --file ~/domino_vars.json

try:
    gl = Gitlab()
    utils = MiscUtils()
except Exception as e:
    # nothing below can work without these helpers, so fail fast
    print(e, file=sys.stderr)
    sys.exit(1)

if __name__ == "__main__":

    logging.basicConfig(
        level=logging.ERROR,
        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')

    # raw output flag
    raw_output = False
    # debug flag
    debug_enabled = False

    parser = argparse.ArgumentParser(
        description="Program: {0}".format(sys.argv[0]))
    # options to load into 'command' property
    parser.add_argument('--list', dest='command', action='store_const',
                        const='LIST', default=None,
                        help='list all group variables')
    parser.add_argument('--export', dest='command', action='store_const',
                        const='EXPORT', default=None,
                        help='export all group variables to a file')
    parser.add_argument('--import', dest='command', action='store_const',
                        const='IMPORT', default=None,
                        help='import all group variables from a file')

    parser.add_argument('--file', dest='filename', action='store',
                        help='file to be used for import or export')
    parser.add_argument('--id', dest='group_id', action='store',
                        help='gitlab group id')
    parser.add_argument('--debug', dest='debug_enabled', action='store_const',
                        const=True, default=False,
                        help='enable debug logging')
    parser.add_argument('--json', dest='json_output', action='store_const',
                        const=True, default=False,
                        help='output json results')

    args = parser.parse_args()

    # check if 0 args passed to script. the script name is always argv[0]
    if len(sys.argv) == 1:
        parser.print_help()
        exit(1)

    debug_enabled = args.debug_enabled
    if debug_enabled:
        gl.setLogLevel(logging.DEBUG)

    json_output = args.json_output
    filename = args.filename

    if debug_enabled:
        print("Command line arguments object: \n{0}".format(
            utils.object_to_json(args)))

    gl.setup()

    # process --export command
    if args.command == 'EXPORT':
        group_vars_obj = gl.group_vars_get(args.group_id, page_size=20)
        utils.file_write_as_json(filename, group_vars_obj)
        exit(0)

    # process --import command
    if args.command == 'IMPORT':
        group_vars_obj = utils.file_read_as_json(filename)
        result_ar = gl.group_vars_put(args.group_id, group_vars_obj)
        if json_output:
            result_ar_json = utils.object_to_json_pretty(result_ar)
            print(result_ar_json)
        else:
            for item in result_ar:
                print("key: {0}, status: {1}".format(
                    item['key'], item['status']))
        exit(0)

    # process --list command
    if args.command == 'LIST':
        group_vars_obj = gl.group_vars_get(args.group_id, page_size=20)
        if json_output:
            group_var_json = utils.object_to_json_pretty(group_vars_obj)
            print(group_var_json)
        else:
            for item in group_vars_obj:
                # print("Name: {0}, Id: {1}, Path: {2}".format(item['name'], item['id'], item['web_url']))
                print("{0}".format(item['key']))
        exit(0)
abrichards5/ABRUnixScripts
bin/gitlab_group_vars.py
Python
apache-2.0
4,029
from utils.header import MagicField, Field from load_command import LoadCommandCommand, LoadCommandHeader class SourceVersionField(Field): def display(self, header): if self.mnemonic: value = self._get_value(header) a = (value >> 40) & 0xffffff b = (value >> 30) & 0x3ff c = (value >> 20) & 0x3ff d = (value >> 10) & 0x3ff e = value & 0x3ff return '%d.%d.%d.%d.%d' % (a, b, c, d, e) return super(SourceVersionField, self).display(header) class SourceVersionCommand(LoadCommandHeader): ENDIAN = None FIELDS = ( MagicField('cmd', 'I', {LoadCommandCommand.COMMANDS['LC_SOURCE_VERSION']: 'LC_SOURCE_VERSION'}), Field('cmdsize', 'I'), SourceVersionField('version', 'Q'), ) def __init__(self, bytes_=None, **kwargs): self.version = None super(SourceVersionCommand, self).__init__('source_version_command', bytes_, **kwargs)
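
# Hedged worked example (not part of the original file): display() above
# unpacks a 64-bit source version as 24.10.10.10.10 bits. Packing 1.2.3.4.5
# with the inverse shifts and decoding it the same way round-trips:
#
# value = (1 << 40) | (2 << 30) | (3 << 20) | (4 << 10) | 5
# parts = ((value >> 40) & 0xffffff, (value >> 30) & 0x3ff,
#          (value >> 20) & 0x3ff, (value >> 10) & 0x3ff, value & 0x3ff)
# assert '%d.%d.%d.%d.%d' % parts == '1.2.3.4.5'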
hkkwok/MachOTool
mach_o/headers/source_version_command.py
Python
apache-2.0
983
#!/usr/bin/python2.7 import os import sys base_dir = os.path.dirname(os.path.realpath(__file__)) python_modules_dir = os.path.join(base_dir,"python-modules") sys.path.append(python_modules_dir) curdir = os.path.abspath(os.path.dirname(sys.argv[0])) from ACEStream.Plugin.EngineConsole import start apptype = 'acestream' start(apptype, curdir)
aplicatii-romanesti/allinclusive-kodi-pi
.kodi/userdata/addon_data/plugin.video.p2p-streams/acestream/ace/start.py
Python
apache-2.0
345
# Copyright 2019, The TensorFlow Federated Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Any, Iterable, List, Tuple, Type from absl.testing import absltest from absl.testing import parameterized import tensorflow as tf from tensorflow_federated.proto.v0 import computation_pb2 as pb from tensorflow_federated.python.common_libs import structure from tensorflow_federated.python.core.impl.compiler import intrinsic_defs from tensorflow_federated.python.core.impl.computation import computation_impl from tensorflow_federated.python.core.impl.context_stack import context_stack_impl from tensorflow_federated.python.core.impl.executors import eager_tf_executor from tensorflow_federated.python.core.impl.executors import executor_test_utils from tensorflow_federated.python.core.impl.executors import executor_value_base from tensorflow_federated.python.core.impl.executors import federated_resolving_strategy from tensorflow_federated.python.core.impl.executors import federating_executor from tensorflow_federated.python.core.impl.executors import reference_resolving_executor from tensorflow_federated.python.core.impl.types import computation_types from tensorflow_federated.python.core.impl.types import placements from tensorflow_federated.python.core.impl.types import type_serialization def all_isinstance(objs: Iterable[Any], classinfo: Type[Any]) -> bool: return all(isinstance(x, classinfo) for x in objs) def create_test_executor( number_of_clients: int = 3) -> federating_executor.FederatingExecutor: def create_bottom_stack(): executor = eager_tf_executor.EagerTFExecutor() return reference_resolving_executor.ReferenceResolvingExecutor(executor) factory = federated_resolving_strategy.FederatedResolvingStrategy.factory({ placements.SERVER: create_bottom_stack(), placements.CLIENTS: [ create_bottom_stack() for _ in range(number_of_clients) ], }) return federating_executor.FederatingExecutor(factory, create_bottom_stack()) def get_named_parameters_for_supported_intrinsics() -> List[Tuple[str, Any]]: # pyformat: disable return [ ('intrinsic_def_federated_aggregate', *executor_test_utils.create_whimsy_intrinsic_def_federated_aggregate()), ('intrinsic_def_federated_apply', *executor_test_utils.create_whimsy_intrinsic_def_federated_apply()), ('intrinsic_def_federated_broadcast', *executor_test_utils.create_whimsy_intrinsic_def_federated_broadcast()), ('intrinsic_def_federated_eval_at_clients', *executor_test_utils.create_whimsy_intrinsic_def_federated_eval_at_clients()), ('intrinsic_def_federated_eval_at_server', *executor_test_utils.create_whimsy_intrinsic_def_federated_eval_at_server()), ('intrinsic_def_federated_map', *executor_test_utils.create_whimsy_intrinsic_def_federated_map()), ('intrinsic_def_federated_map_all_equal', *executor_test_utils.create_whimsy_intrinsic_def_federated_map_all_equal()), ('intrinsic_def_federated_mean', *executor_test_utils.create_whimsy_intrinsic_def_federated_mean()), ('intrinsic_def_federated_select', 
*executor_test_utils.create_whimsy_intrinsic_def_federated_select()), ('intrinsic_def_federated_sum', *executor_test_utils.create_whimsy_intrinsic_def_federated_sum()), ('intrinsic_def_federated_value_at_clients', *executor_test_utils.create_whimsy_intrinsic_def_federated_value_at_clients()), ('intrinsic_def_federated_value_at_server', *executor_test_utils.create_whimsy_intrinsic_def_federated_value_at_server()), ('intrinsic_def_federated_weighted_mean', *executor_test_utils.create_whimsy_intrinsic_def_federated_weighted_mean()), ('intrinsic_def_federated_zip_at_clients', *executor_test_utils.create_whimsy_intrinsic_def_federated_zip_at_clients()), ('intrinsic_def_federated_zip_at_server', *executor_test_utils.create_whimsy_intrinsic_def_federated_zip_at_server()), ] # pyformat: enable class FederatingExecutorCreateValueTest(executor_test_utils.AsyncTestCase, parameterized.TestCase): # pyformat: disable @parameterized.named_parameters([ ('placement_literal', *executor_test_utils.create_whimsy_placement_literal()), ('computation_intrinsic', *executor_test_utils.create_whimsy_computation_intrinsic()), ('computation_lambda', *executor_test_utils.create_whimsy_computation_lambda_empty()), ('computation_tensorflow', *executor_test_utils.create_whimsy_computation_tensorflow_empty()), ('federated_type_at_clients', *executor_test_utils.create_whimsy_value_at_clients()), ('federated_type_at_clients_all_equal', *executor_test_utils.create_whimsy_value_at_clients_all_equal()), ('federated_type_at_server', *executor_test_utils.create_whimsy_value_at_server()), ('unplaced_type', *executor_test_utils.create_whimsy_value_unplaced()), ] + get_named_parameters_for_supported_intrinsics()) # pyformat: enable def test_returns_value_with_value_and_type(self, value, type_signature): executor = create_test_executor() result = self.run_sync(executor.create_value(value, type_signature)) self.assertIsInstance(result, executor_value_base.ExecutorValue) self.assertEqual(result.type_signature.compact_representation(), type_signature.compact_representation()) # pyformat: disable @parameterized.named_parameters([ ('placement_literal', *executor_test_utils.create_whimsy_placement_literal()), ('computation_intrinsic', *executor_test_utils.create_whimsy_computation_intrinsic()), ('computation_lambda', *executor_test_utils.create_whimsy_computation_lambda_empty()), ('computation_tensorflow', *executor_test_utils.create_whimsy_computation_tensorflow_empty()), ]) # pyformat: enable def test_returns_value_with_value_only(self, value, type_signature): executor = create_test_executor() result = self.run_sync(executor.create_value(value)) self.assertIsInstance(result, executor_value_base.ExecutorValue) self.assertEqual(result.type_signature.compact_representation(), type_signature.compact_representation()) # pyformat: disable @parameterized.named_parameters([ ('computation_intrinsic', *executor_test_utils.create_whimsy_computation_intrinsic()), ('computation_lambda', *executor_test_utils.create_whimsy_computation_lambda_empty()), ('computation_tensorflow', *executor_test_utils.create_whimsy_computation_tensorflow_empty()), ]) # pyformat: enable def test_returns_value_with_computation_impl(self, proto, type_signature): executor = create_test_executor() value = computation_impl.ConcreteComputation( proto, context_stack_impl.context_stack) result = self.run_sync(executor.create_value(value, type_signature)) self.assertIsInstance(result, executor_value_base.ExecutorValue) 
self.assertEqual(result.type_signature.compact_representation(), type_signature.compact_representation()) # pyformat: disable @parameterized.named_parameters([ ('federated_type_at_clients', *executor_test_utils.create_whimsy_value_at_clients()), ('federated_type_at_clients_all_equal', *executor_test_utils.create_whimsy_value_at_clients_all_equal()), ('federated_type_at_server', *executor_test_utils.create_whimsy_value_at_server()), ('unplaced_type', *executor_test_utils.create_whimsy_value_unplaced()), ] + get_named_parameters_for_supported_intrinsics()) # pyformat: enable def test_raises_type_error_with_value_only(self, value, type_signature): del type_signature # Unused. executor = create_test_executor() with self.assertRaises(TypeError): self.run_sync(executor.create_value(value)) # pyformat: disable @parameterized.named_parameters([ ('placement_literal', *executor_test_utils.create_whimsy_placement_literal()), ('computation_call', *executor_test_utils.create_whimsy_computation_call()), ('computation_intrinsic', *executor_test_utils.create_whimsy_computation_intrinsic()), ('computation_lambda', *executor_test_utils.create_whimsy_computation_lambda_empty()), ('computation_selection', *executor_test_utils.create_whimsy_computation_selection()), ('computation_tensorflow', *executor_test_utils.create_whimsy_computation_tensorflow_empty()), ('computation_tuple', *executor_test_utils.create_whimsy_computation_tuple()), ('federated_type_at_clients', *executor_test_utils.create_whimsy_value_at_clients()), ('federated_type_at_clients_all_equal', *executor_test_utils.create_whimsy_value_at_clients_all_equal()), ('federated_type_at_server', *executor_test_utils.create_whimsy_value_at_server()), ('unplaced_type', *executor_test_utils.create_whimsy_value_unplaced()), ] + get_named_parameters_for_supported_intrinsics()) # pyformat: enable def test_raises_type_error_with_value_and_bad_type(self, value, type_signature): del type_signature # Unused. executor = create_test_executor() bad_type_signature = computation_types.TensorType(tf.string) with self.assertRaises(TypeError): self.run_sync(executor.create_value(value, bad_type_signature)) # pyformat: disable @parameterized.named_parameters([ ('computation_call', *executor_test_utils.create_whimsy_computation_call()), ('computation_placement', *executor_test_utils.create_whimsy_computation_placement()), ('computation_reference', *executor_test_utils.create_whimsy_computation_reference()), ('computation_selection', *executor_test_utils.create_whimsy_computation_selection()), ('computation_tuple', *executor_test_utils.create_whimsy_computation_tuple()), ]) # pyformat: enable def test_raises_value_error_with_value(self, value, type_signature): executor = create_test_executor() with self.assertRaises(ValueError): self.run_sync(executor.create_value(value, type_signature)) def test_raises_value_error_with_unrecognized_computation_intrinsic(self): executor = create_test_executor() type_signature = computation_types.TensorType(tf.int32) # A `ValueError` will be raised because `create_value` can not recognize the # following intrinsic, because it has not been added to the intrinsic # registry. 
    value = pb.Computation(
        type=type_serialization.serialize_type(type_signature),
        intrinsic=pb.Intrinsic(uri='unregistered_intrinsic'))

    with self.assertRaises(ValueError):
      self.run_sync(executor.create_value(value, type_signature))

  def test_raises_value_error_with_unrecognized_computation_selection(self):
    executor = create_test_executor()
    source, _ = executor_test_utils.create_whimsy_computation_tuple()
    type_signature = computation_types.StructType([])
    # A `ValueError` will be raised because `create_value` can not handle the
    # following `pb.Selection`, because it does not set either a name or an
    # index field.
    value = pb.Computation(
        type=type_serialization.serialize_type(type_signature),
        selection=pb.Selection(source=source))
    with self.assertRaises(ValueError):
      self.run_sync(executor.create_value(value, type_signature))

  # pyformat: disable
  @parameterized.named_parameters([
      ('intrinsic_def_federated_aggregate',
       *executor_test_utils.create_whimsy_intrinsic_def_federated_aggregate()),
      ('intrinsic_def_federated_apply',
       *executor_test_utils.create_whimsy_intrinsic_def_federated_apply()),
      ('intrinsic_def_federated_eval_at_server',
       *executor_test_utils.create_whimsy_intrinsic_def_federated_eval_at_server()),
      ('intrinsic_def_federated_mean',
       *executor_test_utils.create_whimsy_intrinsic_def_federated_mean()),
      ('intrinsic_def_federated_sum',
       *executor_test_utils.create_whimsy_intrinsic_def_federated_sum()),
      ('intrinsic_def_federated_value_at_server',
       *executor_test_utils.create_whimsy_intrinsic_def_federated_value_at_server()),
      ('intrinsic_def_federated_weighted_mean',
       *executor_test_utils.create_whimsy_intrinsic_def_federated_weighted_mean()),
      ('intrinsic_def_federated_zip_at_server',
       *executor_test_utils.create_whimsy_intrinsic_def_federated_zip_at_server()),
      ('federated_type_at_server',
       *executor_test_utils.create_whimsy_value_at_server()),
  ])
  # pyformat: enable
  def test_raises_value_error_with_no_target_executor_server(
      self, value, type_signature):
    factory = federated_resolving_strategy.FederatedResolvingStrategy.factory({
        placements.CLIENTS: eager_tf_executor.EagerTFExecutor(),
    })
    executor = federating_executor.FederatingExecutor(
        factory, eager_tf_executor.EagerTFExecutor())
    value, type_signature = executor_test_utils.create_whimsy_value_at_server()

    with self.assertRaises(ValueError):
      self.run_sync(executor.create_value(value, type_signature))

  def test_raises_value_error_with_unexpected_federated_type_at_clients(self):
    executor = create_test_executor()
    value = [10, 20]
    type_signature = computation_types.at_clients(tf.int32)

    with self.assertRaises(ValueError):
      self.run_sync(executor.create_value(value, type_signature))

  def test_raises_type_error_with_unexpected_federated_type_at_clients_all_equal(
      self):
    executor = create_test_executor()
    value = [10] * 3
    type_signature = computation_types.at_clients(tf.int32, all_equal=True)

    with self.assertRaises(TypeError):
      self.run_sync(executor.create_value(value, type_signature))


class FederatingExecutorCreateCallTest(executor_test_utils.AsyncTestCase,
                                       parameterized.TestCase):

  # pyformat: disable
  @parameterized.named_parameters([
      ('intrinsic_def_federated_aggregate',
       *executor_test_utils.create_whimsy_intrinsic_def_federated_aggregate(),
       [executor_test_utils.create_whimsy_value_at_clients(),
        executor_test_utils.create_whimsy_value_unplaced(),
        executor_test_utils.create_whimsy_computation_tensorflow_add(),
        executor_test_utils.create_whimsy_computation_tensorflow_add(),
executor_test_utils.create_whimsy_computation_tensorflow_identity()], 43.0), ('intrinsic_def_federated_apply', *executor_test_utils.create_whimsy_intrinsic_def_federated_apply(), [executor_test_utils.create_whimsy_computation_tensorflow_identity(), executor_test_utils.create_whimsy_value_at_server()], 10.0), ('intrinsic_def_federated_broadcast', *executor_test_utils.create_whimsy_intrinsic_def_federated_broadcast(), [executor_test_utils.create_whimsy_value_at_server()], 10.0), ('intrinsic_def_federated_eval_at_clients', *executor_test_utils.create_whimsy_intrinsic_def_federated_eval_at_clients(), [executor_test_utils.create_whimsy_computation_tensorflow_constant()], [10.0] * 3), ('intrinsic_def_federated_eval_at_server', *executor_test_utils.create_whimsy_intrinsic_def_federated_eval_at_server(), [executor_test_utils.create_whimsy_computation_tensorflow_constant()], 10.0), ('intrinsic_def_federated_map', *executor_test_utils.create_whimsy_intrinsic_def_federated_map(), [executor_test_utils.create_whimsy_computation_tensorflow_identity(), executor_test_utils.create_whimsy_value_at_clients()], [10.0, 11.0, 12.0]), ('intrinsic_def_federated_map_all_equal', *executor_test_utils.create_whimsy_intrinsic_def_federated_map_all_equal(), [executor_test_utils.create_whimsy_computation_tensorflow_identity(), executor_test_utils.create_whimsy_value_at_clients_all_equal()], 10.0), ('intrinsic_def_federated_mean', *executor_test_utils.create_whimsy_intrinsic_def_federated_mean(), [executor_test_utils.create_whimsy_value_at_clients()], 11.0), ('intrinsic_def_federated_select', *executor_test_utils.create_whimsy_intrinsic_def_federated_select(), executor_test_utils.create_whimsy_federated_select_args(), executor_test_utils.create_whimsy_federated_select_expected_result(), ), ('intrinsic_def_federated_sum', *executor_test_utils.create_whimsy_intrinsic_def_federated_sum(), [executor_test_utils.create_whimsy_value_at_clients()], 33.0), ('intrinsic_def_federated_value_at_clients', *executor_test_utils.create_whimsy_intrinsic_def_federated_value_at_clients(), [executor_test_utils.create_whimsy_value_unplaced()], 10.0), ('intrinsic_def_federated_value_at_server', *executor_test_utils.create_whimsy_intrinsic_def_federated_value_at_server(), [executor_test_utils.create_whimsy_value_unplaced()], 10.0), ('intrinsic_def_federated_weighted_mean', *executor_test_utils.create_whimsy_intrinsic_def_federated_weighted_mean(), [executor_test_utils.create_whimsy_value_at_clients(), executor_test_utils.create_whimsy_value_at_clients()], 11.060606), ('intrinsic_def_federated_zip_at_clients', *executor_test_utils.create_whimsy_intrinsic_def_federated_zip_at_clients(), [executor_test_utils.create_whimsy_value_at_clients(), executor_test_utils.create_whimsy_value_at_clients()], [structure.Struct([(None, 10.0), (None, 10.0)]), structure.Struct([(None, 11.0), (None, 11.0)]), structure.Struct([(None, 12.0), (None, 12.0)])]), ('intrinsic_def_federated_zip_at_server', *executor_test_utils.create_whimsy_intrinsic_def_federated_zip_at_server(), [executor_test_utils.create_whimsy_value_at_server(), executor_test_utils.create_whimsy_value_at_server()], structure.Struct([(None, 10.0), (None, 10.0)])), ('computation_intrinsic', *executor_test_utils.create_whimsy_computation_intrinsic(), [executor_test_utils.create_whimsy_computation_tensorflow_constant()], 10.0), ('computation_tensorflow', *executor_test_utils.create_whimsy_computation_tensorflow_identity(), [executor_test_utils.create_whimsy_value_unplaced()], 10.0), ]) # pyformat: enable 
def test_returns_value_with_comp_and_arg(self, comp, comp_type, args, expected_result): executor = create_test_executor() comp = self.run_sync(executor.create_value(comp, comp_type)) elements = [self.run_sync(executor.create_value(*x)) for x in args] if len(elements) > 1: arg = self.run_sync(executor.create_struct(elements)) else: arg = elements[0] result = self.run_sync(executor.create_call(comp, arg)) self.assertIsInstance(result, executor_value_base.ExecutorValue) self.assertEqual(result.type_signature.compact_representation(), comp_type.result.compact_representation()) actual_result = self.run_sync(result.compute()) self.assert_maybe_list_equal(actual_result, expected_result) def assert_maybe_list_equal(self, actual_result, expected_result): if (all_isinstance([actual_result, expected_result], list) or all_isinstance([actual_result, expected_result], tf.data.Dataset)): for actual_element, expected_element in zip(actual_result, expected_result): self.assert_maybe_list_equal(actual_element, expected_element) else: self.assertEqual(actual_result, expected_result) def test_returns_value_with_intrinsic_def_federated_eval_at_clients_and_random( self): executor = create_test_executor(number_of_clients=3) comp, comp_type = executor_test_utils.create_whimsy_intrinsic_def_federated_eval_at_clients( ) arg, arg_type = executor_test_utils.create_whimsy_computation_tensorflow_random( ) comp = self.run_sync(executor.create_value(comp, comp_type)) arg = self.run_sync(executor.create_value(arg, arg_type)) result = self.run_sync(executor.create_call(comp, arg)) self.assertIsInstance(result, executor_value_base.ExecutorValue) self.assertEqual(result.type_signature.compact_representation(), comp_type.result.compact_representation()) actual_result = self.run_sync(result.compute()) unique_results = set([x.numpy() for x in actual_result]) if len(actual_result) != len(unique_results): self.fail( 'Expected the result to contain different random numbers, found {}.' 
.format(actual_result)) # pyformat: disable @parameterized.named_parameters([ ('computation_tensorflow', *executor_test_utils.create_whimsy_computation_tensorflow_empty()), ]) # pyformat: enable def test_returns_value_with_comp_only(self, comp, comp_type): executor = create_test_executor() comp = self.run_sync(executor.create_value(comp, comp_type)) result = self.run_sync(executor.create_call(comp)) self.assertIsInstance(result, executor_value_base.ExecutorValue) self.assertEqual(result.type_signature.compact_representation(), comp_type.result.compact_representation()) actual_result = self.run_sync(result.compute()) expected_result = [] self.assertCountEqual(actual_result, expected_result) def test_raises_type_error_with_unembedded_comp(self): executor = create_test_executor() comp, _ = executor_test_utils.create_whimsy_computation_tensorflow_identity( ) arg, arg_type = executor_test_utils.create_whimsy_value_unplaced() arg = self.run_sync(executor.create_value(arg, arg_type)) with self.assertRaises(TypeError): self.run_sync(executor.create_call(comp, arg)) def test_raises_type_error_with_unembedded_arg(self): executor = create_test_executor() comp, comp_type = executor_test_utils.create_whimsy_computation_tensorflow_identity( ) arg, _ = executor_test_utils.create_whimsy_value_unplaced() comp = self.run_sync(executor.create_value(comp, comp_type)) with self.assertRaises(TypeError): self.run_sync(executor.create_call(comp, arg)) # pyformat: disable @parameterized.named_parameters([ ('computation_intrinsic', *executor_test_utils.create_whimsy_computation_intrinsic()), ('computation_lambda', *executor_test_utils.create_whimsy_computation_lambda_identity()), ('computation_tensorflow', *executor_test_utils.create_whimsy_computation_tensorflow_identity()), ] + get_named_parameters_for_supported_intrinsics()) # pyformat: enable def test_raises_type_error_with_comp_and_bad_arg(self, comp, comp_type): executor = create_test_executor() bad_arg = 'string' bad_arg_type = computation_types.TensorType(tf.string) comp = self.run_sync(executor.create_value(comp, comp_type)) arg = self.run_sync(executor.create_value(bad_arg, bad_arg_type)) with self.assertRaises(TypeError): self.run_sync(executor.create_call(comp, arg)) # pyformat: disable @parameterized.named_parameters([ ('computation_lambda', *executor_test_utils.create_whimsy_computation_lambda_empty()), ('federated_type_at_clients', *executor_test_utils.create_whimsy_value_at_clients()), ('federated_type_at_clients_all_equal', *executor_test_utils.create_whimsy_value_at_clients_all_equal()), ('federated_type_at_server', *executor_test_utils.create_whimsy_value_at_server()), ('unplaced_type', *executor_test_utils.create_whimsy_value_unplaced()), ]) # pyformat: enable def test_raises_value_error_with_comp(self, comp, comp_type): executor = create_test_executor() comp = self.run_sync(executor.create_value(comp, comp_type)) with self.assertRaises(ValueError): self.run_sync(executor.create_call(comp)) def test_raises_not_implemented_error_with_intrinsic_def_federated_secure_sum_bitwidth( self): executor = create_test_executor() comp, comp_type = executor_test_utils.create_whimsy_intrinsic_def_federated_secure_sum_bitwidth( ) arg_1 = [10, 11, 12] arg_1_type = computation_types.at_clients(tf.int32, all_equal=False) arg_2 = 10 arg_2_type = computation_types.TensorType(tf.int32) comp = self.run_sync(executor.create_value(comp, comp_type)) arg_1 = self.run_sync(executor.create_value(arg_1, arg_1_type)) arg_2 = self.run_sync(executor.create_value(arg_2, 
arg_2_type)) args = self.run_sync(executor.create_struct([arg_1, arg_2])) with self.assertRaises(NotImplementedError): self.run_sync(executor.create_call(comp, args)) def test_raises_not_implemented_error_with_unimplemented_intrinsic(self): executor = create_test_executor() # `whimsy_intrinsic` definition is needed to allow lookup. whimsy_intrinsic = intrinsic_defs.IntrinsicDef( 'WHIMSY_INTRINSIC', 'whimsy_intrinsic', computation_types.AbstractType('T')) type_signature = computation_types.TensorType(tf.int32) comp = pb.Computation( intrinsic=pb.Intrinsic(uri='whimsy_intrinsic'), type=type_serialization.serialize_type(type_signature)) del whimsy_intrinsic comp = self.run_sync(executor.create_value(comp)) with self.assertRaises(NotImplementedError): self.run_sync(executor.create_call(comp)) class FederatingExecutorCreateStructTest(executor_test_utils.AsyncTestCase, parameterized.TestCase): # pyformat: disable @parameterized.named_parameters([ ('federated_type_at_clients', *executor_test_utils.create_whimsy_value_at_clients()), ('federated_type_at_clients_all_equal', *executor_test_utils.create_whimsy_value_at_clients_all_equal()), ('federated_type_at_server', *executor_test_utils.create_whimsy_value_at_server()), ('unplaced_type', *executor_test_utils.create_whimsy_value_unplaced()), ]) # pyformat: enable def test_returns_value_with_elements_value(self, value, type_signature): executor = create_test_executor() element = self.run_sync(executor.create_value(value, type_signature)) elements = [element] * 3 type_signature = computation_types.StructType([type_signature] * 3) result = self.run_sync(executor.create_struct(elements)) self.assertIsInstance(result, executor_value_base.ExecutorValue) self.assertEqual(result.type_signature.compact_representation(), type_signature.compact_representation()) actual_result = self.run_sync(result.compute()) expected_result = [self.run_sync(element.compute())] * 3 self.assertCountEqual(actual_result, expected_result) def test_returns_value_with_elements_value_placement_literal(self): executor = create_test_executor() value, type_signature = executor_test_utils.create_whimsy_placement_literal( ) element = self.run_sync(executor.create_value(value, type_signature)) elements = [element] * 3 type_signature = computation_types.StructType([type_signature] * 3) result = self.run_sync(executor.create_struct(elements)) self.assertIsInstance(result, executor_value_base.ExecutorValue) self.assertEqual(result.type_signature.compact_representation(), type_signature.compact_representation()) # pyformat: disable @parameterized.named_parameters([ ('intrinsic_def_federated_eval_at_server', *executor_test_utils.create_whimsy_intrinsic_def_federated_eval_at_server(), *executor_test_utils.create_whimsy_computation_tensorflow_constant()), ('computation_intrinsic', *executor_test_utils.create_whimsy_computation_intrinsic(), *executor_test_utils.create_whimsy_computation_tensorflow_constant()), ]) # pyformat: enable def test_returns_value_with_elements_fn_and_arg(self, fn, fn_type, arg, arg_type): executor = create_test_executor() fn = self.run_sync(executor.create_value(fn, fn_type)) arg = self.run_sync(executor.create_value(arg, arg_type)) element = self.run_sync(executor.create_call(fn, arg)) elements = [element] * 3 type_signature = computation_types.StructType([fn_type.result] * 3) result = self.run_sync(executor.create_struct(elements)) self.assertIsInstance(result, executor_value_base.ExecutorValue) self.assertEqual(result.type_signature.compact_representation(), 
type_signature.compact_representation()) actual_result = self.run_sync(result.compute()) expected_result = [self.run_sync(element.compute())] * 3 self.assertCountEqual(actual_result, expected_result) # pyformat: disable @parameterized.named_parameters([ ('computation_tensorflow', *executor_test_utils.create_whimsy_computation_tensorflow_empty()), ]) # pyformat: enable def test_returns_value_with_elements_fn_only(self, fn, fn_type): executor = create_test_executor() fn = self.run_sync(executor.create_value(fn, fn_type)) element = self.run_sync(executor.create_call(fn)) elements = [element] * 3 type_signature = computation_types.StructType([fn_type.result] * 3) result = self.run_sync(executor.create_struct(elements)) self.assertIsInstance(result, executor_value_base.ExecutorValue) self.assertEqual(result.type_signature.compact_representation(), type_signature.compact_representation()) actual_result = self.run_sync(result.compute()) expected_result = [self.run_sync(element.compute())] * 3 self.assertCountEqual(actual_result, expected_result) def test_raises_type_error_with_unembedded_elements(self): executor = create_test_executor() element, _ = executor_test_utils.create_whimsy_value_unplaced() elements = [element] * 3 with self.assertRaises(TypeError): self.run_sync(executor.create_struct(elements)) class FederatingExecutorCreateSelectionTest(executor_test_utils.AsyncTestCase): def test_returns_value_with_source_and_index_computation_tensorflow(self): executor = create_test_executor() source, type_signature = executor_test_utils.create_whimsy_computation_tensorflow_tuple( ) source = self.run_sync(executor.create_value(source, type_signature)) source = self.run_sync(executor.create_call(source)) result = self.run_sync(executor.create_selection(source, 0)) self.assertIsInstance(result, executor_value_base.ExecutorValue) self.assertEqual(result.type_signature.compact_representation(), type_signature.result[0].compact_representation()) actual_result = self.run_sync(result.compute()) expected_result = self.run_sync(source.compute())[0] self.assertEqual(actual_result, expected_result) def test_returns_value_with_source_and_index_structure(self): executor = create_test_executor() element, element_type = executor_test_utils.create_whimsy_value_unplaced() element = self.run_sync(executor.create_value(element, element_type)) elements = [element] * 3 type_signature = computation_types.StructType([element_type] * 3) source = self.run_sync(executor.create_struct(elements)) result = self.run_sync(executor.create_selection(source, 0)) self.assertIsInstance(result, executor_value_base.ExecutorValue) self.assertEqual(result.type_signature.compact_representation(), type_signature[0].compact_representation()) actual_result = self.run_sync(result.compute()) expected_result = self.run_sync(source.compute())[0] self.assertEqual(actual_result, expected_result) def test_returns_value_with_source_and_name_computation_tensorflow(self): executor = create_test_executor() source, type_signature = executor_test_utils.create_whimsy_computation_tensorflow_tuple( ) source = self.run_sync(executor.create_value(source, type_signature)) source = self.run_sync(executor.create_call(source)) result = self.run_sync(executor.create_selection(source, 0)) self.assertIsInstance(result, executor_value_base.ExecutorValue) self.assertEqual(result.type_signature.compact_representation(), type_signature.result['a'].compact_representation()) actual_result = self.run_sync(result.compute()) expected_result = 
self.run_sync(source.compute())['a'] self.assertEqual(actual_result, expected_result) def test_returns_value_with_source_and_name_structure(self): executor = create_test_executor() element, element_type = executor_test_utils.create_whimsy_value_unplaced() names = ['a', 'b', 'c'] element = self.run_sync(executor.create_value(element, element_type)) elements = structure.Struct((n, element) for n in names) type_signature = computation_types.StructType( (n, element_type) for n in names) source = self.run_sync(executor.create_struct(elements)) result = self.run_sync(executor.create_selection(source, 0)) self.assertIsInstance(result, executor_value_base.ExecutorValue) self.assertEqual(result.type_signature.compact_representation(), type_signature['a'].compact_representation()) actual_result = self.run_sync(result.compute()) expected_result = self.run_sync(source.compute())['a'] self.assertEqual(actual_result, expected_result) def test_raises_type_error_with_unembedded_source(self): executor = create_test_executor() element, element_type = executor_test_utils.create_whimsy_value_unplaced() element = self.run_sync(executor.create_value(element, element_type)) source = [element] * 3 with self.assertRaises(TypeError): self.run_sync(executor.create_selection(source, 0)) def test_raises_type_error_with_not_tuple_type(self): executor = create_test_executor() element, element_type = executor_test_utils.create_whimsy_value_unplaced() source = self.run_sync(executor.create_value(element, element_type)) with self.assertRaises(TypeError): self.run_sync(executor.create_selection(source, 0)) def test_raises_value_error_with_unrecognized_generic_zero(self): executor = create_test_executor() value = intrinsic_defs.GENERIC_ZERO type_signature = computation_types.StructType( [computation_types.TensorType(tf.int32)] * 3) source = self.run_sync(executor.create_value(value, type_signature)) with self.assertRaises(ValueError): self.run_sync(executor.create_selection(source, 0)) if __name__ == '__main__': absltest.main()
tensorflow/federated
tensorflow_federated/python/core/impl/executors/federating_executor_test.py
Python
apache-2.0
35,600
# Copyright 2013, Big Switch Networks # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import logging from django.core.exceptions import ValidationError # noqa from django.core.urlresolvers import reverse from django.utils.translation import ugettext_lazy as _ from horizon import exceptions from horizon import forms from horizon import messages from openstack_dashboard.dashboards.project.routers.extensions.routerrules\ import rulemanager LOG = logging.getLogger(__name__) class RuleCIDRField(forms.IPField): """Extends IPField to allow ('any','external') keywords and requires CIDR """ def __init__(self, *args, **kwargs): kwargs['mask'] = True super(RuleCIDRField, self).__init__(*args, **kwargs) def validate(self, value): keywords = ['any', 'external'] if value in keywords: self.ip = value else: if '/' not in value: raise ValidationError(_("Input must be in CIDR format")) super(RuleCIDRField, self).validate(value) class AddRouterRule(forms.SelfHandlingForm): source = RuleCIDRField(label=_("Source CIDR"), widget=forms.TextInput(), required=True) destination = RuleCIDRField(label=_("Destination CIDR"), widget=forms.TextInput(), required=True) action = forms.ChoiceField(label=_("Action"), required=True) nexthops = forms.MultiIPField(label=_("Optional: Next Hop " "Addresses (comma delimited)"), widget=forms.TextInput(), required=False) router_id = forms.CharField(label=_("Router ID"), widget=forms.TextInput(attrs={'readonly': 'readonly'})) failure_url = 'horizon:project:routers:detail' def __init__(self, request, *args, **kwargs): super(AddRouterRule, self).__init__(request, *args, **kwargs) self.fields['action'].choices = [('permit', _('Permit')), ('deny', _('Deny'))] def handle(self, request, data, **kwargs): try: if 'rule_to_delete' in request.POST: rulemanager.remove_rules(request, [request.POST['rule_to_delete']], router_id=data['router_id']) except Exception: exceptions.handle(request, _('Unable to delete router rule.')) try: if 'nexthops' not in data: data['nexthops'] = '' if data['source'] == '0.0.0.0/0': data['source'] = 'any' if data['destination'] == '0.0.0.0/0': data['destination'] = 'any' rule = {'action': data['action'], 'source': data['source'], 'destination': data['destination'], 'nexthops': data['nexthops'].split(',')} rulemanager.add_rule(request, router_id=data['router_id'], newrule=rule) msg = _('Router rule added') LOG.debug(msg) messages.success(request, msg) return True except Exception as e: msg = _('Failed to add router rule %s') % e LOG.info(msg) messages.error(request, msg) redirect = reverse(self.failure_url, args=[data['router_id']]) exceptions.handle(request, msg, redirect=redirect)
spandanb/horizon
openstack_dashboard/dashboards/project/routers/extensions/routerrules/forms.py
Python
apache-2.0
4,117
import calendar c = calendar.TextCalendar(calendar.SUNDAY) c.prmonth(2017, 7)
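
# Hedged extension of the example above (standard library only):
# TextCalendar.prmonth prints directly, while formatmonth returns the same
# month grid as a string, which is convenient for logging or embedding.
# print(c.formatmonth(2017, 7))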
jasonwee/asus-rt-n14uhp-mrtg
src/lesson_dates_and_times/calendar_textcalendar.py
Python
apache-2.0
79
class Loader(object):
    """
    Base class for every loader.
    """

    def __init__(self, scheduler):
        """
        :param scheduler: the scheduler
        """
        self._scheduler = scheduler
        self._nodes = []
        self._links = []

    def get_nodes(self):
        """
        :return: the loaded nodes or an empty list
        """
        return self._nodes

    def get_links(self):
        """
        :return: the loaded links or an empty list
        """
        return self._links


class JSONLoader(Loader):
    """
    A JSON Loader that can load data which follows the structure:

    {
      "nodes": {
        "NodeName1": {
          "inputs": [list of inputs],
          "outputs": [list of outputs]
        },
        ...
      },
      "links": [
        {
          "output": {
            "node": "NameNodeN",          # MUST be in "nodes"
            "attribute": "AttributeName"
          },
          "input": {
            "node": "NameNodeN",          # MUST be in "nodes"
            "attribute": "AttributeName"
          }
        },
        ...
      ]
    }
    """

    def __init__(self, scheduler, config_data):
        super(JSONLoader, self).__init__(scheduler)
        # load the nodes
        self._prepare_nodes(config_data['nodes'])
        # then the links
        self._prepare_links(config_data['links'])

    def _find_in_nodes(self, str_node):
        for node in self._nodes:
            if str_node == node:
                return node

    def _prepare_nodes(self, nodes):
        for name, data in nodes.items():
            self._nodes.append(name)

    def _prepare_links(self, links):
        for data in links:
            in_data = data["input"]
            out_data = data["output"]

            in_node = self._find_in_nodes(in_data['node'])
            if in_node is None:
                raise AttributeError("The input node " + in_data['node'] + " is not initialised.")
            out_node = self._find_in_nodes(out_data['node'])
            if out_node is None:
                raise AttributeError("The output node " + out_data['node'] + " is not initialised.")

            self._scheduler.create_data_link(out_node, out_data['attribute'],
                                             in_node, in_data['attribute'])
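
# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): a minimal
# config_data matching what _prepare_nodes/_prepare_links consume above.
# The `scheduler` object is hypothetical; it only needs the
# create_data_link(out_node, out_attr, in_node, in_attr) method used above.
#
# config_data = {
#     "nodes": {"NodeA": {}, "NodeB": {}},
#     "links": [
#         {"output": {"node": "NodeA", "attribute": "out"},
#          "input": {"node": "NodeB", "attribute": "in"}},
#     ],
# }
# loader = JSONLoader(scheduler, config_data)
# ---------------------------------------------------------------------------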
IntegrCiTy/obnl
obnl/core/impl/loaders.py
Python
apache-2.0
2,342
from __future__ import absolute_import from __future__ import unicode_literals from mb.lib.memoize import memoize class SomeClass: def __init__(self): self._x = 0 def _the_test(self, number): self._x += 1 return number * self._x @memoize def TestCache1(self, number): return self._the_test(number) @memoize("self", "number") def TestCache2(self, number, **kw): tmp = self._the_test(kw["number2"]) return self._the_test(tmp - number) def test_NoArgumentsPassed_UsesAllArgumentsForCache(): someClass = SomeClass() assert someClass._the_test(5) == 5 assert someClass.TestCache1(5) == 10 assert someClass.TestCache1(5) == 10 def test_ArgumentsPassedToUseForCache_UsesArgumentsForCache(): someClass = SomeClass() assert someClass.TestCache2(5, number2=10) == 10 assert someClass.TestCache2(5, number2=10) == 10
silverbp/master-builder
tests/memoize_test.py
Python
apache-2.0
916
## # Copyright (c) 2005-2015 Apple Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ## from twisted.internet.defer import inlineCallbacks from calendarserver.tools.resources import migrateResources from twistedcaldav.test.util import StoreTestCase from txdav.who.test.support import InMemoryDirectoryService from twext.who.directory import DirectoryRecord from txdav.who.idirectory import RecordType as CalRecordType from txdav.who.directory import CalendarDirectoryRecordMixin class TestRecord(DirectoryRecord, CalendarDirectoryRecordMixin): pass class MigrateResourcesTest(StoreTestCase): @inlineCallbacks def setUp(self): yield super(MigrateResourcesTest, self).setUp() self.store = self.storeUnderTest() self.sourceService = InMemoryDirectoryService(None) fieldName = self.sourceService.fieldName records = ( TestRecord( self.sourceService, { fieldName.uid: u"location1", fieldName.shortNames: (u"loc1",), fieldName.recordType: CalRecordType.location, } ), TestRecord( self.sourceService, { fieldName.uid: u"location2", fieldName.shortNames: (u"loc2",), fieldName.recordType: CalRecordType.location, } ), TestRecord( self.sourceService, { fieldName.uid: u"resource1", fieldName.shortNames: (u"res1",), fieldName.recordType: CalRecordType.resource, } ), ) yield self.sourceService.updateRecords(records, create=True) @inlineCallbacks def test_migrateResources(self): # Record location1 has not been migrated record = yield self.directory.recordWithUID(u"location1") self.assertEquals(record, None) # Migrate location1, location2, and resource1 yield migrateResources(self.sourceService, self.directory) record = yield self.directory.recordWithUID(u"location1") self.assertEquals(record.uid, u"location1") self.assertEquals(record.shortNames[0], u"loc1") record = yield self.directory.recordWithUID(u"location2") self.assertEquals(record.uid, u"location2") self.assertEquals(record.shortNames[0], u"loc2") record = yield self.directory.recordWithUID(u"resource1") self.assertEquals(record.uid, u"resource1") self.assertEquals(record.shortNames[0], u"res1") # Add a new location to the sourceService, and modify an existing # location fieldName = self.sourceService.fieldName newRecords = ( TestRecord( self.sourceService, { fieldName.uid: u"location1", fieldName.shortNames: (u"newloc1",), fieldName.recordType: CalRecordType.location, } ), TestRecord( self.sourceService, { fieldName.uid: u"location3", fieldName.shortNames: (u"loc3",), fieldName.recordType: CalRecordType.location, } ), ) yield self.sourceService.updateRecords(newRecords, create=True) yield migrateResources(self.sourceService, self.directory) # Ensure an existing record does not get migrated again; verified by # seeing if shortNames changed, which they should not: record = yield self.directory.recordWithUID(u"location1") self.assertEquals(record.uid, u"location1") self.assertEquals(record.shortNames[0], u"loc1") # Ensure new record does get migrated record = yield self.directory.recordWithUID(u"location3") self.assertEquals(record.uid, u"location3") 
self.assertEquals(record.shortNames[0], u"loc3")
red-hood/calendarserver
calendarserver/tools/test/test_resources.py
Python
apache-2.0
4,593
#!/usr/bin/env python import os from glob import glob if os.environ.get('USE_SETUPTOOLS'): from setuptools import setup setup_kwargs = dict(zip_safe=0) else: from distutils.core import setup setup_kwargs = dict() storage_dirs = [] for subdir in ('whisper', 'ceres', 'rrd', 'log', 'log/webapp'): storage_dirs.append( ('storage/%s' % subdir, []) ) webapp_content = {} for root, dirs, files in os.walk('webapp/content'): for filename in files: filepath = os.path.join(root, filename) if root not in webapp_content: webapp_content[root] = [] webapp_content[root].append(filepath) conf_files = [ ('conf', glob('conf/*.example')) ] examples = [ ('examples', glob('examples/example-*')) ] setup( name='graphite-web', version='0.10.0-alpha', url='https://launchpad.net/graphite', author='Chris Davis', author_email='[email protected]', license='Apache Software License 2.0', description='Enterprise scalable realtime graphing', package_dir={'' : 'webapp'}, packages=[ 'graphite', 'graphite.account', 'graphite.browser', 'graphite.cli', 'graphite.composer', 'graphite.dashboard', 'graphite.events', 'graphite.finders', 'graphite.graphlot', 'graphite.metrics', 'graphite.render', 'graphite.version', 'graphite.whitelist', ], package_data={'graphite' : ['templates/*', 'local_settings.py.example']}, scripts=glob('bin/*'), data_files=webapp_content.items() + storage_dirs + conf_files + examples, **setup_kwargs )
SEJeff/graphite-web
setup.py
Python
apache-2.0
1,530
from tornado.httpserver import HTTPRequest

from ..functions import parse_request


def test_get_stats():
    """
    Test parse_request for 'Get information on the stats' returns
    correctly parsed request.
    """
    request = {
        'args': {
            'method': 'GET',
            'uri': '/_stats',
        },
        'parsed_request': {
            'call': '_stats',
            'cluster': True,
            'indices': [],
            'scripted': False
        },
    }

    tornado_http_request = HTTPRequest(**request['args'])
    assert parse_request(tornado_http_request) == request['parsed_request']


def test_get_search():
    """
    Test parse_request for 'Search by GET' returns correctly parsed request.
    """
    request = {
        # Search by GET
        'args': {
            'method': 'GET',
            'uri': '/twitter/tweet/_search?q=user:kimchy',
        },
        'parsed_request': {
            'call': '_search',
            'cluster': False,
            'indices': ['twitter'],
            'scripted': False
        }
    }

    tornado_http_request = HTTPRequest(**request['args'])
    assert parse_request(tornado_http_request) == request['parsed_request']


def test_create_by_put():
    """
    Test parse_request for 'Create by PUT' returns correctly parsed request.
    """
    request = {
        'args': {
            'method': 'PUT',
            'uri': '/twitter/tweet/1',
            'body': '''{
                "user" : "kimchy",
                "post_date" : "2009-11-15T14:12:12",
                "message" : "trying out Elasticsearch"
            }'''
        },
        'parsed_request': {
            'call': '_document',
            'cluster': False,
            'indices': ['twitter'],
            'scripted': False
        }
    }

    tornado_http_request = HTTPRequest(**request['args'])
    assert parse_request(tornado_http_request) == request['parsed_request']


def test_search_by_multi_index_get():
    """
    Test parse_request for 'Search by GET, MULTI INDEX' returns correctly
    parsed request.
    """
    request = {
        'args': {
            'method': 'GET',
            'uri': '/twitter,index1,index2/tweet/_search?q=user:kimchy'
        },
        'parsed_request': {
            'call': '_search',
            'cluster': False,
            'indices': ['twitter', 'index1', 'index2'],
            'scripted': False
        }
    }

    tornado_http_request = HTTPRequest(**request['args'])
    assert parse_request(tornado_http_request) == request['parsed_request']


def test_delete_index():
    """
    Test parse_request for 'Delete the articles index' returns correctly
    parsed request.
    """
    request = {
        'args': {
            'method': 'DELETE',
            'uri': '/articles'
        },
        'parsed_request': {
            'call': '_document',
            'cluster': False,
            'indices': ['articles'],
            'scripted': False
        }
    }

    tornado_http_request = HTTPRequest(**request['args'])
    assert parse_request(tornado_http_request) == request['parsed_request']


def test_create_document_with_post():
    """
    Test parse_request for 'Create a new article document with POST' returns
    correctly parsed request.
    """
    request = {
        'args': {
            'method': 'POST',
            'uri': '/articles/article',
            'body': '{"title" : "Two", "tags" : ["foo", "bar"]}'
        },
        'parsed_request': {
            'call': '_document',
            'cluster': False,
            'indices': ['articles'],
            'scripted': False
        }
    }

    tornado_http_request = HTTPRequest(**request['args'])
    assert parse_request(tornado_http_request) == request['parsed_request']


def test_update_document_with_script():
    """
    Test parse_request for 'Update via POST with script' returns correctly
    parsed request.
    """
    request = {
        'args': {
            'method': 'POST',
            'uri': '/test/type1/1/_update',
            # Note that in the python heredoc syntax
            # the backslashes have to be escaped
            'body': '''{
                "script" : "ctx._source.text = \\"some text\\""
            }'''
        },
        'parsed_request': {
            'call': '_update',
            'cluster': False,
            'indices': ['test'],
            'scripted': True
        }
    }

    tornado_http_request = HTTPRequest(**request['args'])
    assert parse_request(tornado_http_request) == request['parsed_request']


def test_update_document_without_script():
    """
    Test parse_request for 'Update via POST without script' returns
    correctly parsed request.
    """
    request = {
        'args': {
            'method': 'POST',
            'uri': '/test/type1/1/_update',
            'body': '''{
                "doc" : {
                    "name" : "new_name"
                }
            }'''
        },
        'parsed_request': {
            'call': '_update',
            'cluster': False,
            'indices': ['test'],
            'scripted': False
        }
    }

    tornado_http_request = HTTPRequest(**request['args'])
    assert parse_request(tornado_http_request) == request['parsed_request']


def test_query_by_post():
    """
    Test parse_request for 'Query via POST without script fields' returns
    correctly parsed request.
    """
    request = {
        'args': {
            'method': 'POST',
            'uri': '/articles/_search?pretty=true',
            'body': '''
                {
                    "query" : {
                        "query_string" : {"query" : "T*"}
                    },
                    "facets" : {
                        "tags" : {
                            "terms" : {"field" : "tags"}
                        }
                    }
                }
            '''
        },
        'parsed_request': {
            'call': '_search',
            'cluster': False,
            'indices': ['articles'],
            'scripted': False
        }
    }

    tornado_http_request = HTTPRequest(**request['args'])
    assert parse_request(tornado_http_request) == request['parsed_request']


def test_query_by_post_with_script_fields():
    """
    Test parse_request for 'Query via POST with script fields' returns
    correctly parsed request.
    """
    request = {
        'args': {
            'method': 'GET',
            'uri': '/articles/_search?pretty=true',
            'body': '''
                {
                    "query" : {
                        "query_string" : {"query" : "T*"}
                    },
                    "script_fields" : {
                        "test1" : {
                            "script" : "doc['my_field_name'].value * 2"
                        },
                        "test2" : {
                            "script" : "doc['my_field_name'].value * factor",
                            "params" : {
                                "factor" : 2.0
                            }
                        }
                    }
                }
            '''
        },
        'parsed_request': {
            'call': '_search',
            'cluster': False,
            'indices': ['articles'],
            'scripted': True
        }
    }

    tornado_http_request = HTTPRequest(**request['args'])
    assert parse_request(tornado_http_request) == request['parsed_request']


def test_all_settings():
    """
    Test parse_request for '_all _settings GET' returns correctly parsed
    request.
    """
    request = {
        'args': {
            'method': 'GET',
            'uri': '/_all/_settings',
        },
        'parsed_request': {
            'indices': ['_all'],
            'cluster': False,
            'call': '_settings',
            'scripted': False
        }
    }

    tornado_http_request = HTTPRequest(**request['args'])
    assert parse_request(tornado_http_request) == request['parsed_request']


def test_home():
    """
    Test parse_request for 'home (/) GET' returns correctly parsed request.
    """
    request = {
        'args': {
            'method': 'GET',
            'uri': '/',
        },
        'parsed_request': {
            'indices': [],
            'cluster': True,
            'call': '_home',
            'scripted': False
        }
    }

    tornado_http_request = HTTPRequest(**request['args'])
    assert parse_request(tornado_http_request) == request['parsed_request']
HatPull/tornado-elasticsearch-proxy
es_proxy/tests/test_parse_request.py
Python
apache-2.0
8,311
#!/usr/bin/env python
# Copyright 2016 Criteo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function

import unittest

from biggraphite import graphite_utils as bg_graphite_utils


class TestGraphiteUtils(unittest.TestCase):

    def test_accessor_from_settings(self):
        import types

        settings = types.ModuleType("settings")
        settings.BG_DRIVER = "memory"

        accessor = bg_graphite_utils.accessor_from_settings(settings)
        self.assertNotEqual(accessor, None)

    def test_cache_from_settings(self):
        import types

        settings = types.ModuleType("settings")
        settings.BG_CACHE = "memory"
        settings.BG_CACHE_SIZE = 10
        settings.BG_CACHE_TTL = 60
        settings.BG_CACHE_SYNC = False

        cache = bg_graphite_utils.cache_from_settings('fake', settings)
        self.assertNotEqual(cache, None)


if __name__ == "__main__":
    unittest.main()
dpanth3r/biggraphite
tests/test_graphite_utils.py
Python
apache-2.0
1,444
# -*- coding: utf-8 -*- ''' The crypt module manages all of the cryptography functions for minions and masters, encrypting and decrypting payloads, preparing messages, and authenticating peers ''' # Import python libs import os import sys import time import hmac import shutil import hashlib import logging # Import third party libs try: from M2Crypto import RSA, EVP from Crypto.Cipher import AES except ImportError: # No need for crypt in local mode pass # Import salt libs import salt.utils import salt.payload import salt.utils.verify import salt.version from salt.exceptions import ( AuthenticationError, SaltClientError, SaltReqTimeoutError ) log = logging.getLogger(__name__) def dropfile(cachedir, user=None): ''' Set an aes dropfile to update the publish session key ''' dfnt = os.path.join(cachedir, '.dfnt') dfn = os.path.join(cachedir, '.dfn') def ready(): ''' Because MWorker._update_aes uses second-precision mtime to detect changes to the file, we must avoid writing two versions with the same mtime. Note that this only makes rapid updates in serial safe: concurrent updates could still both pass this check and then write two different keys with the same mtime. ''' try: stats = os.stat(dfn) except os.error: # Not there, go ahead and write it return True else: if stats.st_mtime == time.time(): # The mtime is the current time, we must # wait until time has moved on. return False else: return True while not ready(): log.warning('Waiting before writing {0}'.format(dfn)) time.sleep(1) aes = Crypticle.generate_key_string() mask = os.umask(191) with salt.utils.fopen(dfnt, 'w+') as fp_: fp_.write(aes) if user: try: import pwd uid = pwd.getpwnam(user).pw_uid os.chown(dfnt, uid, -1) shutil.move(dfnt, dfn) except (KeyError, ImportError, OSError, IOError): pass os.umask(mask) def gen_keys(keydir, keyname, keysize, user=None): ''' Generate a keypair for use with salt ''' base = os.path.join(keydir, keyname) priv = '{0}.pem'.format(base) pub = '{0}.pub'.format(base) gen = RSA.gen_key(keysize, 65537, callback=lambda x, y, z: None) cumask = os.umask(191) gen.save_key(priv, None) os.umask(cumask) gen.save_pub_key(pub) os.chmod(priv, 256) if user: try: import pwd uid = pwd.getpwnam(user).pw_uid os.chown(priv, uid, -1) os.chown(pub, uid, -1) except (KeyError, ImportError, OSError): # The specified user was not found, allow the backup systems to # report the error pass return priv def sign_message(privkey_path, message): ''' Use M2Crypto's EVP ("Envelope") functions to sign a message. Returns the signature. ''' log.debug('salt.crypt.sign_message: Loading private key') evp_rsa = EVP.load_key(privkey_path) evp_rsa.sign_init() evp_rsa.sign_update(message) log.debug('salt.crypt.sign_message: Signing message.') return evp_rsa.sign_final() def verify_signature(pubkey_path, message, signature): ''' Use M2Crypto's EVP ("Envelope") functions to verify the signature on a message. Returns True for valid signature. ''' # Verify that the signature is valid log.debug('salt.crypt.verify_signature: Loading public key') pubkey = RSA.load_pub_key(pubkey_path) verify_evp = EVP.PKey() verify_evp.assign_rsa(pubkey) verify_evp.verify_init() verify_evp.verify_update(message) log.debug('salt.crypt.verify_signature: Verifying signature') result = verify_evp.verify_final(signature) return result class MasterKeys(dict): ''' The Master Keys class is used to manage the public key pair used for authentication by the master. 
    '''
    def __init__(self, opts):
        super(MasterKeys, self).__init__()
        self.opts = opts
        self.pub_path = os.path.join(self.opts['pki_dir'], 'master.pub')
        self.rsa_path = os.path.join(self.opts['pki_dir'], 'master.pem')
        self.key = self.__get_keys()
        self.token = self.__gen_token()

    def __get_keys(self):
        '''
        Returns a key object for the master
        '''
        if os.path.exists(self.rsa_path):
            key = RSA.load_key(self.rsa_path)
            log.debug('Loaded master key: {0}'.format(self.rsa_path))
        else:
            log.info('Generating keys: {0}'.format(self.opts['pki_dir']))
            gen_keys(self.opts['pki_dir'],
                     'master',
                     self.opts['keysize'],
                     self.opts.get('user'))
            key = RSA.load_key(self.rsa_path)
        return key

    def __gen_token(self):
        '''
        Generate the authentication token
        '''
        return self.key.private_encrypt('salty bacon', 5)

    def get_pub_str(self):
        '''
        Return the string representation of the public key
        '''
        if not os.path.isfile(self.pub_path):
            key = self.__get_keys()
            key.save_pub_key(self.pub_path)
        return salt.utils.fopen(self.pub_path, 'r').read()


class Auth(object):
    '''
    The Auth class provides the sequence for setting up communication with
    the master server from a minion.
    '''
    def __init__(self, opts):
        self.opts = opts
        self.token = Crypticle.generate_key_string()
        self.serial = salt.payload.Serial(self.opts)
        self.pub_path = os.path.join(self.opts['pki_dir'], 'minion.pub')
        self.rsa_path = os.path.join(self.opts['pki_dir'], 'minion.pem')
        if 'syndic_master' in self.opts:
            self.mpub = 'syndic_master.pub'
        elif 'alert_master' in self.opts:
            self.mpub = 'monitor_master.pub'
        else:
            self.mpub = 'minion_master.pub'

    def get_keys(self):
        '''
        Returns a key object for the minion
        '''
        # Make sure all key parent directories are accessible
        user = self.opts.get('user', 'root')
        salt.utils.verify.check_path_traversal(self.opts['pki_dir'], user)

        if os.path.exists(self.rsa_path):
            key = RSA.load_key(self.rsa_path)
            log.debug('Loaded minion key: {0}'.format(self.rsa_path))
        else:
            log.info('Generating keys: {0}'.format(self.opts['pki_dir']))
            gen_keys(self.opts['pki_dir'],
                     'minion',
                     self.opts['keysize'],
                     self.opts.get('user'))
            key = RSA.load_key(self.rsa_path)
        return key

    def gen_token(self, clear_tok):
        '''
        Encrypt a string with the minion private key to verify identity
        with the master.
        '''
        return self.get_keys().private_encrypt(clear_tok, 5)

    def minion_sign_in_payload(self):
        '''
        Generates the payload used to authenticate with the master
        server. This payload consists of the passed in id_ and the ssh
        public key to encrypt the AES key sent back from the master.
        '''
        payload = {}
        key = self.get_keys()
        tmp_pub = salt.utils.mkstemp()
        key.save_pub_key(tmp_pub)
        payload['enc'] = 'clear'
        payload['load'] = {}
        payload['load']['cmd'] = '_auth'
        payload['load']['id'] = self.opts['id']
        try:
            pub = RSA.load_pub_key(
                os.path.join(self.opts['pki_dir'], self.mpub)
            )
            payload['load']['token'] = pub.public_encrypt(self.token, RSA.pkcs1_oaep_padding)
        except Exception:
            pass
        with salt.utils.fopen(tmp_pub, 'r') as fp_:
            payload['load']['pub'] = fp_.read()
        os.remove(tmp_pub)
        return payload

    def decrypt_aes(self, payload, master_pub=True):
        '''
        This function is used to decrypt the aes seed phrase returned from
        the master server, the seed phrase is decrypted with the ssh rsa
        host key.

        Pass in the encrypted aes key.
Returns the decrypted aes seed key, a string ''' log.debug('Decrypting the current master AES key') key = self.get_keys() key_str = key.private_decrypt(payload['aes'], RSA.pkcs1_oaep_padding) if 'sig' in payload: m_path = os.path.join(self.opts['pki_dir'], self.mpub) if os.path.exists(m_path): try: mkey = RSA.load_pub_key(m_path) except Exception: return '', '' digest = hashlib.sha256(key_str).hexdigest() m_digest = mkey.public_decrypt(payload['sig'], 5) if m_digest != digest: return '', '' else: return '', '' if '_|-' in key_str: return key_str.split('_|-') else: if 'token' in payload: token = key.private_decrypt(payload['token'], RSA.pkcs1_oaep_padding) return key_str, token elif not master_pub: return key_str, '' return '', '' def verify_master(self, payload): ''' Verify that the master is the same one that was previously accepted ''' m_pub_fn = os.path.join(self.opts['pki_dir'], self.mpub) if os.path.isfile(m_pub_fn) and not self.opts['open_mode']: local_master_pub = salt.utils.fopen(m_pub_fn).read() if payload['pub_key'] != local_master_pub: # This is not the last master we connected to log.error('The master key has changed, the salt master could ' 'have been subverted, verify salt master\'s public ' 'key') return '' try: aes, token = self.decrypt_aes(payload) if token != self.token: log.error( 'The master failed to decrypt the random minion token' ) return '' except Exception: log.error( 'The master failed to decrypt the random minion token' ) return '' return aes else: salt.utils.fopen(m_pub_fn, 'w+').write(payload['pub_key']) aes, token = self.decrypt_aes(payload, False) return aes def sign_in(self, timeout=60, safe=True): ''' Send a sign in request to the master, sets the key information and returns a dict containing the master publish interface to bind to and the decrypted aes key for transport decryption. ''' auth = {} m_pub_fn = os.path.join(self.opts['pki_dir'], self.mpub) try: self.opts['master_ip'] = salt.utils.dns_check( self.opts['master'], True, self.opts['ipv6'] ) except SaltClientError as e: if safe: log.warning('SaltClientError: {0}'.format(e)) return 'retry' raise SaltClientError if self.opts['master_ip'] not in self.opts['master_uri']: self.opts['master_uri'] = (self.opts['master_uri'].replace( self.opts['master_uri'].split(':')[1][2:], self.opts['master_ip'])) sreq = salt.payload.SREQ( self.opts['master_uri'], ) try: payload = sreq.send_auto( self.minion_sign_in_payload(), timeout=timeout ) except SaltReqTimeoutError as e: if safe: log.warning('SaltReqTimeoutError: {0}'.format(e)) return 'retry' raise SaltClientError if 'load' in payload: if 'ret' in payload['load']: if not payload['load']['ret']: log.critical( 'The Salt Master has rejected this minion\'s public ' 'key!\nTo repair this issue, delete the public key ' 'for this minion on the Salt Master and restart this ' 'minion.\nOr restart the Salt Master in open mode to ' 'clean out the keys. The Salt Minion will now exit.' 
) sys.exit(0) else: log.error( 'The Salt Master has cached the public key for this ' 'node, this salt minion will wait for {0} seconds ' 'before attempting to re-authenticate'.format( self.opts['acceptance_wait_time'] ) ) return 'retry' auth['aes'] = self.verify_master(payload) if not auth['aes']: log.critical( 'The Salt Master server\'s public key did not authenticate!\n' 'The master may need to be updated if it is a version of Salt ' 'lower than {0}, or\n' 'If you are confident that you are connecting to a valid Salt ' 'Master, then remove the master public key and restart the ' 'Salt Minion.\nThe master public key can be found ' 'at:\n{1}'.format(salt.version.__version__, m_pub_fn) ) sys.exit(42) if self.opts.get('master_finger', False): if salt.utils.pem_finger(m_pub_fn) != self.opts['master_finger']: log.critical( 'The specified fingerprint in the master configuration ' 'file:\n{0}\nDoes not match the authenticating master\'s ' 'key:\n{1}\nVerify that the configured fingerprint ' 'matches the fingerprint of the correct master and that ' 'this minion is not subject to a man in the middle attack' .format( self.opts['master_finger'], salt.utils.pem_finger(m_pub_fn) ) ) sys.exit(42) auth['publish_port'] = payload['publish_port'] return auth class Crypticle(object): ''' Authenticated encryption class Encryption algorithm: AES-CBC Signing algorithm: HMAC-SHA256 ''' PICKLE_PAD = 'pickle::' AES_BLOCK_SIZE = 16 SIG_SIZE = hashlib.sha256().digest_size def __init__(self, opts, key_string, key_size=192): self.keys = self.extract_keys(key_string, key_size) self.key_size = key_size self.serial = salt.payload.Serial(opts) @classmethod def generate_key_string(cls, key_size=192): key = os.urandom(key_size // 8 + cls.SIG_SIZE) return key.encode('base64').replace('\n', '') @classmethod def extract_keys(cls, key_string, key_size): key = key_string.decode('base64') assert len(key) == key_size / 8 + cls.SIG_SIZE, 'invalid key' return key[:-cls.SIG_SIZE], key[-cls.SIG_SIZE:] def encrypt(self, data): ''' encrypt data with AES-CBC and sign it with HMAC-SHA256 ''' aes_key, hmac_key = self.keys pad = self.AES_BLOCK_SIZE - len(data) % self.AES_BLOCK_SIZE data = data + pad * chr(pad) iv_bytes = os.urandom(self.AES_BLOCK_SIZE) cypher = AES.new(aes_key, AES.MODE_CBC, iv_bytes) data = iv_bytes + cypher.encrypt(data) sig = hmac.new(hmac_key, data, hashlib.sha256).digest() return data + sig def decrypt(self, data): ''' verify HMAC-SHA256 signature and decrypt data with AES-CBC ''' aes_key, hmac_key = self.keys sig = data[-self.SIG_SIZE:] data = data[:-self.SIG_SIZE] mac_bytes = hmac.new(hmac_key, data, hashlib.sha256).digest() if len(mac_bytes) != len(sig): log.debug('Failed to authenticate message') raise AuthenticationError('message authentication failed') result = 0 for zipped_x, zipped_y in zip(mac_bytes, sig): result |= ord(zipped_x) ^ ord(zipped_y) if result != 0: log.debug('Failed to authenticate message') raise AuthenticationError('message authentication failed') iv_bytes = data[:self.AES_BLOCK_SIZE] data = data[self.AES_BLOCK_SIZE:] cypher = AES.new(aes_key, AES.MODE_CBC, iv_bytes) data = cypher.decrypt(data) return data[:-ord(data[-1])] def dumps(self, obj): ''' Serialize and encrypt a python object ''' return self.encrypt(self.PICKLE_PAD + self.serial.dumps(obj)) def loads(self, data): ''' Decrypt and un-serialize a python object ''' data = self.decrypt(data) # simple integrity check to verify that we got meaningful data if not data.startswith(self.PICKLE_PAD): return {} return 
self.serial.loads(data[len(self.PICKLE_PAD):]) class SAuth(Auth): ''' Set up an object to maintain the standalone authentication session with the salt master ''' def __init__(self, opts): super(SAuth, self).__init__(opts) self.crypticle = self.__authenticate() def __authenticate(self): ''' Authenticate with the master, this method breaks the functional paradigm, it will update the master information from a fresh sign in, signing in can occur as often as needed to keep up with the revolving master aes key. ''' while True: creds = self.sign_in( self.opts['auth_timeout'], self.opts.get('_safe_auth', True) ) if creds == 'retry': if self.opts.get('caller'): print('Minion failed to authenticate with the master, ' 'has the minion key been accepted?') sys.exit(2) time.sleep(self.opts['acceptance_wait_time']) continue break return Crypticle(self.opts, creds['aes'])
MadeiraCloud/salt
sources/salt/crypt.py
Python
apache-2.0
18,535
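# Illustrative sketch (not part of the salt file above): a minimal,
# self-contained rendering of the encrypt-then-MAC scheme that Crypticle
# implements -- AES-CBC with block padding, authenticated with HMAC-SHA256.
# Helper names here are invented, and aes_key must be a valid AES key
# length (16/24/32 bytes); use the Crypticle class itself in real code.
import hashlib
import hmac
import os

from Crypto.Cipher import AES

AES_BLOCK_SIZE = 16
SIG_SIZE = hashlib.sha256().digest_size


def demo_encrypt(aes_key, hmac_key, data):
    pad = AES_BLOCK_SIZE - len(data) % AES_BLOCK_SIZE
    data = data + pad * chr(pad)                      # pad to a full block
    iv_bytes = os.urandom(AES_BLOCK_SIZE)
    data = iv_bytes + AES.new(aes_key, AES.MODE_CBC, iv_bytes).encrypt(data)
    return data + hmac.new(hmac_key, data, hashlib.sha256).digest()


def demo_decrypt(aes_key, hmac_key, blob):
    data, sig = blob[:-SIG_SIZE], blob[-SIG_SIZE:]
    # Crypticle compares the digests in constant time; a plain comparison
    # is used here only to keep the sketch short.
    if hmac.new(hmac_key, data, hashlib.sha256).digest() != sig:
        raise ValueError('message authentication failed')
    plain = AES.new(aes_key, AES.MODE_CBC,
                    data[:AES_BLOCK_SIZE]).decrypt(data[AES_BLOCK_SIZE:])
    return plain[:-ord(plain[-1])]                    # strip the padding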
import re
import string
import sys

from pyspark import SparkContext

exclude = set(string.punctuation)


def get_hash_tag(word, rmPunc):
    pattern = re.compile("^#(.*)")
    m = pattern.match(word)
    tag = None
    if m:
        match = m.groups()
        for m_word in match:
            tag = ''.join(letter for letter in m_word if letter not in rmPunc)
    if tag is not None:
        return tag


sc = SparkContext("local", "Finding Hash Tags")
rmPunc = sc.broadcast(exclude)
mydata = sc.textFile("hdfs://<hostname>:<port>/path/to/parsedata<first job output>")
wordsRDD = mydata.flatMap(lambda line: line.split("\t")[1].split(" "))
tagsRDD = wordsRDD.map(lambda word: get_hash_tag(word, rmPunc.value))
hashtagsRDD = tagsRDD.filter(lambda word: word is not None)
hashtagsRDD.saveAsTextFile("hdfs://<hostname>:<port>/path/to/hashtags")
malli3131/SparkApps
Batch_sentiment/spark_hashtag.py
Python
apache-2.0
828
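# Illustrative check of the hashtag extraction logic used above, runnable
# without a Spark cluster; the sample words and the extract_tag helper are
# invented for the example.
import re
import string

punctuation = set(string.punctuation)


def extract_tag(word):
    match = re.compile("^#(.*)").match(word)
    if not match:
        return None
    # Same cleanup as get_hash_tag: drop any remaining punctuation.
    return ''.join(c for c in match.group(1) if c not in punctuation)


assert extract_tag("#BigData!") == "BigData"
assert extract_tag("plain-word") is None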
"""Base Command class, and related routines""" import sys import os import socket import urllib2 import urllib from cStringIO import StringIO import traceback import time from pip.log import logger from pip.baseparser import parser, ConfigOptionParser, UpdatingDefaultsHelpFormatter from pip.exceptions import InstallationError, UninstallationError from pip.venv import restart_in_venv __all__ = ['command_dict', 'Command', 'load_all_commands', 'load_command', 'command_names'] command_dict = {} class Command(object): name = None usage = None hidden = False def __init__(self): assert self.name self.parser = ConfigOptionParser( usage=self.usage, prog='%s %s' % (sys.argv[0], self.name), version=parser.version, formatter=UpdatingDefaultsHelpFormatter(), name=self.name) for option in parser.option_list: if not option.dest or option.dest == 'help': # -h, --version, etc continue self.parser.add_option(option) command_dict[self.name] = self def merge_options(self, initial_options, options): # Make sure we have all global options carried over for attr in ['log', 'venv', 'proxy', 'venv_base', 'require_venv', 'respect_venv', 'log_explicit_levels', 'log_file', 'timeout', 'default_vcs', 'skip_requirements_regex']: setattr(options, attr, getattr(initial_options, attr) or getattr(options, attr)) options.quiet += initial_options.quiet options.verbose += initial_options.verbose def main(self, complete_args, args, initial_options): options, args = self.parser.parse_args(args) self.merge_options(initial_options, options) if options.require_venv and not options.venv: # If a venv is required check if it can really be found if not os.environ.get('VIRTUAL_ENV'): print 'Could not find an activated virtualenv (required).' sys.exit(3) # Automatically install in currently activated venv if required options.respect_venv = True if args and args[-1] == '___VENV_RESTART___': ## FIXME: We don't do anything this this value yet: venv_location = args[-2] args = args[:-2] options.venv = None else: # If given the option to respect the activated environment # check if no venv is given as a command line parameter if options.respect_venv and os.environ.get('VIRTUAL_ENV'): if options.venv and os.path.exists(options.venv): # Make sure command line venv and environmental are the same if (os.path.realpath(os.path.expanduser(options.venv)) != os.path.realpath(os.environ.get('VIRTUAL_ENV'))): print ("Given virtualenv (%s) doesn't match " "currently activated virtualenv (%s)." % (options.venv, os.environ.get('VIRTUAL_ENV'))) sys.exit(3) else: options.venv = os.environ.get('VIRTUAL_ENV') print 'Using already activated environment %s' % options.venv level = 1 # Notify level += options.verbose level -= options.quiet level = logger.level_for_integer(4-level) complete_log = [] logger.consumers.extend( [(level, sys.stdout), (logger.DEBUG, complete_log.append)]) if options.log_explicit_levels: logger.explicit_levels = True if options.venv: if options.verbose > 0: # The logger isn't setup yet print 'Running in environment %s' % options.venv site_packages=False if options.site_packages: site_packages=True restart_in_venv(options.venv, options.venv_base, site_packages, complete_args) # restart_in_venv should actually never return, but for clarity... 
return ## FIXME: not sure if this sure come before or after venv restart if options.log: log_fp = open_logfile_append(options.log) logger.consumers.append((logger.DEBUG, log_fp)) else: log_fp = None socket.setdefaulttimeout(options.timeout or None) setup_proxy_handler(options.proxy) exit = 0 try: self.run(options, args) except (InstallationError, UninstallationError), e: logger.fatal(str(e)) logger.info('Exception information:\n%s' % format_exc()) exit = 1 except: logger.fatal('Exception:\n%s' % format_exc()) exit = 2 if log_fp is not None: log_fp.close() if exit: log_fn = options.log_file text = '\n'.join(complete_log) logger.fatal('Storing complete log in %s' % log_fn) log_fp = open_logfile_append(log_fn) log_fp.write(text) log_fp.close() return exit ## FIXME: should get moved somewhere else: def setup_proxy_handler(proxystr=''): """Set the proxy handler given the option passed on the command line. If an empty string is passed it looks at the HTTP_PROXY environment variable. """ proxy = get_proxy(proxystr) if proxy: proxy_support = urllib2.ProxyHandler({"http": proxy, "ftp": proxy}) opener = urllib2.build_opener(proxy_support, urllib2.CacheFTPHandler) urllib2.install_opener(opener) def get_proxy(proxystr=''): """Get the proxy given the option passed on the command line. If an empty string is passed it looks at the HTTP_PROXY environment variable.""" if not proxystr: proxystr = os.environ.get('HTTP_PROXY', '') if proxystr: if '@' in proxystr: user_password, server_port = proxystr.split('@', 1) if ':' in user_password: user, password = user_password.split(':', 1) else: user = user_password import getpass prompt = 'Password for %s@%s: ' % (user, server_port) password = urllib.quote(getpass.getpass(prompt)) return '%s:%s@%s' % (user, password, server_port) else: return proxystr else: return None def format_exc(exc_info=None): if exc_info is None: exc_info = sys.exc_info() out = StringIO() traceback.print_exception(*exc_info, **dict(file=out)) return out.getvalue() def open_logfile_append(filename): """Open the named log file in append mode. If the file already exists, a separator will also be printed to the file to separate past activity from current activity. """ exists = os.path.exists(filename) log_fp = open(filename, 'a') if exists: print >> log_fp, '-'*60 print >> log_fp, '%s run on %s' % (sys.argv[0], time.strftime('%c')) return log_fp def load_command(name): full_name = 'pip.commands.%s' % name if full_name in sys.modules: return try: __import__(full_name) except ImportError: pass def load_all_commands(): for name in command_names(): load_command(name) def command_names(): dir = os.path.join(os.path.dirname(__file__), 'commands') names = [] for name in os.listdir(dir): if name.endswith('.py') and os.path.isfile(os.path.join(dir, name)): names.append(os.path.splitext(name)[0]) return names
2013Commons/HUE-SHARK
build/env/lib/python2.7/site-packages/pip-0.6.3-py2.7.egg/pip/basecommand.py
Python
apache-2.0
7,777
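# Quick illustration of get_proxy above. The proxy string is made up; when
# credentials are already embedded no password prompt is triggered, and an
# empty string falls back to HTTP_PROXY (assumed unset here).
assert get_proxy('user:secret@proxy.example.com:3128') == \
    'user:secret@proxy.example.com:3128'
assert get_proxy('') is None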
# -*- coding: utf-8 -*- # Copyright 2022 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import proto # type: ignore from google.protobuf import timestamp_pb2 # type: ignore __protobuf__ = proto.module( package="google.devtools.clouderrorreporting.v1beta1", manifest={ "ResolutionStatus", "ErrorGroup", "TrackingIssue", "ErrorEvent", "ServiceContext", "ErrorContext", "HttpRequestContext", "SourceLocation", }, ) class ResolutionStatus(proto.Enum): r"""Resolution status of an error group.""" RESOLUTION_STATUS_UNSPECIFIED = 0 OPEN = 1 ACKNOWLEDGED = 2 RESOLVED = 3 MUTED = 4 class ErrorGroup(proto.Message): r"""Description of a group of similar error events. Attributes: name (str): The group resource name. Example: <code>projects/my-project-123/groups/CNSgkpnppqKCUw</code> group_id (str): Group IDs are unique for a given project. If the same kind of error occurs in different service contexts, it will receive the same group ID. tracking_issues (Sequence[google.cloud.errorreporting_v1beta1.types.TrackingIssue]): Associated tracking issues. resolution_status (google.cloud.errorreporting_v1beta1.types.ResolutionStatus): Error group's resolution status. An unspecified resolution status will be interpreted as OPEN """ name = proto.Field(proto.STRING, number=1,) group_id = proto.Field(proto.STRING, number=2,) tracking_issues = proto.RepeatedField( proto.MESSAGE, number=3, message="TrackingIssue", ) resolution_status = proto.Field(proto.ENUM, number=5, enum="ResolutionStatus",) class TrackingIssue(proto.Message): r"""Information related to tracking the progress on resolving the error. Attributes: url (str): A URL pointing to a related entry in an issue tracking system. Example: ``https://github.com/user/project/issues/4`` """ url = proto.Field(proto.STRING, number=1,) class ErrorEvent(proto.Message): r"""An error event which is returned by the Error Reporting system. Attributes: event_time (google.protobuf.timestamp_pb2.Timestamp): Time when the event occurred as provided in the error report. If the report did not contain a timestamp, the time the error was received by the Error Reporting system is used. service_context (google.cloud.errorreporting_v1beta1.types.ServiceContext): The ``ServiceContext`` for which this error was reported. message (str): The stack trace that was reported or logged by the service. context (google.cloud.errorreporting_v1beta1.types.ErrorContext): Data about the context in which the error occurred. """ event_time = proto.Field(proto.MESSAGE, number=1, message=timestamp_pb2.Timestamp,) service_context = proto.Field(proto.MESSAGE, number=2, message="ServiceContext",) message = proto.Field(proto.STRING, number=3,) context = proto.Field(proto.MESSAGE, number=5, message="ErrorContext",) class ServiceContext(proto.Message): r"""Describes a running service that sends errors. Its version changes over time and multiple versions can run in parallel. Attributes: service (str): An identifier of the service, such as the name of the executable, job, or Google App Engine service name. 
This field is expected to have a low number of values that are relatively stable over time, as opposed to ``version``, which can be changed whenever new code is deployed. Contains the service name for error reports extracted from Google App Engine logs or ``default`` if the App Engine default service is used. version (str): Represents the source code version that the developer provided, which could represent a version label or a Git SHA-1 hash, for example. For App Engine standard environment, the version is set to the version of the app. resource_type (str): Type of the MonitoredResource. List of possible values: https://cloud.google.com/monitoring/api/resources Value is set automatically for incoming errors and must not be set when reporting errors. """ service = proto.Field(proto.STRING, number=2,) version = proto.Field(proto.STRING, number=3,) resource_type = proto.Field(proto.STRING, number=4,) class ErrorContext(proto.Message): r"""A description of the context in which an error occurred. This data should be provided by the application when reporting an error, unless the error report has been generated automatically from Google App Engine logs. Attributes: http_request (google.cloud.errorreporting_v1beta1.types.HttpRequestContext): The HTTP request which was processed when the error was triggered. user (str): The user who caused or was affected by the crash. This can be a user ID, an email address, or an arbitrary token that uniquely identifies the user. When sending an error report, leave this field empty if the user was not logged in. In this case the Error Reporting system will use other data, such as remote IP address, to distinguish affected users. See ``affected_users_count`` in ``ErrorGroupStats``. report_location (google.cloud.errorreporting_v1beta1.types.SourceLocation): The location in the source code where the decision was made to report the error, usually the place where it was logged. For a logged exception this would be the source line where the exception is logged, usually close to the place where it was caught. """ http_request = proto.Field(proto.MESSAGE, number=1, message="HttpRequestContext",) user = proto.Field(proto.STRING, number=2,) report_location = proto.Field(proto.MESSAGE, number=3, message="SourceLocation",) class HttpRequestContext(proto.Message): r"""HTTP request data that is related to a reported error. This data should be provided by the application when reporting an error, unless the error report has been generated automatically from Google App Engine logs. Attributes: method (str): The type of HTTP request, such as ``GET``, ``POST``, etc. url (str): The URL of the request. user_agent (str): The user agent information that is provided with the request. referrer (str): The referrer information that is provided with the request. response_status_code (int): The HTTP response status code for the request. remote_ip (str): The IP address from which the request originated. This can be IPv4, IPv6, or a token which is derived from the IP address, depending on the data that has been provided in the error report. """ method = proto.Field(proto.STRING, number=1,) url = proto.Field(proto.STRING, number=2,) user_agent = proto.Field(proto.STRING, number=3,) referrer = proto.Field(proto.STRING, number=4,) response_status_code = proto.Field(proto.INT32, number=5,) remote_ip = proto.Field(proto.STRING, number=6,) class SourceLocation(proto.Message): r"""Indicates a location in the source code of the service for which errors are reported. 
``functionName`` must be provided by the application when reporting an error, unless the error report contains a ``message`` with a supported exception stack trace. All fields are optional for the later case. Attributes: file_path (str): The source code filename, which can include a truncated relative path, or a full path from a production machine. line_number (int): 1-based. 0 indicates that the line number is unknown. function_name (str): Human-readable name of a function or method. The value can include optional context like the class or package name. For example, ``my.package.MyClass.method`` in case of Java. """ file_path = proto.Field(proto.STRING, number=1,) line_number = proto.Field(proto.INT32, number=2,) function_name = proto.Field(proto.STRING, number=4,) __all__ = tuple(sorted(__protobuf__.manifest))
googleapis/python-error-reporting
google/cloud/errorreporting_v1beta1/types/common.py
Python
apache-2.0
9,405
"""Lists clusters.""" from baseCmd import * from baseResponse import * class listClustersCmd (baseCmd): typeInfo = {} def __init__(self): self.isAsync = "false" """lists clusters by allocation state""" self.allocationstate = None self.typeInfo['allocationstate'] = 'string' """lists clusters by cluster type""" self.clustertype = None self.typeInfo['clustertype'] = 'string' """lists clusters by hypervisor type""" self.hypervisor = None self.typeInfo['hypervisor'] = 'string' """lists clusters by the cluster ID""" self.id = None self.typeInfo['id'] = 'uuid' """List by keyword""" self.keyword = None self.typeInfo['keyword'] = 'string' """whether this cluster is managed by cloudstack""" self.managedstate = None self.typeInfo['managedstate'] = 'string' """lists clusters by the cluster name""" self.name = None self.typeInfo['name'] = 'string' """""" self.page = None self.typeInfo['page'] = 'integer' """""" self.pagesize = None self.typeInfo['pagesize'] = 'integer' """lists clusters by Pod ID""" self.podid = None self.typeInfo['podid'] = 'uuid' """flag to display the capacity of the clusters""" self.showcapacities = None self.typeInfo['showcapacities'] = 'boolean' """lists clusters by Zone ID""" self.zoneid = None self.typeInfo['zoneid'] = 'uuid' self.required = [] class listClustersResponse (baseResponse): typeInfo = {} def __init__(self): """the cluster ID""" self.id = None self.typeInfo['id'] = 'string' """the allocation state of the cluster""" self.allocationstate = None self.typeInfo['allocationstate'] = 'string' """the type of the cluster""" self.clustertype = None self.typeInfo['clustertype'] = 'string' """The cpu overcommit ratio of the cluster""" self.cpuovercommitratio = None self.typeInfo['cpuovercommitratio'] = 'string' """the hypervisor type of the cluster""" self.hypervisortype = None self.typeInfo['hypervisortype'] = 'string' """whether this cluster is managed by cloudstack""" self.managedstate = None self.typeInfo['managedstate'] = 'string' """The memory overcommit ratio of the cluster""" self.memoryovercommitratio = None self.typeInfo['memoryovercommitratio'] = 'string' """the cluster name""" self.name = None self.typeInfo['name'] = 'string' """the Pod ID of the cluster""" self.podid = None self.typeInfo['podid'] = 'string' """the Pod name of the cluster""" self.podname = None self.typeInfo['podname'] = 'string' """the Zone ID of the cluster""" self.zoneid = None self.typeInfo['zoneid'] = 'string' """the Zone name of the cluster""" self.zonename = None self.typeInfo['zonename'] = 'string' """the capacity of the Cluster""" self.capacity = [] class capacity: def __init__(self): """"the total capacity available""" self.capacitytotal = None """"the capacity currently in use""" self.capacityused = None """"the Cluster ID""" self.clusterid = None """"the Cluster name""" self.clustername = None """"the percentage of capacity currently in use""" self.percentused = None """"the Pod ID""" self.podid = None """"the Pod name""" self.podname = None """"the capacity type""" self.type = None """"the Zone ID""" self.zoneid = None """"the Zone name""" self.zonename = None
MissionCriticalCloud/marvin
marvin/cloudstackAPI/listClusters.py
Python
apache-2.0
3,916
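# Hypothetical use of the generated command class above; the values are
# placeholders and nothing is sent to a CloudStack API here.
cmd = listClustersCmd()
cmd.hypervisor = "KVM"
cmd.managedstate = "Managed"
cmd.showcapacities = True
# cmd would then be handed to a marvin API client for execution.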
# -*- coding: utf-8 -*- # Minio Python Library for Amazon S3 compatible cloud storage, (C) 2015 Minio, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys try: from urllib.parse import urlparse as compat_urllib_parse except ImportError: # python 2 from urlparse import urlparse as compat_urllib_parse strtype = None if sys.version_info < (3, 0): strtype = basestring else: strtype = str
krishnasrinivas/minio-py
tests/unit/compat.py
Python
apache-2.0
917
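# Illustrative use of the 2/3 compatibility shim above: the same names work
# on either interpreter; the URL is an example value.
url = compat_urllib_parse('https://play.minio.io:9000/bucket/key')
assert url.netloc == 'play.minio.io:9000'
assert isinstance('a string', strtype)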
from azure.servicebus import ServiceBusService, Message, Queue class azure_service_bus_listener(object): def __init__(self, azure_settings): self.bus_service = ServiceBusService( service_namespace= azure_settings['name_space'], shared_access_key_name = azure_settings['key_name'], shared_access_key_value = azure_settings['key_value']) self.queue_name = azure_settings['queue_name'] def wait_for_message(self, on_receive_target, on_timeout_target): # just in case it isn't there self.create_queue() message = self.bus_service.receive_queue_message(self.queue_name, peek_lock=False) if (message.body == None): print("[ASB_Listener]: No Message Received") on_timeout_target() else: message_string = message.body.decode('utf-8') on_receive_target(message_string) def create_queue(self): q_opt = Queue() q_opt.max_size_in_megabytes = '1024' q_opt.default_message_time_to_live = 'PT1M' self.bus_service.create_queue(self.queue_name, q_opt)
rumdood/martinique
src/PiDaemon/azure_service_bus_listener.py
Python
apache-2.0
1,135
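# Hypothetical wiring of the listener above; the namespace, key, and queue
# values are placeholders rather than real credentials, so the blocking
# receive call is left commented out.
demo_settings = {
    'name_space': 'example-namespace',
    'key_name': 'RootManageSharedAccessKey',
    'key_value': '<shared-access-key>',
    'queue_name': 'pi-commands',
}


def on_receive(body):
    print("[demo]: received %s" % body)


def on_timeout():
    print("[demo]: queue was empty")


# listener = azure_service_bus_listener(demo_settings)
# listener.wait_for_message(on_receive, on_timeout)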
import os
import logging
import time

from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.backends import default_backend
from googleapiclient import discovery, errors

logger = logging.getLogger(__name__)

crm = discovery.build("cloudresourcemanager", "v1")
iam = discovery.build("iam", "v1")
compute = discovery.build("compute", "v1")

VERSION = "v1"

RAY = "ray-autoscaler"

DEFAULT_SERVICE_ACCOUNT_ID = RAY + "-sa-" + VERSION
SERVICE_ACCOUNT_EMAIL_TEMPLATE = (
    "{account_id}@{project_id}.iam.gserviceaccount.com")
DEFAULT_SERVICE_ACCOUNT_CONFIG = {
    "displayName": "Ray Autoscaler Service Account ({})".format(VERSION),
}
DEFAULT_SERVICE_ACCOUNT_ROLES = ("roles/storage.objectAdmin",
                                 "roles/compute.admin")

MAX_POLLS = 12
POLL_INTERVAL = 5


def wait_for_crm_operation(operation):
    """Poll for cloud resource manager operation until finished."""
    logger.info("wait_for_crm_operation: "
                "Waiting for operation {} to finish...".format(operation))

    for _ in range(MAX_POLLS):
        result = crm.operations().get(name=operation["name"]).execute()
        if "error" in result:
            raise Exception(result["error"])

        if "done" in result and result["done"]:
            logger.info("wait_for_crm_operation: Operation done.")
            break

        time.sleep(POLL_INTERVAL)

    return result


def wait_for_compute_global_operation(project_name, operation):
    """Poll for global compute operation until finished."""
    logger.info("wait_for_compute_global_operation: "
                "Waiting for operation {} to finish...".format(
                    operation["name"]))

    for _ in range(MAX_POLLS):
        result = compute.globalOperations().get(
            project=project_name,
            operation=operation["name"],
        ).execute()
        if "error" in result:
            raise Exception(result["error"])

        if result["status"] == "DONE":
            logger.info("wait_for_compute_global_operation: "
                        "Operation done.")
            break

        time.sleep(POLL_INTERVAL)

    return result


def key_pair_name(i, region, project_id, ssh_user):
    """Returns the ith default gcp_key_pair_name."""
    key_name = "{}_gcp_{}_{}_{}_{}".format(RAY, region, project_id, ssh_user,
                                           i)
    return key_name


def key_pair_paths(key_name):
    """Returns public and private key paths for a given key_name."""
    public_key_path = os.path.expanduser("~/.ssh/{}.pub".format(key_name))
    private_key_path = os.path.expanduser("~/.ssh/{}.pem".format(key_name))
    return public_key_path, private_key_path


def generate_rsa_key_pair():
    """Create public and private ssh-keys."""

    key = rsa.generate_private_key(
        backend=default_backend(), public_exponent=65537, key_size=2048)

    public_key = key.public_key().public_bytes(
        serialization.Encoding.OpenSSH,
        serialization.PublicFormat.OpenSSH).decode("utf-8")

    pem = key.private_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PrivateFormat.TraditionalOpenSSL,
        encryption_algorithm=serialization.NoEncryption()).decode("utf-8")

    return public_key, pem


def bootstrap_gcp(config):
    config = _configure_project(config)
    config = _configure_iam_role(config)
    config = _configure_key_pair(config)
    config = _configure_subnet(config)

    return config


def _configure_project(config):
    """Setup a Google Cloud Platform Project.

    Google Compute Platform organizes all the resources, such as storage
    buckets, users, and instances under projects. This is different from
    aws ec2 where everything is global.
    """
    project_id = config["provider"].get("project_id")
    assert config["provider"]["project_id"] is not None, (
        "'project_id' must be set in the 'provider' section of the autoscaler"
        " config. Notice that the project id must be globally unique.")
    project = _get_project(project_id)

    if project is None:
        # Project not found, try creating it
        _create_project(project_id)
        project = _get_project(project_id)

    assert project is not None, "Failed to create project"
    assert project["lifecycleState"] == "ACTIVE", (
        "Project status needs to be ACTIVE, got {}".format(
            project["lifecycleState"]))

    config["provider"]["project_id"] = project["projectId"]

    return config


def _configure_iam_role(config):
    """Setup a gcp service account with IAM roles.

    Creates a gcp service account and binds IAM roles which allow it to
    control storage/compute services. Specifically, the head node needs to
    have an IAM role that allows it to create further gce instances and
    store items in google cloud storage.

    TODO: Allow the name/id of the service account to be configured
    """
    email = SERVICE_ACCOUNT_EMAIL_TEMPLATE.format(
        account_id=DEFAULT_SERVICE_ACCOUNT_ID,
        project_id=config["provider"]["project_id"])
    service_account = _get_service_account(email, config)

    if service_account is None:
        logger.info("_configure_iam_role: "
                    "Creating new service account {}".format(
                        DEFAULT_SERVICE_ACCOUNT_ID))

        service_account = _create_service_account(
            DEFAULT_SERVICE_ACCOUNT_ID, DEFAULT_SERVICE_ACCOUNT_CONFIG,
            config)

    assert service_account is not None, "Failed to create service account"

    _add_iam_policy_binding(service_account, DEFAULT_SERVICE_ACCOUNT_ROLES)

    config["head_node"]["serviceAccounts"] = [{
        "email": service_account["email"],
        # NOTE: The amount of access is determined by the scope + IAM
        # role of the service account. Even if the cloud-platform scope
        # gives (scope) access to the whole cloud-platform, the service
        # account is limited by the IAM rights specified below.
        "scopes": ["https://www.googleapis.com/auth/cloud-platform"]
    }]

    return config


def _configure_key_pair(config):
    """Configure SSH access, using an existing key pair if possible.

    Creates a project-wide ssh key that can be used to access all the
    instances unless explicitly prohibited by instance config.

    The ssh-keys created by ray are of format:

      [USERNAME]:ssh-rsa [KEY_VALUE] [USERNAME]

    where:

      [USERNAME] is the user for the SSH key, specified in the config.
      [KEY_VALUE] is the public SSH key value.
    """

    if "ssh_private_key" in config["auth"]:
        return config

    ssh_user = config["auth"]["ssh_user"]

    project = compute.projects().get(
        project=config["provider"]["project_id"]).execute()

    # Key pairs associated with project meta data. The key pairs are general,
    # and not just ssh keys.
    ssh_keys_str = next(
        (item
         for item in project["commonInstanceMetadata"].get("items", [])
         if item["key"] == "ssh-keys"), {}).get("value", "")

    ssh_keys = ssh_keys_str.split("\n") if ssh_keys_str else []

    # Try a few times to get or create a good key pair.
key_found = False for i in range(10): key_name = key_pair_name(i, config["provider"]["region"], config["provider"]["project_id"], ssh_user) public_key_path, private_key_path = key_pair_paths(key_name) for ssh_key in ssh_keys: key_parts = ssh_key.split(" ") if len(key_parts) != 3: continue if key_parts[2] == ssh_user and os.path.exists(private_key_path): # Found a key key_found = True break # Create a key since it doesn't exist locally or in GCP if not key_found and not os.path.exists(private_key_path): logger.info("_configure_key_pair: " "Creating new key pair {}".format(key_name)) public_key, private_key = generate_rsa_key_pair() _create_project_ssh_key_pair(project, public_key, ssh_user) with open(private_key_path, "w") as f: f.write(private_key) os.chmod(private_key_path, 0o600) with open(public_key_path, "w") as f: f.write(public_key) key_found = True break if key_found: break assert key_found, "SSH keypair for user {} not found for {}".format( ssh_user, private_key_path) assert os.path.exists(private_key_path), ( "Private key file {} not found for user {}" "".format(private_key_path, ssh_user)) logger.info("_configure_key_pair: " "Private key not specified in config, using" "{}".format(private_key_path)) config["auth"]["ssh_private_key"] = private_key_path return config def _configure_subnet(config): """Pick a reasonable subnet if not specified by the config.""" # Rationale: avoid subnet lookup if the network is already # completely manually configured if ("networkInterfaces" in config["head_node"] and "networkInterfaces" in config["worker_nodes"]): return config subnets = _list_subnets(config) if not subnets: raise NotImplementedError("Should be able to create subnet.") # TODO: make sure that we have usable subnet. Maybe call # compute.subnetworks().listUsable? 
For some reason it didn't # work out-of-the-box default_subnet = subnets[0] if "networkInterfaces" not in config["head_node"]: config["head_node"]["networkInterfaces"] = [{ "subnetwork": default_subnet["selfLink"], "accessConfigs": [{ "name": "External NAT", "type": "ONE_TO_ONE_NAT", }], }] if "networkInterfaces" not in config["worker_nodes"]: config["worker_nodes"]["networkInterfaces"] = [{ "subnetwork": default_subnet["selfLink"], "accessConfigs": [{ "name": "External NAT", "type": "ONE_TO_ONE_NAT", }], }] return config def _list_subnets(config): response = compute.subnetworks().list( project=config["provider"]["project_id"], region=config["provider"]["region"]).execute() return response["items"] def _get_subnet(config, subnet_id): subnet = compute.subnetworks().get( project=config["provider"]["project_id"], region=config["provider"]["region"], subnetwork=subnet_id, ).execute() return subnet def _get_project(project_id): try: project = crm.projects().get(projectId=project_id).execute() except errors.HttpError as e: if e.resp.status != 403: raise project = None return project def _create_project(project_id): operation = crm.projects().create(body={ "projectId": project_id, "name": project_id }).execute() result = wait_for_crm_operation(operation) return result def _get_service_account(account, config): project_id = config["provider"]["project_id"] full_name = ("projects/{project_id}/serviceAccounts/{account}" "".format(project_id=project_id, account=account)) try: service_account = iam.projects().serviceAccounts().get( name=full_name).execute() except errors.HttpError as e: if e.resp.status != 404: raise service_account = None return service_account def _create_service_account(account_id, account_config, config): project_id = config["provider"]["project_id"] service_account = iam.projects().serviceAccounts().create( name="projects/{project_id}".format(project_id=project_id), body={ "accountId": account_id, "serviceAccount": account_config, }).execute() return service_account def _add_iam_policy_binding(service_account, roles): """Add new IAM roles for the service account.""" project_id = service_account["projectId"] email = service_account["email"] member_id = "serviceAccount:" + email policy = crm.projects().getIamPolicy( resource=project_id, body={}).execute() already_configured = True for role in roles: role_exists = False for binding in policy["bindings"]: if binding["role"] == role: if member_id not in binding["members"]: binding["members"].append(member_id) already_configured = False role_exists = True if not role_exists: already_configured = False policy["bindings"].append({ "members": [member_id], "role": role, }) if already_configured: # In some managed environments, an admin needs to grant the # roles, so only call setIamPolicy if needed. 
return result = crm.projects().setIamPolicy( resource=project_id, body={ "policy": policy, }).execute() return result def _create_project_ssh_key_pair(project, public_key, ssh_user): """Inserts an ssh-key into project commonInstanceMetadata""" key_parts = public_key.split(" ") # Sanity checks to make sure that the generated key matches expectation assert len(key_parts) == 2, key_parts assert key_parts[0] == "ssh-rsa", key_parts new_ssh_meta = "{ssh_user}:ssh-rsa {key_value} {ssh_user}".format( ssh_user=ssh_user, key_value=key_parts[1]) common_instance_metadata = project["commonInstanceMetadata"] items = common_instance_metadata.get("items", []) ssh_keys_i = next( (i for i, item in enumerate(items) if item["key"] == "ssh-keys"), None) if ssh_keys_i is None: items.append({"key": "ssh-keys", "value": new_ssh_meta}) else: ssh_keys = items[ssh_keys_i] ssh_keys["value"] += "\n" + new_ssh_meta items[ssh_keys_i] = ssh_keys common_instance_metadata["items"] = items operation = compute.projects().setCommonInstanceMetadata( project=project["name"], body=common_instance_metadata).execute() response = wait_for_compute_global_operation(project["name"], operation) return response
stephanie-wang/ray
python/ray/autoscaler/gcp/config.py
Python
apache-2.0
14,384
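# Illustrative driver for generate_rsa_key_pair above: writes a throwaway
# key pair under a temporary directory; the file names are examples only.
import os
import tempfile

public_key, private_pem = generate_rsa_key_pair()
demo_dir = tempfile.mkdtemp()
private_path = os.path.join(demo_dir, "demo_key.pem")
with open(private_path, "w") as f:
    f.write(private_pem)
os.chmod(private_path, 0o600)  # same restrictive mode the autoscaler applies
with open(os.path.join(demo_dir, "demo_key.pub"), "w") as f:
    f.write(public_key)
assert public_key.startswith("ssh-rsa ")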
# coding: utf-8 """ Copyright 2015 SmartBear Software Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Ref: https://github.com/swagger-api/swagger-codegen """ from pprint import pformat from six import iteritems class V1ServiceAccount(object): """ NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. """ def __init__(self): """ V1ServiceAccount - a model defined in Swagger :param dict swaggerTypes: The key is attribute name and the value is attribute type. :param dict attributeMap: The key is attribute name and the value is json key in definition. """ self.swagger_types = { 'kind': 'str', 'api_version': 'str', 'metadata': 'V1ObjectMeta', 'secrets': 'list[V1ObjectReference]', 'image_pull_secrets': 'list[V1LocalObjectReference]' } self.attribute_map = { 'kind': 'kind', 'api_version': 'apiVersion', 'metadata': 'metadata', 'secrets': 'secrets', 'image_pull_secrets': 'imagePullSecrets' } self._kind = None self._api_version = None self._metadata = None self._secrets = None self._image_pull_secrets = None @property def kind(self): """ Gets the kind of this V1ServiceAccount. Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds :return: The kind of this V1ServiceAccount. :rtype: str """ return self._kind @kind.setter def kind(self, kind): """ Sets the kind of this V1ServiceAccount. Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds :param kind: The kind of this V1ServiceAccount. :type: str """ self._kind = kind @property def api_version(self): """ Gets the api_version of this V1ServiceAccount. APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#resources :return: The api_version of this V1ServiceAccount. :rtype: str """ return self._api_version @api_version.setter def api_version(self, api_version): """ Sets the api_version of this V1ServiceAccount. APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#resources :param api_version: The api_version of this V1ServiceAccount. :type: str """ self._api_version = api_version @property def metadata(self): """ Gets the metadata of this V1ServiceAccount. Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata :return: The metadata of this V1ServiceAccount. 
:rtype: V1ObjectMeta """ return self._metadata @metadata.setter def metadata(self, metadata): """ Sets the metadata of this V1ServiceAccount. Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata :param metadata: The metadata of this V1ServiceAccount. :type: V1ObjectMeta """ self._metadata = metadata @property def secrets(self): """ Gets the secrets of this V1ServiceAccount. Secrets is the list of secrets allowed to be used by pods running using this ServiceAccount. More info: http://releases.k8s.io/HEAD/docs/user-guide/secrets.md :return: The secrets of this V1ServiceAccount. :rtype: list[V1ObjectReference] """ return self._secrets @secrets.setter def secrets(self, secrets): """ Sets the secrets of this V1ServiceAccount. Secrets is the list of secrets allowed to be used by pods running using this ServiceAccount. More info: http://releases.k8s.io/HEAD/docs/user-guide/secrets.md :param secrets: The secrets of this V1ServiceAccount. :type: list[V1ObjectReference] """ self._secrets = secrets @property def image_pull_secrets(self): """ Gets the image_pull_secrets of this V1ServiceAccount. ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. More info: http://releases.k8s.io/HEAD/docs/user-guide/secrets.md#manually-specifying-an-imagepullsecret :return: The image_pull_secrets of this V1ServiceAccount. :rtype: list[V1LocalObjectReference] """ return self._image_pull_secrets @image_pull_secrets.setter def image_pull_secrets(self, image_pull_secrets): """ Sets the image_pull_secrets of this V1ServiceAccount. ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. More info: http://releases.k8s.io/HEAD/docs/user-guide/secrets.md#manually-specifying-an-imagepullsecret :param image_pull_secrets: The image_pull_secrets of this V1ServiceAccount. :type: list[V1LocalObjectReference] """ self._image_pull_secrets = image_pull_secrets def to_dict(self): """ Returns the model properties as a dict """ result = {} for attr, _ in iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() else: result[attr] = value return result def to_str(self): """ Returns the string representation of the model """ return pformat(self.to_dict()) def __repr__(self): """ For `print` and `pprint` """ return self.to_str() def __eq__(self, other): """ Returns true if both objects are equal """ return self.__dict__ == other.__dict__ def __ne__(self, other): """ Returns true if both objects are not equal """ return not self == other
danielfrg/jupyterhub-kubernetes_spawner
kubernetes_spawner/swagger_client/models/v1_service_account.py
Python
apache-2.0
8,051
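A minimal usage sketch of the generated model above. The import path is inferred from this record's path field and is an assumption, not verified API; the to_dict() and equality behaviour shown come straight from the file itself.

# Usage sketch (assumed import path, taken from the record's path field).
from kubernetes_spawner.swagger_client.models.v1_service_account import V1ServiceAccount

account = V1ServiceAccount()
account.kind = "ServiceAccount"   # plain str, per swagger_types
account.api_version = "v1"

# to_dict() walks swagger_types; unset attributes come back as None.
print(account.to_dict())
# -> {'kind': 'ServiceAccount', 'api_version': 'v1', 'metadata': None,
#     'secrets': None, 'image_pull_secrets': None}

# __eq__ compares the full attribute dict, so two fresh instances match.
assert V1ServiceAccount() == V1ServiceAccount()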
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # Copyright 2013 Canonical Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections import contextlib import mock from oslo_utils import uuidutils from oslo_vmware import exceptions as vexc from oslo_vmware.objects import datastore as ds_obj from oslo_vmware import pbm from nova import exception from nova.network import model as network_model from nova import test from nova.tests.unit import fake_instance from nova.tests.unit.virt.vmwareapi import fake from nova.tests.unit.virt.vmwareapi import stubs from nova.virt.vmwareapi import constants from nova.virt.vmwareapi import driver from nova.virt.vmwareapi import vm_util class partialObject(object): def __init__(self, path='fake-path'): self.path = path self.fault = fake.DataObject() class VMwareVMUtilTestCase(test.NoDBTestCase): def setUp(self): super(VMwareVMUtilTestCase, self).setUp() fake.reset() stubs.set_stubs(self.stubs) vm_util.vm_refs_cache_reset() self._instance = fake_instance.fake_instance_obj( None, **{'id': 7, 'name': 'fake!', 'uuid': uuidutils.generate_uuid(), 'vcpus': 2, 'memory_mb': 2048}) def _test_get_stats_from_cluster(self, connection_state="connected", maintenance_mode=False): ManagedObjectRefs = [fake.ManagedObjectReference("host1", "HostSystem"), fake.ManagedObjectReference("host2", "HostSystem")] hosts = fake._convert_to_array_of_mor(ManagedObjectRefs) respool = fake.ManagedObjectReference("resgroup-11", "ResourcePool") prop_dict = {'host': hosts, 'resourcePool': respool} hardware = fake.DataObject() hardware.numCpuCores = 8 hardware.numCpuThreads = 16 hardware.vendor = "Intel" hardware.cpuModel = "Intel(R) Xeon(R)" runtime_host_1 = fake.DataObject() runtime_host_1.connectionState = "connected" runtime_host_1.inMaintenanceMode = False runtime_host_2 = fake.DataObject() runtime_host_2.connectionState = connection_state runtime_host_2.inMaintenanceMode = maintenance_mode prop_list_host_1 = [fake.Prop(name="hardware_summary", val=hardware), fake.Prop(name="runtime_summary", val=runtime_host_1)] prop_list_host_2 = [fake.Prop(name="hardware_summary", val=hardware), fake.Prop(name="runtime_summary", val=runtime_host_2)] fake_objects = fake.FakeRetrieveResult() fake_objects.add_object(fake.ObjectContent("prop_list_host1", prop_list_host_1)) fake_objects.add_object(fake.ObjectContent("prop_list_host1", prop_list_host_2)) respool_resource_usage = fake.DataObject() respool_resource_usage.maxUsage = 5368709120 respool_resource_usage.overallUsage = 2147483648 def fake_call_method(*args): if "get_dynamic_properties" in args: return prop_dict elif "get_properties_for_a_collection_of_objects" in args: return fake_objects else: return respool_resource_usage session = fake.FakeSession() with mock.patch.object(session, '_call_method', fake_call_method): result = vm_util.get_stats_from_cluster(session, "cluster1") mem_info = {} if connection_state == "connected" and not maintenance_mode: vcpus = 32 else: vcpus = 16 mem_info['total'] = 5120 
mem_info['free'] = 3072 expected_stats = {'vcpus': vcpus, 'mem': mem_info} self.assertEqual(expected_stats, result) def test_get_stats_from_cluster_hosts_connected_and_active(self): self._test_get_stats_from_cluster() def test_get_stats_from_cluster_hosts_disconnected_and_active(self): self._test_get_stats_from_cluster(connection_state="disconnected") def test_get_stats_from_cluster_hosts_connected_and_maintenance(self): self._test_get_stats_from_cluster(maintenance_mode=True) def test_get_host_ref_no_hosts_in_cluster(self): self.assertRaises(exception.NoValidHost, vm_util.get_host_ref, fake.FakeObjectRetrievalSession(""), 'fake_cluster') def test_get_resize_spec(self): vcpus = 2 memory_mb = 2048 extra_specs = vm_util.ExtraSpecs() fake_factory = fake.FakeFactory() result = vm_util.get_vm_resize_spec(fake_factory, vcpus, memory_mb, extra_specs) expected = fake_factory.create('ns0:VirtualMachineConfigSpec') expected.memoryMB = memory_mb expected.numCPUs = vcpus cpuAllocation = fake_factory.create('ns0:ResourceAllocationInfo') cpuAllocation.reservation = 0 cpuAllocation.limit = -1 cpuAllocation.shares = fake_factory.create('ns0:SharesInfo') cpuAllocation.shares.level = 'normal' cpuAllocation.shares.shares = 0 expected.cpuAllocation = cpuAllocation self.assertEqual(expected, result) def test_get_resize_spec_with_limits(self): vcpus = 2 memory_mb = 2048 cpu_limits = vm_util.CpuLimits(cpu_limit=7, cpu_reservation=6) extra_specs = vm_util.ExtraSpecs(cpu_limits=cpu_limits) fake_factory = fake.FakeFactory() result = vm_util.get_vm_resize_spec(fake_factory, vcpus, memory_mb, extra_specs) expected = fake_factory.create('ns0:VirtualMachineConfigSpec') expected.memoryMB = memory_mb expected.numCPUs = vcpus cpuAllocation = fake_factory.create('ns0:ResourceAllocationInfo') cpuAllocation.reservation = 6 cpuAllocation.limit = 7 cpuAllocation.shares = fake_factory.create('ns0:SharesInfo') cpuAllocation.shares.level = 'normal' cpuAllocation.shares.shares = 0 expected.cpuAllocation = cpuAllocation self.assertEqual(expected, result) def test_get_cdrom_attach_config_spec(self): fake_factory = fake.FakeFactory() datastore = fake.Datastore() result = vm_util.get_cdrom_attach_config_spec(fake_factory, datastore, "/tmp/foo.iso", 200, 0) expected = fake_factory.create('ns0:VirtualMachineConfigSpec') expected.deviceChange = [] device_change = fake_factory.create('ns0:VirtualDeviceConfigSpec') device_change.operation = 'add' device_change.device = fake_factory.create('ns0:VirtualCdrom') device_change.device.controllerKey = 200 device_change.device.unitNumber = 0 device_change.device.key = -1 connectable = fake_factory.create('ns0:VirtualDeviceConnectInfo') connectable.allowGuestControl = False connectable.startConnected = True connectable.connected = True device_change.device.connectable = connectable backing = fake_factory.create('ns0:VirtualCdromIsoBackingInfo') backing.fileName = '/tmp/foo.iso' backing.datastore = datastore device_change.device.backing = backing expected.deviceChange.append(device_change) self.assertEqual(expected, result) def test_lsilogic_controller_spec(self): # Test controller spec returned for lsiLogic sas adapter type config_spec = vm_util.create_controller_spec(fake.FakeFactory(), -101, adapter_type="lsiLogicsas") self.assertEqual("ns0:VirtualLsiLogicSASController", config_spec.device.obj_name) def test_paravirtual_controller_spec(self): # Test controller spec returned for paraVirtual adapter type config_spec = vm_util.create_controller_spec(fake.FakeFactory(), -101, 
adapter_type="paraVirtual") self.assertEqual("ns0:ParaVirtualSCSIController", config_spec.device.obj_name) def _vmdk_path_and_adapter_type_devices(self, filename, parent=None): # Test the adapter_type returned for a lsiLogic sas controller controller_key = 1000 disk = fake.VirtualDisk() disk.controllerKey = controller_key disk_backing = fake.VirtualDiskFlatVer2BackingInfo() disk_backing.fileName = filename disk.capacityInBytes = 1024 if parent: disk_backing.parent = parent disk.backing = disk_backing # Ephemeral disk e_disk = fake.VirtualDisk() e_disk.controllerKey = controller_key disk_backing = fake.VirtualDiskFlatVer2BackingInfo() disk_backing.fileName = '[test_datastore] uuid/ephemeral_0.vmdk' e_disk.capacityInBytes = 512 e_disk.backing = disk_backing controller = fake.VirtualLsiLogicSASController() controller.key = controller_key devices = [disk, e_disk, controller] return devices def test_get_vmdk_path_and_adapter_type(self): filename = '[test_datastore] uuid/uuid.vmdk' devices = self._vmdk_path_and_adapter_type_devices(filename) session = fake.FakeSession() with mock.patch.object(session, '_call_method', return_value=devices): vmdk = vm_util.get_vmdk_info(session, None) self.assertEqual('lsiLogicsas', vmdk.adapter_type) self.assertEqual('[test_datastore] uuid/ephemeral_0.vmdk', vmdk.path) self.assertEqual(512, vmdk.capacity_in_bytes) self.assertEqual(devices[1], vmdk.device) def test_get_vmdk_path_and_adapter_type_with_match(self): n_filename = '[test_datastore] uuid/uuid.vmdk' devices = self._vmdk_path_and_adapter_type_devices(n_filename) session = fake.FakeSession() with mock.patch.object(session, '_call_method', return_value=devices): vmdk = vm_util.get_vmdk_info(session, None, uuid='uuid') self.assertEqual('lsiLogicsas', vmdk.adapter_type) self.assertEqual(n_filename, vmdk.path) self.assertEqual(1024, vmdk.capacity_in_bytes) self.assertEqual(devices[0], vmdk.device) def test_get_vmdk_path_and_adapter_type_with_nomatch(self): n_filename = '[test_datastore] diuu/diuu.vmdk' session = fake.FakeSession() devices = self._vmdk_path_and_adapter_type_devices(n_filename) with mock.patch.object(session, '_call_method', return_value=devices): vmdk = vm_util.get_vmdk_info(session, None, uuid='uuid') self.assertIsNone(vmdk.adapter_type) self.assertIsNone(vmdk.path) self.assertEqual(0, vmdk.capacity_in_bytes) self.assertIsNone(vmdk.device) def test_get_vmdk_adapter_type(self): # Test for the adapter_type to be used in vmdk descriptor # Adapter type in vmdk descriptor is same for LSI-SAS, LSILogic # and ParaVirtual vmdk_adapter_type = vm_util.get_vmdk_adapter_type("lsiLogic") self.assertEqual("lsiLogic", vmdk_adapter_type) vmdk_adapter_type = vm_util.get_vmdk_adapter_type("lsiLogicsas") self.assertEqual("lsiLogic", vmdk_adapter_type) vmdk_adapter_type = vm_util.get_vmdk_adapter_type("paraVirtual") self.assertEqual("lsiLogic", vmdk_adapter_type) vmdk_adapter_type = vm_util.get_vmdk_adapter_type("dummyAdapter") self.assertEqual("dummyAdapter", vmdk_adapter_type) def test_get_scsi_adapter_type(self): vm = fake.VirtualMachine() devices = vm.get("config.hardware.device").VirtualDevice scsi_controller = fake.VirtualLsiLogicController() ide_controller = fake.VirtualIDEController() devices.append(scsi_controller) devices.append(ide_controller) fake._update_object("VirtualMachine", vm) # return the scsi type, not ide hardware_device = vm.get("config.hardware.device") self.assertEqual(constants.DEFAULT_ADAPTER_TYPE, vm_util.get_scsi_adapter_type(hardware_device)) def 
test_get_scsi_adapter_type_with_error(self): vm = fake.VirtualMachine() devices = vm.get("config.hardware.device").VirtualDevice scsi_controller = fake.VirtualLsiLogicController() ide_controller = fake.VirtualIDEController() devices.append(scsi_controller) devices.append(ide_controller) fake._update_object("VirtualMachine", vm) # the controller is not suitable since the device under this controller # has exceeded SCSI_MAX_CONNECT_NUMBER for i in range(0, constants.SCSI_MAX_CONNECT_NUMBER): scsi_controller.device.append('device' + str(i)) hardware_device = vm.get("config.hardware.device") self.assertRaises(exception.StorageError, vm_util.get_scsi_adapter_type, hardware_device) def test_find_allocated_slots(self): disk1 = fake.VirtualDisk(200, 0) disk2 = fake.VirtualDisk(200, 1) disk3 = fake.VirtualDisk(201, 1) ide0 = fake.VirtualIDEController(200) ide1 = fake.VirtualIDEController(201) scsi0 = fake.VirtualLsiLogicController(key=1000, scsiCtlrUnitNumber=7) devices = [disk1, disk2, disk3, ide0, ide1, scsi0] taken = vm_util._find_allocated_slots(devices) self.assertEqual([0, 1], sorted(taken[200])) self.assertEqual([1], taken[201]) self.assertEqual([7], taken[1000]) def test_allocate_controller_key_and_unit_number_ide_default(self): # Test that default IDE controllers are used when there is a free slot # on them disk1 = fake.VirtualDisk(200, 0) disk2 = fake.VirtualDisk(200, 1) ide0 = fake.VirtualIDEController(200) ide1 = fake.VirtualIDEController(201) devices = [disk1, disk2, ide0, ide1] (controller_key, unit_number, controller_spec) = vm_util.allocate_controller_key_and_unit_number( None, devices, 'ide') self.assertEqual(201, controller_key) self.assertEqual(0, unit_number) self.assertIsNone(controller_spec) def test_allocate_controller_key_and_unit_number_ide(self): # Test that a new controller is created when there is no free slot on # the default IDE controllers ide0 = fake.VirtualIDEController(200) ide1 = fake.VirtualIDEController(201) devices = [ide0, ide1] for controller_key in [200, 201]: for unit_number in [0, 1]: disk = fake.VirtualDisk(controller_key, unit_number) devices.append(disk) factory = fake.FakeFactory() (controller_key, unit_number, controller_spec) = vm_util.allocate_controller_key_and_unit_number( factory, devices, 'ide') self.assertEqual(-101, controller_key) self.assertEqual(0, unit_number) self.assertIsNotNone(controller_spec) def test_allocate_controller_key_and_unit_number_scsi(self): # Test that we allocate on existing SCSI controller if there is a free # slot on it devices = [fake.VirtualLsiLogicController(1000, scsiCtlrUnitNumber=7)] for unit_number in range(7): disk = fake.VirtualDisk(1000, unit_number) devices.append(disk) factory = fake.FakeFactory() (controller_key, unit_number, controller_spec) = vm_util.allocate_controller_key_and_unit_number( factory, devices, 'lsiLogic') self.assertEqual(1000, controller_key) self.assertEqual(8, unit_number) self.assertIsNone(controller_spec) def test_get_vnc_config_spec(self): fake_factory = fake.FakeFactory() result = vm_util.get_vnc_config_spec(fake_factory, 7) expected = fake_factory.create('ns0:VirtualMachineConfigSpec') expected.extraConfig = [] remote_display_vnc_enabled = fake_factory.create('ns0:OptionValue') remote_display_vnc_enabled.value = 'true' remote_display_vnc_enabled.key = 'RemoteDisplay.vnc.enabled' expected.extraConfig.append(remote_display_vnc_enabled) remote_display_vnc_port = fake_factory.create('ns0:OptionValue') remote_display_vnc_port.value = 7 remote_display_vnc_port.key = 
'RemoteDisplay.vnc.port' expected.extraConfig.append(remote_display_vnc_port) remote_display_vnc_keymap = fake_factory.create('ns0:OptionValue') remote_display_vnc_keymap.value = 'en-us' remote_display_vnc_keymap.key = 'RemoteDisplay.vnc.keyMap' expected.extraConfig.append(remote_display_vnc_keymap) self.assertEqual(expected, result) def _create_fake_vms(self): fake_vms = fake.FakeRetrieveResult() OptionValue = collections.namedtuple('OptionValue', ['key', 'value']) for i in range(10): vm = fake.ManagedObject() opt_val = OptionValue(key='', value=5900 + i) vm.set(vm_util.VNC_CONFIG_KEY, opt_val) fake_vms.add_object(vm) return fake_vms def test_get_vnc_port(self): fake_vms = self._create_fake_vms() self.flags(vnc_port=5900, group='vmware') self.flags(vnc_port_total=10000, group='vmware') actual = vm_util.get_vnc_port( fake.FakeObjectRetrievalSession(fake_vms)) self.assertEqual(actual, 5910) def test_get_vnc_port_exhausted(self): fake_vms = self._create_fake_vms() self.flags(vnc_port=5900, group='vmware') self.flags(vnc_port_total=10, group='vmware') self.assertRaises(exception.ConsolePortRangeExhausted, vm_util.get_vnc_port, fake.FakeObjectRetrievalSession(fake_vms)) def test_get_all_cluster_refs_by_name_none(self): fake_objects = fake.FakeRetrieveResult() refs = vm_util.get_all_cluster_refs_by_name( fake.FakeObjectRetrievalSession(fake_objects), ['fake_cluster']) self.assertEqual({}, refs) def test_get_all_cluster_refs_by_name_exists(self): fake_objects = fake.FakeRetrieveResult() fake_objects.add_object(fake.ClusterComputeResource(name='cluster')) refs = vm_util.get_all_cluster_refs_by_name( fake.FakeObjectRetrievalSession(fake_objects), ['cluster']) self.assertEqual(1, len(refs)) def test_get_all_cluster_refs_by_name_missing(self): fake_objects = fake.FakeRetrieveResult() fake_objects.add_object(partialObject(path='cluster')) refs = vm_util.get_all_cluster_refs_by_name( fake.FakeObjectRetrievalSession(fake_objects), ['cluster']) self.assertEqual({}, refs) def test_propset_dict_simple(self): ObjectContent = collections.namedtuple('ObjectContent', ['propSet']) DynamicProperty = collections.namedtuple('Property', ['name', 'val']) object = ObjectContent(propSet=[ DynamicProperty(name='foo', val="bar")]) propdict = vm_util.propset_dict(object.propSet) self.assertEqual("bar", propdict['foo']) def test_propset_dict_complex(self): ObjectContent = collections.namedtuple('ObjectContent', ['propSet']) DynamicProperty = collections.namedtuple('Property', ['name', 'val']) MoRef = collections.namedtuple('Val', ['value']) object = ObjectContent(propSet=[ DynamicProperty(name='foo', val="bar"), DynamicProperty(name='some.thing', val=MoRef(value='else')), DynamicProperty(name='another.thing', val='value')]) propdict = vm_util.propset_dict(object.propSet) self.assertEqual("bar", propdict['foo']) self.assertTrue(hasattr(propdict['some.thing'], 'value')) self.assertEqual("else", propdict['some.thing'].value) self.assertEqual("value", propdict['another.thing']) def _test_detach_virtual_disk_spec(self, destroy_disk=False): virtual_device_config = vm_util.detach_virtual_disk_spec( fake.FakeFactory(), 'fake_device', destroy_disk) self.assertEqual('remove', virtual_device_config.operation) self.assertEqual('fake_device', virtual_device_config.device) self.assertEqual('ns0:VirtualDeviceConfigSpec', virtual_device_config.obj_name) if destroy_disk: self.assertEqual('destroy', virtual_device_config.fileOperation) else: self.assertFalse(hasattr(virtual_device_config, 'fileOperation')) def 
test_detach_virtual_disk_spec(self): self._test_detach_virtual_disk_spec(destroy_disk=False) def test_detach_virtual_disk_destroy_spec(self): self._test_detach_virtual_disk_spec(destroy_disk=True) def test_get_vm_create_spec(self): extra_specs = vm_util.ExtraSpecs() fake_factory = fake.FakeFactory() result = vm_util.get_vm_create_spec(fake_factory, self._instance, 'fake-datastore', [], extra_specs) expected = fake_factory.create('ns0:VirtualMachineConfigSpec') expected.name = self._instance.uuid expected.instanceUuid = self._instance.uuid expected.deviceChange = [] expected.numCPUs = 2 expected.version = None expected.memoryMB = 2048 expected.guestId = 'otherGuest' expected.extraConfig = [] extra_config = fake_factory.create("ns0:OptionValue") extra_config.value = self._instance.uuid extra_config.key = 'nvp.vm-uuid' expected.extraConfig.append(extra_config) expected.files = fake_factory.create('ns0:VirtualMachineFileInfo') expected.files.vmPathName = '[fake-datastore]' expected.managedBy = fake_factory.create('ns0:ManagedByInfo') expected.managedBy.extensionKey = 'org.openstack.compute' expected.managedBy.type = 'instance' expected.tools = fake_factory.create('ns0:ToolsConfigInfo') expected.tools.afterPowerOn = True expected.tools.afterResume = True expected.tools.beforeGuestReboot = True expected.tools.beforeGuestShutdown = True expected.tools.beforeGuestStandby = True self.assertEqual(expected, result) def test_get_vm_create_spec_with_allocations(self): cpu_limits = vm_util.CpuLimits(cpu_limit=7, cpu_reservation=6) extra_specs = vm_util.ExtraSpecs(cpu_limits=cpu_limits) fake_factory = fake.FakeFactory() result = vm_util.get_vm_create_spec(fake_factory, self._instance, 'fake-datastore', [], extra_specs) expected = fake_factory.create('ns0:VirtualMachineConfigSpec') expected.deviceChange = [] expected.guestId = 'otherGuest' expected.instanceUuid = self._instance.uuid expected.memoryMB = self._instance.memory_mb expected.name = self._instance.uuid expected.numCPUs = self._instance.vcpus expected.version = None expected.files = fake_factory.create('ns0:VirtualMachineFileInfo') expected.files.vmPathName = '[fake-datastore]' expected.tools = fake_factory.create('ns0:ToolsConfigInfo') expected.tools.afterPowerOn = True expected.tools.afterResume = True expected.tools.beforeGuestReboot = True expected.tools.beforeGuestShutdown = True expected.tools.beforeGuestStandby = True expected.managedBy = fake_factory.create('ns0:ManagedByInfo') expected.managedBy.extensionKey = 'org.openstack.compute' expected.managedBy.type = 'instance' cpu_allocation = fake_factory.create('ns0:ResourceAllocationInfo') cpu_allocation.limit = 7 cpu_allocation.reservation = 6 cpu_allocation.shares = fake_factory.create('ns0:SharesInfo') cpu_allocation.shares.level = 'normal' cpu_allocation.shares.shares = 0 expected.cpuAllocation = cpu_allocation expected.extraConfig = [] extra_config = fake_factory.create('ns0:OptionValue') extra_config.key = 'nvp.vm-uuid' extra_config.value = self._instance.uuid expected.extraConfig.append(extra_config) self.assertEqual(expected, result) def test_get_vm_create_spec_with_limit(self): cpu_limits = vm_util.CpuLimits(cpu_limit=7) extra_specs = vm_util.ExtraSpecs(cpu_limits=cpu_limits) fake_factory = fake.FakeFactory() result = vm_util.get_vm_create_spec(fake_factory, self._instance, 'fake-datastore', [], extra_specs) expected = fake_factory.create('ns0:VirtualMachineConfigSpec') expected.files = fake_factory.create('ns0:VirtualMachineFileInfo') expected.files.vmPathName = '[fake-datastore]' 
expected.instanceUuid = self._instance.uuid expected.name = self._instance.uuid expected.deviceChange = [] expected.extraConfig = [] extra_config = fake_factory.create("ns0:OptionValue") extra_config.value = self._instance.uuid extra_config.key = 'nvp.vm-uuid' expected.extraConfig.append(extra_config) expected.memoryMB = 2048 expected.managedBy = fake_factory.create('ns0:ManagedByInfo') expected.managedBy.extensionKey = 'org.openstack.compute' expected.managedBy.type = 'instance' expected.version = None expected.guestId = 'otherGuest' expected.tools = fake_factory.create('ns0:ToolsConfigInfo') expected.tools.afterPowerOn = True expected.tools.afterResume = True expected.tools.beforeGuestReboot = True expected.tools.beforeGuestShutdown = True expected.tools.beforeGuestStandby = True cpu_allocation = fake_factory.create('ns0:ResourceAllocationInfo') cpu_allocation.limit = 7 cpu_allocation.reservation = 0 cpu_allocation.shares = fake_factory.create('ns0:SharesInfo') cpu_allocation.shares.level = 'normal' cpu_allocation.shares.shares = 0 expected.cpuAllocation = cpu_allocation expected.numCPUs = 2 self.assertEqual(expected, result) def test_get_vm_create_spec_with_share(self): cpu_limits = vm_util.CpuLimits(cpu_shares_level='high') extra_specs = vm_util.ExtraSpecs(cpu_limits=cpu_limits) fake_factory = fake.FakeFactory() result = vm_util.get_vm_create_spec(fake_factory, self._instance, 'fake-datastore', [], extra_specs) expected = fake_factory.create('ns0:VirtualMachineConfigSpec') expected.files = fake_factory.create('ns0:VirtualMachineFileInfo') expected.files.vmPathName = '[fake-datastore]' expected.instanceUuid = self._instance.uuid expected.name = self._instance.uuid expected.deviceChange = [] expected.extraConfig = [] extra_config = fake_factory.create('ns0:OptionValue') extra_config.value = self._instance.uuid extra_config.key = 'nvp.vm-uuid' expected.extraConfig.append(extra_config) expected.memoryMB = 2048 expected.managedBy = fake_factory.create('ns0:ManagedByInfo') expected.managedBy.type = 'instance' expected.managedBy.extensionKey = 'org.openstack.compute' expected.version = None expected.guestId = 'otherGuest' expected.tools = fake_factory.create('ns0:ToolsConfigInfo') expected.tools.beforeGuestStandby = True expected.tools.beforeGuestReboot = True expected.tools.beforeGuestShutdown = True expected.tools.afterResume = True expected.tools.afterPowerOn = True cpu_allocation = fake_factory.create('ns0:ResourceAllocationInfo') cpu_allocation.reservation = 0 cpu_allocation.limit = -1 cpu_allocation.shares = fake_factory.create('ns0:SharesInfo') cpu_allocation.shares.level = 'high' cpu_allocation.shares.shares = 0 expected.cpuAllocation = cpu_allocation expected.numCPUs = 2 self.assertEqual(expected, result) def test_get_vm_create_spec_with_share_custom(self): cpu_limits = vm_util.CpuLimits(cpu_shares_level='custom', cpu_shares_share=1948) extra_specs = vm_util.ExtraSpecs(cpu_limits=cpu_limits) fake_factory = fake.FakeFactory() result = vm_util.get_vm_create_spec(fake_factory, self._instance, 'fake-datastore', [], extra_specs) expected = fake_factory.create('ns0:VirtualMachineConfigSpec') expected.files = fake_factory.create('ns0:VirtualMachineFileInfo') expected.files.vmPathName = '[fake-datastore]' expected.instanceUuid = self._instance.uuid expected.name = self._instance.uuid expected.deviceChange = [] expected.extraConfig = [] extra_config = fake_factory.create('ns0:OptionValue') extra_config.key = 'nvp.vm-uuid' extra_config.value = self._instance.uuid 
expected.extraConfig.append(extra_config) expected.memoryMB = 2048 expected.managedBy = fake_factory.create('ns0:ManagedByInfo') expected.managedBy.extensionKey = 'org.openstack.compute' expected.managedBy.type = 'instance' expected.version = None expected.guestId = 'otherGuest' expected.tools = fake_factory.create('ns0:ToolsConfigInfo') expected.tools.beforeGuestStandby = True expected.tools.beforeGuestReboot = True expected.tools.beforeGuestShutdown = True expected.tools.afterResume = True expected.tools.afterPowerOn = True cpu_allocation = fake_factory.create('ns0:ResourceAllocationInfo') cpu_allocation.reservation = 0 cpu_allocation.limit = -1 cpu_allocation.shares = fake_factory.create('ns0:SharesInfo') cpu_allocation.shares.level = 'custom' cpu_allocation.shares.shares = 1948 expected.cpuAllocation = cpu_allocation expected.numCPUs = 2 self.assertEqual(expected, result) def test_create_vm(self): method_list = ['CreateVM_Task', 'get_dynamic_property'] def fake_call_method(module, method, *args, **kwargs): expected_method = method_list.pop(0) self.assertEqual(expected_method, method) if (expected_method == 'CreateVM_Task'): return 'fake_create_vm_task' elif (expected_method == 'get_dynamic_property'): task_info = mock.Mock(state="success", result="fake_vm_ref") return task_info else: self.fail('Should not get here....') def fake_wait_for_task(self, *args): task_info = mock.Mock(state="success", result="fake_vm_ref") return task_info session = fake.FakeSession() fake_call_mock = mock.Mock(side_effect=fake_call_method) fake_wait_mock = mock.Mock(side_effect=fake_wait_for_task) with contextlib.nested( mock.patch.object(session, '_wait_for_task', fake_wait_mock), mock.patch.object(session, '_call_method', fake_call_mock) ) as (wait_for_task, call_method): vm_ref = vm_util.create_vm( session, self._instance, 'fake_vm_folder', 'fake_config_spec', 'fake_res_pool_ref') self.assertEqual('fake_vm_ref', vm_ref) call_method.assert_called_once_with(mock.ANY, 'CreateVM_Task', 'fake_vm_folder', config='fake_config_spec', pool='fake_res_pool_ref') wait_for_task.assert_called_once_with('fake_create_vm_task') @mock.patch.object(vm_util.LOG, 'warning') def test_create_vm_invalid_guestid(self, mock_log_warn): """Ensure we warn when create_vm() fails after we passed an unrecognised guestId """ found = [False] def fake_log_warn(msg, values): if not isinstance(values, dict): return if values.get('ostype') == 'invalid_os_type': found[0] = True mock_log_warn.side_effect = fake_log_warn session = driver.VMwareAPISession() config_spec = vm_util.get_vm_create_spec( session.vim.client.factory, self._instance, 'fake-datastore', [], vm_util.ExtraSpecs(), os_type='invalid_os_type') self.assertRaises(vexc.VMwareDriverException, vm_util.create_vm, session, self._instance, 'folder', config_spec, 'res-pool') self.assertTrue(found[0]) def test_convert_vif_model(self): expected = "VirtualE1000" result = vm_util.convert_vif_model(network_model.VIF_MODEL_E1000) self.assertEqual(expected, result) expected = "VirtualE1000e" result = vm_util.convert_vif_model(network_model.VIF_MODEL_E1000E) self.assertEqual(expected, result) types = ["VirtualE1000", "VirtualE1000e", "VirtualPCNet32", "VirtualVmxnet", "VirtualVmxnet3"] for type in types: self.assertEqual(type, vm_util.convert_vif_model(type)) self.assertRaises(exception.Invalid, vm_util.convert_vif_model, "InvalidVifModel") def test_power_on_instance_with_vm_ref(self): session = fake.FakeSession() with contextlib.nested( mock.patch.object(session, "_call_method", 
return_value='fake-task'), mock.patch.object(session, "_wait_for_task"), ) as (fake_call_method, fake_wait_for_task): vm_util.power_on_instance(session, self._instance, vm_ref='fake-vm-ref') fake_call_method.assert_called_once_with(session.vim, "PowerOnVM_Task", 'fake-vm-ref') fake_wait_for_task.assert_called_once_with('fake-task') def test_power_on_instance_without_vm_ref(self): session = fake.FakeSession() with contextlib.nested( mock.patch.object(vm_util, "get_vm_ref", return_value='fake-vm-ref'), mock.patch.object(session, "_call_method", return_value='fake-task'), mock.patch.object(session, "_wait_for_task"), ) as (fake_get_vm_ref, fake_call_method, fake_wait_for_task): vm_util.power_on_instance(session, self._instance) fake_get_vm_ref.assert_called_once_with(session, self._instance) fake_call_method.assert_called_once_with(session.vim, "PowerOnVM_Task", 'fake-vm-ref') fake_wait_for_task.assert_called_once_with('fake-task') def test_power_on_instance_with_exception(self): session = fake.FakeSession() with contextlib.nested( mock.patch.object(session, "_call_method", return_value='fake-task'), mock.patch.object(session, "_wait_for_task", side_effect=exception.NovaException('fake')), ) as (fake_call_method, fake_wait_for_task): self.assertRaises(exception.NovaException, vm_util.power_on_instance, session, self._instance, vm_ref='fake-vm-ref') fake_call_method.assert_called_once_with(session.vim, "PowerOnVM_Task", 'fake-vm-ref') fake_wait_for_task.assert_called_once_with('fake-task') def test_power_on_instance_with_power_state_exception(self): session = fake.FakeSession() with contextlib.nested( mock.patch.object(session, "_call_method", return_value='fake-task'), mock.patch.object( session, "_wait_for_task", side_effect=vexc.InvalidPowerStateException), ) as (fake_call_method, fake_wait_for_task): vm_util.power_on_instance(session, self._instance, vm_ref='fake-vm-ref') fake_call_method.assert_called_once_with(session.vim, "PowerOnVM_Task", 'fake-vm-ref') fake_wait_for_task.assert_called_once_with('fake-task') def test_create_virtual_disk(self): session = fake.FakeSession() dm = session.vim.service_content.virtualDiskManager with contextlib.nested( mock.patch.object(vm_util, "get_vmdk_create_spec", return_value='fake-spec'), mock.patch.object(session, "_call_method", return_value='fake-task'), mock.patch.object(session, "_wait_for_task"), ) as (fake_get_spec, fake_call_method, fake_wait_for_task): vm_util.create_virtual_disk(session, 'fake-dc-ref', 'fake-adapter-type', 'fake-disk-type', 'fake-path', 7) fake_get_spec.assert_called_once_with( session.vim.client.factory, 7, 'fake-adapter-type', 'fake-disk-type') fake_call_method.assert_called_once_with( session.vim, "CreateVirtualDisk_Task", dm, name='fake-path', datacenter='fake-dc-ref', spec='fake-spec') fake_wait_for_task.assert_called_once_with('fake-task') def test_copy_virtual_disk(self): session = fake.FakeSession() dm = session.vim.service_content.virtualDiskManager with contextlib.nested( mock.patch.object(session, "_call_method", return_value='fake-task'), mock.patch.object(session, "_wait_for_task"), ) as (fake_call_method, fake_wait_for_task): vm_util.copy_virtual_disk(session, 'fake-dc-ref', 'fake-source', 'fake-dest') fake_call_method.assert_called_once_with( session.vim, "CopyVirtualDisk_Task", dm, sourceName='fake-source', sourceDatacenter='fake-dc-ref', destName='fake-dest') fake_wait_for_task.assert_called_once_with('fake-task') def _create_fake_vm_objects(self): fake_objects = fake.FakeRetrieveResult() 
fake_objects.add_object(fake.VirtualMachine()) return fake_objects def test_get_values(self): objects = self._create_fake_vm_objects() query = vm_util.get_values_from_object_properties( fake.FakeObjectRetrievalSession(objects), objects) self.assertEqual('poweredOn', query['runtime.powerState']) self.assertEqual('guestToolsRunning', query['summary.guest.toolsRunningStatus']) self.assertEqual('toolsOk', query['summary.guest.toolsStatus']) def test_reconfigure_vm(self): session = fake.FakeSession() with contextlib.nested( mock.patch.object(session, '_call_method', return_value='fake_reconfigure_task'), mock.patch.object(session, '_wait_for_task') ) as (_call_method, _wait_for_task): vm_util.reconfigure_vm(session, 'fake-ref', 'fake-spec') _call_method.assert_called_once_with(mock.ANY, 'ReconfigVM_Task', 'fake-ref', spec='fake-spec') _wait_for_task.assert_called_once_with( 'fake_reconfigure_task') def test_get_network_attach_config_spec_opaque(self): vif_info = {'network_name': 'br-int', 'mac_address': '00:00:00:ca:fe:01', 'network_ref': {'type': 'OpaqueNetwork', 'network-id': 'fake-network-id', 'network-type': 'opaque'}, 'iface_id': 7, 'vif_model': 'VirtualE1000'} fake_factory = fake.FakeFactory() result = vm_util.get_network_attach_config_spec( fake_factory, vif_info, 1) card = 'ns0:VirtualEthernetCardOpaqueNetworkBackingInfo' expected = fake_factory.create('ns0:VirtualMachineConfigSpec') expected.extraConfig = [] extra_config = fake_factory.create('ns0:OptionValue') extra_config.value = vif_info['iface_id'] extra_config.key = 'nvp.iface-id.1' expected.extraConfig.append(extra_config) expected.deviceChange = [] device_change = fake_factory.create('ns0:VirtualDeviceConfigSpec') device_change.operation = 'add' device = fake_factory.create('ns0:VirtualE1000') device.macAddress = vif_info['mac_address'] device.addressType = 'manual' connectable = fake_factory.create('ns0:VirtualDeviceConnectInfo') connectable.allowGuestControl = True connectable.startConnected = True connectable.connected = True device.connectable = connectable backing = fake_factory.create(card) backing.opaqueNetworkType = vif_info['network_ref']['network-type'] backing.opaqueNetworkId = vif_info['network_ref']['network-id'] device.backing = backing device.key = -47 device.wakeOnLanEnabled = True device_change.device = device expected.deviceChange.append(device_change) self.assertEqual(expected, result) def test_get_network_attach_config_spec_dvs(self): vif_info = {'network_name': 'br100', 'mac_address': '00:00:00:ca:fe:01', 'network_ref': {'type': 'DistributedVirtualPortgroup', 'dvsw': 'fake-network-id', 'dvpg': 'fake-group'}, 'iface_id': 7, 'vif_model': 'VirtualE1000'} fake_factory = fake.FakeFactory() result = vm_util.get_network_attach_config_spec( fake_factory, vif_info, 1) port = 'ns0:DistributedVirtualSwitchPortConnection' backing = 'ns0:VirtualEthernetCardDistributedVirtualPortBackingInfo' expected = fake_factory.create('ns0:VirtualMachineConfigSpec') expected.extraConfig = [] extra_config = fake_factory.create('ns0:OptionValue') extra_config.value = vif_info['iface_id'] extra_config.key = 'nvp.iface-id.1' expected.extraConfig.append(extra_config) expected.deviceChange = [] device_change = fake_factory.create('ns0:VirtualDeviceConfigSpec') device_change.operation = 'add' device = fake_factory.create('ns0:VirtualE1000') device.macAddress = vif_info['mac_address'] device.key = -47 device.addressType = 'manual' device.wakeOnLanEnabled = True device.backing = fake_factory.create(backing) device.backing.port = 
fake_factory.create(port) device.backing.port.portgroupKey = vif_info['network_ref']['dvpg'] device.backing.port.switchUuid = vif_info['network_ref']['dvsw'] connectable = fake_factory.create('ns0:VirtualDeviceConnectInfo') connectable.allowGuestControl = True connectable.connected = True connectable.startConnected = True device.connectable = connectable device_change.device = device expected.deviceChange.append(device_change) self.assertEqual(expected, result) def test_get_network_detach_config_spec(self): fake_factory = fake.FakeFactory() result = vm_util.get_network_detach_config_spec( fake_factory, 'fake-device', 2) expected = fake_factory.create('ns0:VirtualMachineConfigSpec') expected.extraConfig = [] extra_config = fake_factory.create('ns0:OptionValue') extra_config.value = 'free' extra_config.key = 'nvp.iface-id.2' expected.extraConfig.append(extra_config) expected.deviceChange = [] device_change = fake_factory.create('ns0:VirtualDeviceConfigSpec') device_change.device = 'fake-device' device_change.operation = 'remove' expected.deviceChange.append(device_change) self.assertEqual(expected, result) @mock.patch.object(vm_util, "get_vm_ref") def test_power_off_instance(self, fake_get_ref): session = fake.FakeSession() with contextlib.nested( mock.patch.object(session, '_call_method', return_value='fake-task'), mock.patch.object(session, '_wait_for_task') ) as (fake_call_method, fake_wait_for_task): vm_util.power_off_instance(session, self._instance, 'fake-vm-ref') fake_call_method.assert_called_once_with(session.vim, "PowerOffVM_Task", 'fake-vm-ref') fake_wait_for_task.assert_called_once_with('fake-task') self.assertFalse(fake_get_ref.called) @mock.patch.object(vm_util, "get_vm_ref", return_value="fake-vm-ref") def test_power_off_instance_no_vm_ref(self, fake_get_ref): session = fake.FakeSession() with contextlib.nested( mock.patch.object(session, '_call_method', return_value='fake-task'), mock.patch.object(session, '_wait_for_task') ) as (fake_call_method, fake_wait_for_task): vm_util.power_off_instance(session, self._instance) fake_get_ref.assert_called_once_with(session, self._instance) fake_call_method.assert_called_once_with(session.vim, "PowerOffVM_Task", 'fake-vm-ref') fake_wait_for_task.assert_called_once_with('fake-task') @mock.patch.object(vm_util, "get_vm_ref") def test_power_off_instance_with_exception(self, fake_get_ref): session = fake.FakeSession() with contextlib.nested( mock.patch.object(session, '_call_method', return_value='fake-task'), mock.patch.object(session, '_wait_for_task', side_effect=exception.NovaException('fake')) ) as (fake_call_method, fake_wait_for_task): self.assertRaises(exception.NovaException, vm_util.power_off_instance, session, self._instance, 'fake-vm-ref') fake_call_method.assert_called_once_with(session.vim, "PowerOffVM_Task", 'fake-vm-ref') fake_wait_for_task.assert_called_once_with('fake-task') self.assertFalse(fake_get_ref.called) @mock.patch.object(vm_util, "get_vm_ref") def test_power_off_instance_power_state_exception(self, fake_get_ref): session = fake.FakeSession() with contextlib.nested( mock.patch.object(session, '_call_method', return_value='fake-task'), mock.patch.object( session, '_wait_for_task', side_effect=vexc.InvalidPowerStateException) ) as (fake_call_method, fake_wait_for_task): vm_util.power_off_instance(session, self._instance, 'fake-vm-ref') fake_call_method.assert_called_once_with(session.vim, "PowerOffVM_Task", 'fake-vm-ref') fake_wait_for_task.assert_called_once_with('fake-task') self.assertFalse(fake_get_ref.called) 
def test_get_vm_create_spec_updated_hw_version(self): extra_specs = vm_util.ExtraSpecs(hw_version='vmx-08') result = vm_util.get_vm_create_spec(fake.FakeFactory(), self._instance, 'fake-datastore', [], extra_specs=extra_specs) self.assertEqual('vmx-08', result.version) def test_vm_create_spec_with_profile_spec(self): datastore = ds_obj.Datastore('fake-ds-ref', 'fake-ds-name') extra_specs = vm_util.ExtraSpecs() create_spec = vm_util.get_vm_create_spec(fake.FakeFactory(), self._instance, datastore.name, [], extra_specs, profile_spec='fake_profile_spec') self.assertEqual(['fake_profile_spec'], create_spec.vmProfile) @mock.patch.object(pbm, 'get_profile_id_by_name') def test_get_storage_profile_spec(self, mock_retrieve_profile_id): fake_profile_id = fake.DataObject() fake_profile_id.uniqueId = 'fake_unique_id' mock_retrieve_profile_id.return_value = fake_profile_id profile_spec = vm_util.get_storage_profile_spec(fake.FakeSession(), 'fake_policy') self.assertEqual('ns0:VirtualMachineDefinedProfileSpec', profile_spec.obj_name) self.assertEqual(fake_profile_id.uniqueId, profile_spec.profileId) @mock.patch.object(pbm, 'get_profile_id_by_name') def test_storage_spec_empty_profile(self, mock_retrieve_profile_id): mock_retrieve_profile_id.return_value = None profile_spec = vm_util.get_storage_profile_spec(fake.FakeSession(), 'fake_policy') self.assertIsNone(profile_spec) def test_get_ephemeral_name(self): filename = vm_util.get_ephemeral_name(0) self.assertEqual('ephemeral_0.vmdk', filename) def test_detach_and_delete_devices_config_spec(self): fake_devices = ['device1', 'device2'] fake_factory = fake.FakeFactory() result = vm_util._detach_and_delete_devices_config_spec(fake_factory, fake_devices) expected = fake_factory.create('ns0:VirtualMachineConfigSpec') expected.deviceChange = [] device1 = fake_factory.create('ns0:VirtualDeviceConfigSpec') device1.device = 'device1' device1.operation = 'remove' device1.fileOperation = 'destroy' expected.deviceChange.append(device1) device2 = fake_factory.create('ns0:VirtualDeviceConfigSpec') device2.device = 'device2' device2.operation = 'remove' device2.fileOperation = 'destroy' expected.deviceChange.append(device2) self.assertEqual(expected, result) @mock.patch.object(vm_util, 'reconfigure_vm') def test_detach_devices_from_vm(self, mock_reconfigure): fake_devices = ['device1', 'device2'] session = fake.FakeSession() vm_util.detach_devices_from_vm(session, 'fake-ref', fake_devices) mock_reconfigure.assert_called_once_with(session, 'fake-ref', mock.ANY) def test_get_vm_boot_spec(self): disk = fake.VirtualDisk() disk.key = 7 fake_factory = fake.FakeFactory() result = vm_util.get_vm_boot_spec(fake_factory, disk) expected = fake_factory.create('ns0:VirtualMachineConfigSpec') boot_disk = fake_factory.create( 'ns0:VirtualMachineBootOptionsBootableDiskDevice') boot_disk.deviceKey = disk.key boot_options = fake_factory.create('ns0:VirtualMachineBootOptions') boot_options.bootOrder = [boot_disk] expected.bootOptions = boot_options self.assertEqual(expected, result) def _get_devices(self, filename): devices = fake._create_array_of_type('VirtualDevice') devices.VirtualDevice = self._vmdk_path_and_adapter_type_devices( filename) return devices def test_find_rescue_device(self): filename = '[test_datastore] uuid/uuid-rescue.vmdk' devices = self._get_devices(filename) device = vm_util.find_rescue_device(devices, self._instance) self.assertEqual(filename, device.backing.fileName) def test_find_rescue_device_not_found(self): filename = '[test_datastore] uuid/uuid.vmdk' 
devices = self._get_devices(filename) self.assertRaises(exception.NotFound, vm_util.find_rescue_device, devices, self._instance) def test_validate_cpu_limits(self): cpu_limits = vm_util.CpuLimits(cpu_shares_level='high', cpu_shares_share=1948) self.assertRaises(exception.InvalidInput, cpu_limits.validate) cpu_limits = vm_util.CpuLimits(cpu_shares_level='fira') self.assertRaises(exception.InvalidInput, cpu_limits.validate) @mock.patch.object(driver.VMwareAPISession, 'vim', stubs.fake_vim_prop) class VMwareVMUtilGetHostRefTestCase(test.NoDBTestCase): # N.B. Mocking on the class only mocks test_*(), but we need # VMwareAPISession.vim to be mocked in both setUp and tests. Not mocking in # setUp causes object initialisation to fail. Not mocking in tests results # in vim calls not using FakeVim. @mock.patch.object(driver.VMwareAPISession, 'vim', stubs.fake_vim_prop) def setUp(self): super(VMwareVMUtilGetHostRefTestCase, self).setUp() fake.reset() vm_util.vm_refs_cache_reset() self.session = driver.VMwareAPISession() # Create a fake VirtualMachine running on a known host self.host_ref = fake._db_content['HostSystem'].keys()[0] self.vm_ref = fake.create_vm(host_ref=self.host_ref) @mock.patch.object(vm_util, 'get_vm_ref') def test_get_host_ref_for_vm(self, mock_get_vm_ref): mock_get_vm_ref.return_value = self.vm_ref ret = vm_util.get_host_ref_for_vm(self.session, 'fake-instance') mock_get_vm_ref.assert_called_once_with(self.session, 'fake-instance') self.assertEqual(self.host_ref, ret) @mock.patch.object(vm_util, 'get_vm_ref') def test_get_host_name_for_vm(self, mock_get_vm_ref): mock_get_vm_ref.return_value = self.vm_ref host = fake._get_object(self.host_ref) ret = vm_util.get_host_name_for_vm(self.session, 'fake-instance') mock_get_vm_ref.assert_called_once_with(self.session, 'fake-instance') self.assertEqual(host.name, ret)
blueboxgroup/nova
nova/tests/unit/virt/vmwareapi/test_vm_util.py
Python
apache-2.0
58,050
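The propset_dict tests in the file above pin down a small helper that is easy to misread: values are kept as-is, so a managed-object reference stays an object with a .value attribute. Below is a standalone sketch of the behaviour those tests assert; it assumes only that each property object exposes name and val, and it is a reimplementation for illustration, not the nova.virt.vmwareapi.vm_util source.

import collections


def propset_dict(propset):
    """Turn a vSphere-style propSet (objects with .name/.val) into a dict.

    Values are not unwrapped, which is exactly what
    test_propset_dict_complex checks for the MoRef case.
    """
    if propset is None:
        return {}
    return {prop.name: prop.val for prop in propset}


DynamicProperty = collections.namedtuple('Property', ['name', 'val'])
MoRef = collections.namedtuple('Val', ['value'])

props = [DynamicProperty(name='foo', val='bar'),
         DynamicProperty(name='some.thing', val=MoRef(value='else'))]
d = propset_dict(props)
assert d['foo'] == 'bar'
assert d['some.thing'].value == 'else'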
#!/usr/bin/python
# coding=utf-8

# Copyright 2017 yaitza. All Rights Reserved.
#
# https://yaitza.github.io/2017-04-26-Python-FilterPicture
#
# I hope this code is useful for you.
# ===================================================================

__author__ = "yaitza"
__date__ = "2017-04-26 13:59"

import os

from PIL import Image


class ImageHandler:
    def __init__(self, pic_path):
        self.pic_path = pic_path

    def getPicSize(self, maxSize, minSize):
        try:
            img = Image.open(self.pic_path)
        except IOError:
            print self.pic_path + " Error!"
            return None

        # keep the image only if both sides fall inside [minSize, maxSize]
        if minSize <= min(img.size) and max(img.size) <= maxSize:
            return img.size
        return None


class FileHandler:
    def __init__(self, file_path):
        self.file_path = file_path

    def getAllFiles(self):
        fileList = os.listdir(self.file_path)
        file_dir = []
        for file in fileList:
            file_dir.append(self.file_path + "/" + file)

        useFileList = []
        for file in file_dir:
            im = ImageHandler(file)
            if im.getPicSize(1204, 480) is not None:
                useFileList.append(file)

        return useFileList


if __name__ == "__main__":
    file_path = "E:/内容素材/图片/美女图"
    uipath = unicode(file_path, "utf8")
    fh = FileHandler(uipath)
    fh.getAllFiles()
yaitza/python
PROJECT2017/Tools/FilterPicSize.py
Python
apache-2.0
1,440
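A short usage sketch for the filter script above. The module name is assumed from this record's path field, the folder is a placeholder, and the snippet stays within the script's Python 2 dialect (it relies on unicode() and on Pillow being installed).

# coding=utf-8
# Usage sketch for FilterPicSize (placeholder directory; Python 2 + Pillow).
from FilterPicSize import FileHandler, ImageHandler

folder = unicode("C:/pictures", "utf8")   # placeholder path, mirrors __main__
keep = FileHandler(folder).getAllFiles()  # files whose sides fit 480..1204 px

for pic in keep:
    # getAllFiles already filtered, so getPicSize cannot return None here
    size = ImageHandler(pic).getPicSize(1204, 480)
    print("%s -> %dx%d" % (pic, size[0], size[1]))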
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ TestCases for Dataset, including create, config, run, etc. """ from __future__ import print_function import paddle import paddle.fluid as fluid import paddle.compat as cpt import paddle.fluid.core as core import numpy as np import os import shutil import unittest class TestDataset(unittest.TestCase): """ TestCases for Dataset. """ def setUp(self): self.use_data_loader = False self.epoch_num = 10 self.drop_last = False def test_dataset_create(self): """ Testcase for dataset create. """ try: dataset = paddle.distributed.InMemoryDataset() except: self.assertTrue(False) try: dataset = paddle.distributed.QueueDataset() except: self.assertTrue(False) try: dataset = paddle.distributed.fleet.dataset.FileInstantDataset() except: self.assertTrue(False) try: dataset = paddle.distributed.fleet.dataset.MyOwnDataset() self.assertTrue(False) except: self.assertTrue(True) def test_config(self): """ Testcase for python config. """ dataset = fluid.InMemoryDataset() dataset.set_parse_ins_id(True) dataset.set_parse_content(True) dataset._set_trainer_num(1) self.assertTrue(dataset.parse_ins_id) self.assertTrue(dataset.parse_content) self.assertEqual(dataset.trainer_num, 1) def test_shuffle_by_uid(self): """ Testcase for shuffle_by_uid. """ dataset = paddle.distributed.InMemoryDataset() dataset._set_uid_slot('6048') dataset._set_shuffle_by_uid(True) def test_run_with_dump(self): """ Testcase for InMemoryDataset from create to run. """ with open("test_run_with_dump_a.txt", "w") as f: data = "1 a 1 a 1 1 2 3 3 4 5 5 5 5 1 1\n" data += "1 b 1 b 1 2 2 3 4 4 6 6 6 6 1 2\n" data += "1 c 1 c 1 3 2 3 5 4 7 7 7 7 1 3\n" f.write(data) with open("test_run_with_dump_b.txt", "w") as f: data = "1 d 1 d 1 4 2 3 3 4 5 5 5 5 1 4\n" data += "1 e 1 e 1 5 2 3 4 4 6 6 6 6 1 5\n" data += "1 f 1 f 1 6 2 3 5 4 7 7 7 7 1 6\n" data += "1 g 1 g 1 7 2 3 6 4 8 8 8 8 1 7\n" f.write(data) slots = ["slot1", "slot2", "slot3", "slot4"] slots_vars = [] for slot in slots: var = fluid.layers.data( name=slot, shape=[1], dtype="int64", lod_level=1) slots_vars.append(var) dataset = paddle.distributed.InMemoryDataset() dataset.init( batch_size=32, thread_num=3, pipe_command="cat", use_var=slots_vars) dataset.update_settings(pipe_command="cat1") dataset._init_distributed_settings( parse_ins_id=True, parse_content=True, fea_eval=True, candidate_size=10000) dataset.set_filelist( ["test_run_with_dump_a.txt", "test_run_with_dump_b.txt"]) dataset.load_into_memory() dataset.local_shuffle() paddle.enable_static() exe = paddle.static.Executor(paddle.CPUPlace()) startup_program = paddle.static.Program() main_program = paddle.static.Program() exe.run(startup_program) for i in range(2): try: exe.train_from_dataset(main_program, dataset) except ImportError as e: pass except Exception as e: self.assertTrue(False) os.remove("./test_run_with_dump_a.txt") os.remove("./test_run_with_dump_b.txt") def test_dataset_config(self): """ Testcase for dataset configuration. 
""" dataset = fluid.core.Dataset("MultiSlotDataset") dataset.set_thread_num(12) dataset.set_filelist(["a.txt", "b.txt", "c.txt"]) dataset.set_trainer_num(4) dataset.set_hdfs_config("my_fs_name", "my_fs_ugi") dataset.set_download_cmd("./read_from_afs my_fs_name my_fs_ugi") dataset.set_enable_pv_merge(False) thread_num = dataset.get_thread_num() self.assertEqual(thread_num, 12) filelist = dataset.get_filelist() self.assertEqual(len(filelist), 3) self.assertEqual(filelist[0], "a.txt") self.assertEqual(filelist[1], "b.txt") self.assertEqual(filelist[2], "c.txt") trainer_num = dataset.get_trainer_num() self.assertEqual(trainer_num, 4) name, ugi = dataset.get_hdfs_config() self.assertEqual(name, "my_fs_name") self.assertEqual(ugi, "my_fs_ugi") download_cmd = dataset.get_download_cmd() self.assertEqual(download_cmd, "./read_from_afs my_fs_name my_fs_ugi") def test_set_download_cmd(self): """ Testcase for InMemoryDataset from create to run. """ filename1 = "afs:test_in_memory_dataset_run_a.txt" filename2 = "afs:test_in_memory_dataset_run_b.txt" with open(filename1, "w") as f: data = "1 1 2 3 3 4 5 5 5 5 1 1\n" data += "1 2 2 3 4 4 6 6 6 6 1 2\n" data += "1 3 2 3 5 4 7 7 7 7 1 3\n" f.write(data) with open(filename2, "w") as f: data = "1 4 2 3 3 4 5 5 5 5 1 4\n" data += "1 5 2 3 4 4 6 6 6 6 1 5\n" data += "1 6 2 3 5 4 7 7 7 7 1 6\n" data += "1 7 2 3 6 4 8 8 8 8 1 7\n" f.write(data) slots = ["slot1", "slot2", "slot3", "slot4"] slots_vars = [] for slot in slots: var = fluid.layers.data( name=slot, shape=[1], dtype="int64", lod_level=1) slots_vars.append(var) dataset = paddle.distributed.InMemoryDataset() dataset.init( batch_size=32, thread_num=3, pipe_command="cat", download_cmd="cat", use_var=slots_vars) dataset.set_filelist([filename1, filename2]) dataset.load_into_memory() paddle.enable_static() exe = paddle.static.Executor(paddle.CPUPlace()) startup_program = paddle.static.Program() main_program = paddle.static.Program() exe = fluid.Executor(fluid.CPUPlace()) exe.run(startup_program) if self.use_data_loader: data_loader = fluid.io.DataLoader.from_dataset(dataset, fluid.cpu_places(), self.drop_last) for i in range(self.epoch_num): for data in data_loader(): exe.run(main_program, feed=data) else: for i in range(self.epoch_num): try: exe.train_from_dataset(main_program, dataset) except Exception as e: self.assertTrue(False) os.remove(filename1) os.remove(filename2) def test_in_memory_dataset_run(self): """ Testcase for InMemoryDataset from create to run. 
""" with open("test_in_memory_dataset_run_a.txt", "w") as f: data = "1 1 2 3 3 4 5 5 5 5 1 1\n" data += "1 2 2 3 4 4 6 6 6 6 1 2\n" data += "1 3 2 3 5 4 7 7 7 7 1 3\n" f.write(data) with open("test_in_memory_dataset_run_b.txt", "w") as f: data = "1 4 2 3 3 4 5 5 5 5 1 4\n" data += "1 5 2 3 4 4 6 6 6 6 1 5\n" data += "1 6 2 3 5 4 7 7 7 7 1 6\n" data += "1 7 2 3 6 4 8 8 8 8 1 7\n" f.write(data) slots = ["slot1", "slot2", "slot3", "slot4"] slots_vars = [] for slot in slots: var = fluid.layers.data( name=slot, shape=[1], dtype="int64", lod_level=1) slots_vars.append(var) dataset = paddle.distributed.InMemoryDataset() dataset.init( batch_size=32, thread_num=3, pipe_command="cat", use_var=slots_vars) dataset._init_distributed_settings(fea_eval=True, candidate_size=1) dataset.set_filelist([ "test_in_memory_dataset_run_a.txt", "test_in_memory_dataset_run_b.txt" ]) dataset.load_into_memory() dataset.slots_shuffle(["slot1"]) dataset.local_shuffle() dataset._set_generate_unique_feasigns(True, 15) dataset._generate_local_tables_unlock(0, 11, 1, 25, 15) exe = fluid.Executor(fluid.CPUPlace()) exe.run(fluid.default_startup_program()) if self.use_data_loader: data_loader = fluid.io.DataLoader.from_dataset(dataset, fluid.cpu_places(), self.drop_last) for i in range(self.epoch_num): for data in data_loader(): exe.run(fluid.default_main_program(), feed=data) else: for i in range(self.epoch_num): try: exe.train_from_dataset(fluid.default_main_program(), dataset) except Exception as e: self.assertTrue(False) os.remove("./test_in_memory_dataset_run_a.txt") os.remove("./test_in_memory_dataset_run_b.txt") def test_in_memory_dataset_masterpatch(self): """ Testcase for InMemoryDataset from create to run. """ with open("test_in_memory_dataset_masterpatch_a.txt", "w") as f: data = "1 id1 1 1 2 3 3 4 5 5 5 5 1 1\n" data += "1 id1 1 2 2 3 4 4 6 6 6 6 1 2\n" data += "1 id2 1 1 1 1 1 0 1 0\n" data += "1 id3 1 0 1 0 1 1 1 1\n" data += "1 id3 1 1 1 1 1 0 1 0\n" data += "1 id4 1 0 1 0 1 1 1 1\n" data += "1 id4 1 0 1 0 1 1 1 1\n" data += "1 id5 1 1 1 1 1 0 1 0\n" data += "1 id5 1 1 1 1 1 0 1 0\n" f.write(data) with open("test_in_memory_dataset_masterpatch_b.txt", "w") as f: data = "1 id6 1 4 2 3 3 4 5 5 5 5 1 4\n" data += "1 id6 1 1 2 3 4 4 6 6 6 6 1 5\n" data += "1 id6 1 6 2 3 5 4 7 7 7 7 1 6\n" data += "1 id6 1 7 2 3 6 4 8 8 8 8 1 7\n" f.write(data) slots = ["slot1", "slot2", "slot3", "slot4"] slots_vars = [] train_program = fluid.Program() startup_program = fluid.Program() with fluid.program_guard(train_program, startup_program): for slot in slots[:2]: var = fluid.layers.data( name=slot, shape=[1], dtype="int64", lod_level=1) slots_vars.append(var) for slot in slots[2:]: var = fluid.layers.data( name=slot, shape=[1], dtype="float32", lod_level=1) slots_vars.append(var) dataset = paddle.distributed.InMemoryDataset() dataset.init( batch_size=32, thread_num=1, pipe_command="cat", use_var=slots_vars) dataset._init_distributed_settings(parse_ins_id=True) dataset.set_filelist([ "test_in_memory_dataset_masterpatch_a.txt", "test_in_memory_dataset_masterpatch_b.txt" ]) dataset.load_into_memory() dataset.local_shuffle() exe = fluid.Executor(fluid.CPUPlace()) exe.run(startup_program) for i in range(2): try: exe.train_from_dataset(train_program, dataset) except ImportError as e: pass except Exception as e: self.assertTrue(False) #dataset._set_merge_by_lineid(2) dataset.update_settings(merge_size=2) dataset.dataset.merge_by_lineid() os.remove("./test_in_memory_dataset_masterpatch_a.txt") 
os.remove("./test_in_memory_dataset_masterpatch_b.txt") def test_in_memory_dataset_masterpatch1(self): """ Testcase for InMemoryDataset from create to run. """ with open("test_in_memory_dataset_masterpatch1_a.txt", "w") as f: data = "1 id1 1 1 2 3 3 4 5 5 5 5 1 1\n" data += "1 id1 1 2 2 3 4 4 6 6 6 6 1 2\n" data += "1 id2 1 1 1 1 1 0 1 0\n" data += "1 id3 1 0 1 0 1 1 1 1\n" data += "1 id3 1 1 1 1 1 0 1 0\n" data += "1 id4 1 0 1 0 1 1 1 1\n" data += "1 id4 1 0 1 0 1 1 1 1\n" data += "1 id5 1 1 1 1 1 0 1 0\n" data += "1 id5 1 1 1 1 1 0 1 0\n" f.write(data) with open("test_in_memory_dataset_masterpatch1_b.txt", "w") as f: data = "1 id6 1 4 2 3 3 4 5 5 5 5 1 4\n" data += "1 id6 1 1 2 3 4 4 6 6 6 6 1 5\n" data += "1 id6 1 6 2 3 5 4 7 7 7 7 1 6\n" data += "1 id6 1 7 2 3 6 4 8 8 8 8 1 7\n" f.write(data) slots_vars = [] train_program = fluid.Program() startup_program = fluid.Program() with fluid.program_guard(train_program, startup_program): var1 = fluid.layers.data( name="slot1", shape=[1], dtype="int64", lod_level=0) var2 = fluid.layers.data( name="slot2", shape=[1], dtype="int64", lod_level=0) var3 = fluid.layers.data( name="slot3", shape=[1], dtype="float32", lod_level=0) var4 = fluid.layers.data( name="slot4", shape=[1], dtype="float32", lod_level=0) slots_vars = [var1, var2, var3, var4] dataset = paddle.distributed.InMemoryDataset() dataset.init( batch_size=32, thread_num=1, pipe_command="cat", use_var=slots_vars) dataset._init_distributed_settings(parse_ins_id=True) dataset.set_filelist([ "test_in_memory_dataset_masterpatch1_a.txt", "test_in_memory_dataset_masterpatch1_b.txt" ]) dataset.load_into_memory() dataset.local_shuffle() exe = fluid.Executor(fluid.CPUPlace()) exe.run(startup_program) for i in range(2): try: exe.train_from_dataset(train_program, dataset) except ImportError as e: pass except Exception as e: self.assertTrue(False) dataset._set_merge_by_lineid(2) dataset.dataset.merge_by_lineid() os.remove("./test_in_memory_dataset_masterpatch1_a.txt") os.remove("./test_in_memory_dataset_masterpatch1_b.txt") def test_in_memory_dataset_run_2(self): """ Testcase for InMemoryDataset from create to run. 
Use CUDAPlace Use float type id """ with open("test_in_memory_dataset_run_a.txt", "w") as f: data = "1 1 2 3 3 4 5 5 5 5 1 1\n" data += "1 2 2 3 4 4 6 6 6 6 1 2\n" data += "1 3 2 3 5 4 7 7 7 7 1 3\n" f.write(data) with open("test_in_memory_dataset_run_b.txt", "w") as f: data = "1 4 2 3 3 4 5 5 5 5 1 4\n" data += "1 5 2 3 4 4 6 6 6 6 1 5\n" data += "1 6 2 3 5 4 7 7 7 7 1 6\n" data += "1 7 2 3 6 4 8 8 8 8 1 7\n" f.write(data) slots = ["slot1_f", "slot2_f", "slot3_f", "slot4_f"] slots_vars = [] for slot in slots: var = fluid.layers.data( name=slot, shape=[1], dtype="float32", lod_level=1) slots_vars.append(var) dataset = paddle.distributed.InMemoryDataset() dataset.init( batch_size=32, thread_num=3, pipe_command="cat", use_var=slots_vars) dataset.set_filelist([ "test_in_memory_dataset_run_a.txt", "test_in_memory_dataset_run_b.txt" ]) dataset.load_into_memory() dataset.local_shuffle() exe = fluid.Executor(fluid.CPUPlace() if not core.is_compiled_with_cuda( ) else fluid.CUDAPlace(0)) exe.run(fluid.default_startup_program()) for i in range(2): try: exe.train_from_dataset(fluid.default_main_program(), dataset) exe.train_from_dataset( fluid.default_main_program(), dataset, thread=1) exe.train_from_dataset( fluid.default_main_program(), dataset, thread=2) exe.train_from_dataset( fluid.default_main_program(), dataset, thread=2) exe.train_from_dataset( fluid.default_main_program(), dataset, thread=3) exe.train_from_dataset( fluid.default_main_program(), dataset, thread=4) except ImportError as e: pass except Exception as e: self.assertTrue(False) if self.use_data_loader: data_loader = fluid.io.DataLoader.from_dataset(dataset, fluid.cpu_places(), self.drop_last) for i in range(self.epoch_num): for data in data_loader(): exe.run(fluid.default_main_program(), feed=data) else: for i in range(self.epoch_num): try: exe.train_from_dataset(fluid.default_main_program(), dataset) except Exception as e: self.assertTrue(False) dataset._set_merge_by_lineid(2) dataset._set_parse_ins_id(False) dataset._set_fleet_send_sleep_seconds(2) dataset.preload_into_memory() dataset.wait_preload_done() dataset.release_memory() dataset.preload_into_memory(1) dataset.wait_preload_done() dataset.dataset.merge_by_lineid() dataset.release_memory() dataset._set_merge_by_lineid(30) dataset._set_parse_ins_id(False) dataset.load_into_memory() dataset.dataset.merge_by_lineid() dataset.update_settings( batch_size=1, thread_num=2, input_type=1, pipe_command="cat", use_var=[], fs_name="", fs_ugi="", download_cmd="cat", merge_size=-1, parse_ins_id=False, parse_content=False, fleet_send_batch_size=2, fleet_send_sleep_seconds=2, fea_eval=True) fleet_ptr = fluid.core.Fleet() fleet_ptr.set_client2client_config(1, 1, 1) fleet_ptr.get_cache_threshold(0) os.remove("./test_in_memory_dataset_run_a.txt") os.remove("./test_in_memory_dataset_run_b.txt") def test_queue_dataset_run(self): """ Testcase for QueueDataset from create to run. 
""" with open("test_queue_dataset_run_a.txt", "w") as f: data = "1 1 2 3 3 4 5 5 5 5 1 1\n" data += "1 2 2 3 4 4 6 6 6 6 1 2\n" data += "1 3 2 3 5 4 7 7 7 7 1 3\n" f.write(data) with open("test_queue_dataset_run_b.txt", "w") as f: data = "1 4 2 3 3 4 5 5 5 5 1 4\n" data += "1 5 2 3 4 4 6 6 6 6 1 5\n" data += "1 6 2 3 5 4 7 7 7 7 1 6\n" data += "1 7 2 3 6 4 8 8 8 8 1 7\n" f.write(data) slots = ["slot1", "slot2", "slot3", "slot4"] slots_vars = [] for slot in slots: var = fluid.layers.data( name=slot, shape=[1], dtype="int64", lod_level=1) slots_vars.append(var) dataset = paddle.distributed.QueueDataset() dataset.init( batch_size=32, thread_num=3, pipe_command="cat", use_var=slots_vars) dataset.set_filelist( ["test_queue_dataset_run_a.txt", "test_queue_dataset_run_b.txt"]) exe = fluid.Executor(fluid.CPUPlace()) exe.run(fluid.default_startup_program()) if self.use_data_loader: data_loader = fluid.io.DataLoader.from_dataset(dataset, fluid.cpu_places(), self.drop_last) for i in range(self.epoch_num): for data in data_loader(): exe.run(fluid.default_main_program(), feed=data) else: for i in range(self.epoch_num): try: exe.train_from_dataset(fluid.default_main_program(), dataset) except Exception as e: self.assertTrue(False) dataset2 = paddle.distributed.QueueDataset() dataset2.init( batch_size=32, thread_num=3, pipe_command="cat", use_var=slots_vars) dataset.set_filelist([]) try: exe.train_from_dataset(fluid.default_main_program(), dataset2) except ImportError as e: print("warning: we skip trainer_desc_pb2 import problem in windows") except Exception as e: self.assertTrue(False) if os.path.exists("./test_queue_dataset_run_a.txt"): os.remove("./test_queue_dataset_run_a.txt") if os.path.exists("./test_queue_dataset_run_b.txt"): os.remove("./test_queue_dataset_run_b.txt") def test_queue_dataset_run_2(self): """ Testcase for QueueDataset from create to run. Use CUDAPlace Use float type id """ with open("test_queue_dataset_run_a.txt", "w") as f: data = "1 1 2 3 3 4 5 5 5 5 1 1\n" data += "1 2 2 3 4 4 6 6 6 6 1 2\n" data += "1 3 2 3 5 4 7 7 7 7 1 3\n" f.write(data) with open("test_queue_dataset_run_b.txt", "w") as f: data = "1 4 2 3 3 4 5 5 5 5 1 4\n" data += "1 5 2 3 4 4 6 6 6 6 1 5\n" data += "1 6 2 3 5 4 7 7 7 7 1 6\n" data += "1 7 2 3 6 4 8 8 8 8 1 7\n" f.write(data) slots = ["slot1_f", "slot2_f", "slot3_f", "slot4_f"] slots_vars = [] for slot in slots: var = fluid.layers.data( name=slot, shape=[1], dtype="float32", lod_level=1) slots_vars.append(var) dataset = paddle.distributed.QueueDataset() dataset.init( batch_size=32, thread_num=3, pipe_command="cat", use_var=slots_vars) dataset.set_filelist( ["test_queue_dataset_run_a.txt", "test_queue_dataset_run_b.txt"]) exe = fluid.Executor(fluid.CPUPlace() if not core.is_compiled_with_cuda( ) else fluid.CUDAPlace(0)) exe.run(fluid.default_startup_program()) if self.use_data_loader: data_loader = fluid.io.DataLoader.from_dataset(dataset, fluid.cpu_places(), self.drop_last) for i in range(self.epoch_num): for data in data_loader(): exe.run(fluid.default_main_program(), feed=data) else: for i in range(self.epoch_num): try: exe.train_from_dataset(fluid.default_main_program(), dataset) except Exception as e: self.assertTrue(False) if os.path.exists("./test_queue_dataset_run_a.txt"): os.remove("./test_queue_dataset_run_a.txt") if os.path.exists("./test_queue_dataset_run_b.txt"): os.remove("./test_queue_dataset_run_b.txt") def test_queue_dataset_run_3(self): """ Testcase for QueueDataset from create to run. 
Use CUDAPlace Use float type id """ with open("test_queue_dataset_run_a.txt", "w") as f: data = "2 1 2 2 5 4 2 2 7 2 1 3\n" data += "2 6 2 2 1 4 2 2 4 2 2 3\n" data += "2 5 2 2 9 9 2 2 7 2 1 3\n" data += "2 7 2 2 1 9 2 3 7 2 5 3\n" f.write(data) with open("test_queue_dataset_run_b.txt", "w") as f: data = "2 1 2 2 5 4 2 2 7 2 1 3\n" data += "2 6 2 2 1 4 2 2 4 2 2 3\n" data += "2 5 2 2 9 9 2 2 7 2 1 3\n" data += "2 7 2 2 1 9 2 3 7 2 5 3\n" f.write(data) slots = ["slot1", "slot2", "slot3", "slot4"] slots_vars = [] for slot in slots: var = fluid.data( name=slot, shape=[None, 1], dtype="int64", lod_level=1) slots_vars.append(var) dataset = paddle.distributed.InMemoryDataset() dataset.init( batch_size=1, thread_num=2, input_type=1, pipe_command="cat", use_var=slots_vars) dataset.set_filelist( ["test_queue_dataset_run_a.txt", "test_queue_dataset_run_b.txt"]) dataset.load_into_memory() exe = fluid.Executor(fluid.CPUPlace() if not core.is_compiled_with_cuda( ) else fluid.CUDAPlace(0)) exe.run(fluid.default_startup_program()) if self.use_data_loader: data_loader = fluid.io.DataLoader.from_dataset(dataset, fluid.cpu_places(), self.drop_last) for i in range(self.epoch_num): for data in data_loader(): exe.run(fluid.default_main_program(), feed=data) else: for i in range(self.epoch_num): try: exe.train_from_dataset(fluid.default_main_program(), dataset) except Exception as e: self.assertTrue(False) if os.path.exists("./test_queue_dataset_run_a.txt"): os.remove("./test_queue_dataset_run_a.txt") if os.path.exists("./test_queue_dataset_run_b.txt"): os.remove("./test_queue_dataset_run_b.txt") class TestDatasetWithDataLoader(TestDataset): """ Test Dataset With Data Loader class. TestCases. """ def setUp(self): """ Test Dataset With Data Loader, setUp. """ self.use_data_loader = True self.epoch_num = 10 self.drop_last = False class TestDatasetWithFetchHandler(unittest.TestCase): """ Test Dataset With Fetch Handler. TestCases. """ def net(self): """ Test Dataset With Fetch Handler. TestCases. """ slots = ["slot1", "slot2", "slot3", "slot4"] slots_vars = [] poolings = [] for slot in slots: data = fluid.layers.data( name=slot, shape=[1], dtype="int64", lod_level=1) var = fluid.layers.cast(x=data, dtype='float32') pool = fluid.layers.sequence_pool(input=var, pool_type='AVERAGE') slots_vars.append(data) poolings.append(pool) concated = fluid.layers.concat(poolings, axis=1) fc = fluid.layers.fc(input=concated, act='tanh', size=32) return slots_vars, fc def get_dataset(self, inputs, files): """ Test Dataset With Fetch Handler. TestCases. Args: inputs(list): inputs of get_dataset files(list): files of get_dataset """ dataset = paddle.distributed.QueueDataset() dataset.init( batch_size=32, thread_num=3, pipe_command="cat", use_var=inputs) dataset.set_filelist(files) return dataset def setUp(self): """ Test Dataset With Fetch Handler. TestCases. """ with open("test_queue_dataset_run_a.txt", "w") as f: data = "1 1 2 3 3 4 5 5 5 5 1 1\n" data += "1 2 2 3 4 4 6 6 6 6 1 2\n" data += "1 3 2 3 5 4 7 7 7 7 1 3\n" f.write(data) with open("test_queue_dataset_run_b.txt", "w") as f: data = "1 4 2 3 3 4 5 5 5 5 1 4\n" data += "1 5 2 3 4 4 6 6 6 6 1 5\n" data += "1 6 2 3 5 4 7 7 7 7 1 6\n" data += "1 7 2 3 6 4 8 8 8 8 1 7\n" f.write(data) def tearDown(self): """ Test Dataset With Fetch Handler. TestCases. """ os.remove("./test_queue_dataset_run_a.txt") os.remove("./test_queue_dataset_run_b.txt") def test_dataset_none(self): """ Test Dataset With Fetch Handler. TestCases. 
""" slots_vars, out = self.net() files = ["test_queue_dataset_run_a.txt", "test_queue_dataset_run_b.txt"] dataset = self.get_dataset(slots_vars, files) exe = fluid.Executor(fluid.CPUPlace()) exe.run(fluid.default_startup_program()) # test dataset->None try: exe.train_from_dataset(fluid.default_main_program(), None) except ImportError as e: print("warning: we skip trainer_desc_pb2 import problem in windows") except RuntimeError as e: error_msg = "dataset is need and should be initialized" self.assertEqual(error_msg, cpt.get_exception_message(e)) except Exception as e: self.assertTrue(False) def test_infer_from_dataset(self): """ Test Dataset With Fetch Handler. TestCases. """ slots_vars, out = self.net() files = ["test_queue_dataset_run_a.txt", "test_queue_dataset_run_b.txt"] dataset = self.get_dataset(slots_vars, files) exe = fluid.Executor(fluid.CPUPlace()) exe.run(fluid.default_startup_program()) try: exe.infer_from_dataset(fluid.default_main_program(), dataset) except ImportError as e: print("warning: we skip trainer_desc_pb2 import problem in windows") except Exception as e: self.assertTrue(False) def test_fetch_handler(self): """ Test Dataset With Fetch Handler. TestCases. """ slots_vars, out = self.net() files = ["test_queue_dataset_run_a.txt", "test_queue_dataset_run_b.txt"] dataset = self.get_dataset(slots_vars, files) exe = fluid.Executor(fluid.CPUPlace()) exe.run(fluid.default_startup_program()) fh = fluid.executor.FetchHandler(out.name) fh.help() try: exe.train_from_dataset( program=fluid.default_main_program(), dataset=dataset, fetch_handler=fh) except ImportError as e: print("warning: we skip trainer_desc_pb2 import problem in windows") except RuntimeError as e: error_msg = "dataset is need and should be initialized" self.assertEqual(error_msg, cpt.get_exception_message(e)) except Exception as e: self.assertTrue(False) class TestDataset2(unittest.TestCase): """ TestCases for Dataset. """ def setUp(self): """ TestCases for Dataset. """ self.use_data_loader = False self.epoch_num = 10 self.drop_last = False def test_dataset_fleet(self): """ Testcase for InMemoryDataset from create to run. 
""" self.skipTest("parameter server will add pslib UT later") with open("test_in_memory_dataset2_run_a.txt", "w") as f: data = "1 1 2 3 3 4 5 5 5 5 1 1\n" data += "1 2 2 3 4 4 6 6 6 6 1 2\n" data += "1 3 2 3 5 4 7 7 7 7 1 3\n" f.write(data) with open("test_in_memory_dataset2_run_b.txt", "w") as f: data = "1 4 2 3 3 4 5 5 5 5 1 4\n" data += "1 5 2 3 4 4 6 6 6 6 1 5\n" data += "1 6 2 3 5 4 7 7 7 7 1 6\n" data += "1 7 2 3 6 4 8 8 8 8 1 7\n" f.write(data) train_program = fluid.Program() startup_program = fluid.Program() scope = fluid.Scope() from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler import fleet with fluid.program_guard(train_program, startup_program): slots = ["slot1_ff", "slot2_ff", "slot3_ff", "slot4_ff"] slots_vars = [] for slot in slots: var = fluid.layers.data(\ name=slot, shape=[1], dtype="float32", lod_level=1) slots_vars.append(var) fake_cost = \ fluid.layers.elementwise_sub(slots_vars[0], slots_vars[-1]) fake_cost = fluid.layers.mean(fake_cost) with fluid.scope_guard(scope): place = fluid.CPUPlace() exe = fluid.Executor(place) try: fleet.init() except ImportError as e: print("warning: no mpi4py") adam = fluid.optimizer.Adam(learning_rate=0.000005) try: adam = fleet.distributed_optimizer(adam) adam.minimize([fake_cost], [scope]) except AttributeError as e: print("warning: no mpi") except ImportError as e: print("warning: no mpi4py") exe.run(startup_program) dataset = paddle.distributed.InMemoryDataset() dataset.init( batch_size=32, thread_num=3, pipe_command="cat", use_var=slots_vars) dataset.set_filelist([ "test_in_memory_dataset2_run_a.txt", "test_in_memory_dataset2_run_b.txt" ]) dataset.load_into_memory() fleet._opt_info = None fleet._fleet_ptr = None os.remove("./test_in_memory_dataset2_run_a.txt") os.remove("./test_in_memory_dataset2_run_b.txt") def test_dataset_fleet2(self): """ Testcase for InMemoryDataset from create to run. 
""" with open("test_in_memory_dataset2_run2_a.txt", "w") as f: data = "1 1 2 3 3 4 5 5 5 5 1 1\n" data += "1 2 2 3 4 4 6 6 6 6 1 2\n" data += "1 3 2 3 5 4 7 7 7 7 1 3\n" f.write(data) with open("test_in_memory_dataset2_run2_b.txt", "w") as f: data = "1 4 2 3 3 4 5 5 5 5 1 4\n" data += "1 5 2 3 4 4 6 6 6 6 1 5\n" data += "1 6 2 3 5 4 7 7 7 7 1 6\n" data += "1 7 2 3 6 4 8 8 8 8 1 7\n" f.write(data) train_program = fluid.Program() startup_program = fluid.Program() scope = fluid.Scope() from paddle.fluid.incubate.fleet.parameter_server.pslib import fleet with fluid.program_guard(train_program, startup_program): slots = ["slot1_ff", "slot2_ff", "slot3_ff", "slot4_ff"] slots_vars = [] for slot in slots: var = fluid.layers.data(\ name=slot, shape=[1], dtype="float32", lod_level=1) slots_vars.append(var) fake_cost = \ fluid.layers.elementwise_sub(slots_vars[0], slots_vars[-1]) fake_cost = fluid.layers.mean(fake_cost) with fluid.scope_guard(scope): place = fluid.CPUPlace() exe = fluid.Executor(place) try: fleet.init() except ImportError as e: print("warning: no mpi4py") adam = fluid.optimizer.Adam(learning_rate=0.000005) try: adam = fleet.distributed_optimizer( adam, strategy={ "fs_uri": "fs_uri_xxx", "fs_user": "fs_user_xxx", "fs_passwd": "fs_passwd_xxx", "fs_hadoop_bin": "fs_hadoop_bin_xxx" }) adam.minimize([fake_cost], [scope]) except AttributeError as e: print("warning: no mpi") except ImportError as e: print("warning: no mpi4py") exe.run(startup_program) dataset = paddle.distributed.InMemoryDataset() dataset.init( batch_size=32, thread_num=3, pipe_command="cat", use_var=slots_vars) dataset.set_filelist([ "test_in_memory_dataset2_run2_a.txt", "test_in_memory_dataset2_run2_b.txt" ]) dataset.load_into_memory() try: dataset.global_shuffle(fleet) except: print("warning: catch expected error") fleet._opt_info = None fleet._fleet_ptr = None dataset = paddle.distributed.InMemoryDataset() dataset.init(fs_name="", fs_ugi="") d = paddle.distributed.fleet.DatasetBase() try: dataset._set_feed_type("MultiSlotInMemoryDataFeed") except: print("warning: catch expected error") dataset.thread_num = 0 try: dataset._prepare_to_run() except: print("warning: catch expected error") try: dataset.preprocess_instance() except: print("warning: catch expected error") try: dataset.set_current_phase(1) except: print("warning: catch expected error") try: dataset.postprocess_instance() except: print("warning: catch expected error") dataset._set_fleet_send_batch_size(1024) try: dataset.global_shuffle() except: print("warning: catch expected error") #dataset.get_pv_data_size() dataset.get_memory_data_size() dataset.get_shuffle_data_size() dataset = paddle.distributed.QueueDataset() try: dataset.local_shuffle() except: print("warning: catch expected error") try: dataset.global_shuffle() except: print("warning: catch expected error") dataset = paddle.distributed.fleet.FileInstantDataset() try: dataset.local_shuffle() except: print("warning: catch expected error") try: dataset.global_shuffle() except: print("warning: catch expected error") os.remove("./test_in_memory_dataset2_run2_a.txt") os.remove("./test_in_memory_dataset2_run2_b.txt") def test_bosps_dataset_fleet2(self): """ Testcase for InMemoryDataset from create to run. 
""" with open("test_in_memory_dataset2_run2_a.txt", "w") as f: data = "1 1 2 3 3 4 5 5 5 5 1 1\n" data += "1 2 2 3 4 4 6 6 6 6 1 2\n" data += "1 3 2 3 5 4 7 7 7 7 1 3\n" f.write(data) with open("test_in_memory_dataset2_run2_b.txt", "w") as f: data = "1 4 2 3 3 4 5 5 5 5 1 4\n" data += "1 5 2 3 4 4 6 6 6 6 1 5\n" data += "1 6 2 3 5 4 7 7 7 7 1 6\n" data += "1 7 2 3 6 4 8 8 8 8 1 7\n" f.write(data) train_program = fluid.Program() startup_program = fluid.Program() scope = fluid.Scope() from paddle.fluid.incubate.fleet.parameter_server.pslib import fleet with fluid.program_guard(train_program, startup_program): slots = ["slot1_ff", "slot2_ff", "slot3_ff", "slot4_ff"] slots_vars = [] for slot in slots: var = fluid.layers.data(\ name=slot, shape=[1], dtype="float32", lod_level=1) slots_vars.append(var) fake_cost = \ fluid.layers.elementwise_sub(slots_vars[0], slots_vars[-1]) fake_cost = fluid.layers.mean(fake_cost) with fluid.scope_guard(scope): place = fluid.CPUPlace() exe = fluid.Executor(place) try: fleet.init() except ImportError as e: print("warning: no mpi4py") adam = fluid.optimizer.Adam(learning_rate=0.000005) try: adam = fleet.distributed_optimizer( adam, strategy={ "fs_uri": "fs_uri_xxx", "fs_user": "fs_user_xxx", "fs_passwd": "fs_passwd_xxx", "fs_hadoop_bin": "fs_hadoop_bin_xxx" }) adam.minimize([fake_cost], [scope]) except AttributeError as e: print("warning: no mpi") except ImportError as e: print("warning: no mpi4py") exe.run(startup_program) dataset = paddle.distributed.fleet.BoxPSDataset() dataset.init( batch_size=32, thread_num=3, pipe_command="cat", use_var=slots_vars) dataset.set_filelist([ "test_in_memory_dataset2_run2_a.txt", "test_in_memory_dataset2_run2_b.txt" ]) dataset.load_into_memory() try: dataset.global_shuffle(fleet) except: print("warning: catch expected error") fleet._opt_info = None fleet._fleet_ptr = None dataset = paddle.distributed.fleet.BoxPSDataset() dataset.init( rank_offset="", pv_batch_size=1, fs_name="", fs_ugi="", data_feed_type="MultiSlotInMemoryDataFeed", parse_logkey=True, merge_by_sid=True, enable_pv_merge=True) d = paddle.distributed.fleet.DatasetBase() try: dataset._set_feed_type("MultiSlotInMemoryDataFeed") except: print("warning: catch expected error") dataset.thread_num = 0 try: dataset._prepare_to_run() except: print("warning: catch expected error") dataset._set_parse_logkey(True) dataset._set_merge_by_sid(True) dataset._set_enable_pv_merge(True) try: dataset.preprocess_instance() except: print("warning: catch expected error") try: dataset.set_current_phase(1) except: print("warning: catch expected error") try: dataset.postprocess_instance() except: print("warning: catch expected error") dataset._set_fleet_send_batch_size(1024) try: dataset.global_shuffle() except: print("warning: catch expected error") #dataset.get_pv_data_size() dataset.get_memory_data_size() dataset.get_shuffle_data_size() if __name__ == '__main__': unittest.main()
PaddlePaddle/Paddle
python/paddle/fluid/tests/unittests/test_dataset.py
Python
apache-2.0
43,925
# Copyright 2012 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import netaddr from oslo.db import exception as db_exc import sqlalchemy as sa from sqlalchemy import orm from sqlalchemy.orm import exc from sqlalchemy.orm import scoped_session from neutron.api.v2 import attributes as attr from neutron.common import constants from neutron.common import uos_constants from neutron.common import utils from neutron.db import db_base_plugin_v2 from neutron.db import model_base from neutron.db import models_v2 from neutron.extensions import _uos_sgrule_default_cfg as sg_cfg from neutron.extensions import securitygroup as ext_sg from neutron.openstack.common import excutils from neutron.openstack.common import timeutils from neutron.openstack.common import uuidutils IP_PROTOCOL_MAP = {constants.PROTO_NAME_TCP: constants.PROTO_NUM_TCP, constants.PROTO_NAME_UDP: constants.PROTO_NUM_UDP, constants.PROTO_NAME_ICMP: constants.PROTO_NUM_ICMP, constants.PROTO_NAME_ICMP_V6: constants.PROTO_NUM_ICMP_V6} class SecurityGroup(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant, models_v2.TimestampMixin): """Represents a v2 neutron security group.""" __table_args__ = ( sa.UniqueConstraint('tenant_id', 'name', name='uniq_sg0tenant_id0name'), ) name = sa.Column(sa.String(255)) description = sa.Column(sa.String(255)) class SecurityGroupPortBinding(model_base.BASEV2): """Represents binding between neutron ports and security profiles.""" port_id = sa.Column(sa.String(36), sa.ForeignKey("ports.id", ondelete='CASCADE'), primary_key=True) security_group_id = sa.Column(sa.String(36), sa.ForeignKey("securitygroups.id"), primary_key=True) # Add a relationship to the Port model in order to instruct SQLAlchemy to # eagerly load security group bindings ports = orm.relationship( models_v2.Port, backref=orm.backref("security_groups", lazy='joined', cascade='delete')) class SecurityGroupRule(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant): """Represents a v2 neutron security group rule.""" security_group_id = sa.Column(sa.String(36), sa.ForeignKey("securitygroups.id", ondelete="CASCADE"), nullable=False) remote_group_id = sa.Column(sa.String(36), sa.ForeignKey("securitygroups.id", ondelete="CASCADE"), nullable=True) direction = sa.Column(sa.Enum('ingress', 'egress', name='securitygrouprules_direction')) ethertype = sa.Column(sa.String(40)) protocol = sa.Column(sa.String(40)) port_range_min = sa.Column(sa.Integer) port_range_max = sa.Column(sa.Integer) remote_ip_prefix = sa.Column(sa.String(255)) security_group = orm.relationship( SecurityGroup, backref=orm.backref('rules', cascade='all,delete'), primaryjoin="SecurityGroup.id==SecurityGroupRule.security_group_id") source_group = orm.relationship( SecurityGroup, backref=orm.backref('source_rules', cascade='all,delete'), primaryjoin="SecurityGroup.id==SecurityGroupRule.remote_group_id") class SecurityGroupDbMixin(ext_sg.SecurityGroupPluginBase): """Mixin class to add security group to db_base_plugin_v2.""" __native_bulk_support = True def 
__init_sg_db_mixin__(self): sg_cfg.DefaultSGRulesConfig.get_valid_rules() def create_security_group_bulk(self, context, security_group_rule): return self._create_bulk('security_group', context, security_group_rule) def create_security_group(self, context, security_group, default_sg=False): """Create security group. If default_sg is true that means we are a default security group for a given tenant if it does not exist. """ s = security_group['security_group'] tenant_id = self._get_tenant_id_for_create(context, s) if not default_sg: self._ensure_default_security_group(context, tenant_id) try: with context.session.begin(subtransactions=True): security_group_db = SecurityGroup(id=s.get('id') or ( uuidutils.generate_uuid()), description=s['description'], tenant_id=tenant_id, name=s['name'], created_at=timeutils.utcnow() ) utils.make_default_name(security_group_db, uos_constants.UOS_PRE_SG) context.session.add(security_group_db) for ethertype in ext_sg.sg_supported_ethertypes: if s.get('name') == 'default': # Allow intercommunication ingress_rule = SecurityGroupRule( id=uuidutils.generate_uuid(), tenant_id=tenant_id, security_group=security_group_db, direction='ingress', ethertype=ethertype, source_group=security_group_db) context.session.add(ingress_rule) egress_rule = SecurityGroupRule( id=uuidutils.generate_uuid(), tenant_id=tenant_id, security_group=security_group_db, direction='egress', ethertype=ethertype) context.session.add(egress_rule) # gongysh UOS default ingress port open for default_rule in (sg_cfg.DefaultSGRulesConfig. sg_default_rules): if (s.get('name') != 'default' and 'self' == default_rule.protocol): # Allow intercommunication ingress_rule = SecurityGroupRule( id=uuidutils.generate_uuid(), tenant_id=tenant_id, security_group=security_group_db, direction='ingress', ethertype=ethertype, source_group=security_group_db) context.session.add(ingress_rule) elif ('self' != default_rule.protocol): ingress_rule = SecurityGroupRule( id=uuidutils.generate_uuid(), tenant_id=tenant_id, security_group=security_group_db, direction=default_rule.direction, ethertype=ethertype, protocol=default_rule.protocol, port_range_min=default_rule.port_range_min, port_range_max=default_rule.port_range_max) context.session.add(ingress_rule) except db_exc.DBDuplicateEntry as e: if e.columns == ['tenant_id', 'name']: if default_sg: raise else: raise ext_sg.SecurityGroupDuplicateName(name=s['name']) return self._make_security_group_dict(security_group_db, process_extensions=False) def get_security_groups(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False, default_sg=False): # If default_sg is True do not call _ensure_default_security_group() # so this can be done recursively. Context.tenant_id is checked # because all the unit tests do not explicitly set the context on # GETS. TODO(arosen) context handling can probably be improved here. 
if not default_sg and context.tenant_id: self._ensure_default_security_group(context, context.tenant_id) marker_obj = self._get_marker_obj(context, 'security_group', limit, marker) return self._get_collection(context, SecurityGroup, self._make_security_group_dict, filters=filters, fields=fields, sorts=sorts, limit=limit, marker_obj=marker_obj, page_reverse=page_reverse) def get_security_groups_count(self, context, filters=None): return self._get_collection_count(context, SecurityGroup, filters=filters) def get_security_group(self, context, id, fields=None, tenant_id=None): """Tenant id is given to handle the case when creating a security group rule on behalf of another use. """ if tenant_id: tmp_context_tenant_id = context.tenant_id context.tenant_id = tenant_id try: with context.session.begin(subtransactions=True): ret = self._make_security_group_dict(self._get_security_group( context, id), fields) ret['security_group_rules'] = self.get_security_group_rules( context, {'security_group_id': [id]}) finally: if tenant_id: context.tenant_id = tmp_context_tenant_id return ret def _get_security_group(self, context, id): try: query = self._model_query(context, SecurityGroup) sg = query.filter(SecurityGroup.id == id).one() except exc.NoResultFound: raise ext_sg.SecurityGroupNotFound(id=id) return sg def delete_security_group(self, context, id): filters = {'security_group_id': [id]} ports = self._get_port_security_group_bindings(context, filters) if ports: raise ext_sg.SecurityGroupInUse(id=id) # confirm security group exists sg = self._get_security_group(context, id) if sg['name'] == 'default' and not context.is_admin: raise ext_sg.SecurityGroupCannotRemoveDefault() with context.session.begin(subtransactions=True): context.session.delete(sg) def update_security_group(self, context, id, security_group): s = security_group['security_group'] # NOTE(gongysh) for the purpose update the security_group with # data return security_group created just s.pop('created_at', None) with context.session.begin(subtransactions=True): sg = self._get_security_group(context, id) if sg['name'] == 'default' and 'name' in s: raise ext_sg.SecurityGroupCannotUpdateDefault() sg.update(s) return self._make_security_group_dict(sg) def _make_security_group_dict(self, security_group, fields=None, process_extensions=True): res = {'id': security_group['id'], 'name': security_group['name'], 'tenant_id': security_group['tenant_id'], 'description': security_group['description']} res['security_group_rules'] = [self._make_security_group_rule_dict(r) for r in security_group.rules] if process_extensions: self._apply_dict_extend_functions('security_groups', res, security_group) return self._fields(res, fields) def _make_security_group_binding_dict(self, security_group, fields=None): res = {'port_id': security_group['port_id'], 'security_group_id': security_group['security_group_id']} return self._fields(res, fields) def _create_port_security_group_binding(self, context, port_id, security_group_id): with context.session.begin(subtransactions=True): db = SecurityGroupPortBinding(port_id=port_id, security_group_id=security_group_id) context.session.add(db) def _get_port_security_group_bindings(self, context, filters=None, fields=None): return self._get_collection(context, SecurityGroupPortBinding, self._make_security_group_binding_dict, filters=filters, fields=fields) def _delete_port_security_group_bindings(self, context, port_id): query = self._model_query(context, SecurityGroupPortBinding) bindings = query.filter( 
SecurityGroupPortBinding.port_id == port_id) with context.session.begin(subtransactions=True): for binding in bindings: context.session.delete(binding) def create_security_group_rule_bulk(self, context, security_group_rule): return self._create_bulk('security_group_rule', context, security_group_rule) def create_security_group_rule_bulk_native(self, context, security_group_rule): r = security_group_rule['security_group_rules'] for rule_dict in r: rule = rule_dict['security_group_rule'] #NOTE(gongysh) unspecified address should be None remote_ip_prefix = rule.get('remote_ip_prefix') if (remote_ip_prefix == "0.0.0.0/0" or remote_ip_prefix == "::/0"): rule['remote_ip_prefix'] = None scoped_session(context.session) security_group_id = self._validate_security_group_rules( context, security_group_rule) with context.session.begin(subtransactions=True): if not self.get_security_group(context, security_group_id): raise ext_sg.SecurityGroupNotFound(id=security_group_id) self._check_for_duplicate_rules(context, r) ret = [] for rule_dict in r: rule = rule_dict['security_group_rule'] tenant_id = self._get_tenant_id_for_create(context, rule) db = SecurityGroupRule( id=uuidutils.generate_uuid(), tenant_id=tenant_id, security_group_id=rule['security_group_id'], direction=rule['direction'], remote_group_id=rule.get('remote_group_id'), ethertype=rule['ethertype'], protocol=rule['protocol'], port_range_min=rule['port_range_min'], port_range_max=rule['port_range_max'], remote_ip_prefix=rule.get('remote_ip_prefix')) context.session.add(db) ret.append(self._make_security_group_rule_dict(db)) return ret def create_security_group_rule(self, context, security_group_rule): bulk_rule = {'security_group_rules': [security_group_rule]} return self.create_security_group_rule_bulk_native(context, bulk_rule)[0] def _get_ip_proto_number(self, protocol): if protocol is None: return return IP_PROTOCOL_MAP.get(protocol, protocol) def _validate_port_range(self, rule): """Check that port_range is valid.""" if (rule['port_range_min'] is None and rule['port_range_max'] is None): return if not rule['protocol']: raise ext_sg.SecurityGroupProtocolRequiredWithPorts() ip_proto = self._get_ip_proto_number(rule['protocol']) if ip_proto in [constants.PROTO_NUM_TCP, constants.PROTO_NUM_UDP]: if (rule['port_range_min'] is not None and rule['port_range_min'] <= rule['port_range_max']): pass else: raise ext_sg.SecurityGroupInvalidPortRange() elif ip_proto == constants.PROTO_NUM_ICMP: for attr, field in [('port_range_min', 'type'), ('port_range_max', 'code')]: if rule[attr] > 255: raise ext_sg.SecurityGroupInvalidIcmpValue( field=field, attr=attr, value=rule[attr]) if (rule['port_range_min'] is None and rule['port_range_max']): raise ext_sg.SecurityGroupMissingIcmpType( value=rule['port_range_max']) def _validate_security_group_rules(self, context, security_group_rule): """Check that rules being installed. Check that all rules belong to the same security group, remote_group_id/security_group_id belong to the same tenant, and rules are valid. 
""" new_rules = set() tenant_ids = set() for rules in security_group_rule['security_group_rules']: rule = rules.get('security_group_rule') new_rules.add(rule['security_group_id']) self._validate_port_range(rule) self._validate_ip_prefix(rule) if rule.get('remote_ip_prefix') and rule['remote_group_id']: raise ext_sg.SecurityGroupRemoteGroupAndRemoteIpPrefix() if rule['tenant_id'] not in tenant_ids: tenant_ids.add(rule['tenant_id']) remote_group_id = rule.get('remote_group_id') # Check that remote_group_id exists for tenant if remote_group_id: self.get_security_group(context, remote_group_id, tenant_id=rule['tenant_id']) if len(new_rules) > 1: raise ext_sg.SecurityGroupNotSingleGroupRules() security_group_id = new_rules.pop() # Confirm single tenant and that the tenant has permission # to add rules to this security group. if len(tenant_ids) > 1: raise ext_sg.SecurityGroupRulesNotSingleTenant() for tenant_id in tenant_ids: self.get_security_group(context, security_group_id, tenant_id=tenant_id) return security_group_id def _make_security_group_rule_dict(self, security_group_rule, fields=None): res = {'id': security_group_rule['id'], 'tenant_id': security_group_rule['tenant_id'], 'security_group_id': security_group_rule['security_group_id'], 'ethertype': security_group_rule['ethertype'], 'direction': security_group_rule['direction'], 'protocol': security_group_rule['protocol'], 'port_range_min': security_group_rule['port_range_min'], 'port_range_max': security_group_rule['port_range_max'], 'remote_ip_prefix': security_group_rule['remote_ip_prefix'], 'remote_group_id': security_group_rule['remote_group_id']} return self._fields(res, fields) def _make_security_group_rule_filter_dict(self, security_group_rule): sgr = security_group_rule['security_group_rule'] res = {'tenant_id': [sgr['tenant_id']], 'security_group_id': [sgr['security_group_id']], 'direction': [sgr['direction']]} include_if_present = ['protocol', 'port_range_max', 'port_range_min', 'ethertype', 'remote_ip_prefix', 'remote_group_id'] for key in include_if_present: value = sgr.get(key) if value: res[key] = [value] return res def _check_for_duplicate_rules(self, context, security_group_rules): for i in security_group_rules: found_self = False for j in security_group_rules: if i['security_group_rule'] == j['security_group_rule']: if found_self: raise ext_sg.DuplicateSecurityGroupRuleInPost(rule=i) found_self = True # Check in database if rule exists filters = self._make_security_group_rule_filter_dict(i) db_rules = self.get_security_group_rules(context, filters) # Note(arosen): the call to get_security_group_rules wildcards # values in the filter that have a value of [None]. For # example, filters = {'remote_group_id': [None]} will return # all security group rules regardless of their value of # remote_group_id. Therefore it is not possible to do this # query unless the behavior of _get_collection() # is changed which cannot be because other methods are already # relying on this behavior. Therefore, we do the filtering # below to check for these corner cases. for db_rule in db_rules: # need to remove id from db_rule for matching id = db_rule.pop('id') if (i['security_group_rule'] == db_rule): raise ext_sg.SecurityGroupRuleExists(id=id) def _validate_ip_prefix(self, rule): """Check that a valid cidr was specified as remote_ip_prefix No need to check that it is in fact an IP address as this is already validated by attribute validators. Check that rule ethertype is consistent with remote_ip_prefix ip type. 
Add mask to ip_prefix if absent (192.168.1.10 -> 192.168.1.10/32). """ input_prefix = rule['remote_ip_prefix'] if input_prefix: addr = netaddr.IPNetwork(input_prefix) # set input_prefix to always include the netmask: rule['remote_ip_prefix'] = str(addr) # check consistency of ethertype with addr version if rule['ethertype'] != "IPv%d" % (addr.version): raise ext_sg.SecurityGroupRuleParameterConflict( ethertype=rule['ethertype'], cidr=input_prefix) def get_security_group_rules(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): marker_obj = self._get_marker_obj(context, 'security_group_rule', limit, marker) return self._get_collection(context, SecurityGroupRule, self._make_security_group_rule_dict, filters=filters, fields=fields, sorts=sorts, limit=limit, marker_obj=marker_obj, page_reverse=page_reverse) def get_security_group_rules_count(self, context, filters=None): return self._get_collection_count(context, SecurityGroupRule, filters=filters) def get_security_group_rule(self, context, id, fields=None): security_group_rule = self._get_security_group_rule(context, id) return self._make_security_group_rule_dict(security_group_rule, fields) def _get_security_group_rule(self, context, id): try: query = self._model_query(context, SecurityGroupRule) sgr = query.filter(SecurityGroupRule.id == id).one() except exc.NoResultFound: raise ext_sg.SecurityGroupRuleNotFound(id=id) return sgr def delete_security_group_rule(self, context, id): with context.session.begin(subtransactions=True): rule = self._get_security_group_rule(context, id) context.session.delete(rule) def _extend_port_dict_security_group(self, port_res, port_db): # Security group bindings will be retrieved from the sqlalchemy # model. As they're loaded eagerly with ports because of the # joined load they will not cause an extra query. security_group_ids = [sec_group_mapping['security_group_id'] for sec_group_mapping in port_db.security_groups] port_res[ext_sg.SECURITYGROUPS] = security_group_ids return port_res # Register dict extend functions for ports db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs( attr.PORTS, ['_extend_port_dict_security_group']) def _process_port_create_security_group(self, context, port, security_group_ids): if attr.is_attr_set(security_group_ids): for security_group_id in security_group_ids: self._create_port_security_group_binding(context, port['id'], security_group_id) # Convert to list as a set might be passed here and # this has to be serialized port[ext_sg.SECURITYGROUPS] = (security_group_ids and list(security_group_ids) or []) def _ensure_default_security_group(self, context, tenant_id): try: return self.__ensure_default_security_group(context, tenant_id) except db_exc.DBDuplicateEntry as e: with excutils.save_and_reraise_exception() as ctxt: if e.columns == ['tenant_id', 'name']: ctxt.reraise = False return self.__ensure_default_security_group(context, tenant_id) def __ensure_default_security_group(self, context, tenant_id): """Create a default security group if one doesn't exist. :returns: the default security group id. 
        It should not be called inside a subtransaction.
        """
        with context.session.begin(subtransactions=True):
            filters = {'name': ['default'], 'tenant_id': [tenant_id]}
            default_group = self.get_security_groups(context,
                                                     filters,
                                                     default_sg=True)
            if not default_group:
                security_group = {'security_group': {'name': 'default',
                                                     'tenant_id': tenant_id,
                                                     'description': 'default'}}
                ret = self.create_security_group(context, security_group,
                                                 True)
                return ret['id']
            else:
                return default_group[0]['id']

    def _get_security_groups_on_port(self, context, port):
        """Check that all security groups on port belong to tenant.

        :returns: all security groups IDs on port belonging to tenant.
        """
        p = port['port']
        if not attr.is_attr_set(p.get(ext_sg.SECURITYGROUPS)):
            return
        if p.get('device_owner') and p['device_owner'].startswith('network:'):
            return

        port_sg = p.get(ext_sg.SECURITYGROUPS, [])
        valid_groups = set(g['id'] for g in
                           self.get_security_groups(context, fields=['id'],
                                                    filters={'id': port_sg}))

        requested_groups = set(port_sg)
        port_sg_missing = requested_groups - valid_groups
        if port_sg_missing:
            # sets do not support indexing, so materialize one element
            raise ext_sg.SecurityGroupNotFound(
                id=str(list(port_sg_missing)[0]))

        return requested_groups

    def _ensure_default_security_group_on_port(self, context, port):
        # we don't apply security groups for dhcp, router
        if (port['port'].get('device_owner') and
                port['port']['device_owner'].startswith('network:')):
            return
        tenant_id = self._get_tenant_id_for_create(context,
                                                   port['port'])
        default_sg = self._ensure_default_security_group(context, tenant_id)
        if attr.is_attr_set(port['port'].get(ext_sg.SECURITYGROUPS)):
            sgids = port['port'].get(ext_sg.SECURITYGROUPS)
        else:
            sgids = [default_sg]
        port['port'][ext_sg.SECURITYGROUPS] = sgids

    def _check_update_deletes_security_groups(self, port):
        """Return True if the port has a security group attribute whose
        value is either [] or not is_attr_set; otherwise return False.
        """
        if (ext_sg.SECURITYGROUPS in port['port'] and
                not (attr.is_attr_set(port['port'][ext_sg.SECURITYGROUPS])
                     and port['port'][ext_sg.SECURITYGROUPS] != [])):
            return True
        return False

    def _check_update_has_security_groups(self, port):
        """Return True if the port has a non-empty, is_attr_set security
        group attribute; return False if the field is unset or [].
        """
        if (ext_sg.SECURITYGROUPS in port['port'] and
                (attr.is_attr_set(port['port'][ext_sg.SECURITYGROUPS]) and
                 port['port'][ext_sg.SECURITYGROUPS] != [])):
            return True
        return False
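

# ---------------------------------------------------------------------------
# Standalone sketch (not part of the mixin above): the netaddr behaviour
# that _validate_ip_prefix relies on. A bare address gains a full-length
# mask, and IPNetwork.version is what the "IPv%d" ethertype consistency
# check compares against. The sample addresses are placeholders.

import netaddr


def _normalized_prefix_and_version(remote_ip_prefix):
    """Return (prefix-with-mask, ip-version) the way the validator sees it."""
    addr = netaddr.IPNetwork(remote_ip_prefix)
    return str(addr), addr.version

# _normalized_prefix_and_version('192.168.1.10')  -> ('192.168.1.10/32', 4)
# _normalized_prefix_and_version('10.0.0.0/8')    -> ('10.0.0.0/8', 4)
# _normalized_prefix_and_version('fe80::1')       -> ('fe80::1/128', 6)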
CingHu/neutron-ustack
neutron/db/securitygroups_db.py
Python
apache-2.0
30,323
# -*- coding: utf-8 -*- # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from solum.api.handlers import assembly_handler from solum.objects import assembly from solum.tests import base from solum.tests import fakes from solum.tests import utils STATES = assembly.States @mock.patch('solum.objects.registry') class TestAssemblyHandler(base.BaseTestCase): def setUp(self): super(TestAssemblyHandler, self).setUp() self.ctx = utils.dummy_context() def test_assembly_get(self, mock_registry): mock_registry.return_value.Assembly.get_by_uuid.return_value = { 'plan_id': '1234' } handler = assembly_handler.AssemblyHandler(self.ctx) res = handler.get('test_id') self.assertIsNotNone(res) get_by_uuid = mock_registry.Assembly.get_by_uuid get_by_uuid.assert_called_once_with(self.ctx, 'test_id') def test_assembly_get_all(self, mock_registry): mock_registry.AssemblyList.get_all.return_value = {} handler = assembly_handler.AssemblyHandler(self.ctx) res = handler.get_all() self.assertIsNotNone(res) mock_registry.AssemblyList.get_all.assert_called_once_with(self.ctx) def test_update(self, mock_registry): data = {'user_id': 'new_user_id', 'plan_uuid': 'input_plan_uuid'} db_obj = fakes.FakeAssembly() mock_registry.Assembly.get_by_uuid.return_value = db_obj handler = assembly_handler.AssemblyHandler(self.ctx) res = handler.update('test_id', data) self.assertEqual(db_obj.user_id, res.user_id) db_obj.save.assert_called_once_with(self.ctx) db_obj.update.assert_called_once_with(data) mock_registry.Assembly.get_by_uuid.assert_called_once_with(self.ctx, 'test_id') @mock.patch('solum.worker.api.API.build') @mock.patch('solum.common.solum_keystoneclient.KeystoneClientV3') def test_create(self, mock_kc, mock_build, mock_registry): data = {'user_id': 'new_user_id', 'uuid': 'input_uuid', 'plan_uuid': 'input_plan_uuid'} db_obj = fakes.FakeAssembly() mock_registry.Assembly.return_value = db_obj fp = fakes.FakePlan() mock_registry.Plan.get_by_id.return_value = fp fp.raw_content = { 'name': 'theplan', 'artifacts': [{'name': 'nodeus', 'artifact_type': 'heroku', 'content': { 'href': 'https://example.com/ex.git'}, 'language_pack': 'auto'}]} mock_registry.Image.return_value = fakes.FakeImage() trust_ctx = utils.dummy_context() trust_ctx.trust_id = '12345' mock_kc.return_value.create_trust_context.return_value = trust_ctx handler = assembly_handler.AssemblyHandler(self.ctx) res = handler.create(data) db_obj.update.assert_called_once_with(data) db_obj.create.assert_called_once_with(self.ctx) self.assertEqual(db_obj, res) mock_build.assert_called_once_with( build_id=8, name='nodeus', assembly_id=8, source_uri='https://example.com/ex.git', test_cmd=None, base_image_id='auto', source_format='heroku', image_format='qcow2') mock_kc.return_value.create_trust_context.assert_called_once_with() @mock.patch('solum.common.solum_keystoneclient.KeystoneClientV3') @mock.patch('solum.deployer.api.API.delete_heat_stack') def test_delete(self, mock_deploy, mock_kc, mock_registry): db_obj = fakes.FakeAssembly() mock_registry.Assembly.get_by_uuid.return_value 
= db_obj handler = assembly_handler.AssemblyHandler(self.ctx) handler.delete('test_id') db_obj.save.assert_called_once_with(self.ctx) mock_registry.Assembly.get_by_uuid.assert_called_once_with(self.ctx, 'test_id') mock_kc.return_value.delete_trust.assert_called_once_with( 'trust_worthy') mock_deploy.assert_called_once_with(assem_id=db_obj.id) self.assertEqual(STATES.DELETING, db_obj.status) def test_trigger_workflow(self, mock_registry): trigger_id = 1 artifacts = [{"name": "Test", "artifact_type": "heroku", "content": {"href": "https://github.com/some/project"}, "language_pack": "auto"}] db_obj = fakes.FakeAssembly() mock_registry.Assembly.get_by_trigger_id.return_value = db_obj plan_obj = fakes.FakePlan() mock_registry.Plan.get_by_id.return_value = plan_obj plan_obj.raw_content = {"artifacts": artifacts} handler = assembly_handler.AssemblyHandler(self.ctx) handler._build_artifact = mock.MagicMock() handler._context_from_trust_id = mock.MagicMock(return_value=self.ctx) handler.trigger_workflow(trigger_id) handler._build_artifact.assert_called_once_with(db_obj, artifacts[0]) handler._context_from_trust_id.assert_called_once_with('trust_worthy') mock_registry.Assembly.get_by_trigger_id.assert_called_once_with( None, trigger_id) mock_registry.Plan.get_by_id.assert_called_once_with(self.ctx, db_obj.plan_id)
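

# ---------------------------------------------------------------------------
# Standalone sketch (not part of the handler tests): the mock.patch ordering
# convention the cases above rely on. Decorators apply bottom-up, so the
# patch closest to the method supplies the first mock argument, and the
# class-level patch ('solum.objects.registry' here) always arrives last.
# 'os.getpid' / 'os.getcwd' are arbitrary stand-in targets.

import mock


@mock.patch('os.getcwd')   # applied second -> second argument
@mock.patch('os.getpid')   # closest to the function -> first argument
def _patch_ordering_demo(mock_getpid, mock_getcwd):
    return mock_getpid, mock_getcwd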
gilbertpilz/solum
solum/tests/api/handlers/test_assembly.py
Python
apache-2.0
5,985
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from threading import Thread

import grpc
import unreal_engine as ue
import ue_site

# Google Assistant imports
from googlesdk.assistant.embedded.v1alpha1 import embedded_assistant_pb2
from googlesamples.assistant import common_settings

# General Google imports
from google.rpc import code_pb2

END_OF_UTTERANCE = embedded_assistant_pb2.ConverseResponse.END_OF_UTTERANCE
DIALOG_FOLLOW_ON = embedded_assistant_pb2.ConverseResult.DIALOG_FOLLOW_ON
CLOSE_MICROPHONE = embedded_assistant_pb2.ConverseResult.CLOSE_MICROPHONE


class ThreadedAssistant(Thread):
    def __init__(self):
        # Opaque blob provided in ConverseResponse that,
        # when provided in a follow-up ConverseRequest,
        # gives the Assistant a context marker within the current state
        # of the multi-Converse()-RPC "conversation".
        # This value, along with MicrophoneMode, supports a more natural
        # "conversation" with the Assistant.
        self.conversation_state = None
        # Create Google Assistant API gRPC client.
        self.deadline = common_settings.DEFAULT_GRPC_DEADLINE
        Thread.__init__(self)

    def __enter__(self):
        return self

    def __exit__(self, etype, e, traceback):
        if e:
            return False
        ue_site.conversation_stream.close()

    def is_grpc_error_unavailable(self, e):
        is_grpc_error = isinstance(e, grpc.RpcError)
        if is_grpc_error and (e.code() == grpc.StatusCode.UNAVAILABLE):
            ue.log_error('grpc unavailable error: %s' % e)
            return True
        return False

    def run(self):
        """Send a voice request to the Assistant and playback the response.

        Returns: True if conversation should continue.
        """
        continue_conversation = False

        ue_site.conversation_stream.start_recording()
        ue.log('Recording audio request.')

        # This generator yields ConverseResponse proto messages
        # received from the gRPC Google Assistant API.
        for resp in ue_site.assistant.Converse(self.gen_converse_requests(),
                                               self.deadline):
            # Something went wrong
            if resp.error.code != code_pb2.OK:
                ue.log_error('Server error: ' + str(resp.error.message))
                break
            # Detected the user is done talking
            if resp.event_type == END_OF_UTTERANCE:
                ue.log('End of audio request detected')
                ue_site.conversation_stream.stop_recording()
            # We parsed what the user said
            if resp.result.spoken_request_text:
                ue.log('Transcript of user request: ' +
                       str(resp.result.spoken_request_text))
            # We have a response ready to play out the speakers
            if len(resp.audio_out.audio_data) > 0:
                ue_site.conversation_stream.write(resp.audio_out.audio_data)
            # We have an updated conversation state
            if resp.result.conversation_state:
                self.conversation_state = resp.result.conversation_state
            # Volume level needs to be updated
            if resp.result.volume_percentage != 0:
                ue_site.conversation_stream.volume_percentage = (
                    resp.result.volume_percentage
                )
            # Check if user should reply
            if resp.result.microphone_mode == DIALOG_FOLLOW_ON:
                # Expecting user to reply
                continue_conversation = True
                ue.log('Expecting follow-on query from user.')
            elif resp.result.microphone_mode == CLOSE_MICROPHONE:
                # Not expecting user to reply
                continue_conversation = False

        ue.log('Finished playing assistant response.')
        ue_site.conversation_stream.stop_playback()
        return continue_conversation

    def gen_converse_requests(self):
        """Generates ConverseRequest messages to send to the API.

        This happens over multiple frames, so it should be run in a
        separate thread. Otherwise it WILL lock up the game thread
        while it's "thinking."
""" converse_state = None if self.conversation_state: ue.log('Sending converse_state: '+ str(self.conversation_state)) converse_state = embedded_assistant_pb2.ConverseState( conversation_state=self.conversation_state, ) # Generate the config for the assistant config = embedded_assistant_pb2.ConverseConfig( audio_in_config=embedded_assistant_pb2.AudioInConfig( encoding='LINEAR16', sample_rate_hertz=ue_site.conversation_stream.sample_rate, ), audio_out_config=embedded_assistant_pb2.AudioOutConfig( encoding='LINEAR16', sample_rate_hertz=ue_site.conversation_stream.sample_rate, volume_percentage=ue_site.conversation_stream.volume_percentage, ), converse_state=converse_state ) # The first ConverseRequest must contain the ConverseConfig # and no audio data. yield embedded_assistant_pb2.ConverseRequest(config=config) # Below, we actually activate the microphone and begin recording. for data in ue_site.conversation_stream: # Subsequent requests need audio data, but not config. yield embedded_assistant_pb2.ConverseRequest(audio_in=data) ue_site.conversation_stream.start_playback()
Jay2645/Unreal-Google-Assistant
Content/Scripts/threaded_assistant.py
Python
apache-2.0
4,927
""" Support for Wink binary sensors. For more details about this platform, please refer to the documentation at at https://home-assistant.io/components/binary_sensor.wink/ """ import asyncio import logging from homeassistant.components.binary_sensor import BinarySensorDevice from homeassistant.components.wink import DOMAIN, WinkDevice _LOGGER = logging.getLogger(__name__) DEPENDENCIES = ['wink'] # These are the available sensors mapped to binary_sensor class SENSOR_TYPES = { 'brightness': 'light', 'capturing_audio': 'sound', 'capturing_video': None, 'co_detected': 'gas', 'liquid_detected': 'moisture', 'loudness': 'sound', 'motion': 'motion', 'noise': 'sound', 'opened': 'opening', 'presence': 'occupancy', 'smoke_detected': 'smoke', 'vibration': 'vibration', } def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the Wink binary sensor platform.""" import pywink for sensor in pywink.get_sensors(): _id = sensor.object_id() + sensor.name() if _id not in hass.data[DOMAIN]['unique_ids']: if sensor.capability() in SENSOR_TYPES: add_entities([WinkBinarySensorDevice(sensor, hass)]) for key in pywink.get_keys(): _id = key.object_id() + key.name() if _id not in hass.data[DOMAIN]['unique_ids']: add_entities([WinkBinarySensorDevice(key, hass)]) for sensor in pywink.get_smoke_and_co_detectors(): _id = sensor.object_id() + sensor.name() if _id not in hass.data[DOMAIN]['unique_ids']: add_entities([WinkSmokeDetector(sensor, hass)]) for hub in pywink.get_hubs(): _id = hub.object_id() + hub.name() if _id not in hass.data[DOMAIN]['unique_ids']: add_entities([WinkHub(hub, hass)]) for remote in pywink.get_remotes(): _id = remote.object_id() + remote.name() if _id not in hass.data[DOMAIN]['unique_ids']: add_entities([WinkRemote(remote, hass)]) for button in pywink.get_buttons(): _id = button.object_id() + button.name() if _id not in hass.data[DOMAIN]['unique_ids']: add_entities([WinkButton(button, hass)]) for gang in pywink.get_gangs(): _id = gang.object_id() + gang.name() if _id not in hass.data[DOMAIN]['unique_ids']: add_entities([WinkGang(gang, hass)]) for door_bell_sensor in pywink.get_door_bells(): _id = door_bell_sensor.object_id() + door_bell_sensor.name() if _id not in hass.data[DOMAIN]['unique_ids']: add_entities([WinkBinarySensorDevice(door_bell_sensor, hass)]) for camera_sensor in pywink.get_cameras(): _id = camera_sensor.object_id() + camera_sensor.name() if _id not in hass.data[DOMAIN]['unique_ids']: try: if camera_sensor.capability() in SENSOR_TYPES: add_entities([WinkBinarySensorDevice(camera_sensor, hass)]) except AttributeError: _LOGGER.info("Device isn't a sensor, skipping") class WinkBinarySensorDevice(WinkDevice, BinarySensorDevice): """Representation of a Wink binary sensor.""" def __init__(self, wink, hass): """Initialize the Wink binary sensor.""" super().__init__(wink, hass) if hasattr(self.wink, 'unit'): self._unit_of_measurement = self.wink.unit() else: self._unit_of_measurement = None if hasattr(self.wink, 'capability'): self.capability = self.wink.capability() else: self.capability = None @asyncio.coroutine def async_added_to_hass(self): """Call when entity is added to hass.""" self.hass.data[DOMAIN]['entities']['binary_sensor'].append(self) @property def is_on(self): """Return true if the binary sensor is on.""" return self.wink.state() @property def device_class(self): """Return the class of this sensor, from DEVICE_CLASSES.""" return SENSOR_TYPES.get(self.capability) @property def device_state_attributes(self): """Return the device state attributes.""" 
return super().device_state_attributes class WinkSmokeDetector(WinkBinarySensorDevice): """Representation of a Wink Smoke detector.""" @property def device_state_attributes(self): """Return the device state attributes.""" _attributes = super().device_state_attributes _attributes['test_activated'] = self.wink.test_activated() return _attributes class WinkHub(WinkBinarySensorDevice): """Representation of a Wink Hub.""" @property def device_state_attributes(self): """Return the device state attributes.""" _attributes = super().device_state_attributes _attributes['update_needed'] = self.wink.update_needed() _attributes['firmware_version'] = self.wink.firmware_version() _attributes['pairing_mode'] = self.wink.pairing_mode() _kidde_code = self.wink.kidde_radio_code() if _kidde_code is not None: # The service call to set the Kidde code # takes a string of 1s and 0s so it makes # sense to display it to the user that way _formatted_kidde_code = "{:b}".format(_kidde_code).zfill(8) _attributes['kidde_radio_code'] = _formatted_kidde_code return _attributes class WinkRemote(WinkBinarySensorDevice): """Representation of a Wink Lutron Connected bulb remote.""" @property def device_state_attributes(self): """Return the state attributes.""" _attributes = super().device_state_attributes _attributes['button_on_pressed'] = self.wink.button_on_pressed() _attributes['button_off_pressed'] = self.wink.button_off_pressed() _attributes['button_up_pressed'] = self.wink.button_up_pressed() _attributes['button_down_pressed'] = self.wink.button_down_pressed() return _attributes @property def device_class(self): """Return the class of this sensor, from DEVICE_CLASSES.""" return None class WinkButton(WinkBinarySensorDevice): """Representation of a Wink Relay button.""" @property def device_state_attributes(self): """Return the device state attributes.""" _attributes = super().device_state_attributes _attributes['pressed'] = self.wink.pressed() _attributes['long_pressed'] = self.wink.long_pressed() return _attributes class WinkGang(WinkBinarySensorDevice): """Representation of a Wink Relay gang.""" @property def is_on(self): """Return true if the gang is connected.""" return self.wink.state()
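

# ---------------------------------------------------------------------------
# Illustrative helper (not part of the platform): how the SENSOR_TYPES table
# above resolves a pywink capability string to a binary_sensor device class.
# Unknown capabilities fall back to None, which is also what
# WinkBinarySensorDevice.device_class reports for them.

def resolve_device_class(capability):
    """Map a Wink capability to a Home Assistant device class (or None)."""
    return SENSOR_TYPES.get(capability)

# resolve_device_class('opened')  -> 'opening'
# resolve_device_class('motion')  -> 'motion'
# resolve_device_class('other')   -> None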
persandstrom/home-assistant
homeassistant/components/binary_sensor/wink.py
Python
apache-2.0
6,764
#!/usr/bin/env python # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # import sys, glob import logging import random import os sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/gen-py') from radical_interface import RadicalPilotInterface import radical.pilot as rp from thrift.transport import TSocket from thrift.transport import TTransport from thrift.protocol import TBinaryProtocol from thrift.server import TServer def extract_configs(task_file): configs = {} index = 0 task_desc = open(task_file, 'r').readlines() # Set some default pilot confs pilot_confs = {'mongodb' : 'mongodb://127.0.0.1:50055', 'userpass' : 'userpass', 'cleanup' : False} while index < len(task_desc): if (task_desc[index].startswith("attr.radical-pilot.")): l = len("attr.radical-pilot.") [key,value] = task_desc[index][l:].strip('\n').split("=") pilot_confs[key]= value index += 1 configs['pilot_confs'] = pilot_confs print "Extracted configs : ", configs return configs def pilot_state_cb (pilot, state) : print "[Callback]: ComputePilot '%s' state: %s." % (pilot.uid, state) if not pilot: return if state == rp.FAILED : sys.exit (1) def unit_state_cb (unit, state) : if not unit: return print "[Callback]: ComputeUnit '%s' state: %s." % (unit.uid, state) def rp_radical_init (configs): print "[rp_radical_init]" try: session = rp.Session(database_url=configs['pilot_confs']['mongodb']) c = rp.Context(configs['pilot_confs']['userpass']) session.add_context(c) print "Initializing Pilot Manager ..." pmgr = rp.PilotManager(session=session) pmgr.register_callback(pilot_state_cb) # Combine the ComputePilot, the ComputeUnits and a scheduler via # a UnitManager object. print "Initializing Unit Manager ..." umgr = rp.UnitManager (session=session, scheduler=rp.SCHED_DIRECT_SUBMISSION) # Register our callback with the UnitManager. This callback will get # called every time any of the units managed by the UnitManager # change their state. umgr.register_callback(unit_state_cb) pdesc = rp.ComputePilotDescription () pdesc.resource = configs['pilot_confs']['resource'] pdesc.runtime = int(configs['pilot_confs']['runtime']) pdesc.cores = int(configs['pilot_confs']['cores']) pdesc.cleanup = True if configs['pilot_confs']['cleanup'] in ["true", "True"] else False # submit the pilot. print "Submitting Compute Pilot to Pilot Manager ..." pilot = pmgr.submit_pilots(pdesc) # Add the created ComputePilot to the UnitManager. print "Registering Compute Pilot with Unit Manager ..." 
        umgr.add_pilots(pilot)

        #session = "session_name"
        #pmgr = "pmgr_foo"
        #umgr = "umpr_blah"
        return [session, pmgr, umgr]

    except Exception as e:
        print "An error occurred: %s" % ((str(e)))
        sys.exit (-1)


def filepath_cleanup(filepath):
    fpath = filepath.strip('\n')
    if fpath.startswith('file://localhost/'):
        l = len('file://localhost/')
        fpath = fpath[l:]
    return fpath


def rp_compose_compute_unit(task_filename):
    task_desc = open(task_filename, 'r').readlines()
    index = 0
    args = []
    stageins = []
    stageouts = []
    env_vars = {}

    while index < len(task_desc):
        # We don't process directory options.
        if (task_desc[index].startswith("directory=")):
            l = len("directory=")

        elif (task_desc[index].startswith("env.")):
            l = len("env.")
            [key,value] = task_desc[index][l:].strip('\n').split("=")
            env_vars[key] = value

        elif (task_desc[index].startswith("executable=")):
            l = len("executable=")
            executable = task_desc[index][l:].strip('\n')

        elif (task_desc[index].startswith("arg=")):
            l = len("arg=")
            args.append(task_desc[index][l:].strip('\n'))

        elif (task_desc[index].startswith("stagein.source=")):
            stagein_item = {}
            l = len("stagein.source=")
            stagein_item['source'] = filepath_cleanup(task_desc[index][l:])
            index += 1

            if (task_desc[index].startswith("stagein.destination=")):
                l = len("stagein.destination=")
                stagein_item['destination'] = filepath_cleanup(task_desc[index][l:])
                index += 1

                if (task_desc[index].startswith("stagein.mode=")):
                    l = len("stagein.mode=")
                    # Ignore mode for now
                    #stagein_item['destination'] = task_desc[index][l:].strip('\n')
                    #index += 1
                else:
                    index -= 1
            else:
                print "[ERROR] Stagein source must have a destination"

            stageins.append(stagein_item)

        elif (task_desc[index].startswith("stageout.source=")):
            stageout_item = {}
            l = len("stageout.source=")
            stageout_item['source'] = filepath_cleanup(task_desc[index][l:])
            index += 1

            if (task_desc[index].startswith("stageout.destination=")):
                l = len("stageout.destination=")
                stageout_item['destination'] = filepath_cleanup(task_desc[index][l:])
                index += 1

                if (task_desc[index].startswith("stageout.mode=")):
                    l = len("stageout.mode=")
                    # Ignore mode for now
                    #stageout_item['destination'] = task_desc[index][l:].strip('\n')
                    #index += 1
                else:
                    index -= 1
            else:
                print "[ERROR] Stageout source must have a destination"

            stageouts.append(stageout_item)

        else:
            logging.debug("ignoring option : {0}".format(task_desc[index].strip('\n')))

        index += 1

    logging.debug("ARGS : {0}".format(args))
    logging.debug("EXEC : {0}".format(executable))
    logging.debug("STAGEINS : {0}".format(stageins))
    logging.debug("STAGEOUTS : {0}".format(stageouts))

    cudesc = rp.ComputeUnitDescription()
    cudesc.environment = env_vars
    cudesc.executable = executable
    cudesc.arguments = args
    cudesc.cores = 1
    cudesc.input_staging = stageins
    cudesc.output_staging = stageouts

    return [cudesc]


def rp_submit_task(unit_manager, task_filename):
    cu_desc = rp_compose_compute_unit(task_filename)
    c_unit = unit_manager.submit_units(cu_desc)
    return c_unit


class RadicalPilotHandler:
    def __init__(self):
        self.session = 'NULL'
        self.pmgr = 'NULL'
        self.umgr = 'NULL'
        self.log = {}
        self.configs = {}
        #self.rp_lock = threading.Lock()
        self.task_lookup = {}
        self.session = 'NULL'
        logging.debug("Init done")

    def submit_task(self, task_filename):
        print "[SUBMIT_TASK] :", task_filename

        # If self.configs is empty, this is the first task, which requires
        # radical pilots to be setup
        if not self.configs:
            logging.debug("[SUBMIT_TASK] : Starting radical.pilots")
            self.configs = extract_configs(task_filename)
            logging.debug("Extracting configs done")
            [self.session,
             self.pmgr, self.umgr] = rp_radical_init(self.configs)
            print [self.session, self.pmgr, self.umgr]
            logging.debug("done with radical_init")

        cu_list = rp_submit_task(self.umgr, task_filename)
        print cu_list[0]

        hash_id = str(len(self.task_lookup))
        self.task_lookup[hash_id] = cu_list[0]
        return hash_id

    def cancel_task(self, task_name):
        logging.debug("Cancelling task :" + task_name)
        return "Cancelled task"

    def status_task(self, task_name):
        radical_states = {'PendingExecution': 'Q',
                          'Scheduling': 'Q',
                          'Executing': 'R',
                          'Done': 'C',
                          'Failed': 'F'}

        if task_name not in self.task_lookup:
            return str(task_name) + " F -1 Task id not in the Radical Pilot lookup registry"

        state = self.task_lookup[task_name].state
        if state not in radical_states:
            logging.debug("[DEBUG] task_name:" + task_name + " state: " + state)
            return str(task_name) + " Q"

        logging.debug("[DEBUG] task_name:{0} state:{1}".format(task_name, state))
        return str(task_name) + " " + radical_states[state]

    def server_die(self, die_string):
        logging.debug("Server terminating. Received message: " + die_string)
        sys.exit(0)

    def getStruct(self, key):
        print 'getStruct(%d)' % (key)
        return self.log[key]

    def zip(self):
        print 'zip()'


# Start logging
if len(sys.argv) < 2:
    print "[ERROR] Missing log_file argument"
    sys.exit(1)

logging.basicConfig(filename=sys.argv[1], level=logging.DEBUG)
logging.debug('Starting the server...')

handler = RadicalPilotHandler()
processor = RadicalPilotInterface.Processor(handler)
transport = TSocket.TServerSocket(port=9090)
tfactory = TTransport.TBufferedTransportFactory()
pfactory = TBinaryProtocol.TBinaryProtocolFactory()

server = TServer.TSimpleServer(processor, transport, tfactory, pfactory)

# You could do one of these for a multithreaded server
#server = TServer.TThreadedServer(processor, transport, tfactory, pfactory)
#server = TServer.TThreadPoolServer(processor, transport, tfactory, pfactory)

server.serve()
logging.debug('done.')
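
# Illustrative task description (an assumption pieced together from the
# parsing code above, not a file shipped with this repo). extract_configs()
# consumes the attr.radical-pilot.* keys; rp_compose_compute_unit() consumes
# the rest:
#
#   attr.radical-pilot.mongodb=mongodb://127.0.0.1:50055
#   attr.radical-pilot.resource=local.localhost
#   attr.radical-pilot.runtime=20
#   attr.radical-pilot.cores=1
#   executable=/bin/echo
#   arg=hello
#   env.MY_VAR=1
#   stagein.source=file://localhost//tmp/input.txt
#   stagein.destination=input.txt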
radical-cybertools/aimes.swiftrp
thrift_tests/server.py
Python
apache-2.0
9,371
#!/usr/bin/env python2.7 from __future__ import absolute_import from __future__ import division from __future__ import print_function from argparse import ArgumentParser from collections import OrderedDict from contextlib import contextmanager import json import logging import multiprocessing import os import shutil import subprocess import sys import cv2 import numpy LOG_LEVELS = ( logging.CRITICAL, logging.ERROR, logging.WARNING, logging.INFO, logging.DEBUG ) LOG_LEVEL_TO_NAMES = OrderedDict((level, logging.getLevelName(level).lower()) for level in LOG_LEVELS) LOG_NAME_TO_LEVEL = OrderedDict((name, level) for level, name in LOG_LEVEL_TO_NAMES.items()) VIDEO_EXTENSION = 'webm' YOUTUBE_VIDEO_FORMAT = '242' YOUTUBE_AUDIO_FORMAT = '171' THRESHOLD = 30 CLIPS_OUTPUT_DIR = os.path.join('html', 'clips') MIN_CLIP_LENGTH = 1 MAX_CLIP_LENGTH = 5 LISTINGS_PATH = os.path.join('html', 'listings.json') def main(argv=None): args = parse_args(argv=argv) configure_logger(args) command = args.command if command == 'bulk': bulk(args) elif command == 'download': download_trailer(args) elif command == 'find': find_scenes(args) elif command == 'render': render_clips(args) elif command == 'listing': make_listing(args) else: raise RuntimeError('Invalid command {}'.format(args.command)) def parse_args(argv=None): if argv is None: argv = sys.argv parser = ArgumentParser() parser.add_argument('-l', '--log-level', choices=LOG_NAME_TO_LEVEL.keys(), default=LOG_LEVEL_TO_NAMES[logging.INFO]) subparsers = parser.add_subparsers(dest='command') bulk = subparsers.add_parser('bulk') bulk.add_argument('-m', '--max-length', default=MAX_CLIP_LENGTH, type=float) bulk.add_argument('-n', '--min-length', default=MIN_CLIP_LENGTH, type=float) bulk.add_argument('-c', '--trailers_config_path', default='trailers.json') bulk.add_argument('-l', '--listings_path', default=LISTINGS_PATH) bulk.add_argument('-o', '--trailers_output_dir', default='trailers') bulk.add_argument('-s', '--scenes_output_dir', default='scenes') bulk.add_argument('-t', '--clips_output_dir', default=CLIPS_OUTPUT_DIR) bulk.add_argument('-d', '--download', dest='download', action='store_true') bulk.add_argument('-D', '--skip-download', dest='download', action='store_false') bulk.set_defaults(download=True) bulk.add_argument('-u', '--search-scenes', dest='search_scenes', action='store_true') bulk.add_argument('-U', '--skip-search-scenes', dest='search_scenes', action='store_false') bulk.set_defaults(search_scenes=True) bulk.add_argument('-r', '--render', dest='render', action='store_true') bulk.add_argument('-R', '--skip-render', dest='render', action='store_false') bulk.set_defaults(render=True) download = subparsers.add_parser('download') download.add_argument('youtube_id') download.add_argument('output_filename') download.add_argument('-v', '--video_format', default=YOUTUBE_VIDEO_FORMAT) download.add_argument('-a', '--audio_format', default=YOUTUBE_AUDIO_FORMAT) find = subparsers.add_parser('find') find.add_argument('-t', '--threshold', default=THRESHOLD, type=int) find.add_argument('video_path') find.add_argument('output_dir') render = subparsers.add_parser('render') render.add_argument('-m', '--max-length', default=MAX_CLIP_LENGTH, type=float) render.add_argument('-n', '--min-length', default=MIN_CLIP_LENGTH, type=float) render.add_argument('scenes_path') render.add_argument('video_path') render.add_argument('output_dir') listing = subparsers.add_parser('listing') listing.add_argument('clips_dir') listing.add_argument('listing_path') return 
parser.parse_args(args=argv[1:]) def configure_logger(args): global logger logging.basicConfig(datefmt='%H:%M:%S', format='[%(levelname).1s %(asctime)s] %(message)s', level=LOG_NAME_TO_LEVEL[args.log_level]) logger = logging.getLogger(__name__) def bulk(args): with open(args.trailers_config_path) as trailers_config_file: trailers_config = json.load(trailers_config_file) trailers_output_dir = args.trailers_output_dir ensure_dir(trailers_output_dir) scenes_output_dir = args.scenes_output_dir ensure_dir(scenes_output_dir) clips_output_dir = args.clips_output_dir ensure_dir(clips_output_dir) # XXX: Only run task so OpenCV doesn't corrupt itself up, had problems when # opening another video in the same process, would open the video and # immediately close. pool = multiprocessing.Pool(maxtasksperchild=1) for trailer in trailers_config['trailers']: pool.apply_async(create_clips_for_trailer, [trailer, trailers_output_dir, scenes_output_dir, clips_output_dir, args.download, args.search_scenes]) pool.close() pool.join() for trailer in trailers_config['trailers']: video_path = get_video_file_name(trailers_output_dir, trailer['name']) scene_file = get_scenes_file_name(video_path, scenes_output_dir) if args.render: _render_clips(video_path, clips_output_dir, scene_file, min_length=args.min_length, max_length=args.max_length) _make_listing(os.path.join(clips_output_dir, '..')) def get_video_file_name(output_dir, name): return os.path.join(output_dir, name) def create_clips_for_trailer(trailer, trailers_output_dir, scenes_output_dir, clips_output_dir, download=True, search_scenes=True): output_path = get_video_file_name(trailers_output_dir, trailer['name']) if download: _download_trailer(output_path, trailer['youtube_id']) logger.info('Searching %s', output_path) if search_scenes: _find_scenes(output_path, scenes_output_dir) def download_trailer(args): _download_trailer(args.output_filename, args.youtube_id, video_format=args.video_format, audio_format=args.audio_format) def _download_trailer( output_filename, youtube_id, video_format=YOUTUBE_VIDEO_FORMAT, audio_format=YOUTUBE_AUDIO_FORMAT): logger.info('Downloading %s ...', output_filename) subprocess.check_call([ 'youtube-dl', '-o', '{}'.format(output_filename), 'https://www.youtube.com/watch?v={}'.format(youtube_id), '-f', '{}+{}'.format(video_format, audio_format) ]) # XXX: youtube-dl leaves some artifacts of the audio and video streams it # downloaded so we'll delete them. 
def unlink_download_artifacts(output_filename, dl_format): extension = os.path.splitext(output_filename)[1] output_dir = os.path.dirname(os.path.realpath(output_filename)) output_basename = os.path.basename(os.path.realpath(output_filename)) basename = os.path.splitext(output_basename)[0] artifact = '{}.f{}{}'.format(basename, dl_format, extension) os.unlink(os.path.join(output_dir, artifact)) unlink_download_artifacts(output_filename, video_format) unlink_download_artifacts(output_filename, audio_format) def find_scenes(args): _find_scenes(args.video_path, args.output_dir, threshold=args.threshold) def get_scenes_file_name(video_path, output_dir): video_name = os.path.basename(video_path) video_stem, video_ext = os.path.splitext(video_name) scenes_name = '{stem}.json'.format(stem=video_stem) return os.path.join(output_dir, scenes_name) def _find_scenes(video_path, output_dir, threshold=THRESHOLD): ensure_dir(output_dir) scenes_path = get_scenes_file_name(video_path, output_dir) with video_capture(video_path) as cap: scene_splitter = SceneFinder(cap, threshold) scenes = scene_splitter.find_scenes() if len(scenes) == 0: logger.error('No scenes found for %s' % video_path) with open(scenes_path, 'w') as scenes_file: json.dump(scenes, scenes_file) return scenes_path @contextmanager def video_capture(vido_path): cap = cv2.VideoCapture(vido_path) yield cap cap.release() class SceneFinder(object): def __init__(self, cap, threshold): self._cap = cap self._threshold = threshold self._find_scenes_called = False self._in_fade = False self._scenes = [] self._start_index = 0 self._video_width = self._get_int_prop('FRAME_WIDTH') self._video_height = self._get_int_prop('FRAME_HEIGHT') self._video_fps = self._get_int_prop('FPS') def _get_int_prop(self, prop_name): name = 'CV_CAP_PROP_{prop_name}'.format(prop_name=prop_name) return int(self._cap.get(getattr(cv2.cv, name))) def find_scenes(self): if not self._find_scenes_called: self._find_scenes_called = True for index, frame in enumerate(self._frames()): self._check_frame(index, frame) return self._scenes def _frames(self): while True: ret, frame = self._cap.read() if not ret: logger.info('Stopping on frame %d' % self._start_index) if self._start_index == 0: logger.error('Not able to read any frames') raise StopIteration yield cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) def _check_frame(self, index, frame): if self._count_light_pixels(frame) == 0: if not self._in_fade: self._in_fade = True self._add_frame(self._start_index, index) elif self._in_fade: self._in_fade = False self._start_index = index def _count_light_pixels(self, frame): return numpy.count_nonzero(frame > self._threshold) def _add_frame(self, start_index, stop_index): def timestamp(index): return index / self._video_fps scene = (timestamp(start_index), timestamp(stop_index)) logger.info('Scene: %.1f %.1f', *scene) self._scenes.append(scene) def render_clips(args): _render_clips( args.video_path, args.output_dir, args.scenes_path, min_length=args.min_length, max_length=args.max_length ) def _render_clips(video_path, output_dir, scenes_path, min_length=MIN_CLIP_LENGTH, max_length=MAX_CLIP_LENGTH): video_name = os.path.basename(video_path) video_stem, video_ext = os.path.splitext(video_name) clips_dir = os.path.join(output_dir, video_stem) ensure_dir(output_dir) if os.path.isdir(clips_dir): shutil.rmtree(clips_dir) os.mkdir(clips_dir) with open(scenes_path) as scenes_file: scenes = json.load(scenes_file) def min_max_length(scene): return min_length < scene[1] - scene[0] < max_length scenes = 
filter(min_max_length, scenes) pool = multiprocessing.Pool() for index, (start_time, stop_time) in enumerate(scenes): clip_name = '{}-{}.{}'.format(video_stem, index, VIDEO_EXTENSION) clip_path = os.path.join(clips_dir, clip_name) if os.path.exists(clip_path): os.remove(clip_path) pool.apply_async(render_clip, [video_path, clip_path, start_time, stop_time]) pool.close() pool.join() def render_clip(video_path, clip_path, start_time, stop_time): logger.info('Rendering %s ...', clip_path) subprocess.check_call([ '/usr/bin/ffmpeg', '-ss', str(start_time), '-t', str(stop_time - start_time), '-i', video_path, '-c:v', 'libvpx', '-c:a', 'libvorbis', clip_path, ]) def ensure_dir(path): if not os.path.exists(path): os.mkdir(path) def make_listing(args): _make_listing(args.clips_dir, listing_path=args.listing_path) def _make_listing(clips_dir, listing_path=LISTINGS_PATH): listing = {'videos': []} for root, dirs, files in os.walk(clips_dir): for file_ in files: if os.path.splitext(file_)[1] != '.{}'.format(VIDEO_EXTENSION): continue common_prefix = os.path.commonprefix([clips_dir, root]) path = os.path.join(root[len(common_prefix) + 1:], file_) listing['videos'].append(path) with open(listing_path, 'w') as listing_file: json.dump(listing, listing_file) if __name__ == '__main__': sys.exit(main())
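
# Usage sketch, derived from the argparse subcommands defined above (the
# paths and <youtube_id> are placeholders, not values from this repo):
#
#   python2.7 infinite_trailer.py download <youtube_id> trailers/example.webm
#   python2.7 infinite_trailer.py find trailers/example.webm scenes
#   python2.7 infinite_trailer.py render scenes/example.json trailers/example.webm html/clips
#   python2.7 infinite_trailer.py listing html/clips html/listings.json
#   python2.7 infinite_trailer.py bulk --skip-download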
mariosgohan/infinite-trailer
infinite_trailer.py
Python
apache-2.0
12,916
# Copyright (C) 2016 Ross Wightman. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # ============================================================================== from __future__ import print_function from six import iteritems from cv_bridge import CvBridge, CvBridgeError from collections import defaultdict import os import sys import fnmatch import subprocess import cv2 import yaml import rosbag import datetime SEC_PER_NANOSEC = 1e9 MIN_PER_NANOSEC = 6e10 LEFT_CAMERA_TOPIC = "/left_camera/image_color" CENTER_CAMERA_TOPIC = "/center_camera/image_color" RIGHT_CAMERA_TOPIC = "/right_camera/image_color" LEFT_CAMERA_COMPRESSED_TOPIC = LEFT_CAMERA_TOPIC + "/compressed" CENTER_CAMERA_COMPRESSED_TOPIC = CENTER_CAMERA_TOPIC + "/compressed" RIGHT_CAMERA_COMPRESSED_TOPIC = RIGHT_CAMERA_TOPIC + "/compressed" CAMERA_TOPICS = [LEFT_CAMERA_TOPIC, CENTER_CAMERA_TOPIC, RIGHT_CAMERA_TOPIC, LEFT_CAMERA_COMPRESSED_TOPIC, CENTER_CAMERA_COMPRESSED_TOPIC, RIGHT_CAMERA_COMPRESSED_TOPIC] CENTER_CAMERA_TOPICS = [CENTER_CAMERA_TOPIC, CENTER_CAMERA_COMPRESSED_TOPIC] STEERING_TOPIC = "/vehicle/steering_report" GPS_FIX_TOPIC = "/vehicle/gps/fix" GPS_FIX_NEW_TOPIC = "/fix" WHEEL_SPEED_TOPIC = "/vehicle/wheel_speed_report" THROTTLE_TOPIC = "/vehicle/throttle_report" BRAKE_TOPIC = "/vehicle/brake_report" GEAR_TOPIC = "/vehicle/gear_report" IMU_TOPIC = "/vehicle/imu/data_raw" OTHER_TOPICS = [ WHEEL_SPEED_TOPIC, THROTTLE_TOPIC, BRAKE_TOPIC, GEAR_TOPIC, IMU_TOPIC] CAMERA_REMAP_LCCL = { LEFT_CAMERA_TOPIC: CENTER_CAMERA_TOPIC, LEFT_CAMERA_COMPRESSED_TOPIC: CENTER_CAMERA_COMPRESSED_TOPIC, CENTER_CAMERA_TOPIC: LEFT_CAMERA_TOPIC, CENTER_CAMERA_COMPRESSED_TOPIC: LEFT_CAMERA_COMPRESSED_TOPIC, 'left_camera': 'center_camera', 'center_camera': 'left_camera', } def check_remap_hack(filename): if fnmatch.fnmatch(filename, "2016-10-25*.bag"): print(filename, 'matches remap hack.') return CAMERA_REMAP_LCCL else: return {} def get_bag_info(bag_file, nanosec=True): info = yaml.load(subprocess.Popen( ['rosbag', 'info', '--yaml', bag_file], stdout=subprocess.PIPE).communicate()[0]) if nanosec: if 'start' in info: info['start'] = int(info['start']*1e9) if 'end' in info: info['end'] = int(info['end']*1e9) if 'duration' in info: info['duration'] = int(info['duration']*1e9) return info def get_topic_names(bag_info_yaml): topic_names = [] topics = bag_info_yaml['topics'] for t in topics: topic_names.append(t['topic']) return topic_names def ns_to_str(timestamp_ns): secs = timestamp_ns / 1e9 dt = datetime.datetime.fromtimestamp(secs) return dt.strftime('%Y-%m-%dT%H:%M:%S.%f') class BagReader(object): def __init__(self, bagfiles, topics, remap_camera={}): self.bagfiles = bagfiles self.topics = topics self._remap_camera = remap_camera def read_messages(self): for f in self.bagfiles: with rosbag.Bag(f, "r") as bag: for topic, msg, _ in bag.read_messages(topics=self.topics): if self._remap_camera and topic in self._remap_camera: topic = self._remap_camera[topic] msg.header.frame_id = self._remap_camera[msg.header.frame_id] yield topic, msg JOIN_THRESH_NS = 10 * MIN_PER_NANOSEC class BagSet(object): def __init__(self, name, bagfiles, filter_topics, remap_camera={}): self.name = name self.files = sorted(bagfiles) self.infos = [] self.topic_map = defaultdict(list) self.start_time = None self.end_time = None self._remap_camera = remap_camera 
self._process_infos(filter_topics) def _process_infos(self, filter_topics): for f in self.files: print("Extracting bag info %s" % f) sys.stdout.flush() info = get_bag_info(f) if 'start' not in info or 'end' not in info: print('Ignoring info %s without start/end time' % info['path']) continue if self._remap_camera and check_remap_hack(os.path.basename(f)): info['remap'] = self._remap_camera info_start = info['start'] info_end = info['end'] if not self.start_time or not self.end_time: self._extend_range(info_start, info_end) elif (info_start - JOIN_THRESH_NS) <= self.end_time and self.start_time <= (info_end + JOIN_THRESH_NS): self._extend_range(info_start, info_end) else: print('Orphaned bag info time range, are there multiple datasets in same folder?') continue self.infos.append(info) if self._remap_camera: filter_topics = self._filter_topics_remap(filter_topics) filtered = [x['topic'] for x in info['topics'] if not filter_topics or x['topic'] in filter_topics] gps_fix_replace = False if GPS_FIX_NEW_TOPIC in filtered and GPS_FIX_TOPIC in filtered: print("New GPS fix topic %s replacing old %s" % (GPS_FIX_NEW_TOPIC, GPS_FIX_TOPIC)) gps_fix_replace = True for x in filtered: if gps_fix_replace and x == GPS_FIX_TOPIC: # skip old gps topic continue self.topic_map[x].append((info['start'], info['path'])) self.topic_map[x] = sorted(self.topic_map[x]) def _extend_range(self, start_time, end_time): if not self.start_time or start_time < self.start_time: self.start_time = start_time if not self.end_time or end_time > self.end_time: self.end_time = end_time def _filter_topics_remap(self, filters): return [self._remap_camera[x] if x in self._remap_camera else x for x in filters] def write_infos(self, dest): for info in self.infos: info_path = os.path.splitext(os.path.basename(info['path']))[0] write_file = os.path.join(dest, info_path + '.yaml') with open(write_file, 'w') as f: yaml.dump(info, f) def get_message_count(self, topic_filter=[]): count = 0 for info in self.infos: if self._remap_camera: topic_filter = self._filter_topics_remap(topic_filter) filtered = [x['topic'] for x in info['topics'] if not topic_filter or x['topic'] in topic_filter] gps_fix_replace = False if GPS_FIX_NEW_TOPIC in filtered and GPS_FIX_TOPIC in filtered: gps_fix_replace = True for topic in info['topics']: if ((not topic_filter or topic['topic'] in topic_filter) and (not gps_fix_replace or topic['topic'] != GPS_FIX_TOPIC)): count += topic['messages'] return count def get_readers(self): readers = [] for topic, timestamp_files in iteritems(self.topic_map): starts, files = zip(*timestamp_files) merged = False for r in readers: if r.bagfiles == files: r.topics.append(topic) merged = True if not merged: readers.append(BagReader(bagfiles=files, topics=[topic], remap_camera=self._remap_camera)) return readers def __repr__(self): return "start: %s, end: %s, topic_map: %s" % (self.start_time, self.end_time, str(self.topic_map)) def find_bagsets(directory, filter_topics=[], pattern="*.bag"): sets = [] for root, dirs, files in os.walk(directory): matched_files = [] remap_camera = {} for basename in files: if fnmatch.fnmatch(basename, pattern): if not remap_camera: remap_camera = check_remap_hack(basename) filename = os.path.join(root, basename) matched_files.append(filename) if matched_files: set_name = os.path.relpath(root, directory) bag_set = BagSet(set_name, matched_files, filter_topics, remap_camera) sets.append(bag_set) return sets class BagCursor(object): def __init__(self, reader): self.latest_timestamp = None self.read_count 
= 0 self.done = False self.vals = [] self.reader = reader self._iter = reader.read_messages() def __bool__(self): return not self.done __nonzero__ = __bool__ # Advance cursor by one element, store element vals list def advance(self, n=1): if self.done: return False try: while n > 0: topic, msg = next(self._iter) self.read_count += 1 timestamp = msg.header.stamp.to_nsec() if not self.latest_timestamp or timestamp > self.latest_timestamp: self.latest_timestamp = timestamp self.vals.append((timestamp, topic, msg)) n -= 1 except StopIteration: self.done = True return not self.done # Advance cursor by relative time duration in nanoseconds def advance_by(self, duration_ns): if not self.latest_timestamp and not self.advance(): return False start_time_ns = self.latest_timestamp while self.advance(): elapsed = self.latest_timestamp - start_time_ns if elapsed >= duration_ns: break return not self.done # Advance cursor until specified absolute time in nanoseconds def advance_until(self, end_time_ns): while self.advance(): if self.latest_timestamp >= end_time_ns: break return not self.done def collect_vals(self, dest): dest.extend(self.vals) self.vals = [] def clear_vals(self): self.vals = [] def __repr__(self): return "Cursor for bags: %s, topics: %s" % (str(self.reader.bagfiles), str(self.reader.topics)) class CursorGroup(object): def __init__(self, readers=[], cursors=[]): # a group can be created from readers or existing cursors, if readers: assert not cursors self.cursors = [BagCursor(r) for r in readers] elif cursors: self.cursors = cursors def __bool__(self): for c in self.cursors: if c: return True return False __nonzero__ = __bool__ def advance(self, n=1): all_done = True for c in self.cursors: if c and c.advance(n): all_done = False return not all_done # Advance all cursors by specified duration # Risk of cursors drifting over time from each other def advance_by(self, duration_ns=1*SEC_PER_NANOSEC): all_done = True for c in self.cursors: if c and c.advance_by(duration_ns): all_done = False return not all_done # Advance all cursors up to same end time def advance_until(self, end_time_ns): all_done = True for c in self.cursors: if c and c.advance_until(end_time_ns): all_done = False return not all_done # Advance the first ready cursor in group by specified amount and bring the reset # up to same resulting end time. # Risk of pulling in large amounts of data if leading stream has a large gap. def advance_by_until(self, duration_ns=1*SEC_PER_NANOSEC): all_done = True end_time_ns = None for c in self.cursors: ready = False if c: if not end_time_ns: ready = c.advance_by(duration_ns) end_time_ns = c.latest_timestamp else: ready = c.advance_until(end_time_ns) if ready: all_done = False return not all_done def collect_vals(self, dest): for c in self.cursors: c.collect_vals(dest)
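
# Minimal usage sketch (assumes a ./data directory of *.bag files containing
# the topics below; not part of the original module):
if __name__ == '__main__':
    for bag_set in find_bagsets('./data',
                                filter_topics=CENTER_CAMERA_TOPICS + [STEERING_TOPIC]):
        group = CursorGroup(readers=bag_set.get_readers())
        vals = []
        # Advance the per-topic cursors in roughly one-second windows, keeping
        # them aligned on a common end time, and drain their buffered messages.
        while group.advance_by_until(1 * SEC_PER_NANOSEC):
            group.collect_vals(vals)
        group.collect_vals(vals)  # pick up the final partial window
        print('%s: %d messages between %s and %s' % (
            bag_set.name, len(vals),
            ns_to_str(bag_set.start_time), ns_to_str(bag_set.end_time)))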
rwightman/udacity-driving-reader
script/bagutils.py
Python
apache-2.0
12,425
# Copyright (c) 2012 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Cells Scheduler
"""

import random
import time

from oslo.config import cfg

from nova import compute
from nova.compute import vm_states
from nova.db import base
from nova import exception
from nova.openstack.common import log as logging
from nova.scheduler import rpcapi as scheduler_rpcapi

cell_scheduler_opts = [
        cfg.IntOpt('scheduler_retries',
                   default=10,
                   help='How many retries when no cells are available.'),
        cfg.IntOpt('scheduler_retry_delay',
                   default=2,
                   help='How often to retry in seconds when no cells are '
                        'available.')
]

LOG = logging.getLogger(__name__)

CONF = cfg.CONF
CONF.register_opts(cell_scheduler_opts, group='cells')


class CellsScheduler(base.Base):
    """The cells scheduler."""
    def __init__(self, msg_runner):
        super(CellsScheduler, self).__init__()
        self.msg_runner = msg_runner
        self.state_manager = msg_runner.state_manager
        self.compute_api = compute.API()
        self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI()

    def _create_instances_here(self, ctxt, request_spec):
        instance_values = request_spec['instance_properties']
        num_instances = len(request_spec['instance_uuids'])
        for i, instance_uuid in enumerate(request_spec['instance_uuids']):
            instance_values['uuid'] = instance_uuid
            instance = self.compute_api.create_db_entry_for_new_instance(
                    ctxt,
                    request_spec['instance_type'],
                    request_spec['image'],
                    instance_values,
                    request_spec['security_group'],
                    request_spec['block_device_mapping'],
                    num_instances, i)
            self.msg_runner.instance_update_at_top(ctxt, instance)

    def _get_possible_cells(self):
        cells = set(self.state_manager.get_child_cells())
        our_cell = self.state_manager.get_my_state()
        # Include our cell in the list, if we have any capacity info
        if not cells or our_cell.capacities:
            cells.add(our_cell)
        return cells

    def _run_instance(self, message, host_sched_kwargs):
        """Attempt to schedule instance(s).

        If we have no cells to try, raise exception.NoCellsAvailable
        """
        ctxt = message.ctxt
        request_spec = host_sched_kwargs['request_spec']

        # The message we might forward to a child cell
        cells = self._get_possible_cells()
        if not cells:
            raise exception.NoCellsAvailable()
        cells = list(cells)

        # Random selection for now
        random.shuffle(cells)
        target_cell = cells[0]
        LOG.debug(_("Scheduling with target_cell=%(target_cell)s"), locals())

        if target_cell.is_me:
            # Need to create instance DB entries as the host scheduler
            # expects that the instance(s) already exist.
self._create_instances_here(ctxt, request_spec) self.scheduler_rpcapi.run_instance(ctxt, **host_sched_kwargs) return self.msg_runner.schedule_run_instance(ctxt, target_cell, host_sched_kwargs) def run_instance(self, message, host_sched_kwargs): """Pick a cell where we should create a new instance.""" try: for i in xrange(max(0, CONF.cells.scheduler_retries) + 1): try: return self._run_instance(message, host_sched_kwargs) except exception.NoCellsAvailable: if i == max(0, CONF.cells.scheduler_retries): raise sleep_time = max(1, CONF.cells.scheduler_retry_delay) LOG.info(_("No cells available when scheduling. Will " "retry in %(sleep_time)s second(s)"), locals()) time.sleep(sleep_time) continue except Exception: request_spec = host_sched_kwargs['request_spec'] instance_uuids = request_spec['instance_uuids'] LOG.exception(_("Error scheduling instances %(instance_uuids)s"), locals()) ctxt = message.ctxt for instance_uuid in instance_uuids: self.msg_runner.instance_update_at_top(ctxt, {'uuid': instance_uuid, 'vm_state': vm_states.ERROR}) try: self.db.instance_update(ctxt, instance_uuid, {'vm_state': vm_states.ERROR}) except Exception: pass
sridevikoushik31/openstack
nova/cells/scheduler.py
Python
apache-2.0
5,442
# Copyright 2015 Vinicius Chiele. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Message handler step used to process a received message.
"""

from .errors import SimpleBusError
from .pipeline import PipelineStep


class InvokeHandlerStep(PipelineStep):
    id = 'InvokeHandler'

    def __init__(self, handlers):
        self.__handlers = handlers

    def execute(self, context, next_step):
        # Look up the handler registered for this message class.
        handler = self.__handlers.get(context.message_def.message_cls)

        if not handler:
            # Report the message class itself, not its metaclass.
            raise SimpleBusError(
                'No handler found to the message \'%s\'.'
                % str(context.message_def.message_cls))

        handler(context.body)

        next_step()
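
# Minimal wiring sketch (the context/message-definition shapes below are
# assumptions for illustration only, not the library's real objects):
if __name__ == '__main__':
    class _MessageDef(object):
        def __init__(self, message_cls):
            self.message_cls = message_cls

    class _Context(object):
        def __init__(self, message_cls, body):
            self.message_def = _MessageDef(message_cls)
            self.body = body

    def on_text(body):
        print('handled: %s' % body)

    step = InvokeHandlerStep({str: on_text})
    # Invokes on_text('hello'), then calls the (no-op) next pipeline step.
    step.execute(_Context(str, 'hello'), lambda: None)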
viniciuschiele/simplebus
simplebus/handlers.py
Python
apache-2.0
1,217
#!/usr/bin/python2.7

import nltk

# POS-tag every line of the English corpus and write the tagged token
# lists, one per line, to the output file.
f = open("../corpus/data.en", "r")
f2 = open("../corpus/tagged_data", "w")
lines = f.readlines()

for line in lines:
    tokens = nltk.word_tokenize(line)
    tag_list = nltk.pos_tag(tokens)
    print >> f2, tag_list

f.close()
f2.close()
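
# For reference, nltk.pos_tag returns (token, Penn Treebank tag) pairs.
# Typical output (exact tags depend on the tagger model installed):
#
#   >>> nltk.pos_tag(nltk.word_tokenize("This is a test"))
#   [('This', 'DT'), ('is', 'VBZ'), ('a', 'DT'), ('test', 'NN')]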
shyamjvs/cs626_project
python_code/pos_tag_corpus.py
Python
apache-2.0
274
"""Tests for aiohttp/protocol.py""" import asyncio import unittest import zlib from unittest import mock import pytest from multidict import CIMultiDict from yarl import URL import aiohttp from aiohttp import http_exceptions, streams from aiohttp.http_parser import (DeflateBuffer, HttpPayloadParser, HttpRequestParserPy, HttpResponseParserPy) try: import brotli except ImportError: brotli = None REQUEST_PARSERS = [HttpRequestParserPy] RESPONSE_PARSERS = [HttpResponseParserPy] try: from aiohttp import _http_parser REQUEST_PARSERS.append(_http_parser.HttpRequestParserC) RESPONSE_PARSERS.append(_http_parser.HttpResponseParserC) except ImportError: # pragma: no cover pass @pytest.fixture def protocol(): return mock.Mock() @pytest.fixture(params=REQUEST_PARSERS) def parser(loop, protocol, request): """Parser implementations""" return request.param(protocol, loop, 8190, 32768, 8190) @pytest.fixture(params=REQUEST_PARSERS) def request_cls(request): """Request Parser class""" return request.param @pytest.fixture(params=RESPONSE_PARSERS) def response(loop, protocol, request): """Parser implementations""" return request.param(protocol, loop, 8190, 32768, 8190) @pytest.fixture(params=RESPONSE_PARSERS) def response_cls(request): """Parser implementations""" return request.param def test_parse_headers(parser): text = b'''GET /test HTTP/1.1\r test: line\r continue\r test2: data\r \r ''' messages, upgrade, tail = parser.feed_data(text) assert len(messages) == 1 msg = messages[0][0] assert list(msg.headers.items()) == [('test', 'line continue'), ('test2', 'data')] assert msg.raw_headers == ((b'test', b'line continue'), (b'test2', b'data')) assert not msg.should_close assert msg.compression is None assert not msg.upgrade def test_parse(parser): text = b'GET /test HTTP/1.1\r\n\r\n' messages, upgrade, tail = parser.feed_data(text) assert len(messages) == 1 msg, _ = messages[0] assert msg.compression is None assert not msg.upgrade assert msg.method == 'GET' assert msg.path == '/test' assert msg.version == (1, 1) async def test_parse_body(parser): text = b'GET /test HTTP/1.1\r\nContent-Length: 4\r\n\r\nbody' messages, upgrade, tail = parser.feed_data(text) assert len(messages) == 1 _, payload = messages[0] body = await payload.read(4) assert body == b'body' async def test_parse_body_with_CRLF(parser): text = b'\r\nGET /test HTTP/1.1\r\nContent-Length: 4\r\n\r\nbody' messages, upgrade, tail = parser.feed_data(text) assert len(messages) == 1 _, payload = messages[0] body = await payload.read(4) assert body == b'body' def test_parse_delayed(parser): text = b'GET /test HTTP/1.1\r\n' messages, upgrade, tail = parser.feed_data(text) assert len(messages) == 0 assert not upgrade messages, upgrade, tail = parser.feed_data(b'\r\n') assert len(messages) == 1 msg = messages[0][0] assert msg.method == 'GET' def test_headers_multi_feed(parser): text1 = b'GET /test HTTP/1.1\r\n' text2 = b'test: line\r' text3 = b'\n continue\r\n\r\n' messages, upgrade, tail = parser.feed_data(text1) assert len(messages) == 0 messages, upgrade, tail = parser.feed_data(text2) assert len(messages) == 0 messages, upgrade, tail = parser.feed_data(text3) assert len(messages) == 1 msg = messages[0][0] assert list(msg.headers.items()) == [('test', 'line continue')] assert msg.raw_headers == ((b'test', b'line continue'),) assert not msg.should_close assert msg.compression is None assert not msg.upgrade def test_headers_split_field(parser): text1 = b'GET /test HTTP/1.1\r\n' text2 = b't' text3 = b'es' text4 = b't: value\r\n\r\n' messages, upgrade, tail = 
parser.feed_data(text1) messages, upgrade, tail = parser.feed_data(text2) messages, upgrade, tail = parser.feed_data(text3) assert len(messages) == 0 messages, upgrade, tail = parser.feed_data(text4) assert len(messages) == 1 msg = messages[0][0] assert list(msg.headers.items()) == [('test', 'value')] assert msg.raw_headers == ((b'test', b'value'),) assert not msg.should_close assert msg.compression is None assert not msg.upgrade def test_parse_headers_multi(parser): text = (b'GET /test HTTP/1.1\r\n' b'Set-Cookie: c1=cookie1\r\n' b'Set-Cookie: c2=cookie2\r\n\r\n') messages, upgrade, tail = parser.feed_data(text) assert len(messages) == 1 msg = messages[0][0] assert list(msg.headers.items()) == [('Set-Cookie', 'c1=cookie1'), ('Set-Cookie', 'c2=cookie2')] assert msg.raw_headers == ((b'Set-Cookie', b'c1=cookie1'), (b'Set-Cookie', b'c2=cookie2')) assert not msg.should_close assert msg.compression is None def test_conn_default_1_0(parser): text = b'GET /test HTTP/1.0\r\n\r\n' messages, upgrade, tail = parser.feed_data(text) msg = messages[0][0] assert msg.should_close def test_conn_default_1_1(parser): text = b'GET /test HTTP/1.1\r\n\r\n' messages, upgrade, tail = parser.feed_data(text) msg = messages[0][0] assert not msg.should_close def test_conn_close(parser): text = (b'GET /test HTTP/1.1\r\n' b'connection: close\r\n\r\n') messages, upgrade, tail = parser.feed_data(text) msg = messages[0][0] assert msg.should_close def test_conn_close_1_0(parser): text = (b'GET /test HTTP/1.0\r\n' b'connection: close\r\n\r\n') messages, upgrade, tail = parser.feed_data(text) msg = messages[0][0] assert msg.should_close def test_conn_keep_alive_1_0(parser): text = (b'GET /test HTTP/1.0\r\n' b'connection: keep-alive\r\n\r\n') messages, upgrade, tail = parser.feed_data(text) msg = messages[0][0] assert not msg.should_close def test_conn_keep_alive_1_1(parser): text = (b'GET /test HTTP/1.1\r\n' b'connection: keep-alive\r\n\r\n') messages, upgrade, tail = parser.feed_data(text) msg = messages[0][0] assert not msg.should_close def test_conn_other_1_0(parser): text = (b'GET /test HTTP/1.0\r\n' b'connection: test\r\n\r\n') messages, upgrade, tail = parser.feed_data(text) msg = messages[0][0] assert msg.should_close def test_conn_other_1_1(parser): text = (b'GET /test HTTP/1.1\r\n' b'connection: test\r\n\r\n') messages, upgrade, tail = parser.feed_data(text) msg = messages[0][0] assert not msg.should_close def test_request_chunked(parser): text = (b'GET /test HTTP/1.1\r\n' b'transfer-encoding: chunked\r\n\r\n') messages, upgrade, tail = parser.feed_data(text) msg, payload = messages[0] assert msg.chunked assert not upgrade assert isinstance(payload, streams.FlowControlStreamReader) def test_conn_upgrade(parser): text = (b'GET /test HTTP/1.1\r\n' b'connection: upgrade\r\n' b'upgrade: websocket\r\n\r\n') messages, upgrade, tail = parser.feed_data(text) msg = messages[0][0] assert not msg.should_close assert msg.upgrade assert upgrade def test_compression_empty(parser): text = (b'GET /test HTTP/1.1\r\n' b'content-encoding: \r\n\r\n') messages, upgrade, tail = parser.feed_data(text) msg = messages[0][0] assert msg.compression is None def test_compression_deflate(parser): text = (b'GET /test HTTP/1.1\r\n' b'content-encoding: deflate\r\n\r\n') messages, upgrade, tail = parser.feed_data(text) msg = messages[0][0] assert msg.compression == 'deflate' def test_compression_gzip(parser): text = (b'GET /test HTTP/1.1\r\n' b'content-encoding: gzip\r\n\r\n') messages, upgrade, tail = parser.feed_data(text) msg = messages[0][0] 
assert msg.compression == 'gzip' @pytest.mark.skipif(brotli is None, reason="brotli is not installed") def test_compression_brotli(parser): text = (b'GET /test HTTP/1.1\r\n' b'content-encoding: br\r\n\r\n') messages, upgrade, tail = parser.feed_data(text) msg = messages[0][0] assert msg.compression == 'br' def test_compression_unknown(parser): text = (b'GET /test HTTP/1.1\r\n' b'content-encoding: compress\r\n\r\n') messages, upgrade, tail = parser.feed_data(text) msg = messages[0][0] assert msg.compression is None def test_headers_connect(parser): text = (b'CONNECT www.google.com HTTP/1.1\r\n' b'content-length: 0\r\n\r\n') messages, upgrade, tail = parser.feed_data(text) msg, payload = messages[0] assert upgrade assert isinstance(payload, streams.FlowControlStreamReader) def test_headers_old_websocket_key1(parser): text = (b'GET /test HTTP/1.1\r\n' b'SEC-WEBSOCKET-KEY1: line\r\n\r\n') with pytest.raises(http_exceptions.BadHttpMessage): parser.feed_data(text) def test_headers_content_length_err_1(parser): text = (b'GET /test HTTP/1.1\r\n' b'content-length: line\r\n\r\n') with pytest.raises(http_exceptions.BadHttpMessage): parser.feed_data(text) def test_headers_content_length_err_2(parser): text = (b'GET /test HTTP/1.1\r\n' b'content-length: -1\r\n\r\n') with pytest.raises(http_exceptions.BadHttpMessage): parser.feed_data(text) def test_invalid_header(parser): text = (b'GET /test HTTP/1.1\r\n' b'test line\r\n\r\n') with pytest.raises(http_exceptions.BadHttpMessage): parser.feed_data(text) def test_invalid_name(parser): text = (b'GET /test HTTP/1.1\r\n' b'test[]: line\r\n\r\n') with pytest.raises(http_exceptions.BadHttpMessage): parser.feed_data(text) def test_max_header_field_size(parser): name = b'test' * 10 * 1024 text = (b'GET /test HTTP/1.1\r\n' + name + b':data\r\n\r\n') with pytest.raises(http_exceptions.LineTooLong): parser.feed_data(text) def test_max_header_value_size(parser): name = b'test' * 10 * 1024 text = (b'GET /test HTTP/1.1\r\n' b'data:' + name + b'\r\n\r\n') with pytest.raises(http_exceptions.LineTooLong): parser.feed_data(text) def test_max_header_value_size_continuation(parser): name = b'test' * 10 * 1024 text = (b'GET /test HTTP/1.1\r\n' b'data: test\r\n ' + name + b'\r\n\r\n') with pytest.raises(http_exceptions.LineTooLong): parser.feed_data(text) def test_http_request_parser(parser): text = b'GET /path HTTP/1.1\r\n\r\n' messages, upgrade, tail = parser.feed_data(text) msg = messages[0][0] assert msg == ('GET', '/path', (1, 1), CIMultiDict(), (), False, None, False, False, URL('/path')) def test_http_request_bad_status_line(parser): text = b'getpath \r\n\r\n' with pytest.raises(http_exceptions.BadStatusLine): parser.feed_data(text) def test_http_request_upgrade(parser): text = (b'GET /test HTTP/1.1\r\n' b'connection: upgrade\r\n' b'upgrade: websocket\r\n\r\n' b'some raw data') messages, upgrade, tail = parser.feed_data(text) msg = messages[0][0] assert not msg.should_close assert msg.upgrade assert upgrade assert tail == b'some raw data' def test_http_request_parser_utf8(parser): text = 'GET /path HTTP/1.1\r\nx-test:тест\r\n\r\n'.encode('utf-8') messages, upgrade, tail = parser.feed_data(text) msg = messages[0][0] assert msg == ('GET', '/path', (1, 1), CIMultiDict([('X-TEST', 'тест')]), ((b'x-test', 'тест'.encode('utf-8')),), False, None, False, False, URL('/path')) def test_http_request_parser_non_utf8(parser): text = 'GET /path HTTP/1.1\r\nx-test:тест\r\n\r\n'.encode('cp1251') msg = parser.feed_data(text)[0][0][0] assert msg == ('GET', '/path', (1, 1), 
CIMultiDict([('X-TEST', 'тест'.encode('cp1251').decode( 'utf-8', 'surrogateescape'))]), ((b'x-test', 'тест'.encode('cp1251')),), False, None, False, False, URL('/path')) def test_http_request_parser_two_slashes(parser): text = b'GET //path HTTP/1.1\r\n\r\n' msg = parser.feed_data(text)[0][0][0] assert msg[:-1] == ('GET', '//path', (1, 1), CIMultiDict(), (), False, None, False, False) def test_http_request_parser_bad_method(parser): with pytest.raises(http_exceptions.BadStatusLine): parser.feed_data(b'!12%()+=~$ /get HTTP/1.1\r\n\r\n') def test_http_request_parser_bad_version(parser): with pytest.raises(http_exceptions.BadHttpMessage): parser.feed_data(b'GET //get HT/11\r\n\r\n') def test_http_request_max_status_line(parser): with pytest.raises(http_exceptions.LineTooLong): parser.feed_data( b'GET /path' + b'test' * 10 * 1024 + b' HTTP/1.1\r\n\r\n') def test_http_response_parser_utf8(response): text = 'HTTP/1.1 200 Ok\r\nx-test:тест\r\n\r\n'.encode('utf-8') messages, upgraded, tail = response.feed_data(text) assert len(messages) == 1 msg = messages[0][0] assert msg.version == (1, 1) assert msg.code == 200 assert msg.reason == 'Ok' assert msg.headers == CIMultiDict([('X-TEST', 'тест')]) assert msg.raw_headers == ((b'x-test', 'тест'.encode('utf-8')),) assert not upgraded assert not tail def test_http_response_parser_bad_status_line_too_long(response): with pytest.raises(http_exceptions.LineTooLong): response.feed_data( b'HTTP/1.1 200 Ok' + b'test' * 10 * 1024 + b'\r\n\r\n') def test_http_response_parser_bad_version(response): with pytest.raises(http_exceptions.BadHttpMessage): response.feed_data(b'HT/11 200 Ok\r\n\r\n') def test_http_response_parser_no_reason(response): msg = response.feed_data(b'HTTP/1.1 200\r\n\r\n')[0][0][0] assert msg.version == (1, 1) assert msg.code == 200 assert not msg.reason def test_http_response_parser_bad(response): with pytest.raises(http_exceptions.BadHttpMessage): response.feed_data(b'HTT/1\r\n\r\n') def test_http_response_parser_code_under_100(response): msg = response.feed_data(b'HTTP/1.1 99 test\r\n\r\n')[0][0][0] assert msg.code == 99 def test_http_response_parser_code_above_999(response): with pytest.raises(http_exceptions.BadHttpMessage): response.feed_data(b'HTTP/1.1 9999 test\r\n\r\n') def test_http_response_parser_code_not_int(response): with pytest.raises(http_exceptions.BadHttpMessage): response.feed_data(b'HTTP/1.1 ttt test\r\n\r\n') def test_http_request_chunked_payload(parser): text = (b'GET /test HTTP/1.1\r\n' b'transfer-encoding: chunked\r\n\r\n') msg, payload = parser.feed_data(text)[0][0] assert msg.chunked assert not payload.is_eof() assert isinstance(payload, streams.FlowControlStreamReader) parser.feed_data(b'4\r\ndata\r\n4\r\nline\r\n0\r\n\r\n') assert b'dataline' == b''.join(d for d in payload._buffer) assert [4, 8] == payload._http_chunk_splits assert payload.is_eof() def test_http_request_chunked_payload_and_next_message(parser): text = (b'GET /test HTTP/1.1\r\n' b'transfer-encoding: chunked\r\n\r\n') msg, payload = parser.feed_data(text)[0][0] messages, upgraded, tail = parser.feed_data( b'4\r\ndata\r\n4\r\nline\r\n0\r\n\r\n' b'POST /test2 HTTP/1.1\r\n' b'transfer-encoding: chunked\r\n\r\n') assert b'dataline' == b''.join(d for d in payload._buffer) assert [4, 8] == payload._http_chunk_splits assert payload.is_eof() assert len(messages) == 1 msg2, payload2 = messages[0] assert msg2.method == 'POST' assert msg2.chunked assert not payload2.is_eof() def test_http_request_chunked_payload_chunks(parser): text = (b'GET /test HTTP/1.1\r\n' 
b'transfer-encoding: chunked\r\n\r\n') msg, payload = parser.feed_data(text)[0][0] parser.feed_data(b'4\r\ndata\r') parser.feed_data(b'\n4') parser.feed_data(b'\r') parser.feed_data(b'\n') parser.feed_data(b'li') parser.feed_data(b'ne\r\n0\r\n') parser.feed_data(b'test: test\r\n') assert b'dataline' == b''.join(d for d in payload._buffer) assert [4, 8] == payload._http_chunk_splits assert not payload.is_eof() parser.feed_data(b'\r\n') assert b'dataline' == b''.join(d for d in payload._buffer) assert [4, 8] == payload._http_chunk_splits assert payload.is_eof() def test_parse_chunked_payload_chunk_extension(parser): text = (b'GET /test HTTP/1.1\r\n' b'transfer-encoding: chunked\r\n\r\n') msg, payload = parser.feed_data(text)[0][0] parser.feed_data( b'4;test\r\ndata\r\n4\r\nline\r\n0\r\ntest: test\r\n\r\n') assert b'dataline' == b''.join(d for d in payload._buffer) assert [4, 8] == payload._http_chunk_splits assert payload.is_eof() def _test_parse_no_length_or_te_on_post(loop, protocol, request_cls): parser = request_cls(protocol, loop, readall=True) text = b'POST /test HTTP/1.1\r\n\r\n' msg, payload = parser.feed_data(text)[0][0] assert payload.is_eof() def test_parse_payload_response_without_body(loop, protocol, response_cls): parser = response_cls(protocol, loop, response_with_body=False) text = (b'HTTP/1.1 200 Ok\r\n' b'content-length: 10\r\n\r\n') msg, payload = parser.feed_data(text)[0][0] assert payload.is_eof() def test_parse_length_payload(response): text = (b'HTTP/1.1 200 Ok\r\n' b'content-length: 4\r\n\r\n') msg, payload = response.feed_data(text)[0][0] assert not payload.is_eof() response.feed_data(b'da') response.feed_data(b't') response.feed_data(b'aHT') assert payload.is_eof() assert b'data' == b''.join(d for d in payload._buffer) def test_parse_no_length_payload(parser): text = b'PUT / HTTP/1.1\r\n\r\n' msg, payload = parser.feed_data(text)[0][0] assert payload.is_eof() def test_partial_url(parser): messages, upgrade, tail = parser.feed_data(b'GET /te') assert len(messages) == 0 messages, upgrade, tail = parser.feed_data(b'st HTTP/1.1\r\n\r\n') assert len(messages) == 1 msg, payload = messages[0] assert msg.method == 'GET' assert msg.path == '/test' assert msg.version == (1, 1) assert payload.is_eof() def test_url_parse_non_strict_mode(parser): payload = 'GET /test/тест HTTP/1.1\r\n\r\n'.encode('utf-8') messages, upgrade, tail = parser.feed_data(payload) assert len(messages) == 1 msg, payload = messages[0] assert msg.method == 'GET' assert msg.path == '/test/тест' assert msg.version == (1, 1) assert payload.is_eof() class TestParsePayload(unittest.TestCase): def setUp(self): self.stream = mock.Mock() asyncio.set_event_loop(None) def test_parse_eof_payload(self): out = aiohttp.FlowControlDataQueue(self.stream) p = HttpPayloadParser(out, readall=True) p.feed_data(b'data') p.feed_eof() self.assertTrue(out.is_eof()) self.assertEqual([(bytearray(b'data'), 4)], list(out._buffer)) def test_parse_no_body(self): out = aiohttp.FlowControlDataQueue(self.stream) p = HttpPayloadParser(out, method='PUT') self.assertTrue(out.is_eof()) self.assertTrue(p.done) def test_parse_length_payload_eof(self): out = aiohttp.FlowControlDataQueue(self.stream) p = HttpPayloadParser(out, length=4) p.feed_data(b'da') with pytest.raises(http_exceptions.ContentLengthError): p.feed_eof() def test_parse_chunked_payload_size_error(self): out = aiohttp.FlowControlDataQueue(self.stream) p = HttpPayloadParser(out, chunked=True) self.assertRaises( http_exceptions.TransferEncodingError, p.feed_data, b'blah\r\n') 
self.assertIsInstance( out.exception(), http_exceptions.TransferEncodingError) def test_http_payload_parser_length(self): out = aiohttp.FlowControlDataQueue(self.stream) p = HttpPayloadParser(out, length=2) eof, tail = p.feed_data(b'1245') self.assertTrue(eof) self.assertEqual(b'12', b''.join(d for d, _ in out._buffer)) self.assertEqual(b'45', tail) _comp = zlib.compressobj(wbits=-zlib.MAX_WBITS) _COMPRESSED = b''.join([_comp.compress(b'data'), _comp.flush()]) def test_http_payload_parser_deflate(self): length = len(self._COMPRESSED) out = aiohttp.FlowControlDataQueue(self.stream) p = HttpPayloadParser( out, length=length, compression='deflate') p.feed_data(self._COMPRESSED) self.assertEqual(b'data', b''.join(d for d, _ in out._buffer)) self.assertTrue(out.is_eof()) def test_http_payload_parser_deflate_no_wbits(self): comp = zlib.compressobj() COMPRESSED = b''.join([comp.compress(b'data'), comp.flush()]) length = len(COMPRESSED) out = aiohttp.FlowControlDataQueue(self.stream) p = HttpPayloadParser( out, length=length, compression='deflate') p.feed_data(COMPRESSED) self.assertEqual(b'data', b''.join(d for d, _ in out._buffer)) self.assertTrue(out.is_eof()) def test_http_payload_parser_length_zero(self): out = aiohttp.FlowControlDataQueue(self.stream) p = HttpPayloadParser(out, length=0) self.assertTrue(p.done) self.assertTrue(out.is_eof()) @pytest.mark.skipif(brotli is None, reason="brotli is not installed") def test_http_payload_brotli(self): compressed = brotli.compress(b'brotli data') out = aiohttp.FlowControlDataQueue(self.stream) p = HttpPayloadParser( out, length=len(compressed), compression='br') p.feed_data(compressed) self.assertEqual(b'brotli data', b''.join(d for d, _ in out._buffer)) self.assertTrue(out.is_eof()) class TestDeflateBuffer(unittest.TestCase): def setUp(self): self.stream = mock.Mock() asyncio.set_event_loop(None) def test_feed_data(self): buf = aiohttp.FlowControlDataQueue(self.stream) dbuf = DeflateBuffer(buf, 'deflate') dbuf.decompressor = mock.Mock() dbuf.decompressor.decompress.return_value = b'line' dbuf.feed_data(b'data', 4) self.assertEqual([b'line'], list(d for d, _ in buf._buffer)) def test_feed_data_err(self): buf = aiohttp.FlowControlDataQueue(self.stream) dbuf = DeflateBuffer(buf, 'deflate') exc = ValueError() dbuf.decompressor = mock.Mock() dbuf.decompressor.decompress.side_effect = exc self.assertRaises( http_exceptions.ContentEncodingError, dbuf.feed_data, b'data', 4) def test_feed_eof(self): buf = aiohttp.FlowControlDataQueue(self.stream) dbuf = DeflateBuffer(buf, 'deflate') dbuf.decompressor = mock.Mock() dbuf.decompressor.flush.return_value = b'line' dbuf.feed_eof() self.assertEqual([b'line'], list(d for d, _ in buf._buffer)) self.assertTrue(buf._eof) def test_feed_eof_err(self): buf = aiohttp.FlowControlDataQueue(self.stream) dbuf = DeflateBuffer(buf, 'deflate') dbuf.decompressor = mock.Mock() dbuf.decompressor.flush.return_value = b'line' dbuf.decompressor.eof = False self.assertRaises(http_exceptions.ContentEncodingError, dbuf.feed_eof) def test_empty_body(self): buf = aiohttp.FlowControlDataQueue(self.stream) dbuf = DeflateBuffer(buf, 'deflate') dbuf.feed_eof() self.assertTrue(buf.at_eof())
playpauseandstop/aiohttp
tests/test_http_parser.py
Python
apache-2.0
24,179
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import six

from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc

from ironic_tempest_plugin.common import waiters
from ironic_tempest_plugin.tests.api.admin import api_microversion_fixture
from ironic_tempest_plugin.tests.api.admin import base

CONF = config.CONF


class TestNodes(base.BaseBaremetalTest):
    """Tests for baremetal nodes."""

    def setUp(self):
        super(TestNodes, self).setUp()
        _, self.chassis = self.create_chassis()
        _, self.node = self.create_node(self.chassis['uuid'])

    def _associate_node_with_instance(self):
        self.client.set_node_power_state(self.node['uuid'], 'power off')
        waiters.wait_for_bm_node_status(self.client, self.node['uuid'],
                                        'power_state', 'power off')
        instance_uuid = data_utils.rand_uuid()
        self.client.update_node(self.node['uuid'],
                                instance_uuid=instance_uuid)
        self.addCleanup(self.client.update_node,
                        uuid=self.node['uuid'], instance_uuid=None)
        return instance_uuid

    @decorators.idempotent_id('4e939eb2-8a69-4e84-8652-6fffcbc9db8f')
    def test_create_node(self):
        params = {'cpu_arch': 'x86_64',
                  'cpus': '12',
                  'local_gb': '10',
                  'memory_mb': '1024'}
        _, body = self.create_node(self.chassis['uuid'], **params)
        self._assertExpected(params, body['properties'])

    @decorators.idempotent_id('9ade60a4-505e-4259-9ec4-71352cbbaf47')
    def test_delete_node(self):
        _, node = self.create_node(self.chassis['uuid'])
        self.delete_node(node['uuid'])
        self.assertRaises(lib_exc.NotFound, self.client.show_node,
                          node['uuid'])

    @decorators.idempotent_id('55451300-057c-4ecf-8255-ba42a83d3a03')
    def test_show_node(self):
        _, loaded_node = self.client.show_node(self.node['uuid'])
        self._assertExpected(self.node, loaded_node)

    @decorators.idempotent_id('4ca123c4-160d-4d8d-a3f7-15feda812263')
    def test_list_nodes(self):
        _, body = self.client.list_nodes()
        self.assertIn(self.node['uuid'],
                      [i['uuid'] for i in body['nodes']])

    @decorators.idempotent_id('85b1f6e0-57fd-424c-aeff-c3422920556f')
    def test_list_nodes_association(self):
        _, body = self.client.list_nodes(associated=True)
        self.assertNotIn(self.node['uuid'],
                         [n['uuid'] for n in body['nodes']])

        self._associate_node_with_instance()

        _, body = self.client.list_nodes(associated=True)
        self.assertIn(self.node['uuid'], [n['uuid'] for n in body['nodes']])

        _, body = self.client.list_nodes(associated=False)
        self.assertNotIn(self.node['uuid'],
                         [n['uuid'] for n in body['nodes']])

    @decorators.idempotent_id('18c4ebd8-f83a-4df7-9653-9fb33a329730')
    def test_node_port_list(self):
        _, port = self.create_port(self.node['uuid'],
                                   data_utils.rand_mac_address())
        _, body = self.client.list_node_ports(self.node['uuid'])
        self.assertIn(port['uuid'],
                      [p['uuid'] for p in body['ports']])

    @decorators.idempotent_id('72591acb-f215-49db-8395-710d14eb86ab')
    def test_node_port_list_no_ports(self):
        _, node = self.create_node(self.chassis['uuid'])
        _, body = self.client.list_node_ports(node['uuid'])
        self.assertEmpty(body['ports'])

    @decorators.idempotent_id('4fed270a-677a-4d19-be87-fd38ae490320')
    def test_update_node(self):
        props = {'cpu_arch': 'x86_64',
                 'cpus': '12',
                 'local_gb': '10',
                 'memory_mb': '128'}
        _, node = self.create_node(self.chassis['uuid'], **props)

        new_p = {'cpu_arch': 'x86',
                 'cpus': '1',
                 'local_gb': '10000',
                 'memory_mb': '12300'}

        _, body = self.client.update_node(node['uuid'], properties=new_p)
        _, node = self.client.show_node(node['uuid'])
        self._assertExpected(new_p, node['properties'])

    @decorators.idempotent_id('cbf1f515-5f4b-4e49-945c-86bcaccfeb1d')
    def test_validate_driver_interface(self):
        _, body = self.client.validate_driver_interface(self.node['uuid'])
        core_interfaces = ['power', 'deploy']
        for interface in core_interfaces:
            self.assertIn(interface, body)

    @decorators.idempotent_id('5519371c-26a2-46e9-aa1a-f74226e9d71f')
    def test_set_node_boot_device(self):
        self.client.set_node_boot_device(self.node['uuid'], 'pxe')

    @decorators.idempotent_id('9ea73775-f578-40b9-bc34-efc639c4f21f')
    def test_get_node_boot_device(self):
        body = self.client.get_node_boot_device(self.node['uuid'])
        self.assertIn('boot_device', body)
        self.assertIn('persistent', body)
        self.assertIsInstance(body['boot_device'], six.string_types)
        self.assertIsInstance(body['persistent'], bool)

    @decorators.idempotent_id('3622bc6f-3589-4bc2-89f3-50419c66b133')
    def test_get_node_supported_boot_devices(self):
        body = self.client.get_node_supported_boot_devices(self.node['uuid'])
        self.assertIn('supported_boot_devices', body)
        self.assertIsInstance(body['supported_boot_devices'], list)

    @decorators.idempotent_id('f63b6288-1137-4426-8cfe-0d5b7eb87c06')
    def test_get_console(self):
        _, body = self.client.get_console(self.node['uuid'])
        con_info = ['console_enabled', 'console_info']
        for key in con_info:
            self.assertIn(key, body)

    @decorators.idempotent_id('80504575-9b21-4670-92d1-143b948f9437')
    def test_set_console_mode(self):
        self.client.set_console_mode(self.node['uuid'], True)
        waiters.wait_for_bm_node_status(self.client, self.node['uuid'],
                                        'console_enabled', True)

    @decorators.idempotent_id('b02a4f38-5e8b-44b2-aed2-a69a36ecfd69')
    def test_get_node_by_instance_uuid(self):
        instance_uuid = self._associate_node_with_instance()
        _, body = self.client.show_node_by_instance_uuid(instance_uuid)
        self.assertEqual(1, len(body['nodes']))
        self.assertIn(self.node['uuid'], [n['uuid'] for n in body['nodes']])


class TestNodesVif(base.BaseBaremetalTest):

    min_microversion = '1.28'

    @classmethod
    def skip_checks(cls):
        super(TestNodesVif, cls).skip_checks()
        if not CONF.service_available.neutron:
            raise cls.skipException('Neutron is not enabled.')

    def setUp(self):
        super(TestNodesVif, self).setUp()
        _, self.chassis = self.create_chassis()
        _, self.node = self.create_node(self.chassis['uuid'])

    @decorators.idempotent_id('a3d319d0-cacb-4e55-a3dc-3fa8b74880f1')
    def test_vif_on_port(self):
        """Test attachment and detachment of VIFs on the node with port.

        Test steps:
        1) Create chassis and node in setUp.
        2) Create port for the node.
        3) Attach VIF to the node.
        4) Check VIF info in VIFs list and port internal_info.
        5) Detach VIF from the node.
        6) Check that no more VIF info in VIFs list and port internal_info.
        """
        self.useFixture(
            api_microversion_fixture.APIMicroversionFixture('1.28'))
        _, self.port = self.create_port(self.node['uuid'],
                                        data_utils.rand_mac_address())
        self.client.vif_attach(self.node['uuid'], 'test-vif')
        _, body = self.client.vif_list(self.node['uuid'])
        self.assertEqual({'vifs': [{'id': 'test-vif'}]}, body)
        _, port = self.client.show_port(self.port['uuid'])
        self.assertEqual('test-vif',
                         port['internal_info']['tenant_vif_port_id'])
        self.client.vif_detach(self.node['uuid'], 'test-vif')
        _, body = self.client.vif_list(self.node['uuid'])
        self.assertEqual({'vifs': []}, body)
        _, port = self.client.show_port(self.port['uuid'])
        self.assertNotIn('tenant_vif_port_id', port['internal_info'])

    @decorators.idempotent_id('95279515-7d0a-4f5f-987f-93e36aae5585')
    def test_vif_on_portgroup(self):
        """Test attachment and detachment of VIFs on the node with port group.

        Test steps:
        1) Create chassis and node in setUp.
        2) Create port for the node.
        3) Create port group for the node.
        4) Plug port into port group.
        5) Attach VIF to the node.
        6) Check VIF info in VIFs list and port group internal_info,
           but not in port internal_info.
        7) Detach VIF from the node.
        8) Check that no VIF info in VIFs list and port group internal_info.
        """
        self.useFixture(
            api_microversion_fixture.APIMicroversionFixture('1.28'))
        _, self.port = self.create_port(self.node['uuid'],
                                        data_utils.rand_mac_address())
        _, self.portgroup = self.create_portgroup(
            self.node['uuid'], address=data_utils.rand_mac_address())

        patch = [{'path': '/portgroup_uuid',
                  'op': 'add',
                  'value': self.portgroup['uuid']}]
        self.client.update_port(self.port['uuid'], patch)

        self.client.vif_attach(self.node['uuid'], 'test-vif')
        _, body = self.client.vif_list(self.node['uuid'])
        self.assertEqual({'vifs': [{'id': 'test-vif'}]}, body)

        _, port = self.client.show_port(self.port['uuid'])
        self.assertNotIn('tenant_vif_port_id', port['internal_info'])
        _, portgroup = self.client.show_portgroup(self.portgroup['uuid'])
        self.assertEqual('test-vif',
                         portgroup['internal_info']['tenant_vif_port_id'])

        self.client.vif_detach(self.node['uuid'], 'test-vif')
        _, body = self.client.vif_list(self.node['uuid'])
        self.assertEqual({'vifs': []}, body)
        _, portgroup = self.client.show_portgroup(self.portgroup['uuid'])
        self.assertNotIn('tenant_vif_port_id', portgroup['internal_info'])

    @decorators.idempotent_id('a3d319d0-cacb-4e55-a3dc-3fa8b74880f2')
    def test_vif_already_set_on_extra(self):
        self.useFixture(
            api_microversion_fixture.APIMicroversionFixture('1.28'))
        _, self.port = self.create_port(self.node['uuid'],
                                        data_utils.rand_mac_address())
        patch = [{'path': '/extra/vif_port_id',
                  'op': 'add',
                  'value': 'test-vif'}]
        self.client.update_port(self.port['uuid'], patch)

        _, body = self.client.vif_list(self.node['uuid'])
        self.assertEqual({'vifs': [{'id': 'test-vif'}]}, body)

        self.assertRaises(lib_exc.Conflict, self.client.vif_attach,
                          self.node['uuid'], 'test-vif')

        self.client.vif_detach(self.node['uuid'], 'test-vif')
NaohiroTamura/ironic
ironic_tempest_plugin/tests/api/admin/test_nodes.py
Python
apache-2.0
11,622
import Tkinter as tk
import Tkinter as tk2
import code as c


def g1():
    g1 = tk.Tk()
    A = tk.Button(g1, text="About", command=c.b1)
    B = tk.Button(g1, text="Date", command=c.b2)
    C = tk.Button(g1, text="Time", command=c.b3)
    D = tk.Button(g1, text="CountSeconds", command=c.b4)
    A.pack()
    B.pack()
    C.pack()
    D.pack()
    g1.mainloop()


def g2():
    g2 = tk2.Tk()
    A = tk2.Button(g2, text="OSInfo", command=c.b5)
    B = tk2.Button(g2, text="Ghost Game", command=c.b6)
    A.pack()
    B.pack()
    g2.mainloop()
Prouser123/python-tk
py/gui.py
Python
apache-2.0
514
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import atexit
import getpass
import logging
import os
import socket
import subprocess
import threading
import time

from nose.tools import assert_equal, assert_true

from desktop.lib.paths import get_run_root
from hadoop import pseudo_hdfs4
from liboozie.oozie_api import get_oozie
from liboozie.conf import OOZIE_URL


_oozie_lock = threading.Lock()

LOG = logging.getLogger(__name__)


class OozieServerProvider(object):
  """
  Set up an Oozie server.
  """
  OOZIE_TEST_PORT = '18080'
  OOZIE_HOME = get_run_root('ext/oozie/oozie')

  requires_hadoop = True
  is_oozie_running = False

  @classmethod
  def setup_class(cls):
    cls.cluster = pseudo_hdfs4.shared_cluster()
    cls.oozie, callback = cls._get_shared_oozie_server()
    cls.shutdown = [callback]

  @classmethod
  def wait_until_completion(cls, oozie_jobid, timeout=300.0, step=5):
    job = cls.oozie.get_job(oozie_jobid)
    start = time.time()

    while job.is_running() and time.time() - start < timeout:
      time.sleep(step)
      LOG.info('Checking status of %s...' % oozie_jobid)
      job = cls.oozie.get_job(oozie_jobid)
      LOG.info('[%d] Status after %d: %s' % (time.time(), time.time() - start, job))

    logs = cls.oozie.get_job_log(oozie_jobid)

    if job.is_running():
      msg = "[%d] %s took more than %d to complete: %s" % (time.time(), oozie_jobid, timeout, logs)
      LOG.info(msg)
      raise Exception(msg)
    else:
      LOG.info('[%d] Job %s took %d: %s' % (time.time(), job.id, time.time() - start, logs))

    return job

  @classmethod
  def _start_oozie(cls, cluster):
    """
    Start oozie process.
    """
    args = [OozieServerProvider.OOZIE_HOME + '/bin/oozied.sh', 'run']
    env = os.environ
    env['OOZIE_HTTP_PORT'] = OozieServerProvider.OOZIE_TEST_PORT
    conf_dir = os.path.join(cluster.log_dir, 'oozie')
    os.mkdir(conf_dir)
    env['OOZIE_LOG'] = conf_dir

    LOG.info("Executing %s, env %s, cwd %s" % (repr(args), repr(env), cluster._tmpdir))
    process = subprocess.Popen(args=args, env=env, cwd=cluster._tmpdir, stdin=subprocess.PIPE)
    return process

  @classmethod
  def _reset_oozie(cls):
    env = os.environ

    args = ['rm', '-r', OozieServerProvider.OOZIE_HOME + '/data/oozie-db']
    LOG.info("Executing %s, env %s" % (args, env))
    subprocess.call(args, env=env)

    args = [OozieServerProvider.OOZIE_HOME + '/bin/ooziedb.sh', 'create', '-sqlfile', 'oozie.sql', '-run']
    LOG.info("Executing %s, env %s" % (args, env))
    subprocess.call(args, env=env)

  @classmethod
  def _setup_sharelib(cls):
    # At some point could reuse:
    # oozie-setup.sh sharelib create -fs FS_URI
    LOG.info("Copying Oozie sharelib")
    user_home = cls.cluster.fs.do_as_user(getpass.getuser(), cls.cluster.fs.get_home_dir)
    oozie_share_lib = user_home + '/share'

    cls.cluster.fs.do_as_user(getpass.getuser(), cls.cluster.fs.create_home_dir)
    cls.cluster.fs.do_as_user(getpass.getuser(), cls.cluster.fs.copyFromLocal,
                              OozieServerProvider.OOZIE_HOME + '/share', oozie_share_lib)
    LOG.info("Oozie sharelib copied to %s" % oozie_share_lib)

  @classmethod
  def _get_shared_oozie_server(cls):
    callback = lambda: None

    _oozie_lock.acquire()

    if not OozieServerProvider.is_oozie_running:
      LOG.info('\nStarting a Mini Oozie. Requires "tools/jenkins/jenkins.sh" to be previously ran.\n')
      LOG.info('See https://issues.cloudera.org/browse/HUE-861\n')

      finish = (
        OOZIE_URL.set_for_testing("http://%s:%s/oozie" % (socket.getfqdn(), OozieServerProvider.OOZIE_TEST_PORT)),
      )

      # Setup
      cluster = pseudo_hdfs4.shared_cluster()
      cls._setup_sharelib()
      cls._reset_oozie()

      p = cls._start_oozie(cluster)

      def kill():
        LOG.info("Killing Oozie server (pid %d)." % p.pid)
        os.kill(p.pid, 9)
        p.wait()
      atexit.register(kill)

      start = time.time()
      started = False
      sleep = 0.01

      while not started and time.time() - start < 30.0:
        status = None
        try:
          LOG.info('Check Oozie status...')
          status = get_oozie().get_oozie_status()
          if status['systemMode'] == 'NORMAL':
            started = True
            break
          time.sleep(sleep)
          sleep *= 2
        except Exception, e:
          LOG.info('Oozie server status not NORMAL yet: %s - %s' % (status, e))
          time.sleep(sleep)
          sleep *= 2
          pass

      if not started:
        raise Exception("Oozie server took too long to come up.")

      OozieServerProvider.is_oozie_running = True

      def shutdown():
        for f in finish:
          f()
        cluster.stop()
      callback = shutdown

    _oozie_lock.release()

    return get_oozie(), callback


class TestMiniOozie(OozieServerProvider):

  def test_oozie_status(self):
    assert_equal(get_oozie().get_oozie_status()['systemMode'], 'NORMAL')
    assert_true(self.cluster.fs.exists('/user/%(user)s/share/lib' % {'user': getpass.getuser()}))
2013Commons/HUE-SHARK
desktop/libs/liboozie/src/liboozie/oozie_api_test.py
Python
apache-2.0
5,769
#!/usr/bin/env python
#
# Copyright 2016 timercrack
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from abc import ABCMeta
from functools import wraps


def param_function(**out_kwargs):
    def _rest_handler(func):
        @wraps(func)
        def wrapper(self, *args, **kwargs):
            # was `func(self, *args, *kwargs)`, which unpacked the kwargs
            # dict as positional keys; forward the keyword arguments instead
            return func(self, *args, **kwargs)
        for key, value in out_kwargs.items():
            setattr(wrapper, 'arg_{}'.format(key), value)
        setattr(wrapper, 'is_module_function', True)
        return wrapper
    return _rest_handler


class ParamFunctionContainer(object, metaclass=ABCMeta):
    def __init__(self):
        self.module_arg_dict = dict()
        self._collect_all()

    def _collect_all(self):
        for fun_name in dir(self):
            fun = getattr(self, fun_name)
            if hasattr(fun, 'is_module_function'):
                params = dict()
                for arg in dir(fun):
                    if arg.startswith('arg_'):
                        params[arg[4:]] = getattr(fun, arg)
                self.module_arg_dict[fun_name] = params
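
# --- Illustrative usage sketch (not part of the original module) ---
# Shows how the two pieces fit together: `param_function` tags a method
# with `is_module_function` plus per-function metadata, and
# `ParamFunctionContainer._collect_all` gathers that metadata into
# `module_arg_dict`. The class, method, and argument names below are
# hypothetical, not taken from pydatacoll.
if __name__ == '__main__':
    class MyHandlers(ParamFunctionContainer):
        @param_function(url='/ping', method='GET')
        def ping(self):
            return 'pong'

    handlers = MyHandlers()
    # expected: {'ping': {'method': 'GET', 'url': '/ping'}}
    print(handlers.module_arg_dict)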
timercrack/pydatacoll
pydatacoll/utils/func_container.py
Python
apache-2.0
1,572
import logging

from coursedashboards.util.retry import retry
from uw_sws.registration import get_active_registrations_by_section
from restclients_core.exceptions import DataFailureException
from urllib3.exceptions import MaxRetryError

logger = logging.getLogger(__name__)


@retry(MaxRetryError, tries=5, delay=3, logger=logger)
def get_active_registrations_for_section(section):
    try:
        return get_active_registrations_by_section(section)
    except DataFailureException as ex:
        if ex.status == 404:
            logger.info(" {}".format(ex))
        else:
            logger.error(" {}".format(ex))

    return []
uw-it-aca/course-dashboards
coursedashboards/dao/registration.py
Python
apache-2.0
633
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from .horovod_ray_runner import HorovodRayRunner
intel-analytics/analytics-zoo
pyzoo/zoo/orca/learn/horovod/__init__.py
Python
apache-2.0
640
import pika

auth = 'bunny:bunny'
mq_credentials = pika.credentials.PlainCredentials(*auth.split(':'))
params = pika.ConnectionParameters(
    host='bunny-vayu.sbgenomics.com',
    credentials=mq_credentials,
    virtual_host='bunny')

fp = open('sample/app.json', 'r')
sample_json = fp.read()

connection = pika.BlockingConnection(params)
channel = connection.channel()
channel.exchange_declare(exchange='backend_exchange_d1abee3f-af71-4f5f-bd00-942ea4a50036',
                         type='direct')
channel.basic_publish(exchange='backend_exchange_d1abee3f-af71-4f5f-bd00-942ea4a50036',
                      routing_key='receive_routing_key',
                      body=sample_json)
connection.close()

print 'no errors, sent'
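
# --- Illustrative consumer sketch (not part of the original script) ---
# The publisher above sends to a direct exchange with routing key
# 'receive_routing_key'; a consumer needs a queue bound to that exchange
# with the same key. The queue name and callback below are hypothetical,
# and the `type=` / `basic_consume(callback, queue=...)` signatures assume
# the same pre-1.0 pika API that the script itself uses. Left commented out
# so the one-shot sender's behavior is unchanged.
#
# def on_message(channel, method, properties, body):
#     print(body)
#     channel.basic_ack(delivery_tag=method.delivery_tag)
#
# connection = pika.BlockingConnection(params)
# channel = connection.channel()
# channel.queue_declare(queue='backend_receive_queue')
# channel.queue_bind(queue='backend_receive_queue',
#                    exchange='backend_exchange_d1abee3f-af71-4f5f-bd00-942ea4a50036',
#                    routing_key='receive_routing_key')
# channel.basic_consume(on_message, queue='backend_receive_queue')
# channel.start_consuming()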
rabix/bunny
rabix-integration-testing/backends/mock_backend/mq_send.py
Python
apache-2.0
700
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from __future__ import absolute_import

import unittest
from builtins import object

from apache_beam.metrics.cells import DistributionData
from apache_beam.metrics.execution import MetricKey
from apache_beam.metrics.execution import MetricsContainer
from apache_beam.metrics.execution import MetricsEnvironment
from apache_beam.metrics.metric import MetricResults
from apache_beam.metrics.metric import Metrics
from apache_beam.metrics.metric import MetricsFilter
from apache_beam.metrics.metricbase import MetricName
from apache_beam.runners.worker import statesampler
from apache_beam.utils import counters


class NameTest(unittest.TestCase):
  def test_basic_metric_name(self):
    name = MetricName('namespace1', 'name1')
    self.assertEqual(name.namespace, 'namespace1')
    self.assertEqual(name.name, 'name1')
    self.assertEqual(name, MetricName('namespace1', 'name1'))

    key = MetricKey('step1', name)
    self.assertEqual(key.step, 'step1')
    self.assertEqual(key.metric.namespace, 'namespace1')
    self.assertEqual(key.metric.name, 'name1')
    self.assertEqual(key, MetricKey('step1', MetricName('namespace1', 'name1')))


class MetricResultsTest(unittest.TestCase):

  def test_metric_filter_namespace_matching(self):
    filter = MetricsFilter().with_namespace('ns1')
    name = MetricName('ns1', 'name1')
    key = MetricKey('step1', name)
    self.assertTrue(MetricResults.matches(filter, key))

  def test_metric_filter_name_matching(self):
    filter = MetricsFilter().with_name('name1').with_namespace('ns1')
    name = MetricName('ns1', 'name1')
    key = MetricKey('step1', name)
    self.assertTrue(MetricResults.matches(filter, key))

    filter = MetricsFilter().with_name('name1')
    name = MetricName('ns1', 'name1')
    key = MetricKey('step1', name)
    self.assertTrue(MetricResults.matches(filter, key))

  def test_metric_filter_step_matching(self):
    name = MetricName('ns1', 'name1')

    filter = MetricsFilter().with_step('Step1')
    key = MetricKey('Step1', name)
    self.assertTrue(MetricResults.matches(filter, key))

    key = MetricKey('Step10', name)
    self.assertFalse(MetricResults.matches(filter, key))

    key = MetricKey('Step10/Step1', name)
    self.assertTrue(MetricResults.matches(filter, key))

    key = MetricKey('Top1/Outer1/Inner1', name)

    filter = MetricsFilter().with_step('Top1/Outer1/Inner1')
    self.assertTrue(MetricResults.matches(filter, key))

    filter = MetricsFilter().with_step('Top1/Outer1')
    self.assertTrue(MetricResults.matches(filter, key))

    filter = MetricsFilter().with_step('Outer1/Inner1')
    self.assertTrue(MetricResults.matches(filter, key))

    filter = MetricsFilter().with_step('Top1/Inner1')
    self.assertFalse(MetricResults.matches(filter, key))


class MetricsTest(unittest.TestCase):
  def test_get_namespace_class(self):
    class MyClass(object):
      pass

    self.assertEqual('{}.{}'.format(MyClass.__module__, MyClass.__name__),
                     Metrics.get_namespace(MyClass))

  def test_get_namespace_string(self):
    namespace = 'MyNamespace'
    self.assertEqual(namespace, Metrics.get_namespace(namespace))

  def test_get_namespace_error(self):
    with self.assertRaises(ValueError):
      Metrics.get_namespace(object())

  def test_counter_empty_name(self):
    with self.assertRaises(ValueError):
      Metrics.counter("namespace", "")

  def test_counter_empty_namespace(self):
    with self.assertRaises(ValueError):
      Metrics.counter("", "names")

  def test_distribution_empty_name(self):
    with self.assertRaises(ValueError):
      Metrics.distribution("namespace", "")

  def test_distribution_empty_namespace(self):
    with self.assertRaises(ValueError):
      Metrics.distribution("", "names")

  def test_create_counter_distribution(self):
    sampler = statesampler.StateSampler('', counters.CounterFactory())
    statesampler.set_current_tracker(sampler)
    state1 = sampler.scoped_state('mystep', 'myState',
                                  metrics_container=MetricsContainer('mystep'))
    try:
      sampler.start()
      with state1:
        counter_ns = 'aCounterNamespace'
        distro_ns = 'aDistributionNamespace'
        name = 'a_name'
        counter = Metrics.counter(counter_ns, name)
        distro = Metrics.distribution(distro_ns, name)
        counter.inc(10)
        counter.dec(3)
        distro.update(10)
        distro.update(2)
        self.assertTrue(isinstance(counter, Metrics.DelegatingCounter))
        self.assertTrue(isinstance(distro, Metrics.DelegatingDistribution))

        del distro
        del counter

        container = MetricsEnvironment.current_container()
        self.assertEqual(
            container.get_counter(
                MetricName(counter_ns, name)).get_cumulative(), 7)
        self.assertEqual(
            container.get_distribution(
                MetricName(distro_ns, name)).get_cumulative(),
            DistributionData(12, 2, 2, 10))
    finally:
      sampler.stop()


if __name__ == '__main__':
  unittest.main()
RyanSkraba/beam
sdks/python/apache_beam/metrics/metric_test.py
Python
apache-2.0
5,846
"""Support for the Hive sensors.""" from datetime import timedelta from homeassistant.components.sensor import SensorDeviceClass, SensorEntity from homeassistant.helpers.entity import DeviceInfo from . import HiveEntity from .const import DOMAIN PARALLEL_UPDATES = 0 SCAN_INTERVAL = timedelta(seconds=15) DEVICETYPE = { "Battery": {"unit": " % ", "type": SensorDeviceClass.BATTERY}, } async def async_setup_entry(hass, entry, async_add_entities): """Set up Hive thermostat based on a config entry.""" hive = hass.data[DOMAIN][entry.entry_id] devices = hive.session.deviceList.get("sensor") entities = [] if devices: for dev in devices: entities.append(HiveSensorEntity(hive, dev)) async_add_entities(entities, True) class HiveSensorEntity(HiveEntity, SensorEntity): """Hive Sensor Entity.""" @property def unique_id(self): """Return unique ID of entity.""" return self._unique_id @property def device_info(self) -> DeviceInfo: """Return device information.""" return DeviceInfo( identifiers={(DOMAIN, self.device["device_id"])}, manufacturer=self.device["deviceData"]["manufacturer"], model=self.device["deviceData"]["model"], name=self.device["device_name"], sw_version=self.device["deviceData"]["version"], via_device=(DOMAIN, self.device["parentDevice"]), ) @property def available(self): """Return if sensor is available.""" return self.device.get("deviceData", {}).get("online") @property def device_class(self): """Device class of the entity.""" return DEVICETYPE[self.device["hiveType"]].get("type") @property def native_unit_of_measurement(self): """Return the unit of measurement.""" return DEVICETYPE[self.device["hiveType"]].get("unit") @property def name(self): """Return the name of the sensor.""" return self.device["haName"] @property def native_value(self): """Return the state of the sensor.""" return self.device["status"]["state"] async def async_update(self): """Update all Node data from Hive.""" await self.hive.session.updateData(self.device) self.device = await self.hive.sensor.getSensor(self.device)
home-assistant/home-assistant
homeassistant/components/hive/sensor.py
Python
apache-2.0
2,371
import os

import calliope
import pytest  # pylint: disable=unused-import
import tempfile

from calliope.core.attrdict import AttrDict
from calliope.test.common.util import check_error_or_warning, python36_or_higher

HTML_STRINGS = AttrDict.from_yaml(
    os.path.join(os.path.dirname(__file__), 'common', 'html_strings.yaml')
)


class TestPlotting:
    @pytest.fixture(scope="module")
    def national_scale_example(self):
        model = calliope.examples.national_scale(
            override_dict={'model.subset_time': '2005-01-01'}
        )
        model.run()
        return model

    @python36_or_higher
    def test_national_scale_plotting(self, national_scale_example):
        model = national_scale_example

        plot_html_outputs = {
            'capacity': model.plot.capacity(html_only=True),
            'timeseries': model.plot.timeseries(html_only=True),
            'transmission': model.plot.transmission(html_only=True),
            'flows': model.plot.flows(html_only=True),
        }

        for plot_type in HTML_STRINGS['national_scale']:
            for string in HTML_STRINGS['national_scale'][plot_type]:
                assert string in plot_html_outputs[plot_type]

        # Also just try plotting the summary
        model.plot.summary()

        # Testing that the model can handle not having supply_plus technologies
        # Wrapped in temporary directory as we can't stop it saving an HTML file
        model._model_data = model._model_data.drop('resource_con')
        with tempfile.TemporaryDirectory() as tempdir:
            out_path = os.path.join(tempdir, 'test_plot.html')
            model.plot.timeseries(
                plotly_kwarg_updates={'auto_open': False, 'filename': out_path}
            )

    @python36_or_higher
    def test_milp_plotting(self):
        override = {'model.subset_time': '2005-07-01'}
        model = calliope.examples.milp(override_dict=override)
        model.run()

        plot_html_outputs = {
            'capacity': model.plot.capacity(html_only=True),
            'timeseries': model.plot.timeseries(html_only=True),
            'transmission': model.plot.transmission(html_only=True),
            'flows': model.plot.flows(html_only=True),
        }

        for plot_type in HTML_STRINGS['milp']:
            for string in HTML_STRINGS['milp'][plot_type]:
                assert string in plot_html_outputs[plot_type]

        # Also just try plotting the summary
        model.plot.summary()

    def test_failed_cap_plotting(self, national_scale_example):
        model = national_scale_example

        # should fail, not in array
        with pytest.raises(ValueError):
            model.plot.capacity(array='carrier_prod')
            model.plot.capacity(array=['energy_eff', 'energy_cap'])

            # orient has to be 'v', 'vertical', 'h', or 'horizontal'
            model.plot.capacity(orient='g')

    def test_failed_timeseries_plotting(self, national_scale_example):
        model = national_scale_example

        # should fail, not in array
        with pytest.raises(ValueError):
            model.plot.timeseries(array='energy_cap')
            model.plot.timeseries(squeeze=False)
            model.plot.timeseries(sum_dims=None)

    @python36_or_higher
    def test_clustered_plotting(self):
        override = {'model.time.function_options.k': 2}
        model = calliope.examples.time_clustering(override_dict=override)

        # While we have a model that hasn't been run, try plotting transmission and capacity
        model.plot.transmission(html_only=True)
        model.plot.capacity(html_only=True)

        model.run()

        plot_html = model.plot.timeseries(html_only=True)
        for string in HTML_STRINGS['clustering']['timeseries']:
            assert string in plot_html

    def test_subset_plotting(self, national_scale_example):
        model = national_scale_example

        model.plot.capacity(
            html_only=True, subset={'timesteps': ['2015-01-01 01:00']}
        )

        # should raise, subsetting with a tech that does not exist
        with pytest.raises(ValueError) as excinfo:
            model.plot.capacity(
                html_only=True, subset={'techs': ['foobar']}
            )
        assert check_error_or_warning(excinfo, 'No data to plot')

    def test_subset_array(self, national_scale_example):
        model = national_scale_example

        model.plot.capacity(html_only=True, array='inputs')
        model.plot.capacity(html_only=True, array='results')
        model.plot.capacity(html_only=True, array='energy_cap')
        model.plot.capacity(html_only=True, array='storage_cap')
        model.plot.capacity(
            html_only=True, array=['systemwide_levelised_cost', 'storage_cap']
        )

        model.plot.timeseries(html_only=True, array='inputs')
        model.plot.timeseries(html_only=True, array='results')
        model.plot.timeseries(html_only=True, array='power')
        model.plot.timeseries(html_only=True, array='resource')
        model.plot.timeseries(
            html_only=True, array=['resource_con', 'cost_var']
        )

    def test_long_name(self, national_scale_example):
        model = national_scale_example
        model._model_data['names'] = model._model_data.names.astype('<U100')
        model._model_data.names.loc['ccgt'] = (
            'a long name for a technology, longer than 30 characters'
        )
        model._model_data.names.loc['csp'] = (
            'a really very long name for a technology that is longer than 60 characters'
        )
        model._model_data.names.loc['battery'] = (
            'another_long_name_but_without_space_to_break_at'
        )
        model._model_data.names.loc['ac_transmission'] = (
            'long_transmission_name_which_has two break types in technology name'
        )

        broken_names = [
            'a long name for a technology,<br>longer than 30 characters',
            'another_long_name_but_without_...<br>space_to_break_at',
            'a really very long name for a<br>technology that is longer<br>than 60 characters'
        ]

        html_cap = model.plot.capacity(html_only=True)
        html_timeseries = model.plot.timeseries(html_only=True)
        html_transmission = model.plot.transmission(html_only=True)

        for i in broken_names:
            assert i in html_cap
            assert i in html_timeseries

        assert (
            'long_transmission_name_which_h...<br>as two break types in<br>technology name'
            in html_transmission
        )

    def test_plot_cost(self):
        model = calliope.examples.national_scale(
            override_dict={
                'techs.ccgt.costs.carbon': {'energy_cap': 10, 'interest_rate': 0.01}
            }
        )
        model.run()

        # should fail, multiple costs provided, can only plot one
        with pytest.raises(ValueError):
            model.plot.capacity(html_only=True, array='results')

        # should succeed, multiple costs provided, subset to one
        model.plot.capacity(
            html_only=True, array='results', subset={'costs': 'carbon'}
        )

        # FIXME: sum_dims doesn't seem to work at all
        # model.plot.capacity(html_only=True, sum_dims=['locs'])

    def test_to_file(self, national_scale_example):
        model = national_scale_example

        # should fail, 'gif' not in allowed extensions
        with pytest.raises(TypeError):
            model.plot.capacity(
                to_file='plot_to_save.gif',
                plotly_kwarg_updates={'auto_open': False})

        # FIXME: currently throws up save dialogue rather than just
        # saving the file
        # with tempfile.TemporaryDirectory() as tempdir:
        #     for extension in ['png', 'jpeg', 'svg', 'webp']:
        #         out_path = os.path.join(tempdir, 'plot_to_save.' + extension)
        #         model.plot.capacity(array='energy_cap', to_file=out_path, auto_open=False)
        #         assert os.path.isfile(out_path)

        # should fail, cannot save a plot with multiple DataArrays being plotted
        with pytest.raises(ValueError):
            model.plot.capacity(
                to_file='plot_to_save.svg',
                plotly_kwarg_updates={'auto_open': False})

        # test saving summary to file
        with tempfile.TemporaryDirectory() as tempdir:
            out_path = os.path.join(tempdir, 'test_summary.html')
            model.plot.summary(to_file=out_path)
            assert os.path.isfile(out_path)
brynpickering/calliope
calliope/test/test_analysis.py
Python
apache-2.0
8,658
""" Auth module, contains of login, logout and social network authorization modules, now vk only """ from flask import request, redirect, render_template, url_for, g, session, Blueprint from flask_login import login_user, logout_user, current_user from app import lm from app.models import User, ROLE_USER from app.vk_api import VkApi from app.secrets import vk_client_id, vk_secret_key from urllib.request import urlopen, Request import json auth_module = Blueprint('auth', __name__, template_folder='templates') @auth_module.route('/login', methods=['GET', 'POST']) def login(): if g.user is not None and g.user.is_authenticated: return redirect(url_for('pomodoro.index')) return render_template('login.html') @auth_module.route('/logout') def logout(): logout_user() return redirect(url_for('auth.login')) @lm.user_loader def load_user(email): """ function for flask_login package, it checks exist user or not :param email: the id of user :return: None if no user in base, user if user exists """ user = User.objects(email=email) if not user: return None return User.objects.get(email=email) @auth_module.route('/try_vk_auth') def try_vk_auth(): """ try get code from vk.com for authorization :return: redirect to vk_auth page with code or error """ vk_auth_page = url_for('auth.vk_auth', _external=True) req_url = 'https://oauth.vk.com/authorize?client_id=' + vk_client_id + \ '&scope=email&redirect_uri=' + vk_auth_page + \ '&response_type=code&v=5.52' return redirect(req_url) @auth_module.route('/vk_auth') def vk_auth(): """ Authorization using vk OAuth, getting user email, first name, last name and avatar :return: redirect to index page if all is ok else redirect to login page again """ vk_auth_page = url_for('auth.vk_auth', _external=True) code = request.args.get('code', '') access_token_url = 'https://oauth.vk.com/access_token?client_id=' + vk_client_id + \ '&client_secret=' + vk_secret_key + '&code=' + code + '&redirect_uri=' + vk_auth_page req = Request(url=access_token_url) response = urlopen(req).read() response = json.loads(response.decode('utf-8')) if 'access_token' in response and 'email' in response: access_token = response['access_token'] email = response['email'] user = User.objects(email=email) user_id = response['user_id'] vk_api = VkApi(token=access_token) req_result = vk_api.call_api('users.get', params={'user_id': user_id, 'fields': 'photo_50,screen_name'}) nickname = email.split('@')[0] avatar_url = '' if req_result: if 'last_name' in req_result[0].keys() and 'first_name' in req_result[0].keys(): nickname = req_result[0]['first_name'] + ' ' + req_result[0]['last_name'] if 'photo_50' in req_result[0].keys(): avatar_url = req_result[0]['photo_50'] if not user: user = User(nickname=nickname, email=email, role=ROLE_USER, avatar_url=avatar_url) else: user = User.objects.get(email=email) user.nickname = nickname if avatar_url: user.avatar_url = avatar_url user.save() remember_me = False if 'remember_me' in session: remember_me = session['remember_me'] session.pop('remember_me', None) login_user(user, remember=remember_me) return redirect(request.args.get('next') or url_for('pomodoro.index')) elif 'error' in response: return redirect(url_for('auth.login')) @auth_module.before_request def before_request(): g.user = current_user
Kwentar/Dream-Crusher
app/auth_views.py
Python
apache-2.0
3,770
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2014 Thibaut Lapierre <[email protected]>. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from jinja2 import Template
import os.path
import yaml


class ModelDefinition(object):
    """Container definition

    This class loads the model from the yaml files and provides
    different methods to read from it more easily.
    We can pass it a set of yaml files or directly a Python dictionary.
    It can contain 'projects', 'clusters' and 'services'.
    It takes and returns only dictionaries.
    """

    def __init__(self, model=None, app_args=None, cluster_name=None):
        """Build the cluster list from the model.

        This constructor collects the different clusters as a list of dicts.
        """
        self.model = 'shaddock.yml'
        if model:
            self.model = model
        self.app_args = app_args
        self.cluster_name = cluster_name
        if app_args and app_args.shdk_cluster:
            self.cluster_name = app_args.shdk_cluster

        Loader.add_constructor('!include', Loader.include)
        # Services are first imported as single string
        # They are then re loaded from yaml after jinja2.
        # Loader.add_constructor('services:', Loader.import_str)

        with open(self.model) as f:
            model = yaml.load(f, Loader)

        self.cluster_list = []
        if model.get('projects') is not None:
            for project in model['projects']:
                for cluster in project['clusters']:
                    self.cluster_list.append(cluster)
        if model.get('clusters') is not None:
            for cluster in model['clusters']:
                self.cluster_list.append(cluster)

    def get_cluster(self, name):
        """Return a cluster object by its name
        """
        try:
            cluster = [clu for clu in self.cluster_list
                       if clu['name'] == name]
            if len(cluster) > 1:
                raise TemplateFileError(
                    "There is more than one definition matching"
                    " 'name: {}' in your model".format(name))
            cluster = cluster[0]
        except IndexError:
            raise TemplateFileError(
                "There is no cluster definition containing"
                " 'name: {}' in your model".format(name))
        except KeyError:
            raise TemplateFileError(
                "At least one cluster definition "
                "is missing the name property")
        return cluster

    def _get_cluster_services(self, cluster):
        """Return a list of services from a cluster name
        """
        services_list = []
        if ('vars' in cluster):
            try:
                j2 = Template(str(cluster['services']))
                services_yaml = j2.render(cluster['vars'])
                services = yaml.load(services_yaml)
            except ValueError:
                for l in cluster['vars']:
                    j2 = Template(str(cluster['services']))
                    services_yaml = j2.render(l)
                    services = yaml.load(services_yaml)
            cluster['services'] = services
        for service in cluster['services']:
            service['cluster'] = {}
            service['cluster']['images'] = cluster['images']
            service['cluster']['name'] = cluster['name']
            service['cluster']['hosts'] = cluster.get('hosts')
            service['cluster']['vars'] = cluster.get('vars')
            services_list.append(service)
        return services_list

    def get_services_list(self):
        """This method returns a service list as a dict list.
        """
        if self.cluster_name is None:
            svc_list = []
            for clu in self.cluster_list:
                clu_svc_list = self._get_cluster_services(clu)
                svc_list = svc_list + clu_svc_list
        else:
            cluster = self.get_cluster(self.cluster_name)
            svc_list = self._get_cluster_services(cluster)
        return svc_list

    def get_service(self, name):
        """This method returns a service as a dict.

        It can only return a service from a specific cluster.
        A service name is allowed only once per cluster.
        """
        services_list = self.get_services_list()
        try:
            service = [svc for svc in services_list
                       if svc['name'] == name]
            if len(service) > 1:
                raise TemplateFileError(
                    "There is more than one definition matching"
                    " 'name: {}' in this cluster".format(name))
            service = service[0]
        except IndexError:
            raise TemplateFileError(
                "There is no container definition containing"
                " 'name: {}' in your model".format(name))
        except KeyError:
            raise TemplateFileError(
                "At least one container definition in your model"
                " is missing the name property")
        service = self.build_service_dict(service)
        return service

    def build_service_dict(self, service):
        """Build a service dictionary
        """
        # Image dir definition:
        #
        try:
            service['images_dir'] = os.path.join(
                os.path.dirname(self.model),
                service['cluster']['images'])
        except TypeError:
            raise TemplateFileError(
                "Cluster definition in your model is missing the images"
                " key.")
        try:
            service['image']
        except KeyError:
            raise TemplateFileError(
                "Container definition of: '{}' in your model is"
                " missing the image property".format(service['name']))
        service['path'] = '{}/{}'.format(service['images_dir'],
                                         service['image'].split(":")[0])
        clu_name = service['cluster']['name']
        service['service_name'] = clu_name + '_' + service['name']

        # Host API Definition:
        #
        api_cfg = {}
        try:
            api_cfg = [api for api in service['cluster']['hosts']
                       if api['name'] == service['host']]
            if len(api_cfg) > 1:
                raise TemplateFileError(
                    "There is more than one definition matching"
                    " 'name: {}' in your model".format(service['name']))
            api_cfg = api_cfg[0]
        except KeyError:
            pass
        except IndexError:
            raise TemplateFileError(
                "There is no Docker Host definition containing"
                " 'name: {}' in your model.".format(service['host']))
        except TypeError:
            pass
        service['api_cfg'] = api_cfg

        return service


class TemplateFileError(Exception):
    pass


class Loader(yaml.Loader):
    """Include

    This class changes the Yaml load function to allow file inclusion
    using the !include keyword.
    """

    def __init__(self, stream):
        self._root = os.path.split(stream.name)[0]
        super(Loader, self).__init__(stream)

    def include(self, node):
        filename = os.path.join(self._root, self.construct_scalar(node))
        # try:
        with open(filename, 'r') as f:
            return yaml.load(f, Loader)
        # except Exception:
        #     raise TemplateFileError(
        #         "The file {} you're trying to include doesn't"
        #         "exist.".format(filename))

    def import_str(self, node):
        return str(self.construct_scalar(node))
epheo/shaddock
shaddock/model.py
Python
apache-2.0
8,118
import requests
import pytest
import subprocess
import os

from settings import TEST_DATA
from suite.custom_resources_utils import (
    create_dos_logconf_from_yaml,
    create_dos_policy_from_yaml,
    create_dos_protected_from_yaml,
    delete_dos_policy,
    delete_dos_logconf,
    delete_dos_protected,
)
from suite.dos_utils import find_in_log, log_content_to_dic
from suite.resources_utils import (
    wait_before_test,
    create_example_app,
    wait_until_all_pods_are_ready,
    create_items_from_yaml,
    delete_items_from_yaml,
    delete_common_app,
    ensure_connection_to_public_endpoint,
    create_ingress_with_dos_annotations,
    ensure_response_from_backend,
    get_ingress_nginx_template_conf,
    get_file_contents,
    get_test_file_name,
    write_to_json,
    replace_configmap_from_yaml,
    scale_deployment,
    nginx_reload,
    get_pods_amount,
    get_pods_amount_with_name,
    clear_file_contents,
)
from suite.yaml_utils import get_first_ingress_host_from_yaml
from datetime import datetime

src_ing_yaml = f"{TEST_DATA}/dos/dos-ingress.yaml"
valid_resp_addr = "Server address:"
valid_resp_name = "Server name:"
invalid_resp_title = "Request Rejected"
invalid_resp_body = "The requested URL was rejected. Please consult with your administrator."
reload_times = {}


class DosSetup:
    """
    Encapsulate the example details.

    Attributes:
        req_url (str):
        pol_name (str):
        log_name (str):
    """

    def __init__(self, req_url, pol_name, log_name):
        self.req_url = req_url
        self.pol_name = pol_name
        self.log_name = log_name


@pytest.fixture(scope="class")
def dos_setup(
    request, kube_apis, ingress_controller_endpoint, ingress_controller_prerequisites, test_namespace
) -> DosSetup:
    """
    Deploy simple application and all the DOS resources under test in one namespace.

    :param request: pytest fixture
    :param kube_apis: client apis
    :param ingress_controller_endpoint: public endpoint
    :param ingress_controller_prerequisites: IC pre-requisites
    :param test_namespace:
    :return: DosSetup
    """
    print(f"------------- Replace ConfigMap --------------")
    replace_configmap_from_yaml(
        kube_apis.v1,
        ingress_controller_prerequisites.config_map["metadata"]["name"],
        ingress_controller_prerequisites.namespace,
        f"{TEST_DATA}/dos/nginx-config.yaml"
    )

    print("------------------------- Deploy Dos backend application -------------------------")
    create_example_app(kube_apis, "dos", test_namespace)
    req_url = f"http://{ingress_controller_endpoint.public_ip}:{ingress_controller_endpoint.port}/"
    wait_until_all_pods_are_ready(kube_apis.v1, test_namespace)
    ensure_connection_to_public_endpoint(
        ingress_controller_endpoint.public_ip,
        ingress_controller_endpoint.port,
        ingress_controller_endpoint.port_ssl,
    )

    print("------------------------- Deploy Secret -----------------------------")
    src_sec_yaml = f"{TEST_DATA}/dos/dos-secret.yaml"
    create_items_from_yaml(kube_apis, src_sec_yaml, test_namespace)

    print("------------------------- Deploy logconf -----------------------------")
    src_log_yaml = f"{TEST_DATA}/dos/dos-logconf.yaml"
    log_name = create_dos_logconf_from_yaml(kube_apis.custom_objects, src_log_yaml, test_namespace)

    print(f"------------------------- Deploy dospolicy ---------------------------")
    src_pol_yaml = f"{TEST_DATA}/dos/dos-policy.yaml"
    pol_name = create_dos_policy_from_yaml(kube_apis.custom_objects, src_pol_yaml, test_namespace)

    print(f"------------------------- Deploy protected resource ---------------------------")
    src_protected_yaml = f"{TEST_DATA}/dos/dos-protected.yaml"
    protected_name = create_dos_protected_from_yaml(
        kube_apis.custom_objects, src_protected_yaml, test_namespace,
        ingress_controller_prerequisites.namespace)

    for item in kube_apis.v1.list_namespaced_pod(ingress_controller_prerequisites.namespace).items:
        if "nginx-ingress" in item.metadata.name:
            nginx_reload(kube_apis.v1, item.metadata.name, ingress_controller_prerequisites.namespace)

    def fin():
        print("Clean up:")
        delete_dos_policy(kube_apis.custom_objects, pol_name, test_namespace)
        delete_dos_logconf(kube_apis.custom_objects, log_name, test_namespace)
        delete_dos_protected(kube_apis.custom_objects, protected_name, test_namespace)
        delete_common_app(kube_apis, "dos", test_namespace)
        delete_items_from_yaml(kube_apis, src_sec_yaml, test_namespace)
        write_to_json(f"reload-{get_test_file_name(request.node.fspath)}.json", reload_times)

    request.addfinalizer(fin)

    return DosSetup(req_url, pol_name, log_name)


@pytest.mark.dos
@pytest.mark.parametrize(
    "crd_ingress_controller_with_dos",
    [
        {
            "extra_args": [
                f"-enable-custom-resources",
                f"-enable-app-protect-dos",
                f"-v=3",
            ]
        }
    ],
    indirect=["crd_ingress_controller_with_dos"],
)
class TestDos:

    def getPodNameThatContains(self, kube_apis, namespace, contains_string):
        for item in kube_apis.v1.list_namespaced_pod(namespace).items:
            if contains_string in item.metadata.name:
                return item.metadata.name
        return ""

    def test_ap_nginx_config_entries(
        self, kube_apis, ingress_controller_prerequisites, crd_ingress_controller_with_dos, dos_setup, test_namespace
    ):
        """
        Test to verify Dos directive in nginx config
        """
        conf_directive = [
            f"app_protect_dos_enable on;",
            f"app_protect_dos_security_log_enable on;",
            f"app_protect_dos_monitor uri=dos.example.com protocol=http1 timeout=5;",
            f"app_protect_dos_name \"{test_namespace}/dos-protected/name\";",
            f"app_protect_dos_policy_file /etc/nginx/dos/policies/{test_namespace}_{dos_setup.pol_name}.json;",
            f"app_protect_dos_security_log_enable on;",
            f"app_protect_dos_security_log /etc/nginx/dos/logconfs/{test_namespace}_{dos_setup.log_name}.json syslog:server=syslog-svc.{ingress_controller_prerequisites.namespace}.svc.cluster.local:514;",
        ]

        create_ingress_with_dos_annotations(
            kube_apis,
            src_ing_yaml,
            test_namespace,
            test_namespace + "/dos-protected",
        )
        ingress_host = get_first_ingress_host_from_yaml(src_ing_yaml)

        ensure_response_from_backend(dos_setup.req_url, ingress_host, check404=True)

        pod_name = self.getPodNameThatContains(kube_apis, ingress_controller_prerequisites.namespace, "nginx-ingress")

        result_conf = get_ingress_nginx_template_conf(
            kube_apis.v1, test_namespace, "dos-ingress", pod_name, "nginx-ingress"
        )

        delete_items_from_yaml(kube_apis, src_ing_yaml, test_namespace)

        for _ in conf_directive:
            assert _ in result_conf

    def test_dos_sec_logs_on(
        self,
        kube_apis,
        ingress_controller_prerequisites,
        crd_ingress_controller_with_dos,
        dos_setup,
        test_namespace,
    ):
        """
        Test corresponding log entries with correct policy (includes setting up a syslog server as defined in syslog.yaml)
        """
        print("----------------------- Get syslog pod name ----------------------")
        syslog_pod = self.getPodNameThatContains(kube_apis, ingress_controller_prerequisites.namespace, "syslog")
        assert "syslog" in syslog_pod

        log_loc = f"/var/log/messages"
        clear_file_contents(kube_apis.v1, log_loc, syslog_pod, ingress_controller_prerequisites.namespace)

        create_ingress_with_dos_annotations(
            kube_apis, src_ing_yaml, test_namespace, test_namespace + "/dos-protected"
        )
        ingress_host = get_first_ingress_host_from_yaml(src_ing_yaml)

        print("--------- Run test while DOS module is enabled with correct policy ---------")
        ensure_response_from_backend(dos_setup.req_url, ingress_host, check404=True)

        pod_name = self.getPodNameThatContains(kube_apis, ingress_controller_prerequisites.namespace, "nginx-ingress")

        get_ingress_nginx_template_conf(
            kube_apis.v1, test_namespace, "dos-ingress", pod_name, "nginx-ingress"
        )

        print("----------------------- Send request ----------------------")
        response = requests.get(
            dos_setup.req_url, headers={"host": "dos.example.com"}, verify=False
        )
        print(response.text)

        wait_before_test(10)

        print(f'log_loc {log_loc} syslog_pod {syslog_pod} namespace {ingress_controller_prerequisites.namespace}')
        log_contents = get_file_contents(kube_apis.v1, log_loc, syslog_pod, ingress_controller_prerequisites.namespace)

        delete_items_from_yaml(kube_apis, src_ing_yaml, test_namespace)

        print(log_contents)

        assert 'product="app-protect-dos"' in log_contents
        assert f'vs_name="{test_namespace}/dos-protected/name"' in log_contents
        assert 'bad_actor' in log_contents

    @pytest.mark.skip
    def test_dos_under_attack_no_learning(
        self, kube_apis, ingress_controller_prerequisites, crd_ingress_controller_with_dos, dos_setup, test_namespace
    ):
        """
        Test App Protect Dos: Block bad clients attack
        """
        log_loc = f"/var/log/messages"
        print("----------------------- Get syslog pod name ----------------------")
        syslog_pod = self.getPodNameThatContains(kube_apis, ingress_controller_prerequisites.namespace, "syslog")
        assert "syslog" in syslog_pod
        clear_file_contents(kube_apis.v1, log_loc, syslog_pod, ingress_controller_prerequisites.namespace)

        print("------------------------- Deploy ingress -----------------------------")
        create_ingress_with_dos_annotations(
            kube_apis, src_ing_yaml, test_namespace, test_namespace + "/dos-protected"
        )
        ingress_host = get_first_ingress_host_from_yaml(src_ing_yaml)

        print("------------------------- Attack -----------------------------")
        wait_before_test(10)
        print("start bad clients requests")
        p_attack = subprocess.Popen(
            [f"exec {TEST_DATA}/dos/bad_clients_xff.sh {ingress_host} {dos_setup.req_url}"],
            shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)

        print("Attack for 30 seconds")
        wait_before_test(30)

        print("Stop Attack")
        p_attack.terminate()

        print("wait max 140 seconds after attack stop, to get attack ended")
        find_in_log(kube_apis, log_loc, syslog_pod, ingress_controller_prerequisites.namespace, 140,
                    "attack_event=\"Attack ended\"")

        log_contents = get_file_contents(kube_apis.v1, log_loc, syslog_pod,
                                         ingress_controller_prerequisites.namespace)
        log_info_dic = log_content_to_dic(log_contents)

        # Analyze the log
        no_attack = False
        attack_started = False
        under_attack = False
        attack_ended = False
        for log in log_info_dic:
            # Start with no attack
            if log['attack_event'] == "No Attack" and int(log['dos_attack_id']) == 0 and not no_attack:
                no_attack = True
            # Attack started
            elif log['attack_event'] == "Attack started" and int(log['dos_attack_id']) > 0 and not attack_started:
                attack_started = True
            # Under attack
            elif log['attack_event'] == "Under Attack" and int(log['dos_attack_id']) > 0 and not under_attack:
                under_attack = True
            # Attack ended
            elif log['attack_event'] == "Attack ended" and int(log['dos_attack_id']) > 0 and not attack_ended:
                attack_ended = True

        delete_items_from_yaml(kube_apis, src_ing_yaml, test_namespace)

        assert (
            no_attack
            and attack_started
            and under_attack
            and attack_ended
        )

    @pytest.mark.skip
    def test_dos_under_attack_with_learning(
        self, kube_apis, ingress_controller_prerequisites, crd_ingress_controller_with_dos, dos_setup, test_namespace
    ):
        """
        Test App Protect Dos: Block bad clients attack with learning
        """
        log_loc = f"/var/log/messages"
        print("----------------------- Get syslog pod name ----------------------")
        syslog_pod = self.getPodNameThatContains(kube_apis, ingress_controller_prerequisites.namespace, "syslog")
        assert "syslog" in syslog_pod
        clear_file_contents(kube_apis.v1, log_loc, syslog_pod, ingress_controller_prerequisites.namespace)

        print("------------------------- Deploy ingress -----------------------------")
        create_ingress_with_dos_annotations(
            kube_apis, src_ing_yaml, test_namespace, test_namespace + "/dos-protected"
        )
        ingress_host = get_first_ingress_host_from_yaml(src_ing_yaml)

        print("------------------------- Learning Phase -----------------------------")
        print("start good clients requests")
        p_good_client = subprocess.Popen(
            [f"exec {TEST_DATA}/dos/good_clients_xff.sh {ingress_host} {dos_setup.req_url}"],
            preexec_fn=os.setsid, shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)

        print("Learning for max 10 minutes")
        find_in_log(kube_apis, log_loc, syslog_pod, ingress_controller_prerequisites.namespace, 600,
                    "learning_confidence=\"Ready\"")

        print("------------------------- Attack -----------------------------")
        print("start bad clients requests")
        p_attack = subprocess.Popen(
            [f"exec {TEST_DATA}/dos/bad_clients_xff.sh {ingress_host} {dos_setup.req_url}"],
            preexec_fn=os.setsid, shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)

        print("Attack for 300 seconds")
        wait_before_test(300)

        print("Stop Attack")
        p_attack.terminate()

        print("wait max 140 seconds after attack stop, to get attack ended")
        find_in_log(kube_apis, log_loc, syslog_pod, ingress_controller_prerequisites.namespace, 140,
                    "attack_event=\"Attack ended\"")

        print("Stop Good Client")
        p_good_client.terminate()

        log_contents = get_file_contents(kube_apis.v1, log_loc, syslog_pod,
                                         ingress_controller_prerequisites.namespace)
        log_info_dic = log_content_to_dic(log_contents)

        # Analyze the log
        no_attack = False
        attack_started = False
        under_attack = False
        attack_ended = False
        bad_actor_detected = False
        signature_detected = False
        health_ok = False
        bad_ip = ['1.1.1.1', '1.1.1.2', '1.1.1.3']
        fmt = '%b %d %Y %H:%M:%S'
        for log in log_info_dic:
            if log['attack_event'] == 'No Attack':
                if int(log['dos_attack_id']) == 0 and not no_attack:
                    no_attack = True
            elif log['attack_event'] == 'Attack started':
                if int(log['dos_attack_id']) > 0 and not attack_started:
                    attack_started = True
                    start_attack_time = datetime.strptime(log['date_time'], fmt)
            elif log['attack_event'] == 'Under Attack':
                under_attack = True
                if not health_ok and float(log['stress_level']) < 0.6:
                    health_ok = True
                    health_ok_time = datetime.strptime(log['date_time'], fmt)
            elif log['attack_event'] == 'Attack signature detected':
                signature_detected = True
            elif log['attack_event'] == 'Bad actors detected':
                if under_attack:
                    bad_actor_detected = True
            elif log['attack_event'] == 'Bad actor detection':
                if under_attack and log['source_ip'] in bad_ip:
                    bad_ip.remove(log['source_ip'])
            elif log['attack_event'] == 'Attack ended':
                attack_ended = True

        delete_items_from_yaml(kube_apis, src_ing_yaml, test_namespace)

        assert (
            no_attack
            and attack_started
            and under_attack
            and attack_ended
            and health_ok
            and (health_ok_time - start_attack_time).total_seconds() < 150
            and signature_detected
            and bad_actor_detected
            and len(bad_ip) == 0
        )

    @pytest.mark.skip
    def test_dos_arbitrator(
        self, kube_apis, ingress_controller_prerequisites, crd_ingress_controller_with_dos, dos_setup, test_namespace
    ):
        """
        Test App Protect Dos: Check new IC pod get learning info
        """
        print("----------------------- Get syslog pod name ----------------------")
        syslog_pod = self.getPodNameThatContains(kube_apis, ingress_controller_prerequisites.namespace, "syslog")
        assert "syslog" in syslog_pod
        log_loc = f"/var/log/messages"
        clear_file_contents(kube_apis.v1, log_loc, syslog_pod, ingress_controller_prerequisites.namespace)

        print("------------------------- Deploy ingress -----------------------------")
        create_ingress_with_dos_annotations(
            kube_apis, src_ing_yaml, test_namespace, test_namespace + "/dos-protected"
        )
        ingress_host = get_first_ingress_host_from_yaml(src_ing_yaml)

        # print("------------------------- Learning Phase -----------------------------")
        print("start good clients requests")
        p_good_client = subprocess.Popen(
            [f"exec {TEST_DATA}/dos/good_clients_xff.sh {ingress_host} {dos_setup.req_url}"],
            preexec_fn=os.setsid, shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)

        print("Learning for max 10 minutes")
        find_in_log(kube_apis, log_loc, syslog_pod, ingress_controller_prerequisites.namespace, 600,
                    "learning_confidence=\"Ready\"")

        print("------------------------- Check new IC pod get info from arbitrator -----------------------------")
        ic_ns = ingress_controller_prerequisites.namespace
        scale_deployment(kube_apis.v1, kube_apis.apps_v1_api, "nginx-ingress", ic_ns, 2)
        # `is not 2` in the original compared object identity, not value
        while get_pods_amount_with_name(kube_apis.v1, "nginx-ingress", "nginx-ingress") != 2:
            print(f"Number of replicas is not 2, retrying...")
            wait_before_test()

        print("------------------------- Check if new pod receive info from arbitrator -----------------------------")
        print("Wait for 30 seconds")
        wait_before_test(30)

        log_contents = get_file_contents(kube_apis.v1, log_loc, syslog_pod,
                                         ingress_controller_prerequisites.namespace)
        log_info_dic = log_content_to_dic(log_contents)

        print("Stop Good Client")
        p_good_client.terminate()

        learning_units_hostname = []
        for log in log_info_dic:
            if log['unit_hostname'] not in learning_units_hostname and log['learning_confidence'] == "Ready":
                learning_units_hostname.append(log['unit_hostname'])

        delete_items_from_yaml(kube_apis, src_ing_yaml, test_namespace)

        assert (
            len(learning_units_hostname) == 2
        )
nginxinc/kubernetes-ingress
tests/suite/test_dos.py
Python
apache-2.0
19,400
from adobject import *


class ADComputer(ADObject):
    """Python class representing a computer object in Active Directory."""

    @classmethod
    def create(cls, name, container_object, enable=True, optional_attributes={}):
        """Creates and returns a new computer object."""
        assert type(name) == str
        assert container_object.__class__.__name__ == 'ADContainer'
        return container_object.create_computer(name=name,
                                                enable=enable,
                                                optional_attributes=optional_attributes)

    def get_creator(self):
        """returns ADUser object of the user who added the computer to the domain.
        Returns None if user no longer exists."""
        try:
            sid = str(pyadutils.convert_sid(
                self.get_attribute('mS-DS-CreatorSID', False))).split(':')[1]
            dn = adsearch.by_sid(sid)
            return ADUser(dn)
        except:
            return None


ADObject._py_ad_object_mappings['computer'] = ADComputer
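
# --- Illustrative usage sketch (not part of the original module) ---
# A minimal sketch of creating a computer object, assuming pyad's
# ADContainer class is importable from pyad.adcontainer and using a
# hypothetical OU distinguished name; none of these values come from
# this module.
#
# from pyad.adcontainer import ADContainer
# container = ADContainer.from_dn('ou=workstations,dc=example,dc=com')
# pc = ADComputer.create('WS-0042', container)
# print(pc.get_creator())  # the ADUser who joined it, or None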
ECS-Security/Hive
pyad/adcomputer.py
Python
apache-2.0
934
'''
9. Find all of the crypto map entries that are using PFS group2
'''

from ciscoconfparse import CiscoConfParse

config = CiscoConfParse('cisco_ipsec.txt')

pfs_group2 = config.find_objects_w_child(parentspec=r"^crypto map CRYPTO",
                                         childspec=r"pfs group2")

for entry in pfs_group2:
    print entry.text
eclecticitguy/pynet_paid
class1/ex9_confparse.py
Python
apache-2.0
346
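# The same search, self-contained: CiscoConfParse also accepts a list of
# config lines, which makes the parent/child matching easy to see. The crypto
# map below is made up for illustration.
from ciscoconfparse import CiscoConfParse

lines = [
    "crypto map CRYPTO 10 ipsec-isakmp",
    " set pfs group2",
    " set peer 10.1.1.1",
    "crypto map CRYPTO 20 ipsec-isakmp",
    " set pfs group5",
]
parsed = CiscoConfParse(lines)
for entry in parsed.find_objects_w_child(parentspec=r"^crypto map CRYPTO",
                                         childspec=r"pfs group2"):
    print(entry.text)  # -> crypto map CRYPTO 10 ipsec-isakmp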
""" Copyright 2016 adpoliak Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from distutils.version import LooseVersion from sortedcontainers.sortedset import SortedSet # import curses # import os # import sys import typing class VersionItem(object): def __init__(self, name: str): self._name = name @property def name(self): return self._name class VersionChoiceDialog(object): @property def chosen_version(self): return self._chosen_version @property def do_not_ask_again(self): return self._do_not_ask_again @do_not_ask_again.setter def do_not_ask_again(self, value: bool): self._do_not_ask_again = value @property def keep_while_available(self): return self._keep_while_available @keep_while_available.setter def keep_while_available(self, value: bool): self._keep_while_available = value @property def persist(self): return self._persist @persist.setter def persist(self, value: bool): self._persist = value @property def return_code(self): return self._return_code @return_code.setter def return_code(self, value: str): self._return_code = value def persist_action(self): if self.persist is not None: self.persist ^= True if self.persist: self.keep_while_available = False self.do_not_ask_again = False else: self.keep_while_available = None self.do_not_ask_again = None def update_persist_state(self): if self.keep_while_available or self.do_not_ask_again: self.persist = None else: self.persist = False def keep_action(self): if self.keep_while_available is not None: self.keep_while_available ^= True self.update_persist_state() def noprompt_action(self): if self.do_not_ask_again is not None: self.do_not_ask_again ^= True def select_action(self): self._chosen_version = list(self._child_names)[self._index] if self.chosen_version.endswith(':KEEP'): self._can_continue = False self.persist = None self.keep_while_available = None self.do_not_ask_again = None else: self._can_continue = True self.persist = False self.persist = False self.do_not_ask_again = False def cancel_action(self): self.return_code = 'cancel' def accept_action(self): if self._can_continue: self.return_code = 'accept' def __init__(self, master: typing.Optional[object], versions: SortedSet, persist: typing.Optional[bool] = False, keep: typing.Optional[bool] = False, last_used: typing.Optional[str] = None, *args, **kwargs): # assign tkinter-compatible interface items to placeholder to placate PyCharm _ = master _ = args _ = kwargs self._can_continue = None self._child_names = SortedSet(versions) self._child_objects = None self._chosen_version = None self._do_not_ask_again = False self._keep_while_available = keep self._last_used = last_used self._return_code = None self._persist = persist self._index = 0 if persist: self.persist_action() if keep: self.keep_action() if last_used is not None: last_used_version_object = LooseVersion(last_used) self._index = self._child_names.index(last_used_version_object) \ if last_used_version_object in self._child_names else 0 self.select_action()
adpoliak/NSAptr
ui/curses/versionchooser.py
Python
apache-2.0
4,308
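# A minimal driving sketch for VersionChoiceDialog above. The endswith(':KEEP')
# check in select_action implies the SortedSet holds plain version strings, so
# that is what this sketch assumes; 'master' is None because the curses front
# end is stubbed out.
from sortedcontainers.sortedset import SortedSet

versions = SortedSet(["1.0.2", "1.1.0", "2.0.0"])
dialog = VersionChoiceDialog(None, versions, persist=False, keep=False)
print(dialog.chosen_version)   # -> '1.0.2', the lowest entry is preselected
dialog.accept_action()         # allowed because a selectable version is chosen
print(dialog.return_code)      # -> 'accept'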
# vim: tabstop=4 shiftwidth=4 softtabstop=4

#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

import mock

from heat.engine import clients
from heat.tests.common import HeatTestCase
from heatclient import client as heatclient


class ClientsTest(HeatTestCase):

    def test_clients_chosen_at_module_initialization(self):
        self.assertFalse(hasattr(clients.Clients, 'nova'))
        self.assertTrue(hasattr(clients.Clients('fakecontext'), 'nova'))

    def test_clients_get_heat_url(self):
        con = mock.Mock()
        con.tenant_id = "b363706f891f48019483f8bd6503c54b"
        obj = clients.Clients(con)
        obj._get_client_option = mock.Mock()
        obj._get_client_option.return_value = None
        self.assertIsNone(obj._get_heat_url())
        heat_url = "http://0.0.0.0:8004/v1/%(tenant_id)s"
        obj._get_client_option.return_value = heat_url
        tenant_id = "b363706f891f48019483f8bd6503c54b"
        result = heat_url % {"tenant_id": tenant_id}
        self.assertEqual(result, obj._get_heat_url())
        obj._get_client_option.return_value = result
        self.assertEqual(result, obj._get_heat_url())

    @mock.patch.object(heatclient, 'Client')
    def test_clients_heat(self, mock_call):
        con = mock.Mock()
        con.auth_url = "http://auth.example.com:5000/v2.0"
        con.tenant_id = "b363706f891f48019483f8bd6503c54b"
        con.auth_token = "3bcc3d3a03f44e3d8377f9247b0ad155"
        obj = clients.Clients(con)
        obj._get_heat_url = mock.Mock(name="_get_heat_url")
        obj._get_heat_url.return_value = None
        obj.url_for = mock.Mock(name="url_for")
        obj.url_for.return_value = "url_from_keystone"
        obj.heat()
        self.assertEqual('url_from_keystone', mock_call.call_args[0][1])
        obj._get_heat_url.return_value = "url_from_config"
        obj._heat = None
        obj.heat()
        self.assertEqual('url_from_config', mock_call.call_args[0][1])
ntt-sic/heat
heat/tests/test_clients.py
Python
apache-2.0
2,473
from django.db.transaction import atomic from api.api_views import APIView from api.exceptions import VmIsNotOperational, VmHasPendingTasks, VmIsLocked, PreconditionRequired from api.task.response import FailureTaskResponse, SuccessTaskResponse from api.task.utils import task_log_success from api.vm.utils import get_vm from api.vm.messages import LOG_MIGRATE_DC from api.vm.migrate.serializers import VmDcSerializer from que.utils import task_id_from_task_id from vms.models import TaskLogEntry, Backup, Snapshot class VmDc(APIView): """ api.vm.migrate.views.vm_dc """ def __init__(self, request, hostname_or_uuid, data): super(VmDc, self).__init__(request) self.hostname_or_uuid = hostname_or_uuid self.data = data self.vm = get_vm(request, hostname_or_uuid, exists_ok=True, noexists_fail=True) @atomic def put(self): request, vm = self.request, self.vm if vm.locked: raise VmIsLocked if vm.status not in (vm.STOPPED, vm.RUNNING, vm.NOTCREATED): raise VmIsNotOperational('VM is not stopped, running or notcreated') if vm.json_changed(): raise PreconditionRequired('VM definition has changed; Update first') ser = VmDcSerializer(request, vm, data=self.data) if not ser.is_valid(): return FailureTaskResponse(request, ser.errors, vm=vm) if vm.tasks: raise VmHasPendingTasks old_dc = vm.dc dc = ser.dc # Change DC for one VM, repeat this for other VM + Recalculate node & storage resources in target and source vm.dc = dc vm.save(update_node_resources=True, update_storage_resources=True) # Change task log entries DC for target VM TaskLogEntry.objects.filter(object_pk=vm.uuid).update(dc=dc) # Change related VM backup's DC Backup.objects.filter(vm=vm).update(dc=dc) for ns in ser.nss: # Issue #chili-885 for i in (dc, old_dc): Backup.update_resources(ns, vm, i) Snapshot.update_resources(ns, vm, i) detail = 'Successfully migrated VM %s from datacenter %s to datacenter %s' % (vm.hostname, old_dc.name, dc.name) # Will create task log entry in old DC res = SuccessTaskResponse(request, detail, vm=vm, msg=LOG_MIGRATE_DC, detail=detail) # Create task log entry in new DC too task_log_success(task_id_from_task_id(res.data.get('task_id'), dc_id=dc.id), LOG_MIGRATE_DC, obj=vm, detail=detail, update_user_tasks=False) return res
erigones/esdc-ce
api/vm/migrate/vm_dc.py
Python
apache-2.0
2,612
#!/usr/bin/env python from __future__ import print_function try: # PY2 ip_addr = raw_input("Please enter IP address: ") except NameError: # PY3 ip_addr = input("Please enter IP address: ") ip_addr = ip_addr.split(".") print() print("{:<12} {:<12} {:<12} {:<12}".format(*ip_addr)) print()
damintote/pynet_test
string2.py
Python
apache-2.0
311
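# What the "{:<12}" specs above do: left-align each octet in a 12-character
# column. A quick check with a made-up address:
row = "{:<12} {:<12} {:<12} {:<12}".format("192", "168", "10", "5")
print(repr(row))  # every field is padded with spaces out to width 12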
# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for struct2tensor.reroot.""" from absl.testing import absltest from struct2tensor import calculate from struct2tensor import create_expression from struct2tensor import path from struct2tensor.expression_impl import proto_test_util from struct2tensor.expression_impl import reroot from struct2tensor.test import expression_test_util from struct2tensor.test import prensor_test_util from struct2tensor.test import test_pb2 import tensorflow as tf from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import @test_util.run_all_in_graph_and_eager_modes class RerootTest(tf.test.TestCase): def test_reroot_and_create_proto_index(self): expr = create_expression.create_expression_from_prensor( prensor_test_util.create_big_prensor()) new_root = reroot.reroot(expr, path.Path(["doc"])) proto_index = reroot.create_proto_index_field( new_root, "proto_index").get_child("proto_index") new_field = new_root.get_child("bar") leaf_node = expression_test_util.calculate_value_slowly(new_field) proto_index_node = expression_test_util.calculate_value_slowly(proto_index) self.assertIsNotNone(new_field) self.assertTrue(new_field.is_repeated) self.assertEqual(new_field.type, tf.string) self.assertTrue(new_field.is_leaf) self.assertEqual(new_field.known_field_names(), frozenset()) self.assertEqual(leaf_node.values.dtype, tf.string) self.assertIsNotNone(proto_index) self.assertFalse(proto_index.is_repeated) self.assertEqual(proto_index.type, tf.int64) self.assertTrue(proto_index.is_leaf) self.assertEqual(proto_index.known_field_names(), frozenset()) self.assertEqual(proto_index_node.values.dtype, tf.int64) self.assertAllEqual([b"a", b"b", b"c", b"d"], leaf_node.values) self.assertAllEqual([0, 1, 1, 2], leaf_node.parent_index) self.assertAllEqual([0, 1, 1], proto_index_node.values) self.assertAllEqual([0, 1, 2], proto_index_node.parent_index) def test_reroot_and_create_proto_index_deep(self): expr = create_expression.create_expression_from_prensor( prensor_test_util.create_deep_prensor()) new_root = reroot.reroot(expr, path.Path(["event", "doc"])) proto_index = reroot.create_proto_index_field( new_root, "proto_index").get_child("proto_index") new_field = new_root.get_child("bar") leaf_node = expression_test_util.calculate_value_slowly(new_field) proto_index_node = expression_test_util.calculate_value_slowly(proto_index) self.assertIsNotNone(new_field) self.assertTrue(new_field.is_repeated) self.assertEqual(new_field.type, tf.string) self.assertTrue(new_field.is_leaf) self.assertEqual(new_field.known_field_names(), frozenset()) self.assertEqual(leaf_node.values.dtype, tf.string) self.assertIsNotNone(proto_index) self.assertFalse(proto_index.is_repeated) self.assertEqual(proto_index.type, tf.int64) self.assertTrue(proto_index.is_leaf) self.assertEqual(proto_index.known_field_names(), frozenset()) self.assertEqual(proto_index_node.values.dtype, tf.int64) self.assertAllEqual([b"a", b"b", b"c", b"d"], leaf_node.values) 
self.assertAllEqual([0, 1, 1, 2], leaf_node.parent_index) self.assertAllEqual([0, 1, 1], proto_index_node.values) self.assertAllEqual([0, 1, 2], proto_index_node.parent_index) def test_create_proto_index_directly_reroot_at_action(self): sessions = [ """ event { action {} action {} } event {} event { action {} } """, "", """ event {} event { action {} action {} } event { } """ ] expr = proto_test_util.text_to_expression(sessions, test_pb2.Session) reroot_expr = expr.reroot("event.action") # Reroot with a depth > 1 (all the other cases are depth == 1) proto_index_directly_reroot_at_action = ( reroot_expr.create_proto_index("proto_index_directly_reroot_at_action") .get_child_or_error("proto_index_directly_reroot_at_action")) self.assertFalse(proto_index_directly_reroot_at_action.is_repeated) result = expression_test_util.calculate_value_slowly( proto_index_directly_reroot_at_action) self.assertAllEqual(result.parent_index, [0, 1, 2, 3, 4]) self.assertAllEqual(result.values, [0, 0, 0, 2, 2]) def test_create_proto_index_directly_reroot_at_action_sparse_dense(self): sessions = [ """ event { action {} action {} } event {} event { action {} } """, "", """ event {} event { action {} action {} } event { } """ ] expr = proto_test_util.text_to_expression(sessions, test_pb2.Session) reroot_expr = expr.reroot("event.action") # Reroot with a depth > 1 (all the other cases are depth == 1) [prensor_tree] = calculate.calculate_prensors([ reroot_expr.create_proto_index("proto_index_directly_reroot_at_action") ]) proto_index_node = prensor_tree.get_child_or_error( "proto_index_directly_reroot_at_action").node self.assertFalse(proto_index_node.is_repeated) sparse_tensors = prensor_tree.get_sparse_tensors() proto_index_directly_reroot_at_action = sparse_tensors[path.Path( ["proto_index_directly_reroot_at_action"])] dense_value = tf.sparse.to_dense( proto_index_directly_reroot_at_action) sparse_value = proto_index_directly_reroot_at_action self.assertAllEqual(sparse_value.values, [0, 0, 0, 2, 2]) self.assertAllEqual(sparse_value.indices, [[0], [1], [2], [3], [4]]) self.assertAllEqual(sparse_value.dense_shape, [5]) self.assertAllEqual(dense_value, [0, 0, 0, 2, 2]) if __name__ == "__main__": absltest.main()
google/struct2tensor
struct2tensor/expression_impl/reroot_test.py
Python
apache-2.0
6,436
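# A plain-Python picture of the bookkeeping the reroot tests above assert on:
# after rerooting at "event.action", proto_index records, for each new root
# row, which original Session proto it came from. The helper is illustrative
# only; the counts mirror the three-session fixture used in the tests.
def proto_index_after_reroot(actions_per_event_per_proto):
    values = []
    for proto_idx, events in enumerate(actions_per_event_per_proto):
        for action_count in events:
            values.extend([proto_idx] * action_count)
    return values

# proto 0: events with 2, 0, 1 actions; proto 1: empty; proto 2: 0, 2, 0.
assert proto_index_after_reroot([[2, 0, 1], [], [0, 2, 0]]) == [0, 0, 0, 2, 2]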
""" Model classes for AppDynamics REST API .. moduleauthor:: Todd Radel <[email protected]> """ from . import JsonObject, JsonList class ConfigVariable(JsonObject): """ Represents a controller configuration variable. The following attributes are defined: .. data:: name Variable name. .. data:: value Current value. .. data:: description Optional description of the variable. .. data:: updateable If :const:`True`, value can be changed. .. data:: scope Scope of the variable. The scope can be ``'cluster'`` or ``'local'``. Variables with cluster scope are replicated across HA controllers; local variables are not. """ FIELDS = { 'name': '', 'description': '', 'scope': '', 'updateable': '', 'value': '' } def __init__(self, name='', description='', scope='cluster', updateable=True, value=None): (self.name, self.description, self.scope, self.updateable, self.value) = (name, description, scope, updateable, value) class ConfigVariables(JsonList): """ Represents a collection of :class:`ConfigVariable` objects. Extends :class:`UserList`, so it supports the standard array index and :keyword:`for` semantics. """ def __init__(self, initial_list=None): super(ConfigVariables, self).__init__(ConfigVariable, initial_list) def __getitem__(self, i): """ :rtype: ConfigVariable """ return self.data[i] def by_name(self, name): """ Finds a config variable with the matching name. :param str name: Variable name to find. :return: The matching config variable. :rtype: appd.model.ConfigVariable """ found = [x for x in self.data if x.name == name] try: return found[0] except IndexError: raise KeyError(name)
tradel/AppDynamicsREST
appd/model/config_variable.py
Python
apache-2.0
2,002
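# A minimal usage sketch for the classes above. Appending directly to .data
# (the UserList backing store that by_name iterates) sidesteps whatever
# conversion JsonList applies to its initial_list, which lives in the base
# module and is assumed here.
variables = ConfigVariables()
variables.data.append(ConfigVariable(name="metrics.buffer.size", value="65536"))

print(variables.by_name("metrics.buffer.size").value)  # -> 65536
try:
    variables.by_name("no.such.variable")
except KeyError:
    print("unknown names raise KeyError")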
""" Knowledge combiners take: - Encoded representations of background facts related to the sentence - Attention weights over the background as a single tensor. These are then combined in some way to return a single representation of the background knowledge per sample. The simplest way for this to happen is simply taking a weighted average of the knowledge representations with respect to the attention weights. Input shapes: - (samples, knowledge_len, input_dim + 1) Output shape: - (samples, input_dim) """ from collections import OrderedDict from overrides import overrides from keras.engine import InputSpec from keras import backend as K from keras.layers import Layer from keras.layers.recurrent import GRU, _time_distributed_dense class WeightedAverageKnowledgeCombiner(Layer): ''' A WeightedAverageKnowledgeCombiner takes a tensor formed by prepending an attention mask onto an encoded representation of background knowledge. Here, we simply split off the attention mask and use it to take a weighted average of the background vectors. ''' def __init__(self, **kwargs): self.input_spec = [InputSpec(ndim=3)] self.name = kwargs.pop('name') # These parameters are passed for consistency with the # AttentiveGRUKnowlegeCombiner. They are not used here. kwargs.pop('output_dim') kwargs.pop('input_length') super(WeightedAverageKnowledgeCombiner, self).__init__(**kwargs) @overrides def call(self, inputs): attention = inputs[:, :, 0] # (samples, knowledge_length) inputs = inputs[:, :, 1:] # (samples, knowledge_length, word_dim) return K.sum(K.expand_dims(attention, 2) * inputs, 1) def compute_output_shape(self, input_shape): return (input_shape[0], input_shape[2] - 1) @overrides def get_config(self): config = { "output_dim": -1, "input_length": -1 } base_config = super(WeightedAverageKnowledgeCombiner, self).get_config() config.update(base_config) return config class AttentiveGRUKnowledgeCombiner(GRU): ''' GRUs typically operate over sequences of words. Here we are are operating over the background knowledge sentence representations as though they are a sequence (i.e. each background sentence has already been encoded into a single sentence representation). The motivation behind this encoding is that a weighted average loses ordering information in the background knowledge - for instance, this is important in the BABI tasks. See Dynamic Memory Networks for more information: https://arxiv.org/pdf/1603.01417v1.pdf. This class extends the Keras Gated Recurrent Unit by implementing a method which substitutes the GRU update gate (normally a vector, z - it is noted below where it is normally computed) for a scalar attention weight (one per input, such as from the output of a softmax over the input vectors), which is pre-computed. As mentioned above, instead of using word embedding sequences as input to the GRU, we are using sentence encoding sequences. The implementation of this class is subtle - it is only very slightly different from a standard GRU. When it is initialised, the Keras backend will call the build method. It uses this to check that inputs being passed to this function are the correct size, so we allow this to be the actual input size as normal. However, for the internal implementation, everywhere where this global shape is used, we override it to be one less, as we are passing in a tensor of shape (batch, knowledge_length, 1 + encoding_dim) as we are including the attention mask. Therefore, we need all of the weights to have shape (*, encoding_dim), NOT (*, 1 + encoding_dim). 
All of the below methods which are overridden use some form of this dimension, so we correct them. ''' def __init__(self, output_dim, input_length, **kwargs): self.name = kwargs.pop('name') super(AttentiveGRUKnowledgeCombiner, self).__init__(output_dim, input_length=input_length, input_dim=output_dim + 1, name=self.name, **kwargs) @overrides def step(self, inputs, states): # pylint: disable=invalid-name ''' The input to step is a tensor of shape (batch, 1 + encoding_dim), i.e. a timeslice of the input to this AttentiveGRU, where the time axis is the knowledge_length. Before we start, we strip off the attention from the beginning. Then we do the equations for a normal GRU, except we don't calculate the output gate z, substituting the attention weight for it instead. Note that there is some redundancy here - for instance, in the GPU mode, we do a larger matrix multiplication than required, as we don't use one part of it. However, for readability and similarity to the original GRU code in Keras, it has not been changed. In each section, there are commented out lines which contain code. If you were to uncomment these, remove the differences in the input size and replace the attention with the z gate at the output, you would have a standard GRU back again. We literally copied the Keras GRU code here, making some small modifications. ''' attention = inputs[:, 0] inputs = inputs[:, 1:] h_tm1 = states[0] # previous memory B_U = states[1] # dropout matrices for recurrent units B_W = states[2] if self.implementation == 2: matrix_x = K.dot(inputs * B_W[0], self.kernel) if self.use_bias: matrix_x = K.bias_add(matrix_x, self.bias) matrix_inner = K.dot(h_tm1 * B_U[0], self.recurrent_kernel[:, :2 * self.units]) x_r = matrix_x[:, self.units: 2 * self.units] inner_r = matrix_inner[:, self.units: 2 * self.units] # x_z = matrix_x[:, :self.units] # inner_z = matrix_inner[:, :self.units] # z = self.recurrent_activation(x_z + inner_z) r = self.recurrent_activation(x_r + inner_r) x_h = matrix_x[:, 2 * self.units:] inner_h = K.dot(r * h_tm1 * B_U[0], self.recurrent_kernel[:, 2 * self.units:]) hh = self.activation(x_h + inner_h) else: if self.implementation == 0: # x_z = inputs[:, :self.units] x_r = inputs[:, self.units: 2 * self.units] x_h = inputs[:, 2 * self.units:] elif self.implementation == 1: # x_z = K.dot(inputs * B_W[0], self.W_z) + self.b_z x_r = K.dot(inputs * B_W[1], self.kernel_r) x_h = K.dot(inputs * B_W[2], self.kernel_h) if self.use_bias: x_r = K.bias_add(x_r, self.bias_r) x_h = K.bias_add(x_h, self.bias_h) else: raise Exception('Unknown implementation') # z = self.recurrent_activation(x_z + K.dot(h_tm1 * B_U[0], self.U_z)) r = self.recurrent_activation(x_r + K.dot(h_tm1 * B_U[1], self.recurrent_kernel_r)) hh = self.activation(x_h + K.dot(r * h_tm1 * B_U[2], self.recurrent_kernel_h)) # Here is the KEY difference between a GRU and an AttentiveGRU. Instead of using # a learnt output gate (z), we use a scalar attention vector (batch, 1) for this # particular background knowledge vector. h = K.expand_dims(attention, 1) * hh + (1 - K.expand_dims(attention, 1)) * h_tm1 return h, [h] @overrides def build(self, input_shape): """ This is used by Keras to verify things, but also to build the weights. The only differences from the Keras GRU (which we copied exactly other than the below) are: - We generate weights with dimension input_dim[2] - 1, rather than dimension input_dim[2]. 
        - There are a few variables which are created in non-'gpu' modes which are not
          required, and actually raise errors in Theano if you include them in the
          trainable weights (as Theano will alert you if you try to compute a gradient
          of a loss wrt a constant). These are commented out but left in for clarity below.
        """
        new_input_shape = list(input_shape)
        new_input_shape[2] -= 1
        super(AttentiveGRUKnowledgeCombiner, self).build(tuple(new_input_shape))
        self.input_spec = [InputSpec(shape=input_shape)]

    @overrides
    def preprocess_input(self, inputs, training=None):
        '''
        We have to override this preprocessing step, because if we are using the cpu,
        we do the weight-input multiplications in the internals of the GRU as separate,
        smaller matrix multiplications and concatenate them after. Therefore, before
        this happens, we split off the attention and then add it back afterwards.
        '''
        if self.implementation == 0:
            attention = inputs[:, :, 0]  # Shape:(samples, knowledge_length)
            inputs = inputs[:, :, 1:]  # Shape:(samples, knowledge_length, word_dim)

            input_shape = self.input_spec[0].shape
            input_dim = input_shape[2] - 1
            timesteps = input_shape[1]

            x_z = _time_distributed_dense(inputs, self.kernel_z, self.bias_z,
                                          self.dropout, input_dim, self.units,
                                          timesteps, training=training)
            x_r = _time_distributed_dense(inputs, self.kernel_r, self.bias_r,
                                          self.dropout, input_dim, self.units,
                                          timesteps, training=training)
            x_h = _time_distributed_dense(inputs, self.kernel_h, self.bias_h,
                                          self.dropout, input_dim, self.units,
                                          timesteps, training=training)

            # Add attention back onto its original place.
            return K.concatenate([K.expand_dims(attention, 2), x_z, x_r, x_h], axis=2)
        else:
            return inputs


# The first item added here will be used as the default in some cases.
knowledge_combiners = OrderedDict()  # pylint: disable=invalid-name
knowledge_combiners["weighted_average"] = WeightedAverageKnowledgeCombiner
knowledge_combiners["attentive_gru"] = AttentiveGRUKnowledgeCombiner
RTHMaK/RPGOne
deep_qa-master/deep_qa/layers/knowledge_combiners.py
Python
apache-2.0
10,674
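# The combiners above reduce a (samples, knowledge_len, 1 + dim) tensor to
# (samples, dim). A NumPy sketch with made-up shapes: the first reduction is
# exactly what WeightedAverageKnowledgeCombiner.call computes, and the last
# line is the attention-gated update the AttentiveGRU step substitutes for
# the usual z gate.
import numpy as np

x = np.random.rand(2, 4, 1 + 3)          # (samples, knowledge_len, 1 + dim)
attention, background = x[:, :, 0], x[:, :, 1:]

weighted_avg = (attention[:, :, None] * background).sum(axis=1)  # (2, 3)

h_prev = np.zeros(3)   # previous hidden state
hh = np.ones(3)        # candidate state for one background item
a = 0.25               # that item's scalar attention weight
h = a * hh + (1 - a) * h_prev  # -> [0.25, 0.25, 0.25]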
# coding=utf-8 # Copyright 2022 The TensorFlow Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for tree_utils.""" import pytest from tensorflow_datasets.core.utils import tree_utils def test_tree_parallel_map(): assert tree_utils.parallel_map(lambda x: x * 10, { 'a': [1, 2, 3], 'b': [4, 5] }) == { 'a': [10, 20, 30], 'b': [40, 50] } def test_tree_parallel_map_reraise(): def fn(x): raise ValueError('Bad value') with pytest.raises(ValueError, match='Bad value'): tree_utils.parallel_map(fn, [1])
tensorflow/datasets
tensorflow_datasets/core/utils/tree_utils_test.py
Python
apache-2.0
1,072
# Copyright 2012 Kevin Minnick
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

"""
Record interface
"""

from dnsclient import base


class Record(base.Resource):
    """
    A record.
    """
    HUMAN_ID = False
    NAME_ATTR = 'name'

    def __repr__(self):
        return "<Record: %s>" % self.label

    def delete(self):
        self.manager.delete(self)


class RecordManager(base.ManagerWithFind):
    """
    Manage :class:`Record` resources.
    """
    resource_class = Record

    def list(self, domainId):
        """
        Get a list of all records for the domain.

        :rtype: list of :class:`Record`.
        """
        return self._list("/domains/%s/records" % base.getid(domainId), "records")

    def create(self, args, domainId):
        """
        Create a record in the dns system. The following parameters are
        required: type, name and data.

        :param type: str
        :param name: str
        :param ttl: int
        :param data: str
        :param priority: int
        :param comment: str
        :rtype: list of :class:`Record`
        """
        body = {
            "records": [
                {
                    "name": args.name,
                    "comment": args.comment,
                    "ttl": int(args.ttl),
                    "type": args.type,
                    "data": args.data,
                    "priority": args.priority
                }
            ]
        }

        url = '/domains/%s/records' % base.getid(domainId)

        if args.type == "PTR":
            url = '/rdns'
            body = {
                "recordsList": {
                    "records": [
                        {
                            "name": args.name,
                            "comment": args.comment,
                            "ttl": int(args.ttl),
                            "type": args.type,
                            "data": args.data
                        }
                    ]
                },
                "link": {
                    "content": "",
                    "href": args.server_href,
                    "rel": "cloudServersOpenStack"
                }
            }

        return self._create_async(url, body, return_raw=False, response_key="")

    def modify(self, args, domainId):
        """
        Modify a record in the dns system. The following parameters are
        required: recordId and name.

        :param record_id: str
        :param domain: str
        :param name: str
        :param ttl: int
        :param data: str
        :param priority: int
        :param comment: str
        :rtype: list of :class:`Record`
        """
        body = {
            "name": args.name,
            "comment": args.comment,
            "ttl": int(args.ttl),
            "data": args.data,
            "priority": args.priority
        }

        url = '/domains/%s/records/%s' % (base.getid(domainId), base.getid(args.record_id))

        if hasattr(args, 'type'):
            if args.type == "PTR":
                url = '/rdns'
                body = {
                    "recordsList": {
                        "records": [
                            {
                                "name": args.name,
                                "id": args.record_id,
                                "comment": args.comment,
                                "ttl": int(args.ttl),
                                "type": args.type,
                                "data": args.data
                            }
                        ]
                    },
                    "link": {
                        "content": "",
                        "href": args.server_href,
                        "rel": "cloudServersOpenStack"
                    }
                }

        return self._update(url, body, return_raw=False, response_key="")

    def delete(self, domainId, recordId):
        """
        Delete a specific record.

        :param domainId: The ID of the :class:`Domain` the record belongs to.
        :param recordId: The ID of the :class:`Record` to delete.
        """
        self._delete("/domains/%s/records/%s" % (base.getid(domainId), base.getid(recordId)))

    def rdns_list(self, href):
        """
        List all PTR records configured for the specified Cloud device.

        :param href: The href of the device to get.
        :rtype: :class:`Record`
        """
        return self._list("/rdns/cloudServersOpenStack?href=%s" % href, "records")

    def rdns_delete(self, href, ip):
        """
        Remove one or all PTR records associated with a Rackspace Cloud device.
        Use the optional ip query parameter to specify a specific record to
        delete. Omitting this parameter removes all PTR records associated
        with the specified device.

        :param href: The href of the device whose PTR records should be deleted.
        :param ip: The ip of the specific record to delete.
        """
        self._delete("/rdns/cloudServersOpenStack?href=%s&ip=%s" % (href, ip))
kwminnick/rackspace-dns-cli
dnsclient/v1_0/records.py
Python
apache-2.0
5,748
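# The managers above read fields off any object with attribute access, so an
# argparse Namespace works as 'args'. A sketch of the A-record body that
# create() assembles for non-PTR types (domain ID and values are placeholders):
from argparse import Namespace

args = Namespace(name="www.example.com", comment="web tier", ttl=3600,
                 type="A", data="192.0.2.10", priority=None)
body = {"records": [{"name": args.name, "comment": args.comment,
                     "ttl": int(args.ttl), "type": args.type,
                     "data": args.data, "priority": args.priority}]}
# RecordManager.create(args, domain_id) POSTs this to /domains/<id>/records.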
from django.core.mail import send_mail
from django.core.handlers.base import BaseHandler
from django.test.client import RequestFactory, FakePayload

try:
    # Python 3.3+ moved the ABCs to collections.abc; fall back for older versions.
    from collections.abc import Iterable, Mapping
except ImportError:
    from collections import Iterable, Mapping


#####################################################
###   Utility functions

def flat_attr(attrs, level=0):
    '''Flattens the attribute map to a string ready to be put into a start tag.
       The map can have embedded maps and/or lists, such as a style attribute
       with multiple items.'''
    if attrs is None:
        return ''
    elif isinstance(attrs, str):
        return attrs
    elif isinstance(attrs, Mapping) and level == 0:  # dict
        return ' '.join('%s="%s"' % (k, flat_attr(v, level + 1)) for k, v in attrs.items() if v)
    elif isinstance(attrs, Mapping) and level > 0:  # dict
        return ' '.join('%s: %s;' % (k, flat_attr(v, level + 1)) for k, v in attrs.items() if v)
    elif isinstance(attrs, Iterable):  # list
        return ' '.join(flat_attr(v, level + 1) for v in attrs if v)
    else:
        return str(attrs)


########################################################
###   Helper methods for running celery tasks

JSON_SERIALIZABLE = (dict, list, tuple, str, bytes, int, float, bool, type(None))
BODY_KEY = 'island_body_cached'


def get_fake_request(meta):
    '''Retrieves a fake request using the given request.META.
       This allows celery tasks to have a "request" to use in code.'''
    # if the body was cached in the meta, put it back as the wsgi.input
    if BODY_KEY in meta:
        meta['wsgi.input'] = FakePayload(meta[BODY_KEY])

    # create a basic request using the Django testing framework
    request = RequestFactory().request(**meta)

    # run middleware on it
    handler = BaseHandler()
    handler.load_middleware()
    for middleware_method in handler._request_middleware:
        response = middleware_method(request)
        if response:
            raise Exception("Middleware cannot return a response with a FakeRequest.")

    # return the request
    return request


def prepare_fake_meta(request, include_body=False):
    '''Removes any values in the dictionary that can't be serialized.
       This is done in preparation for sending the request.META to a celery task.'''
    if request is None:
        return {}
    meta = dict([(k, v) for k, v in request.META.items() if isinstance(v, JSON_SERIALIZABLE)])
    # save the body so we can make it the input when getting the fake request
    if include_body and request.body:
        meta[BODY_KEY] = request.body
    return meta
lanesawyer/island
lib/__init__.py
Python
apache-2.0
2,512
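# A worked example for flat_attr above: nested maps become style declarations,
# lists become space-joined tokens, and falsy values are dropped. Key order
# follows dict iteration order.
attrs = {
    "class": ["btn", "btn-primary"],
    "style": {"color": "red", "display": None},
    "id": "save",
}
print(flat_attr(attrs))
# -> class="btn btn-primary" style="color: red;" id="save"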
# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2011 Justin Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Handles all processes relating to instances (guest vms). The :py:class:`ComputeManager` class is a :py:class:`nova.manager.Manager` that handles RPC calls relating to creating instances. It is responsible for building a disk image, launching it via the underlying virtualization driver, responding to calls to check its state, attaching persistent storage, and terminating it. """ import base64 import contextlib import functools import socket import sys import time import traceback import uuid from cinderclient import exceptions as cinder_exception import eventlet.event from eventlet import greenthread import eventlet.semaphore import eventlet.timeout from keystoneclient import exceptions as keystone_exception from oslo_config import cfg import oslo_messaging as messaging from oslo_serialization import jsonutils from oslo_utils import excutils from oslo_utils import strutils from oslo_utils import timeutils import six from nova import block_device from nova.cells import rpcapi as cells_rpcapi from nova.cloudpipe import pipelib from nova import compute from nova.compute import build_results from nova.compute import power_state from nova.compute import resource_tracker from nova.compute import rpcapi as compute_rpcapi from nova.compute import task_states from nova.compute import utils as compute_utils from nova.compute import vm_states from nova import conductor from nova import consoleauth import nova.context from nova import exception from nova import hooks from nova.i18n import _ from nova.i18n import _LE from nova.i18n import _LI from nova.i18n import _LW from nova import image from nova.image import glance from nova import manager from nova import network from nova.network import model as network_model from nova.network.security_group import openstack_driver from nova import objects from nova.objects import base as obj_base from nova.openstack.common import log as logging from nova.openstack.common import periodic_task from nova import paths from nova import rpc from nova import safe_utils from nova.scheduler import rpcapi as scheduler_rpcapi from nova import utils from nova.virt import block_device as driver_block_device from nova.virt import driver from nova.virt import event as virtevent from nova.virt import storage_users from nova.virt import virtapi from nova import volume from nova.volume import encryptors compute_opts = [ cfg.StrOpt('console_host', default=socket.gethostname(), help='Console proxy host to use to connect ' 'to instances on this host.'), cfg.StrOpt('default_access_ip_network_name', help='Name of network to use to set access IPs for instances'), cfg.BoolOpt('defer_iptables_apply', default=False, help='Whether to batch up the application of IPTables rules' ' during a host restart and apply all at the end of the' ' init phase'), 
cfg.StrOpt('instances_path', default=paths.state_path_def('instances'), help='Where instances are stored on disk'), cfg.BoolOpt('instance_usage_audit', default=False, help="Generate periodic compute.instance.exists" " notifications"), cfg.IntOpt('live_migration_retry_count', default=30, help="Number of 1 second retries needed in live_migration"), cfg.BoolOpt('resume_guests_state_on_host_boot', default=False, help='Whether to start guests that were running before the ' 'host rebooted'), cfg.IntOpt('network_allocate_retries', default=0, help="Number of times to retry network allocation on failures"), cfg.IntOpt('max_concurrent_builds', default=10, help='Maximum number of instance builds to run concurrently'), cfg.IntOpt('block_device_allocate_retries', default=60, help='Number of times to retry block device' ' allocation on failures') ] interval_opts = [ cfg.IntOpt('bandwidth_poll_interval', default=600, help='Interval to pull network bandwidth usage info. Not ' 'supported on all hypervisors. Set to -1 to disable. ' 'Setting this to 0 will run at the default rate.'), cfg.IntOpt('sync_power_state_interval', default=600, help='Interval to sync power states between the database and ' 'the hypervisor. Set to -1 to disable. ' 'Setting this to 0 will run at the default rate.'), cfg.IntOpt("heal_instance_info_cache_interval", default=60, help="Number of seconds between instance info_cache self " "healing updates"), cfg.IntOpt('reclaim_instance_interval', default=0, help='Interval in seconds for reclaiming deleted instances'), cfg.IntOpt('volume_usage_poll_interval', default=0, help='Interval in seconds for gathering volume usages'), cfg.IntOpt('shelved_poll_interval', default=3600, help='Interval in seconds for polling shelved instances to ' 'offload. Set to -1 to disable.' 'Setting this to 0 will run at the default rate.'), cfg.IntOpt('shelved_offload_time', default=0, help='Time in seconds before a shelved instance is eligible ' 'for removing from a host. -1 never offload, 0 offload ' 'when shelved'), cfg.IntOpt('instance_delete_interval', default=300, help='Interval in seconds for retrying failed instance file ' 'deletes. Set to -1 to disable. ' 'Setting this to 0 will run at the default rate.'), cfg.IntOpt('block_device_allocate_retries_interval', default=3, help='Waiting time interval (seconds) between block' ' device allocation retries on failures') ] timeout_opts = [ cfg.IntOpt("reboot_timeout", default=0, help="Automatically hard reboot an instance if it has been " "stuck in a rebooting state longer than N seconds. " "Set to 0 to disable."), cfg.IntOpt("instance_build_timeout", default=0, help="Amount of time in seconds an instance can be in BUILD " "before going into ERROR status. " "Set to 0 to disable."), cfg.IntOpt("rescue_timeout", default=0, help="Automatically unrescue an instance after N seconds. " "Set to 0 to disable."), cfg.IntOpt("resize_confirm_window", default=0, help="Automatically confirm resizes after N seconds. " "Set to 0 to disable."), cfg.IntOpt("shutdown_timeout", default=60, help="Total amount of time to wait in seconds for an instance " "to perform a clean shutdown."), ] running_deleted_opts = [ cfg.StrOpt("running_deleted_instance_action", default="reap", help="Action to take if a running deleted instance is detected." " Valid options are 'noop', 'log', 'shutdown', or 'reap'. 
" "Set to 'noop' to take no action."), cfg.IntOpt("running_deleted_instance_poll_interval", default=1800, help="Number of seconds to wait between runs of the cleanup " "task."), cfg.IntOpt("running_deleted_instance_timeout", default=0, help="Number of seconds after being deleted when a running " "instance should be considered eligible for cleanup."), ] instance_cleaning_opts = [ cfg.IntOpt('maximum_instance_delete_attempts', default=5, help='The number of times to attempt to reap an instance\'s ' 'files.'), ] CONF = cfg.CONF CONF.register_opts(compute_opts) CONF.register_opts(interval_opts) CONF.register_opts(timeout_opts) CONF.register_opts(running_deleted_opts) CONF.register_opts(instance_cleaning_opts) CONF.import_opt('allow_resize_to_same_host', 'nova.compute.api') CONF.import_opt('console_topic', 'nova.console.rpcapi') CONF.import_opt('host', 'nova.netconf') CONF.import_opt('my_ip', 'nova.netconf') CONF.import_opt('vnc_enabled', 'nova.vnc') CONF.import_opt('enabled', 'nova.spice', group='spice') CONF.import_opt('enable', 'nova.cells.opts', group='cells') CONF.import_opt('image_cache_subdirectory_name', 'nova.virt.imagecache') CONF.import_opt('image_cache_manager_interval', 'nova.virt.imagecache') CONF.import_opt('enabled', 'nova.rdp', group='rdp') CONF.import_opt('html5_proxy_base_url', 'nova.rdp', group='rdp') CONF.import_opt('enabled', 'nova.console.serial', group='serial_console') CONF.import_opt('base_url', 'nova.console.serial', group='serial_console') LOG = logging.getLogger(__name__) get_notifier = functools.partial(rpc.get_notifier, service='compute') wrap_exception = functools.partial(exception.wrap_exception, get_notifier=get_notifier) @utils.expects_func_args('migration') def errors_out_migration(function): """Decorator to error out migration on failure.""" @functools.wraps(function) def decorated_function(self, context, *args, **kwargs): try: return function(self, context, *args, **kwargs) except Exception: with excutils.save_and_reraise_exception(): migration = kwargs['migration'] status = migration.status if status not in ['migrating', 'post-migrating']: return migration.status = 'error' try: with migration.obj_as_admin(): migration.save() except Exception: LOG.debug('Error setting migration status ' 'for instance %s.', migration.instance_uuid, exc_info=True) return decorated_function @utils.expects_func_args('instance') def reverts_task_state(function): """Decorator to revert task_state on failure.""" @functools.wraps(function) def decorated_function(self, context, *args, **kwargs): try: return function(self, context, *args, **kwargs) except exception.UnexpectedTaskStateError as e: # Note(maoy): unexpected task state means the current # task is preempted. Do not clear task state in this # case. with excutils.save_and_reraise_exception(): LOG.info(_LI("Task possibly preempted: %s"), e.format_message()) except Exception: with excutils.save_and_reraise_exception(): try: self._instance_update(context, kwargs['instance']['uuid'], task_state=None) except Exception: pass return decorated_function @utils.expects_func_args('instance') def wrap_instance_fault(function): """Wraps a method to catch exceptions related to instances. This decorator wraps a method to catch any exceptions having to do with an instance that may get thrown. It then logs an instance fault in the db. 
""" @functools.wraps(function) def decorated_function(self, context, *args, **kwargs): try: return function(self, context, *args, **kwargs) except exception.InstanceNotFound: raise except Exception as e: # NOTE(gtt): If argument 'instance' is in args rather than kwargs, # we will get a KeyError exception which will cover up the real # exception. So, we update kwargs with the values from args first. # then, we can get 'instance' from kwargs easily. kwargs.update(dict(zip(function.func_code.co_varnames[2:], args))) with excutils.save_and_reraise_exception(): compute_utils.add_instance_fault_from_exc(context, kwargs['instance'], e, sys.exc_info()) return decorated_function @utils.expects_func_args('instance') def wrap_instance_event(function): """Wraps a method to log the event taken on the instance, and result. This decorator wraps a method to log the start and result of an event, as part of an action taken on an instance. """ @functools.wraps(function) def decorated_function(self, context, *args, **kwargs): wrapped_func = utils.get_wrapped_function(function) keyed_args = safe_utils.getcallargs(wrapped_func, context, *args, **kwargs) instance_uuid = keyed_args['instance']['uuid'] event_name = 'compute_{0}'.format(function.func_name) with compute_utils.EventReporter(context, event_name, instance_uuid): return function(self, context, *args, **kwargs) return decorated_function @utils.expects_func_args('image_id', 'instance') def delete_image_on_error(function): """Used for snapshot related method to ensure the image created in compute.api is deleted when an error occurs. """ @functools.wraps(function) def decorated_function(self, context, image_id, instance, *args, **kwargs): try: return function(self, context, image_id, instance, *args, **kwargs) except Exception: with excutils.save_and_reraise_exception(): LOG.debug("Cleaning up image %s", image_id, exc_info=True, instance=instance) try: self.image_api.delete(context, image_id) except Exception: LOG.exception(_LE("Error while trying to clean up " "image %s"), image_id, instance=instance) return decorated_function # TODO(danms): Remove me after Icehouse # NOTE(mikal): if the method being decorated has more than one decorator, then # put this one first. Otherwise the various exception handling decorators do # not function correctly. def object_compat(function): """Wraps a method that expects a new-world instance This provides compatibility for callers passing old-style dict instances. 
""" @functools.wraps(function) def decorated_function(self, context, *args, **kwargs): def _load_instance(instance_or_dict): if isinstance(instance_or_dict, dict): instance = objects.Instance._from_db_object( context, objects.Instance(), instance_or_dict, expected_attrs=metas) instance._context = context return instance return instance_or_dict metas = ['metadata', 'system_metadata'] try: kwargs['instance'] = _load_instance(kwargs['instance']) except KeyError: args = (_load_instance(args[0]),) + args[1:] migration = kwargs.get('migration') if isinstance(migration, dict): migration = objects.Migration._from_db_object( context.elevated(), objects.Migration(), migration) kwargs['migration'] = migration return function(self, context, *args, **kwargs) return decorated_function # TODO(danms): Remove me after Icehouse def aggregate_object_compat(function): """Wraps a method that expects a new-world aggregate.""" @functools.wraps(function) def decorated_function(self, context, *args, **kwargs): aggregate = kwargs.get('aggregate') if isinstance(aggregate, dict): aggregate = objects.Aggregate._from_db_object( context.elevated(), objects.Aggregate(), aggregate) kwargs['aggregate'] = aggregate return function(self, context, *args, **kwargs) return decorated_function class InstanceEvents(object): def __init__(self): self._events = {} @staticmethod def _lock_name(instance): return '%s-%s' % (instance.uuid, 'events') def prepare_for_instance_event(self, instance, event_name): """Prepare to receive an event for an instance. This will register an event for the given instance that we will wait on later. This should be called before initiating whatever action will trigger the event. The resulting eventlet.event.Event object should be wait()'d on to ensure completion. :param instance: the instance for which the event will be generated :param event_name: the name of the event we're expecting :returns: an event object that should be wait()'d on """ @utils.synchronized(self._lock_name(instance)) def _create_or_get_event(): if instance.uuid not in self._events: self._events.setdefault(instance.uuid, {}) return self._events[instance.uuid].setdefault( event_name, eventlet.event.Event()) LOG.debug('Preparing to wait for external event %(event)s', {'event': event_name}, instance=instance) return _create_or_get_event() def pop_instance_event(self, instance, event): """Remove a pending event from the wait list. This will remove a pending event from the wait list so that it can be used to signal the waiters to wake up. 
:param instance: the instance for which the event was generated :param event: the nova.objects.external_event.InstanceExternalEvent that describes the event :returns: the eventlet.event.Event object on which the waiters are blocked """ no_events_sentinel = object() no_matching_event_sentinel = object() @utils.synchronized(self._lock_name(instance)) def _pop_event(): events = self._events.get(instance.uuid) if not events: return no_events_sentinel _event = events.pop(event.key, None) if not events: del self._events[instance.uuid] if _event is None: return no_matching_event_sentinel return _event result = _pop_event() if result == no_events_sentinel: LOG.debug('No waiting events found dispatching %(event)s', {'event': event.key}, instance=instance) return None elif result == no_matching_event_sentinel: LOG.debug('No event matching %(event)s in %(events)s', {'event': event.key, 'events': self._events.get(instance.uuid, {}).keys()}, instance=instance) return None else: return result def clear_events_for_instance(self, instance): """Remove all pending events for an instance. This will remove all events currently pending for an instance and return them (indexed by event name). :param instance: the instance for which events should be purged :returns: a dictionary of {event_name: eventlet.event.Event} """ @utils.synchronized(self._lock_name(instance)) def _clear_events(): # NOTE(danms): Use getitem syntax for the instance until # all the callers are using objects return self._events.pop(instance['uuid'], {}) return _clear_events() class ComputeVirtAPI(virtapi.VirtAPI): def __init__(self, compute): super(ComputeVirtAPI, self).__init__() self._compute = compute def provider_fw_rule_get_all(self, context): return self._compute.conductor_api.provider_fw_rule_get_all(context) def _default_error_callback(self, event_name, instance): raise exception.NovaException(_('Instance event failed')) @contextlib.contextmanager def wait_for_instance_event(self, instance, event_names, deadline=300, error_callback=None): """Plan to wait for some events, run some code, then wait. This context manager will first create plans to wait for the provided event_names, yield, and then wait for all the scheduled events to complete. Note that this uses an eventlet.timeout.Timeout to bound the operation, so callers should be prepared to catch that failure and handle that situation appropriately. If the event is not received by the specified timeout deadline, eventlet.timeout.Timeout is raised. If the event is received but did not have a 'completed' status, a NovaException is raised. If an error_callback is provided, instead of raising an exception as detailed above for the failure case, the callback will be called with the event_name and instance, and can return True to continue waiting for the rest of the events, False to stop processing, or raise an exception which will bubble up to the waiter. :param instance: The instance for which an event is expected :param event_names: A list of event names. Each element can be a string event name or tuple of strings to indicate (name, tag). :param deadline: Maximum number of seconds we should wait for all of the specified events to arrive. 
:param error_callback: A function to be called if an event arrives """ if error_callback is None: error_callback = self._default_error_callback events = {} for event_name in event_names: if isinstance(event_name, tuple): name, tag = event_name event_name = objects.InstanceExternalEvent.make_key( name, tag) events[event_name] = ( self._compute.instance_events.prepare_for_instance_event( instance, event_name)) yield with eventlet.timeout.Timeout(deadline): for event_name, event in events.items(): actual_event = event.wait() if actual_event.status == 'completed': continue decision = error_callback(event_name, instance) if decision is False: break class ComputeManager(manager.Manager): """Manages the running instances from creation to destruction.""" target = messaging.Target(version='3.38') # How long to wait in seconds before re-issuing a shutdown # signal to a instance during power off. The overall # time to wait is set by CONF.shutdown_timeout. SHUTDOWN_RETRY_INTERVAL = 10 def __init__(self, compute_driver=None, *args, **kwargs): """Load configuration options and connect to the hypervisor.""" self.virtapi = ComputeVirtAPI(self) self.network_api = network.API() self.volume_api = volume.API() self.image_api = image.API() self._last_host_check = 0 self._last_bw_usage_poll = 0 self._bw_usage_supported = True self._last_bw_usage_cell_update = 0 self.compute_api = compute.API() self.compute_rpcapi = compute_rpcapi.ComputeAPI() self.conductor_api = conductor.API() self.compute_task_api = conductor.ComputeTaskAPI() self.is_neutron_security_groups = ( openstack_driver.is_neutron_security_groups()) self.consoleauth_rpcapi = consoleauth.rpcapi.ConsoleAuthAPI() self.cells_rpcapi = cells_rpcapi.CellsAPI() self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI() self._resource_tracker_dict = {} self.instance_events = InstanceEvents() self._sync_power_pool = eventlet.GreenPool() self._syncs_in_progress = {} if CONF.max_concurrent_builds != 0: self._build_semaphore = eventlet.semaphore.Semaphore( CONF.max_concurrent_builds) else: self._build_semaphore = compute_utils.UnlimitedSemaphore() super(ComputeManager, self).__init__(service_name="compute", *args, **kwargs) # NOTE(russellb) Load the driver last. It may call back into the # compute manager via the virtapi, so we want it to be fully # initialized before that happens. 
self.driver = driver.load_compute_driver(self.virtapi, compute_driver) self.use_legacy_block_device_info = \ self.driver.need_legacy_block_device_info def _get_resource_tracker(self, nodename): rt = self._resource_tracker_dict.get(nodename) if not rt: if not self.driver.node_is_available(nodename): raise exception.NovaException( _("%s is not a valid node managed by this " "compute host.") % nodename) rt = resource_tracker.ResourceTracker(self.host, self.driver, nodename) self._resource_tracker_dict[nodename] = rt return rt def _update_resource_tracker(self, context, instance): """Let the resource tracker know that an instance has changed state.""" if (instance['host'] == self.host and self.driver.node_is_available(instance['node'])): rt = self._get_resource_tracker(instance.get('node')) rt.update_usage(context, instance) def _instance_update(self, context, instance_uuid, **kwargs): """Update an instance in the database using kwargs as value.""" instance_ref = self.conductor_api.instance_update(context, instance_uuid, **kwargs) self._update_resource_tracker(context, instance_ref) return instance_ref def _set_instance_error_state(self, context, instance): instance_uuid = instance['uuid'] try: self._instance_update(context, instance_uuid, vm_state=vm_states.ERROR) except exception.InstanceNotFound: LOG.debug('Instance has been destroyed from under us while ' 'trying to set it to ERROR', instance_uuid=instance_uuid) def _set_instance_obj_error_state(self, context, instance): try: instance.vm_state = vm_states.ERROR instance.save() except exception.InstanceNotFound: LOG.debug('Instance has been destroyed from under us while ' 'trying to set it to ERROR', instance=instance) def _get_instances_on_driver(self, context, filters=None): """Return a list of instance records for the instances found on the hypervisor which satisfy the specified filters. If filters=None return a list of instance records for all the instances found on the hypervisor. """ if not filters: filters = {} try: driver_uuids = self.driver.list_instance_uuids() if len(driver_uuids) == 0: # Short circuit, don't waste a DB call return objects.InstanceList() filters['uuid'] = driver_uuids local_instances = objects.InstanceList.get_by_filters( context, filters, use_slave=True) return local_instances except NotImplementedError: pass # The driver doesn't support uuids listing, so we'll have # to brute force. driver_instances = self.driver.list_instances() instances = objects.InstanceList.get_by_filters(context, filters, use_slave=True) name_map = {instance.name: instance for instance in instances} local_instances = [] for driver_instance in driver_instances: instance = name_map.get(driver_instance) if not instance: continue local_instances.append(instance) return local_instances def _destroy_evacuated_instances(self, context): """Destroys evacuated instances. While nova-compute was down, the instances running on it could be evacuated to another host. Check that the instances reported by the driver are still associated with this host. If they are not, destroy them, with the exception of instances which are in the MIGRATING, RESIZE_MIGRATING, RESIZE_MIGRATED, RESIZE_FINISH task state or RESIZED vm state. 
""" our_host = self.host filters = {'deleted': False} local_instances = self._get_instances_on_driver(context, filters) for instance in local_instances: if instance.host != our_host: if (instance.task_state in [task_states.MIGRATING, task_states.RESIZE_MIGRATING, task_states.RESIZE_MIGRATED, task_states.RESIZE_FINISH] or instance.vm_state in [vm_states.RESIZED]): LOG.debug('Will not delete instance as its host (' '%(instance_host)s) is not equal to our ' 'host (%(our_host)s) but its task state is ' '(%(task_state)s) and vm state is ' '(%(vm_state)s)', {'instance_host': instance.host, 'our_host': our_host, 'task_state': instance.task_state, 'vm_state': instance.vm_state}, instance=instance) continue LOG.info(_LI('Deleting instance as its host (' '%(instance_host)s) is not equal to our ' 'host (%(our_host)s).'), {'instance_host': instance.host, 'our_host': our_host}, instance=instance) try: network_info = self._get_instance_nw_info(context, instance) bdi = self._get_instance_block_device_info(context, instance) destroy_disks = not (self._is_instance_storage_shared( context, instance)) except exception.InstanceNotFound: network_info = network_model.NetworkInfo() bdi = {} LOG.info(_LI('Instance has been marked deleted already, ' 'removing it from the hypervisor.'), instance=instance) # always destroy disks if the instance was deleted destroy_disks = True self.driver.destroy(context, instance, network_info, bdi, destroy_disks) def _is_instance_storage_shared(self, context, instance): shared_storage = True data = None try: data = self.driver.check_instance_shared_storage_local(context, instance) if data: shared_storage = (self.compute_rpcapi. check_instance_shared_storage(context, instance, data)) except NotImplementedError: LOG.warning(_LW('Hypervisor driver does not support ' 'instance shared storage check, ' 'assuming it\'s not on shared storage'), instance=instance) shared_storage = False except Exception: LOG.exception(_LE('Failed to check if instance shared'), instance=instance) finally: if data: self.driver.check_instance_shared_storage_cleanup(context, data) return shared_storage def _complete_partial_deletion(self, context, instance): """Complete deletion for instances in DELETED status but not marked as deleted in the DB """ instance.destroy() bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( context, instance.uuid) quotas = objects.Quotas(context) project_id, user_id = objects.quotas.ids_from_instance(context, instance) quotas.reserve(context, project_id=project_id, user_id=user_id, instances=-1, cores=-instance.vcpus, ram=-instance.memory_mb) self._complete_deletion(context, instance, bdms, quotas, instance.system_metadata) def _complete_deletion(self, context, instance, bdms, quotas, system_meta): if quotas: quotas.commit() # ensure block device mappings are not leaked for bdm in bdms: bdm.destroy() self._notify_about_instance_usage(context, instance, "delete.end", system_metadata=system_meta) if CONF.vnc_enabled or CONF.spice.enabled: if CONF.cells.enable: self.cells_rpcapi.consoleauth_delete_tokens(context, instance.uuid) else: self.consoleauth_rpcapi.delete_tokens_for_instance(context, instance.uuid) def _init_instance(self, context, instance): '''Initialize this instance during service init.''' # Instances that are shut down, or in an error state can not be # initialized and are not attempted to be recovered. The exception # to this are instances that are in RESIZE_MIGRATING or DELETING, # which are dealt with further down. 
if (instance.vm_state == vm_states.SOFT_DELETED or (instance.vm_state == vm_states.ERROR and instance.task_state not in (task_states.RESIZE_MIGRATING, task_states.DELETING))): LOG.debug("Instance is in %s state.", instance.vm_state, instance=instance) return if instance.vm_state == vm_states.DELETED: try: self._complete_partial_deletion(context, instance) except Exception: # we don't want that an exception blocks the init_host msg = _LE('Failed to complete a deletion') LOG.exception(msg, instance=instance) return if (instance.vm_state == vm_states.BUILDING or instance.task_state in [task_states.SCHEDULING, task_states.BLOCK_DEVICE_MAPPING, task_states.NETWORKING, task_states.SPAWNING]): # NOTE(dave-mcnally) compute stopped before instance was fully # spawned so set to ERROR state. This is safe to do as the state # may be set by the api but the host is not so if we get here the # instance has already been scheduled to this particular host. LOG.debug("Instance failed to spawn correctly, " "setting to ERROR state", instance=instance) instance.task_state = None instance.vm_state = vm_states.ERROR instance.save() return if (instance.vm_state in [vm_states.ACTIVE, vm_states.STOPPED] and instance.task_state in [task_states.REBUILDING, task_states.REBUILD_BLOCK_DEVICE_MAPPING, task_states.REBUILD_SPAWNING]): # NOTE(jichenjc) compute stopped before instance was fully # spawned so set to ERROR state. This is consistent to BUILD LOG.debug("Instance failed to rebuild correctly, " "setting to ERROR state", instance=instance) instance.task_state = None instance.vm_state = vm_states.ERROR instance.save() return if (instance.vm_state != vm_states.ERROR and instance.task_state in [task_states.IMAGE_SNAPSHOT_PENDING, task_states.IMAGE_PENDING_UPLOAD, task_states.IMAGE_UPLOADING, task_states.IMAGE_SNAPSHOT]): LOG.debug("Instance in transitional state %s at start-up " "clearing task state", instance.task_state, instance=instance) try: self._post_interrupted_snapshot_cleanup(context, instance) except Exception: # we don't want that an exception blocks the init_host msg = _LE('Failed to cleanup snapshot.') LOG.exception(msg, instance=instance) instance.task_state = None instance.save() if instance.task_state == task_states.DELETING: try: LOG.info(_LI('Service started deleting the instance during ' 'the previous run, but did not finish. Restarting' ' the deletion now.'), instance=instance) instance.obj_load_attr('metadata') instance.obj_load_attr('system_metadata') bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( context, instance.uuid) # FIXME(comstud): This needs fixed. We should be creating # reservations and updating quotas, because quotas # wouldn't have been updated for this instance since it is # still in DELETING. See bug 1296414. # # Create a dummy quota object for now. 
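                # A Quotas object built from empty reservations makes
                # commit() and rollback() no-ops, so the shared
                # _delete_instance path can run unchanged without touching
                # quota usage here.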
quotas = objects.Quotas.from_reservations( context, None, instance=instance) self._delete_instance(context, instance, bdms, quotas) except Exception: # we don't want that an exception blocks the init_host msg = _LE('Failed to complete a deletion') LOG.exception(msg, instance=instance) self._set_instance_error_state(context, instance) return try_reboot, reboot_type = self._retry_reboot(context, instance) current_power_state = self._get_power_state(context, instance) if try_reboot: LOG.debug("Instance in transitional state (%(task_state)s) at " "start-up and power state is (%(power_state)s), " "triggering reboot", {'task_state': instance.task_state, 'power_state': current_power_state}, instance=instance) self.compute_rpcapi.reboot_instance(context, instance, block_device_info=None, reboot_type=reboot_type) return elif (current_power_state == power_state.RUNNING and instance.task_state in [task_states.REBOOT_STARTED, task_states.REBOOT_STARTED_HARD, task_states.PAUSING, task_states.UNPAUSING]): LOG.warning(_LW("Instance in transitional state " "(%(task_state)s) at start-up and power state " "is (%(power_state)s), clearing task state"), {'task_state': instance.task_state, 'power_state': current_power_state}, instance=instance) instance.task_state = None instance.vm_state = vm_states.ACTIVE instance.save() elif (current_power_state == power_state.PAUSED and instance.task_state == task_states.UNPAUSING): LOG.warning(_LW("Instance in transitional state " "(%(task_state)s) at start-up and power state " "is (%(power_state)s), clearing task state " "and unpausing the instance"), {'task_state': instance.task_state, 'power_state': current_power_state}, instance=instance) try: self.unpause_instance(context, instance) except NotImplementedError: # Some virt driver didn't support pause and unpause pass except Exception: LOG.exception(_LE('Failed to unpause instance'), instance=instance) return if instance.task_state == task_states.POWERING_OFF: try: LOG.debug("Instance in transitional state %s at start-up " "retrying stop request", instance.task_state, instance=instance) self.stop_instance(context, instance) except Exception: # we don't want that an exception blocks the init_host msg = _LE('Failed to stop instance') LOG.exception(msg, instance=instance) return if instance.task_state == task_states.POWERING_ON: try: LOG.debug("Instance in transitional state %s at start-up " "retrying start request", instance.task_state, instance=instance) self.start_instance(context, instance) except Exception: # we don't want that an exception blocks the init_host msg = _LE('Failed to start instance') LOG.exception(msg, instance=instance) return net_info = compute_utils.get_nw_info_for_instance(instance) try: self.driver.plug_vifs(instance, net_info) except NotImplementedError as e: LOG.debug(e, instance=instance) except exception.VirtualInterfacePlugException: # we don't want an exception to block the init_host LOG.exception(_LE("Vifs plug failed"), instance=instance) self._set_instance_error_state(context, instance) return if instance.task_state == task_states.RESIZE_MIGRATING: # We crashed during resize/migration, so roll back for safety try: # NOTE(mriedem): check old_vm_state for STOPPED here, if it's # not in system_metadata we default to True for backwards # compatibility power_on = (instance.system_metadata.get('old_vm_state') != vm_states.STOPPED) block_dev_info = self._get_instance_block_device_info(context, instance) self.driver.finish_revert_migration(context, instance, net_info, block_dev_info, power_on) 
except Exception as e: LOG.exception(_LE('Failed to revert crashed migration'), instance=instance) finally: LOG.info(_LI('Instance found in migrating state during ' 'startup. Resetting task_state'), instance=instance) instance.task_state = None instance.save() if instance.task_state == task_states.MIGRATING: # Live migration did not complete, but instance is on this # host, so reset the state. instance.task_state = None instance.save(expected_task_state=[task_states.MIGRATING]) db_state = instance.power_state drv_state = self._get_power_state(context, instance) expect_running = (db_state == power_state.RUNNING and drv_state != db_state) LOG.debug('Current state is %(drv_state)s, state in DB is ' '%(db_state)s.', {'drv_state': drv_state, 'db_state': db_state}, instance=instance) if expect_running and CONF.resume_guests_state_on_host_boot: LOG.info(_LI('Rebooting instance after nova-compute restart.'), instance=instance) block_device_info = \ self._get_instance_block_device_info(context, instance) try: self.driver.resume_state_on_host_boot( context, instance, net_info, block_device_info) except NotImplementedError: LOG.warning(_LW('Hypervisor driver does not support ' 'resume guests'), instance=instance) except Exception: # NOTE(vish): The instance failed to resume, so we set the # instance to error and attempt to continue. LOG.warning(_LW('Failed to resume instance'), instance=instance) self._set_instance_error_state(context, instance) elif drv_state == power_state.RUNNING: # VMwareAPI drivers will raise an exception try: self.driver.ensure_filtering_rules_for_instance( instance, net_info) except NotImplementedError: LOG.warning(_LW('Hypervisor driver does not support ' 'firewall rules'), instance=instance) def _retry_reboot(self, context, instance): current_power_state = self._get_power_state(context, instance) current_task_state = instance.task_state retry_reboot = False reboot_type = compute_utils.get_reboot_type(current_task_state, current_power_state) pending_soft = (current_task_state == task_states.REBOOT_PENDING and instance.vm_state in vm_states.ALLOW_SOFT_REBOOT) pending_hard = (current_task_state == task_states.REBOOT_PENDING_HARD and instance.vm_state in vm_states.ALLOW_HARD_REBOOT) started_not_running = (current_task_state in [task_states.REBOOT_STARTED, task_states.REBOOT_STARTED_HARD] and current_power_state != power_state.RUNNING) if pending_soft or pending_hard or started_not_running: retry_reboot = True return retry_reboot, reboot_type def handle_lifecycle_event(self, event): LOG.info(_LI("VM %(state)s (Lifecycle Event)"), {'state': event.get_name()}, instance_uuid=event.get_instance_uuid()) context = nova.context.get_admin_context(read_deleted='yes') instance = objects.Instance.get_by_uuid(context, event.get_instance_uuid(), expected_attrs=[]) vm_power_state = None if event.get_transition() == virtevent.EVENT_LIFECYCLE_STOPPED: vm_power_state = power_state.SHUTDOWN elif event.get_transition() == virtevent.EVENT_LIFECYCLE_STARTED: vm_power_state = power_state.RUNNING elif event.get_transition() == virtevent.EVENT_LIFECYCLE_PAUSED: vm_power_state = power_state.PAUSED elif event.get_transition() == virtevent.EVENT_LIFECYCLE_RESUMED: vm_power_state = power_state.RUNNING else: LOG.warning(_LW("Unexpected power state %d"), event.get_transition()) if vm_power_state is not None: LOG.debug('Synchronizing instance power state after lifecycle ' 'event "%(event)s"; current vm_state: %(vm_state)s, ' 'current task_state: %(task_state)s, current DB ' 'power_state: %(db_power_state)s, VM 
power_state: ' '%(vm_power_state)s', dict(event=event.get_name(), vm_state=instance.vm_state, task_state=instance.task_state, db_power_state=instance.power_state, vm_power_state=vm_power_state), instance_uuid=instance.uuid) self._sync_instance_power_state(context, instance, vm_power_state) def handle_events(self, event): if isinstance(event, virtevent.LifecycleEvent): try: self.handle_lifecycle_event(event) except exception.InstanceNotFound: LOG.debug("Event %s arrived for non-existent instance. The " "instance was probably deleted.", event) else: LOG.debug("Ignoring event %s", event) def init_virt_events(self): self.driver.register_event_listener(self.handle_events) def init_host(self): """Initialization for a standalone compute service.""" self.driver.init_host(host=self.host) context = nova.context.get_admin_context() instances = objects.InstanceList.get_by_host( context, self.host, expected_attrs=['info_cache']) if CONF.defer_iptables_apply: self.driver.filter_defer_apply_on() self.init_virt_events() try: # checking that instance was not already evacuated to other host self._destroy_evacuated_instances(context) for instance in instances: self._init_instance(context, instance) finally: if CONF.defer_iptables_apply: self.driver.filter_defer_apply_off() def cleanup_host(self): self.driver.cleanup_host(host=self.host) def pre_start_hook(self): """After the service is initialized, but before we fully bring the service up by listening on RPC queues, make sure to update our available resources (and indirectly our available nodes). """ self.update_available_resource(nova.context.get_admin_context()) def _get_power_state(self, context, instance): """Retrieve the power state for the given instance.""" LOG.debug('Checking state', instance=instance) try: return self.driver.get_info(instance).state except exception.InstanceNotFound: return power_state.NOSTATE def get_console_topic(self, context): """Retrieves the console host for a project on this host. Currently this is just set in the flags for each compute host. """ # TODO(mdragon): perhaps make this variable by console_type? return '%s.%s' % (CONF.console_topic, CONF.console_host) def get_console_pool_info(self, context, console_type): return self.driver.get_console_pool_info(console_type) @wrap_exception() def refresh_security_group_rules(self, context, security_group_id): """Tell the virtualization driver to refresh security group rules. Passes straight through to the virtualization driver. """ return self.driver.refresh_security_group_rules(security_group_id) @wrap_exception() def refresh_security_group_members(self, context, security_group_id): """Tell the virtualization driver to refresh security group members. Passes straight through to the virtualization driver. """ return self.driver.refresh_security_group_members(security_group_id) @wrap_exception() def refresh_instance_security_rules(self, context, instance): """Tell the virtualization driver to refresh security rules for an instance. Passes straight through to the virtualization driver. Synchronise the call because we may still be in the middle of creating the instance. 
""" @utils.synchronized(instance['uuid']) def _sync_refresh(): try: return self.driver.refresh_instance_security_rules(instance) except NotImplementedError: LOG.warning(_LW('Hypervisor driver does not support ' 'security groups.'), instance=instance) return _sync_refresh() @wrap_exception() def refresh_provider_fw_rules(self, context): """This call passes straight through to the virtualization driver.""" return self.driver.refresh_provider_fw_rules() def _get_instance_nw_info(self, context, instance, use_slave=False): """Get a list of dictionaries of network data of an instance.""" if (not hasattr(instance, 'system_metadata') or len(instance['system_metadata']) == 0): # NOTE(danms): Several places in the code look up instances without # pulling system_metadata for performance, and call this function. # If we get an instance without it, re-fetch so that the call # to network_api (which requires it for instance_type) will # succeed. attrs = ['system_metadata'] instance = objects.Instance.get_by_uuid(context, instance['uuid'], expected_attrs=attrs, use_slave=use_slave) network_info = self.network_api.get_instance_nw_info(context, instance) return network_info def _await_block_device_map_created(self, context, vol_id): # TODO(yamahata): creating volume simultaneously # reduces creation time? # TODO(yamahata): eliminate dumb polling start = time.time() retries = CONF.block_device_allocate_retries if retries < 0: LOG.warning(_LW("Treating negative config value (%(retries)s) for " "'block_device_retries' as 0."), {'retries': retries}) # (1) treat negative config value as 0 # (2) the configured value is 0, one attempt should be made # (3) the configured value is > 0, then the total number attempts # is (retries + 1) attempts = 1 if retries >= 1: attempts = retries + 1 for attempt in range(1, attempts + 1): volume = self.volume_api.get(context, vol_id) volume_status = volume['status'] if volume_status not in ['creating', 'downloading']: if volume_status != 'available': LOG.warning(_LW("Volume id: %s finished being created but " "was not set as 'available'"), vol_id) return attempt greenthread.sleep(CONF.block_device_allocate_retries_interval) # NOTE(harlowja): Should only happen if we ran out of attempts raise exception.VolumeNotCreated(volume_id=vol_id, seconds=int(time.time() - start), attempts=attempts) def _decode_files(self, injected_files): """Base64 decode the list of files to inject.""" if not injected_files: return [] def _decode(f): path, contents = f try: decoded = base64.b64decode(contents) return path, decoded except TypeError: raise exception.Base64Exception(path=path) return [_decode(f) for f in injected_files] def _run_instance(self, context, request_spec, filter_properties, requested_networks, injected_files, admin_password, is_first_time, node, instance, legacy_bdm_in_spec): """Launch a new instance with specified options.""" extra_usage_info = {} def notify(status, msg="", fault=None, **kwargs): """Send a create.{start,error,end} notification.""" type_ = "create.%(status)s" % dict(status=status) info = extra_usage_info.copy() info['message'] = msg self._notify_about_instance_usage(context, instance, type_, extra_usage_info=info, fault=fault, **kwargs) try: self._prebuild_instance(context, instance) if request_spec and request_spec.get('image'): image_meta = request_spec['image'] else: image_meta = {} extra_usage_info = {"image_name": image_meta.get('name', '')} notify("start") # notify that build is starting instance, network_info = self._build_instance(context, request_spec, 
filter_properties, requested_networks, injected_files, admin_password, is_first_time, node, instance, image_meta, legacy_bdm_in_spec) notify("end", msg=_("Success"), network_info=network_info) except exception.RescheduledException as e: # Instance build encountered an error, and has been rescheduled. notify("error", fault=e) except exception.BuildAbortException as e: # Instance build aborted due to a non-failure LOG.info(e) notify("end", msg=e.format_message()) # notify that build is done except Exception as e: # Instance build encountered a non-recoverable error: with excutils.save_and_reraise_exception(): self._set_instance_error_state(context, instance) notify("error", fault=e) # notify that build failed def _prebuild_instance(self, context, instance): self._check_instance_exists(context, instance) try: self._start_building(context, instance) except (exception.InstanceNotFound, exception.UnexpectedDeletingTaskStateError): msg = _("Instance disappeared before we could start it") # Quickly bail out of here raise exception.BuildAbortException(instance_uuid=instance.uuid, reason=msg) def _validate_instance_group_policy(self, context, instance, filter_properties): # NOTE(russellb) Instance group policy is enforced by the scheduler. # However, there is a race condition with the enforcement of # anti-affinity. Since more than one instance may be scheduled at the # same time, it's possible that more than one instance with an # anti-affinity policy may end up here. This is a validation step to # make sure that starting the instance here doesn't violate the policy. scheduler_hints = filter_properties.get('scheduler_hints') or {} group_hint = scheduler_hints.get('group') if not group_hint: return @utils.synchronized(group_hint) def _do_validation(context, instance, group_hint): group = objects.InstanceGroup.get_by_hint(context, group_hint) if 'anti-affinity' not in group.policies: return group_hosts = group.get_hosts(context, exclude=[instance.uuid]) if self.host in group_hosts: msg = _("Anti-affinity instance group policy was violated.") raise exception.RescheduledException( instance_uuid=instance.uuid, reason=msg) _do_validation(context, instance, group_hint) def _build_instance(self, context, request_spec, filter_properties, requested_networks, injected_files, admin_password, is_first_time, node, instance, image_meta, legacy_bdm_in_spec): original_context = context context = context.elevated() # NOTE(danms): This method is deprecated, but could be called, # and if it is, it will have an old megatuple for requested_networks. 
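        # Convert the legacy tuple format into a NetworkRequestList so the
        # rest of the build path only has to deal with objects.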
if requested_networks is not None: requested_networks_obj = objects.NetworkRequestList( objects=[objects.NetworkRequest.from_tuple(t) for t in requested_networks]) else: requested_networks_obj = None # If neutron security groups pass requested security # groups to allocate_for_instance() if request_spec and self.is_neutron_security_groups: security_groups = request_spec.get('security_group') else: security_groups = [] if node is None: node = self.driver.get_available_nodes(refresh=True)[0] LOG.debug("No node specified, defaulting to %s", node) network_info = None bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( context, instance.uuid) # b64 decode the files to inject: injected_files_orig = injected_files injected_files = self._decode_files(injected_files) rt = self._get_resource_tracker(node) try: limits = filter_properties.get('limits', {}) with rt.instance_claim(context, instance, limits) as inst_claim: # NOTE(russellb) It's important that this validation be done # *after* the resource tracker instance claim, as that is where # the host is set on the instance. self._validate_instance_group_policy(context, instance, filter_properties) macs = self.driver.macs_for_instance(instance) dhcp_options = self.driver.dhcp_options_for_instance(instance) network_info = self._allocate_network(original_context, instance, requested_networks_obj, macs, security_groups, dhcp_options) # Verify that all the BDMs have a device_name set and assign a # default to the ones missing it with the help of the driver. self._default_block_device_names(context, instance, image_meta, bdms) instance.vm_state = vm_states.BUILDING instance.task_state = task_states.BLOCK_DEVICE_MAPPING instance.numa_topology = inst_claim.claimed_numa_topology instance.save() block_device_info = self._prep_block_device( context, instance, bdms) set_access_ip = (is_first_time and not instance.access_ip_v4 and not instance.access_ip_v6) flavor = None if filter_properties is not None: flavor = filter_properties.get('instance_type') instance = self._spawn(context, instance, image_meta, network_info, block_device_info, injected_files, admin_password, set_access_ip=set_access_ip, flavor=flavor) except (exception.InstanceNotFound, exception.UnexpectedDeletingTaskStateError): # the instance got deleted during the spawn # Make sure the async call finishes if network_info is not None: network_info.wait(do_raise=False) try: self._deallocate_network(context, instance) except Exception: msg = _LE('Failed to dealloc network ' 'for deleted instance') LOG.exception(msg, instance=instance) raise exception.BuildAbortException( instance_uuid=instance.uuid, reason=_("Instance disappeared during build")) except (exception.UnexpectedTaskStateError, exception.VirtualInterfaceCreateException) as e: # Don't try to reschedule, just log and reraise. 
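            # Both of these are treated as fatal for this build rather than
            # retryable on another host, so no reschedule is attempted.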
with excutils.save_and_reraise_exception(): LOG.debug(e.format_message(), instance=instance) # Make sure the async call finishes if network_info is not None: network_info.wait(do_raise=False) except exception.InvalidBDM: with excutils.save_and_reraise_exception(): if network_info is not None: network_info.wait(do_raise=False) try: self._deallocate_network(context, instance) except Exception: msg = _LE('Failed to dealloc network ' 'for failed instance') LOG.exception(msg, instance=instance) except Exception: exc_info = sys.exc_info() # try to re-schedule instance: # Make sure the async call finishes if network_info is not None: network_info.wait(do_raise=False) rescheduled = self._reschedule_or_error(original_context, instance, exc_info, requested_networks, admin_password, injected_files_orig, is_first_time, request_spec, filter_properties, bdms, legacy_bdm_in_spec) if rescheduled: # log the original build error self._log_original_error(exc_info, instance.uuid) raise exception.RescheduledException( instance_uuid=instance.uuid, reason=six.text_type(exc_info[1])) else: # not re-scheduling, go to error: raise exc_info[0], exc_info[1], exc_info[2] # spawn success return instance, network_info def _log_original_error(self, exc_info, instance_uuid): LOG.error(_LE('Error: %s'), exc_info[1], instance_uuid=instance_uuid, exc_info=exc_info) def _reschedule_or_error(self, context, instance, exc_info, requested_networks, admin_password, injected_files, is_first_time, request_spec, filter_properties, bdms=None, legacy_bdm_in_spec=True): """Try to re-schedule the build or re-raise the original build error to error out the instance. """ original_context = context context = context.elevated() instance_uuid = instance.uuid rescheduled = False compute_utils.add_instance_fault_from_exc(context, instance, exc_info[1], exc_info=exc_info) self._notify_about_instance_usage(context, instance, 'instance.create.error', fault=exc_info[1]) try: LOG.debug("Clean up resource before rescheduling.", instance=instance) if bdms is None: bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( context, instance.uuid) self._shutdown_instance(context, instance, bdms, requested_networks) self._cleanup_volumes(context, instance.uuid, bdms) except Exception: # do not attempt retry if clean up failed: with excutils.save_and_reraise_exception(): self._log_original_error(exc_info, instance_uuid) try: method_args = (request_spec, admin_password, injected_files, requested_networks, is_first_time, filter_properties, legacy_bdm_in_spec) task_state = task_states.SCHEDULING rescheduled = self._reschedule(original_context, request_spec, filter_properties, instance, self.scheduler_rpcapi.run_instance, method_args, task_state, exc_info) except Exception: rescheduled = False LOG.exception(_LE("Error trying to reschedule"), instance_uuid=instance_uuid) return rescheduled def _reschedule(self, context, request_spec, filter_properties, instance, reschedule_method, method_args, task_state, exc_info=None): """Attempt to re-schedule a compute operation.""" instance_uuid = instance.uuid retry = filter_properties.get('retry', None) if not retry: # no retry information, do not reschedule. 
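            # 'retry' is only present in filter_properties when the
            # scheduler is configured to allow multiple attempts, so its
            # absence means rescheduling is effectively disabled.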
LOG.debug("Retry info not present, will not reschedule", instance_uuid=instance_uuid) return if not request_spec: LOG.debug("No request spec, will not reschedule", instance_uuid=instance_uuid) return LOG.debug("Re-scheduling %(method)s: attempt %(num)d", {'method': reschedule_method.func_name, 'num': retry['num_attempts']}, instance_uuid=instance_uuid) # reset the task state: self._instance_update(context, instance_uuid, task_state=task_state) if exc_info: # stringify to avoid circular ref problem in json serialization: retry['exc'] = traceback.format_exception_only(exc_info[0], exc_info[1]) reschedule_method(context, *method_args) return True @periodic_task.periodic_task def _check_instance_build_time(self, context): """Ensure that instances are not stuck in build.""" timeout = CONF.instance_build_timeout if timeout == 0: return filters = {'vm_state': vm_states.BUILDING, 'host': self.host} building_insts = objects.InstanceList.get_by_filters(context, filters, expected_attrs=[], use_slave=True) for instance in building_insts: if timeutils.is_older_than(instance['created_at'], timeout): self._set_instance_error_state(context, instance) LOG.warning(_LW("Instance build timed out. Set to error " "state."), instance=instance) def _check_instance_exists(self, context, instance): """Ensure an instance with the same name is not already present.""" if self.driver.instance_exists(instance): raise exception.InstanceExists(name=instance.name) def _start_building(self, context, instance): """Save the host and launched_on fields and log appropriately.""" LOG.audit(_('Starting instance...'), context=context, instance=instance) self._instance_update(context, instance.uuid, vm_state=vm_states.BUILDING, task_state=None, expected_task_state=(task_states.SCHEDULING, None)) def _allocate_network_async(self, context, instance, requested_networks, macs, security_groups, is_vpn, dhcp_options): """Method used to allocate networks in the background. Broken out for testing. """ LOG.debug("Allocating IP information in the background.", instance=instance) retries = CONF.network_allocate_retries if retries < 0: LOG.warning(_LW("Treating negative config value (%(retries)s) for " "'network_allocate_retries' as 0."), {'retries': retries}) retries = 0 attempts = retries + 1 retry_time = 1 for attempt in range(1, attempts + 1): try: nwinfo = self.network_api.allocate_for_instance( context, instance, vpn=is_vpn, requested_networks=requested_networks, macs=macs, security_groups=security_groups, dhcp_options=dhcp_options) LOG.debug('Instance network_info: |%s|', nwinfo, instance=instance) sys_meta = instance.system_metadata sys_meta['network_allocated'] = 'True' self._instance_update(context, instance.uuid, system_metadata=sys_meta) return nwinfo except Exception: exc_info = sys.exc_info() log_info = {'attempt': attempt, 'attempts': attempts} if attempt == attempts: LOG.exception(_LE('Instance failed network setup ' 'after %(attempts)d attempt(s)'), log_info) raise exc_info[0], exc_info[1], exc_info[2] LOG.warning(_LW('Instance failed network setup ' '(attempt %(attempt)d of %(attempts)d)'), log_info, instance=instance) time.sleep(retry_time) retry_time *= 2 if retry_time > 30: retry_time = 30 # Not reached. def _build_networks_for_instance(self, context, instance, requested_networks, security_groups): # If we're here from a reschedule the network may already be allocated. 
if strutils.bool_from_string( instance.system_metadata.get('network_allocated', 'False')): # NOTE(alex_xu): The network_allocated is True means the network # resource already allocated at previous scheduling, and the # network setup is cleanup at previous. After rescheduling, the # network resource need setup on the new host. self.network_api.setup_instance_network_on_host( context, instance, instance.host) return self._get_instance_nw_info(context, instance) if not self.is_neutron_security_groups: security_groups = [] macs = self.driver.macs_for_instance(instance) dhcp_options = self.driver.dhcp_options_for_instance(instance) network_info = self._allocate_network(context, instance, requested_networks, macs, security_groups, dhcp_options) if not instance.access_ip_v4 and not instance.access_ip_v6: # If CONF.default_access_ip_network_name is set, grab the # corresponding network and set the access ip values accordingly. # Note that when there are multiple ips to choose from, an # arbitrary one will be chosen. network_name = CONF.default_access_ip_network_name if not network_name: return network_info for vif in network_info: if vif['network']['label'] == network_name: for ip in vif.fixed_ips(): if ip['version'] == 4: instance.access_ip_v4 = ip['address'] if ip['version'] == 6: instance.access_ip_v6 = ip['address'] instance.save() break return network_info def _allocate_network(self, context, instance, requested_networks, macs, security_groups, dhcp_options): """Start network allocation asynchronously. Return an instance of NetworkInfoAsyncWrapper that can be used to retrieve the allocated networks when the operation has finished. """ # NOTE(comstud): Since we're allocating networks asynchronously, # this task state has little meaning, as we won't be in this # state for very long. instance.vm_state = vm_states.BUILDING instance.task_state = task_states.NETWORKING instance.save(expected_task_state=[None]) self._update_resource_tracker(context, instance) is_vpn = pipelib.is_vpn_image(instance.image_ref) return network_model.NetworkInfoAsyncWrapper( self._allocate_network_async, context, instance, requested_networks, macs, security_groups, is_vpn, dhcp_options) def _default_root_device_name(self, instance, image_meta, root_bdm): try: return self.driver.default_root_device_name(instance, image_meta, root_bdm) except NotImplementedError: return compute_utils.get_next_device_name(instance, []) def _default_device_names_for_instance(self, instance, root_device_name, *block_device_lists): try: self.driver.default_device_names_for_instance(instance, root_device_name, *block_device_lists) except NotImplementedError: compute_utils.default_device_names_for_instance( instance, root_device_name, *block_device_lists) def _default_block_device_names(self, context, instance, image_meta, block_devices): """Verify that all the devices have the device_name set. If not, provide a default name. It also ensures that there is a root_device_name and is set to the first block device in the boot sequence (boot_index=0). 
""" root_bdm = block_device.get_root_bdm(block_devices) if not root_bdm: return # Get the root_device_name from the root BDM or the instance root_device_name = None update_root_bdm = False if root_bdm.device_name: root_device_name = root_bdm.device_name instance.root_device_name = root_device_name elif instance.root_device_name: root_device_name = instance.root_device_name root_bdm.device_name = root_device_name update_root_bdm = True else: root_device_name = self._default_root_device_name(instance, image_meta, root_bdm) instance.root_device_name = root_device_name root_bdm.device_name = root_device_name update_root_bdm = True if update_root_bdm: root_bdm.save() ephemerals = filter(block_device.new_format_is_ephemeral, block_devices) swap = filter(block_device.new_format_is_swap, block_devices) block_device_mapping = filter( driver_block_device.is_block_device_mapping, block_devices) self._default_device_names_for_instance(instance, root_device_name, ephemerals, swap, block_device_mapping) def _prep_block_device(self, context, instance, bdms, do_check_attach=True): """Set up the block device for an instance with error logging.""" try: block_device_info = { 'root_device_name': instance['root_device_name'], 'swap': driver_block_device.convert_swap(bdms), 'ephemerals': driver_block_device.convert_ephemerals(bdms), 'block_device_mapping': ( driver_block_device.attach_block_devices( driver_block_device.convert_volumes(bdms), context, instance, self.volume_api, self.driver, do_check_attach=do_check_attach) + driver_block_device.attach_block_devices( driver_block_device.convert_snapshots(bdms), context, instance, self.volume_api, self.driver, self._await_block_device_map_created, do_check_attach=do_check_attach) + driver_block_device.attach_block_devices( driver_block_device.convert_images(bdms), context, instance, self.volume_api, self.driver, self._await_block_device_map_created, do_check_attach=do_check_attach) + driver_block_device.attach_block_devices( driver_block_device.convert_blanks(bdms), context, instance, self.volume_api, self.driver, self._await_block_device_map_created, do_check_attach=do_check_attach)) } if self.use_legacy_block_device_info: for bdm_type in ('swap', 'ephemerals', 'block_device_mapping'): block_device_info[bdm_type] = \ driver_block_device.legacy_block_devices( block_device_info[bdm_type]) # Get swap out of the list block_device_info['swap'] = driver_block_device.get_swap( block_device_info['swap']) return block_device_info except exception.OverQuota: msg = _LW('Failed to create block device for instance due to ' 'being over volume resource quota') LOG.warn(msg, instance=instance) raise exception.InvalidBDM() except Exception: LOG.exception(_LE('Instance failed block device setup'), instance=instance) raise exception.InvalidBDM() @object_compat def _spawn(self, context, instance, image_meta, network_info, block_device_info, injected_files, admin_password, set_access_ip=False, flavor=None): """Spawn an instance with error logging and update its power state.""" instance.vm_state = vm_states.BUILDING instance.task_state = task_states.SPAWNING instance.save(expected_task_state=task_states.BLOCK_DEVICE_MAPPING) try: self.driver.spawn(context, instance, image_meta, injected_files, admin_password, network_info, block_device_info, flavor=flavor) except Exception: with excutils.save_and_reraise_exception(): LOG.exception(_LE('Instance failed to spawn'), instance=instance) instance.power_state = self._get_power_state(context, instance) instance.vm_state = vm_states.ACTIVE 
instance.task_state = None instance.launched_at = timeutils.utcnow() def _set_access_ip_values(): """Add access ip values for a given instance. If CONF.default_access_ip_network_name is set, this method will grab the corresponding network and set the access ip values accordingly. Note that when there are multiple ips to choose from, an arbitrary one will be chosen. """ network_name = CONF.default_access_ip_network_name if not network_name: return for vif in network_info: if vif['network']['label'] == network_name: for ip in vif.fixed_ips(): if ip['version'] == 4: instance.access_ip_v4 = ip['address'] if ip['version'] == 6: instance.access_ip_v6 = ip['address'] return if set_access_ip: _set_access_ip_values() network_info.wait(do_raise=True) instance.info_cache.network_info = network_info instance.save(expected_task_state=task_states.SPAWNING) return instance def _notify_about_instance_usage(self, context, instance, event_suffix, network_info=None, system_metadata=None, extra_usage_info=None, fault=None): compute_utils.notify_about_instance_usage( self.notifier, context, instance, event_suffix, network_info=network_info, system_metadata=system_metadata, extra_usage_info=extra_usage_info, fault=fault) def _deallocate_network(self, context, instance, requested_networks=None): LOG.debug('Deallocating network for instance', instance=instance) self.network_api.deallocate_for_instance( context, instance, requested_networks=requested_networks) def _get_instance_block_device_info(self, context, instance, refresh_conn_info=False, bdms=None): """Transform block devices to the driver block_device format.""" if not bdms: bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( context, instance['uuid']) swap = driver_block_device.convert_swap(bdms) ephemerals = driver_block_device.convert_ephemerals(bdms) block_device_mapping = ( driver_block_device.convert_volumes(bdms) + driver_block_device.convert_snapshots(bdms) + driver_block_device.convert_images(bdms)) if not refresh_conn_info: # if the block_device_mapping has no value in connection_info # (returned as None), don't include in the mapping block_device_mapping = [ bdm for bdm in block_device_mapping if bdm.get('connection_info')] else: block_device_mapping = driver_block_device.refresh_conn_infos( block_device_mapping, context, instance, self.volume_api, self.driver) if self.use_legacy_block_device_info: swap = driver_block_device.legacy_block_devices(swap) ephemerals = driver_block_device.legacy_block_devices(ephemerals) block_device_mapping = driver_block_device.legacy_block_devices( block_device_mapping) # Get swap out of the list swap = driver_block_device.get_swap(swap) root_device_name = instance.get('root_device_name') return {'swap': swap, 'root_device_name': root_device_name, 'ephemerals': ephemerals, 'block_device_mapping': block_device_mapping} # NOTE(mikal): No object_compat wrapper on this method because its # callers all pass objects already @wrap_exception() @reverts_task_state @wrap_instance_fault def build_and_run_instance(self, context, instance, image, request_spec, filter_properties, admin_password=None, injected_files=None, requested_networks=None, security_groups=None, block_device_mapping=None, node=None, limits=None): # NOTE(danms): Remove this in v4.0 of the RPC API if (requested_networks and not isinstance(requested_networks, objects.NetworkRequestList)): requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest.from_tuple(t) for t in requested_networks]) # NOTE(melwitt): Remove this in v4.0 of 
the RPC API flavor = filter_properties.get('instance_type') if flavor and not isinstance(flavor, objects.Flavor): # Code downstream may expect extra_specs to be populated since it # is receiving an object, so lookup the flavor to ensure this. flavor = objects.Flavor.get_by_id(context, flavor['id']) filter_properties = dict(filter_properties, instance_type=flavor) @utils.synchronized(instance.uuid) def _locked_do_build_and_run_instance(*args, **kwargs): # NOTE(danms): We grab the semaphore with the instance uuid # locked because we could wait in line to build this instance # for a while and we want to make sure that nothing else tries # to do anything with this instance while we wait. with self._build_semaphore: self._do_build_and_run_instance(*args, **kwargs) # NOTE(danms): We spawn here to return the RPC worker thread back to # the pool. Since what follows could take a really long time, we don't # want to tie up RPC workers. utils.spawn_n(_locked_do_build_and_run_instance, context, instance, image, request_spec, filter_properties, admin_password, injected_files, requested_networks, security_groups, block_device_mapping, node, limits) @hooks.add_hook('build_instance') @wrap_exception() @reverts_task_state @wrap_instance_event @wrap_instance_fault def _do_build_and_run_instance(self, context, instance, image, request_spec, filter_properties, admin_password, injected_files, requested_networks, security_groups, block_device_mapping, node=None, limits=None): try: LOG.audit(_('Starting instance...'), context=context, instance=instance) instance.vm_state = vm_states.BUILDING instance.task_state = None instance.save(expected_task_state= (task_states.SCHEDULING, None)) except exception.InstanceNotFound: msg = 'Instance disappeared before build.' LOG.debug(msg, instance=instance) return build_results.FAILED except exception.UnexpectedTaskStateError as e: LOG.debug(e.format_message(), instance=instance) return build_results.FAILED # b64 decode the files to inject: decoded_files = self._decode_files(injected_files) if limits is None: limits = {} if node is None: node = self.driver.get_available_nodes(refresh=True)[0] LOG.debug('No node specified, defaulting to %s', node, instance=instance) try: self._build_and_run_instance(context, instance, image, decoded_files, admin_password, requested_networks, security_groups, block_device_mapping, node, limits, filter_properties) return build_results.ACTIVE except exception.RescheduledException as e: LOG.debug(e.format_message(), instance=instance) retry = filter_properties.get('retry', None) if not retry: # no retry information, do not reschedule. LOG.debug("Retry info not present, will not reschedule", instance=instance) self._cleanup_allocated_networks(context, instance, requested_networks) compute_utils.add_instance_fault_from_exc(context, instance, e, sys.exc_info()) self._set_instance_error_state(context, instance) return build_results.FAILED retry['exc'] = traceback.format_exception(*sys.exc_info()) # NOTE(comstud): Deallocate networks if the driver wants # us to do so. if self.driver.deallocate_networks_on_reschedule(instance): self._cleanup_allocated_networks(context, instance, requested_networks) else: # NOTE(alex_xu): Network already allocated and we don't # want to deallocate them before rescheduling. But we need # cleanup those network resource setup on this host before # rescheduling. 
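                # (The port allocations themselves are kept so that the
                # next host can reuse them after the reschedule.)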
self.network_api.cleanup_instance_network_on_host( context, instance, self.host) instance.task_state = task_states.SCHEDULING instance.save() self.compute_task_api.build_instances(context, [instance], image, filter_properties, admin_password, injected_files, requested_networks, security_groups, block_device_mapping) return build_results.RESCHEDULED except (exception.InstanceNotFound, exception.UnexpectedDeletingTaskStateError): msg = 'Instance disappeared during build.' LOG.debug(msg, instance=instance) self._cleanup_allocated_networks(context, instance, requested_networks) return build_results.FAILED except exception.BuildAbortException as e: LOG.exception(e.format_message(), instance=instance) self._cleanup_allocated_networks(context, instance, requested_networks) self._cleanup_volumes(context, instance.uuid, block_device_mapping, raise_exc=False) compute_utils.add_instance_fault_from_exc(context, instance, e, sys.exc_info()) self._set_instance_error_state(context, instance) return build_results.FAILED except Exception as e: # Should not reach here. msg = _LE('Unexpected build failure, not rescheduling build.') LOG.exception(msg, instance=instance) self._cleanup_allocated_networks(context, instance, requested_networks) self._cleanup_volumes(context, instance.uuid, block_device_mapping, raise_exc=False) compute_utils.add_instance_fault_from_exc(context, instance, e, sys.exc_info()) self._set_instance_error_state(context, instance) return build_results.FAILED def _build_and_run_instance(self, context, instance, image, injected_files, admin_password, requested_networks, security_groups, block_device_mapping, node, limits, filter_properties): image_name = image.get('name') self._notify_about_instance_usage(context, instance, 'create.start', extra_usage_info={'image_name': image_name}) try: rt = self._get_resource_tracker(node) with rt.instance_claim(context, instance, limits) as inst_claim: # NOTE(russellb) It's important that this validation be done # *after* the resource tracker instance claim, as that is where # the host is set on the instance. 
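                # The group policy check synchronizes on the group hint,
                # closing the race between two members of the same
                # anti-affinity group being claimed here concurrently.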
self._validate_instance_group_policy(context, instance, filter_properties) with self._build_resources(context, instance, requested_networks, security_groups, image, block_device_mapping) as resources: instance.vm_state = vm_states.BUILDING instance.task_state = task_states.SPAWNING instance.numa_topology = inst_claim.claimed_numa_topology instance.save(expected_task_state= task_states.BLOCK_DEVICE_MAPPING) block_device_info = resources['block_device_info'] network_info = resources['network_info'] flavor = None if filter_properties is not None: flavor = filter_properties.get('instance_type') self.driver.spawn(context, instance, image, injected_files, admin_password, network_info=network_info, block_device_info=block_device_info, flavor=flavor) except (exception.InstanceNotFound, exception.UnexpectedDeletingTaskStateError) as e: with excutils.save_and_reraise_exception(): self._notify_about_instance_usage(context, instance, 'create.end', fault=e) except exception.ComputeResourcesUnavailable as e: LOG.debug(e.format_message(), instance=instance) self._notify_about_instance_usage(context, instance, 'create.error', fault=e) raise exception.RescheduledException( instance_uuid=instance.uuid, reason=e.format_message()) except exception.BuildAbortException as e: with excutils.save_and_reraise_exception(): LOG.debug(e.format_message(), instance=instance) self._notify_about_instance_usage(context, instance, 'create.error', fault=e) except (exception.FixedIpLimitExceeded, exception.NoMoreNetworks, exception.NoMoreFixedIps) as e: LOG.warning(_LW('No more network or fixed IP to be allocated'), instance=instance) self._notify_about_instance_usage(context, instance, 'create.error', fault=e) msg = _('Failed to allocate the network(s) with error %s, ' 'not rescheduling.') % e.format_message() raise exception.BuildAbortException(instance_uuid=instance.uuid, reason=msg) except (exception.VirtualInterfaceCreateException, exception.VirtualInterfaceMacAddressException) as e: LOG.exception(_LE('Failed to allocate network(s)'), instance=instance) self._notify_about_instance_usage(context, instance, 'create.error', fault=e) msg = _('Failed to allocate the network(s), not rescheduling.') raise exception.BuildAbortException(instance_uuid=instance.uuid, reason=msg) except (exception.FlavorDiskTooSmall, exception.FlavorMemoryTooSmall, exception.ImageNotActive, exception.ImageUnacceptable) as e: self._notify_about_instance_usage(context, instance, 'create.error', fault=e) raise exception.BuildAbortException(instance_uuid=instance.uuid, reason=e.format_message()) except Exception as e: self._notify_about_instance_usage(context, instance, 'create.error', fault=e) raise exception.RescheduledException( instance_uuid=instance.uuid, reason=six.text_type(e)) # NOTE(alaski): This is only useful during reschedules, remove it now. 
instance.system_metadata.pop('network_allocated', None) instance.power_state = self._get_power_state(context, instance) instance.vm_state = vm_states.ACTIVE instance.task_state = None instance.launched_at = timeutils.utcnow() try: instance.save(expected_task_state=task_states.SPAWNING) except (exception.InstanceNotFound, exception.UnexpectedDeletingTaskStateError) as e: with excutils.save_and_reraise_exception(): self._notify_about_instance_usage(context, instance, 'create.end', fault=e) self._notify_about_instance_usage(context, instance, 'create.end', extra_usage_info={'message': _('Success')}, network_info=network_info) @contextlib.contextmanager def _build_resources(self, context, instance, requested_networks, security_groups, image, block_device_mapping): resources = {} network_info = None try: network_info = self._build_networks_for_instance(context, instance, requested_networks, security_groups) resources['network_info'] = network_info except (exception.InstanceNotFound, exception.UnexpectedDeletingTaskStateError): raise except exception.UnexpectedTaskStateError as e: raise exception.BuildAbortException(instance_uuid=instance.uuid, reason=e.format_message()) except Exception: # Because this allocation is async any failures are likely to occur # when the driver accesses network_info during spawn(). LOG.exception(_LE('Failed to allocate network(s)'), instance=instance) msg = _('Failed to allocate the network(s), not rescheduling.') raise exception.BuildAbortException(instance_uuid=instance.uuid, reason=msg) try: # Verify that all the BDMs have a device_name set and assign a # default to the ones missing it with the help of the driver. self._default_block_device_names(context, instance, image, block_device_mapping) instance.vm_state = vm_states.BUILDING instance.task_state = task_states.BLOCK_DEVICE_MAPPING instance.save() block_device_info = self._prep_block_device(context, instance, block_device_mapping) resources['block_device_info'] = block_device_info except (exception.InstanceNotFound, exception.UnexpectedDeletingTaskStateError): with excutils.save_and_reraise_exception() as ctxt: # Make sure the async call finishes if network_info is not None: network_info.wait(do_raise=False) except exception.UnexpectedTaskStateError as e: # Make sure the async call finishes if network_info is not None: network_info.wait(do_raise=False) raise exception.BuildAbortException(instance_uuid=instance.uuid, reason=e.format_message()) except Exception: LOG.exception(_LE('Failure prepping block device'), instance=instance) # Make sure the async call finishes if network_info is not None: network_info.wait(do_raise=False) msg = _('Failure prepping block device.') raise exception.BuildAbortException(instance_uuid=instance.uuid, reason=msg) try: yield resources except Exception as exc: with excutils.save_and_reraise_exception() as ctxt: if not isinstance(exc, (exception.InstanceNotFound, exception.UnexpectedDeletingTaskStateError)): LOG.exception(_LE('Instance failed to spawn'), instance=instance) # Make sure the async call finishes if network_info is not None: network_info.wait(do_raise=False) try: self._shutdown_instance(context, instance, block_device_mapping, requested_networks, try_deallocate_networks=False) except Exception: ctxt.reraise = False msg = _('Could not clean up failed build,' ' not rescheduling') raise exception.BuildAbortException( instance_uuid=instance.uuid, reason=msg) def _cleanup_allocated_networks(self, context, instance, requested_networks): try: self._deallocate_network(context, 
instance, requested_networks) except Exception: msg = _LE('Failed to deallocate networks') LOG.exception(msg, instance=instance) return instance.system_metadata['network_allocated'] = 'False' try: instance.save() except exception.InstanceNotFound: # NOTE(alaski): It's possible that we're cleaning up the networks # because the instance was deleted. If that's the case then this # exception will be raised by instance.save() pass @object_compat @messaging.expected_exceptions(exception.BuildAbortException, exception.UnexpectedTaskStateError, exception.VirtualInterfaceCreateException, exception.RescheduledException) @wrap_exception() @reverts_task_state @wrap_instance_event @wrap_instance_fault def run_instance(self, context, instance, request_spec, filter_properties, requested_networks, injected_files, admin_password, is_first_time, node, legacy_bdm_in_spec): # NOTE(alaski) This method should be deprecated when the scheduler and # compute rpc interfaces are bumped to 4.x, and slated for removal in # 5.x as it is no longer used. if filter_properties is None: filter_properties = {} @utils.synchronized(instance.uuid) def do_run_instance(): self._run_instance(context, request_spec, filter_properties, requested_networks, injected_files, admin_password, is_first_time, node, instance, legacy_bdm_in_spec) do_run_instance() def _try_deallocate_network(self, context, instance, requested_networks=None): try: # tear down allocated network structure self._deallocate_network(context, instance, requested_networks) except Exception: with excutils.save_and_reraise_exception(): LOG.error(_LE('Failed to deallocate network for instance.'), instance=instance) self._set_instance_error_state(context, instance) def _get_power_off_values(self, context, instance, clean_shutdown): """Get the timing configuration for powering down this instance.""" if clean_shutdown: timeout = compute_utils.get_value_from_system_metadata(instance, key='image_os_shutdown_timeout', type=int, default=CONF.shutdown_timeout) retry_interval = self.SHUTDOWN_RETRY_INTERVAL else: timeout = 0 retry_interval = 0 return timeout, retry_interval def _power_off_instance(self, context, instance, clean_shutdown=True): """Power off an instance on this host.""" timeout, retry_interval = self._get_power_off_values(context, instance, clean_shutdown) self.driver.power_off(instance, timeout, retry_interval) def _shutdown_instance(self, context, instance, bdms, requested_networks=None, notify=True, try_deallocate_networks=True): """Shutdown an instance on this host. 
        :param context: security context
        :param instance: a nova.objects.Instance object
        :param bdms: the block devices for the instance to be torn down
        :param requested_networks: the networks on which the instance has
                                   ports
        :param notify: true if a final usage notification should be emitted
        :param try_deallocate_networks: false if we should avoid trying to
                                        teardown networking
        """
        context = context.elevated()
        LOG.audit(_('%(action_str)s instance') %
                  {'action_str': 'Terminating'},
                  context=context, instance=instance)

        if notify:
            self._notify_about_instance_usage(context, instance,
                                              "shutdown.start")

        network_info = compute_utils.get_nw_info_for_instance(instance)

        # NOTE(vish) get bdms before destroying the instance
        vol_bdms = [bdm for bdm in bdms if bdm.is_volume]
        block_device_info = self._get_instance_block_device_info(
            context, instance, bdms=bdms)

        # NOTE(melwitt): attempt driver destroy before releasing ip, may
        #                want to keep ip allocated for certain failures
        try:
            self.driver.destroy(context, instance, network_info,
                                block_device_info)
        except exception.InstancePowerOffFailure:
            # if the instance can't power off, don't release the ip
            with excutils.save_and_reraise_exception():
                pass
        except Exception:
            with excutils.save_and_reraise_exception():
                # deallocate ip and fail without proceeding to
                # volume api calls, preserving current behavior
                if try_deallocate_networks:
                    self._try_deallocate_network(context, instance,
                                                 requested_networks)

        if try_deallocate_networks:
            self._try_deallocate_network(context, instance,
                                         requested_networks)

        for bdm in vol_bdms:
            try:
                # NOTE(vish): actual driver detach done in driver.destroy, so
                #             just tell cinder that we are done with it.
                connector = self.driver.get_volume_connector(instance)
                self.volume_api.terminate_connection(context,
                                                     bdm.volume_id,
                                                     connector)
                self.volume_api.detach(context, bdm.volume_id)
            except exception.DiskNotFound as exc:
                LOG.debug('Ignoring DiskNotFound: %s', exc,
                          instance=instance)
            except exception.VolumeNotFound as exc:
                LOG.debug('Ignoring VolumeNotFound: %s', exc,
                          instance=instance)
            except (cinder_exception.EndpointNotFound,
                    keystone_exception.EndpointNotFound) as exc:
                LOG.warning(_LW('Ignoring EndpointNotFound: %s'), exc,
                            instance=instance)

        if notify:
            self._notify_about_instance_usage(context, instance,
                                              "shutdown.end")

    def _cleanup_volumes(self, context, instance_uuid, bdms, raise_exc=True):
        exc_info = None

        for bdm in bdms:
            LOG.debug("terminating bdm %s", bdm,
                      instance_uuid=instance_uuid)
            if bdm.volume_id and bdm.delete_on_termination:
                try:
                    self.volume_api.delete(context, bdm.volume_id)
                except Exception as exc:
                    exc_info = sys.exc_info()
                    LOG.warning(_LW('Failed to delete volume: %(volume_id)s '
                                    'due to %(exc)s'),
                                {'volume_id': bdm.volume_id, 'exc': exc})
        if exc_info is not None and raise_exc:
            six.reraise(exc_info[0], exc_info[1], exc_info[2])

    @hooks.add_hook("delete_instance")
    def _delete_instance(self, context, instance, bdms, quotas):
        """Delete an instance on this host. Commit or rollback quotas
        as necessary.

        :param context: nova request context
        :param instance: nova.objects.instance.Instance object
        :param bdms: nova.objects.block_device.BlockDeviceMappingList object
        :param quotas: nova.objects.quotas.Quotas object
        """
        was_soft_deleted = instance.vm_state == vm_states.SOFT_DELETED
        if was_soft_deleted:
            # Instances in SOFT_DELETED vm_state have already had quotas
            # decremented.
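            # Rolling back the fresh reservations here keeps usage from
            # being decremented a second time for the same instance.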
try: quotas.rollback() except Exception: pass try: events = self.instance_events.clear_events_for_instance(instance) if events: LOG.debug('Events pending at deletion: %(events)s', {'events': ','.join(events.keys())}, instance=instance) instance.info_cache.delete() self._notify_about_instance_usage(context, instance, "delete.start") self._shutdown_instance(context, instance, bdms) # NOTE(vish): We have already deleted the instance, so we have # to ignore problems cleaning up the volumes. It # would be nice to let the user know somehow that # the volume deletion failed, but it is not # acceptable to have an instance that can not be # deleted. Perhaps this could be reworked in the # future to set an instance fault the first time # and to only ignore the failure if the instance # is already in ERROR. self._cleanup_volumes(context, instance.uuid, bdms, raise_exc=False) # if a delete task succeeded, always update vm state and task # state without expecting task state to be DELETING instance.vm_state = vm_states.DELETED instance.task_state = None instance.power_state = power_state.NOSTATE instance.terminated_at = timeutils.utcnow() instance.save() self._update_resource_tracker(context, instance) system_meta = instance.system_metadata instance.destroy() except Exception: with excutils.save_and_reraise_exception(): quotas.rollback() self._complete_deletion(context, instance, bdms, quotas, system_meta) @wrap_exception() @reverts_task_state @wrap_instance_event @wrap_instance_fault def terminate_instance(self, context, instance, bdms, reservations): """Terminate an instance on this host.""" # NOTE (ndipanov): If we get non-object BDMs, just get them from the # db again, as this means they are sent in the old format and we want # to avoid converting them back when we can just get them. # Remove this when we bump the RPC major version to 4.0 if (bdms and any(not isinstance(bdm, obj_base.NovaObject) for bdm in bdms)): bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( context, instance.uuid) quotas = objects.Quotas.from_reservations(context, reservations, instance=instance) @utils.synchronized(instance.uuid) def do_terminate_instance(instance, bdms): try: self._delete_instance(context, instance, bdms, quotas) except exception.InstanceNotFound: LOG.info(_LI("Instance disappeared during terminate"), instance=instance) except Exception: # As we're trying to delete always go to Error if something # goes wrong that _delete_instance can't handle. with excutils.save_and_reraise_exception(): LOG.exception(_LE('Setting instance vm_state to ERROR'), instance=instance) self._set_instance_error_state(context, instance) do_terminate_instance(instance, bdms) # NOTE(johannes): This is probably better named power_off_instance # so it matches the driver method, but because of other issues, we # can't use that name in grizzly. 
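    # stop_instance tolerates a guest that is already powered off: in that
    # case the expected task state is widened to include None, since another
    # path may have cleared it while the VM was going down.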
    @wrap_exception()
    @reverts_task_state
    @wrap_instance_event
    @wrap_instance_fault
    def stop_instance(self, context, instance, clean_shutdown=True):
        """Stopping an instance on this host."""

        @utils.synchronized(instance.uuid)
        def do_stop_instance():
            current_power_state = self._get_power_state(context, instance)
            LOG.debug('Stopping instance; current vm_state: %(vm_state)s, '
                      'current task_state: %(task_state)s, current DB '
                      'power_state: %(db_power_state)s, current VM '
                      'power_state: %(current_power_state)s',
                      dict(vm_state=instance.vm_state,
                           task_state=instance.task_state,
                           db_power_state=instance.power_state,
                           current_power_state=current_power_state),
                      instance_uuid=instance.uuid)

            # NOTE(mriedem): If the instance is already powered off, we are
            # possibly tearing down and racing with other operations, so we
            # can expect the task_state to be None if something else updates
            # the instance and we're not locking it.
            expected_task_state = [task_states.POWERING_OFF]
            # The list of power states is from _sync_instance_power_state.
            if current_power_state in (power_state.NOSTATE,
                                       power_state.SHUTDOWN,
                                       power_state.CRASHED):
                LOG.info(_LI('Instance is already powered off in the '
                             'hypervisor when stop is called.'),
                         instance=instance)
                expected_task_state.append(None)

            self._notify_about_instance_usage(context, instance,
                                              "power_off.start")
            self._power_off_instance(context, instance, clean_shutdown)
            instance.power_state = self._get_power_state(context, instance)
            instance.vm_state = vm_states.STOPPED
            instance.task_state = None
            instance.save(expected_task_state=expected_task_state)
            self._notify_about_instance_usage(context, instance,
                                              "power_off.end")

        do_stop_instance()

    def _power_on(self, context, instance):
        network_info = self._get_instance_nw_info(context, instance)
        block_device_info = self._get_instance_block_device_info(context,
                                                                 instance)
        self.driver.power_on(context, instance,
                             network_info,
                             block_device_info)

    # NOTE(johannes): This is probably better named power_on_instance
    # so it matches the driver method, but because of other issues, we
    # can't use that name in grizzly.
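    # NOTE: the start path mirrors the stop path above: the guest is powered
    # on through the driver and the instance transitions back to ACTIVE,
    # with save(expected_task_state=task_states.POWERING_ON) guarding
    # against racing state changes.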
    @wrap_exception()
    @reverts_task_state
    @wrap_instance_event
    @wrap_instance_fault
    def start_instance(self, context, instance):
        """Starting an instance on this host."""
        self._notify_about_instance_usage(context, instance, "power_on.start")
        self._power_on(context, instance)
        instance.power_state = self._get_power_state(context, instance)
        instance.vm_state = vm_states.ACTIVE
        instance.task_state = None
        instance.save(expected_task_state=task_states.POWERING_ON)
        self._notify_about_instance_usage(context, instance, "power_on.end")

    @wrap_exception()
    @reverts_task_state
    @wrap_instance_event
    @wrap_instance_fault
    def soft_delete_instance(self, context, instance, reservations):
        """Soft delete an instance on this host."""
        quotas = objects.Quotas.from_reservations(context,
                                                  reservations,
                                                  instance=instance)
        try:
            self._notify_about_instance_usage(context, instance,
                                              "soft_delete.start")
            try:
                self.driver.soft_delete(instance)
            except NotImplementedError:
                # Fallback to just powering off the instance if the
                # hypervisor doesn't implement the soft_delete method
                self.driver.power_off(instance)
            instance.power_state = self._get_power_state(context, instance)
            instance.vm_state = vm_states.SOFT_DELETED
            instance.task_state = None
            instance.save(expected_task_state=[task_states.SOFT_DELETING])
        except Exception:
            with excutils.save_and_reraise_exception():
                quotas.rollback()
        quotas.commit()
        self._notify_about_instance_usage(context, instance,
                                          "soft_delete.end")

    @object_compat
    @wrap_exception()
    @reverts_task_state
    @wrap_instance_event
    @wrap_instance_fault
    def restore_instance(self, context, instance):
        """Restore a soft-deleted instance on this host."""
        self._notify_about_instance_usage(context, instance, "restore.start")
        try:
            self.driver.restore(instance)
        except NotImplementedError:
            # Fallback to just powering on the instance if the hypervisor
            # doesn't implement the restore method
            self._power_on(context, instance)
        instance.power_state = self._get_power_state(context, instance)
        instance.vm_state = vm_states.ACTIVE
        instance.task_state = None
        instance.save(expected_task_state=task_states.RESTORING)
        self._notify_about_instance_usage(context, instance, "restore.end")

    def _rebuild_default_impl(self, context, instance, image_meta,
                              injected_files, admin_password, bdms,
                              detach_block_devices, attach_block_devices,
                              network_info=None,
                              recreate=False, block_device_info=None,
                              preserve_ephemeral=False):
        if preserve_ephemeral:
            # The default code path does not support preserving ephemeral
            # partitions.
            raise exception.PreserveEphemeralNotSupported()

        detach_block_devices(context, bdms)

        if not recreate:
            self.driver.destroy(context, instance, network_info,
                                block_device_info=block_device_info)

        instance.task_state = task_states.REBUILD_BLOCK_DEVICE_MAPPING
        instance.save(expected_task_state=[task_states.REBUILDING])

        new_block_device_info = attach_block_devices(context, instance, bdms)

        instance.task_state = task_states.REBUILD_SPAWNING
        instance.save(
            expected_task_state=[task_states.REBUILD_BLOCK_DEVICE_MAPPING])

        self.driver.spawn(context, instance, image_meta, injected_files,
                          admin_password, network_info=network_info,
                          block_device_info=new_block_device_info)

    @object_compat
    @messaging.expected_exceptions(exception.PreserveEphemeralNotSupported)
    @wrap_exception()
    @reverts_task_state
    @wrap_instance_event
    @wrap_instance_fault
    def rebuild_instance(self, context, instance, orig_image_ref, image_ref,
                         injected_files, new_pass, orig_sys_metadata,
                         bdms, recreate, on_shared_storage,
                         preserve_ephemeral=False):
        """Destroy and re-make this instance.
        A 'rebuild' effectively purges all existing data from the system and
        remakes the VM with given 'metadata' and 'personalities'.

        :param context: `nova.RequestContext` object
        :param instance: Instance object
        :param orig_image_ref: Original image_ref before rebuild
        :param image_ref: New image_ref for rebuild
        :param injected_files: Files to inject
        :param new_pass: password to set on rebuilt instance
        :param orig_sys_metadata: instance system metadata from pre-rebuild
        :param bdms: block-device-mappings to use for rebuild
        :param recreate: True if the instance is being recreated (e.g. the
            hypervisor it was on failed) - cleanup of old state will be
            skipped.
        :param on_shared_storage: True if instance files on shared storage
        :param preserve_ephemeral: True if the default ephemeral storage
                                   partition must be preserved on rebuild
        """
        context = context.elevated()
        # NOTE (ndipanov): If we get non-object BDMs, just get them from the
        # db again, as this means they are sent in the old format and we want
        # to avoid converting them back when we can just get them.
        # Remove this on the next major RPC version bump
        if (bdms and
            any(not isinstance(bdm, obj_base.NovaObject)
                for bdm in bdms)):
            bdms = None

        orig_vm_state = instance.vm_state
        with self._error_out_instance_on_exception(context, instance):
            LOG.audit(_("Rebuilding instance"), context=context,
                      instance=instance)

            if recreate:
                if not self.driver.capabilities["supports_recreate"]:
                    raise exception.InstanceRecreateNotSupported

                self._check_instance_exists(context, instance)

                # To cover case when admin expects that instance files are on
                # shared storage, but not accessible and vice versa
                if on_shared_storage != self.driver.instance_on_disk(instance):
                    raise exception.InvalidSharedStorage(
                            _("Invalid state of instance files on shared"
                              " storage"))

                if on_shared_storage:
                    LOG.info(_LI('disk on shared storage, recreating using'
                                 ' existing disk'))
                else:
                    image_ref = orig_image_ref = instance.image_ref
                    LOG.info(_LI("disk not on shared storage, rebuilding from:"
                                 " '%s'"), str(image_ref))

                # NOTE(mriedem): On a recreate (evacuate), we need to update
                # the instance's host and node properties to reflect its
                # destination node for the recreate.
                node_name = None
                try:
                    compute_node = self._get_compute_info(context, self.host)
                    node_name = compute_node.hypervisor_hostname
                except exception.ComputeHostNotFound:
                    LOG.exception(_LE('Failed to get compute_info for %s'),
                                  self.host)
                finally:
                    instance.host = self.host
                    instance.node = node_name
                    instance.save()

            if image_ref:
                image_meta = self.image_api.get(context, image_ref)
            else:
                image_meta = {}

            # This instance.exists message should contain the original
            # image_ref, not the new one.  Since the DB has been updated
            # to point to the new one... we have to override it.
            # TODO(jaypipes): Move generate_image_url() into the nova.image
            # API.
            orig_image_ref_url = glance.generate_image_url(orig_image_ref)
            extra_usage_info = {'image_ref_url': orig_image_ref_url}
            self.conductor_api.notify_usage_exists(context,
                    instance, current_period=True,
                    system_metadata=orig_sys_metadata,
                    extra_usage_info=extra_usage_info)

            # This message should contain the new image_ref
            extra_usage_info = {'image_name': image_meta.get('name', '')}
            self._notify_about_instance_usage(context, instance,
                    "rebuild.start", extra_usage_info=extra_usage_info)

            instance.power_state = self._get_power_state(context, instance)
            instance.task_state = task_states.REBUILDING
            instance.save(expected_task_state=[task_states.REBUILDING])

            if recreate:
                self.network_api.setup_networks_on_host(
                        context, instance, self.host)

            network_info = compute_utils.get_nw_info_for_instance(instance)
            if bdms is None:
                bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
                        context, instance.uuid)

            block_device_info = \
                self._get_instance_block_device_info(
                        context, instance, bdms=bdms)

            def detach_block_devices(context, bdms):
                for bdm in bdms:
                    if bdm.is_volume:
                        self.volume_api.detach(context, bdm.volume_id)

            files = self._decode_files(injected_files)

            kwargs = dict(
                context=context,
                instance=instance,
                image_meta=image_meta,
                injected_files=files,
                admin_password=new_pass,
                bdms=bdms,
                detach_block_devices=detach_block_devices,
                attach_block_devices=self._prep_block_device,
                block_device_info=block_device_info,
                network_info=network_info,
                preserve_ephemeral=preserve_ephemeral,
                recreate=recreate)
            try:
                self.driver.rebuild(**kwargs)
            except NotImplementedError:
                # NOTE(rpodolyaka): driver doesn't provide specialized version
                # of rebuild, fall back to the default implementation
                self._rebuild_default_impl(**kwargs)
            instance.power_state = self._get_power_state(context, instance)
            instance.vm_state = vm_states.ACTIVE
            instance.task_state = None
            instance.launched_at = timeutils.utcnow()
            instance.save(expected_task_state=[task_states.REBUILD_SPAWNING])

            if orig_vm_state == vm_states.STOPPED:
                LOG.info(_LI("bringing vm to original state: '%s'"),
                         orig_vm_state, instance=instance)
                instance.vm_state = vm_states.ACTIVE
                instance.task_state = task_states.POWERING_OFF
                instance.progress = 0
                instance.save()
                self.stop_instance(context, instance)

            self._notify_about_instance_usage(
                    context, instance, "rebuild.end",
                    network_info=network_info,
                    extra_usage_info=extra_usage_info)

    def _handle_bad_volumes_detached(self, context, instance, bad_devices,
                                     block_device_info):
        """Handle cases where the virt-layer had to detach non-working
        volumes in order to complete an operation.
        """
        for bdm in block_device_info['block_device_mapping']:
            if bdm.get('mount_device') in bad_devices:
                try:
                    volume_id = bdm['connection_info']['data']['volume_id']
                except KeyError:
                    continue

                # NOTE(sirp): ideally we'd just call
                # `compute_api.detach_volume` here but since that hits the
                # DB directly, that's off limits from within the
                # compute-manager.
                #
                # API-detach
                LOG.info(_LI("Detaching from volume api: %s"), volume_id)
                volume = self.volume_api.get(context, volume_id)
                self.volume_api.check_detach(context, volume)
                self.volume_api.begin_detaching(context, volume_id)

                # Manager-detach
                self.detach_volume(context, volume_id, instance)

    @wrap_exception()
    @reverts_task_state
    @wrap_instance_event
    @wrap_instance_fault
    def reboot_instance(self, context, instance, block_device_info,
                        reboot_type):
        """Reboot an instance on this host."""
        # acknowledge the request made it to the manager
        if reboot_type == "SOFT":
            instance.task_state = task_states.REBOOT_PENDING
            expected_states = (task_states.REBOOTING,
                               task_states.REBOOT_PENDING,
                               task_states.REBOOT_STARTED)
        else:
            instance.task_state = task_states.REBOOT_PENDING_HARD
            expected_states = (task_states.REBOOTING_HARD,
                               task_states.REBOOT_PENDING_HARD,
                               task_states.REBOOT_STARTED_HARD)
        context = context.elevated()
        LOG.audit(_("Rebooting instance"), context=context, instance=instance)

        block_device_info = self._get_instance_block_device_info(context,
                                                                 instance)

        network_info = self._get_instance_nw_info(context, instance)

        self._notify_about_instance_usage(context, instance, "reboot.start")

        instance.power_state = self._get_power_state(context, instance)
        instance.save(expected_task_state=expected_states)

        if instance.power_state != power_state.RUNNING:
            state = instance.power_state
            running = power_state.RUNNING
            LOG.warning(_LW('trying to reboot a non-running instance:'
                            ' (state: %(state)s expected: %(running)s)'),
                        {'state': state, 'running': running},
                        context=context, instance=instance)

        def bad_volumes_callback(bad_devices):
            self._handle_bad_volumes_detached(
                    context, instance, bad_devices, block_device_info)

        try:
            # Don't change it out of rescue mode
            if instance.vm_state == vm_states.RESCUED:
                new_vm_state = vm_states.RESCUED
            else:
                new_vm_state = vm_states.ACTIVE
            new_power_state = None
            if reboot_type == "SOFT":
                instance.task_state = task_states.REBOOT_STARTED
                expected_state = task_states.REBOOT_PENDING
            else:
                instance.task_state = task_states.REBOOT_STARTED_HARD
                expected_state = task_states.REBOOT_PENDING_HARD
            instance.save(expected_task_state=expected_state)
            self.driver.reboot(context, instance,
                               network_info,
                               reboot_type,
                               block_device_info=block_device_info,
                               bad_volumes_callback=bad_volumes_callback)

        except Exception as error:
            with excutils.save_and_reraise_exception() as ctxt:
                exc_info = sys.exc_info()
                # if the reboot failed but the VM is running don't
                # put it into an error state
                new_power_state = self._get_power_state(context, instance)
                if new_power_state == power_state.RUNNING:
                    LOG.warning(_LW('Reboot failed but instance is running'),
                                context=context, instance=instance)
                    compute_utils.add_instance_fault_from_exc(context,
                            instance, error, exc_info)
                    self._notify_about_instance_usage(context, instance,
                            'reboot.error', fault=error)
                    ctxt.reraise = False
                else:
                    LOG.error(_LE('Cannot reboot instance: %s'), error,
                              context=context, instance=instance)
                    self._set_instance_obj_error_state(context, instance)

        if not new_power_state:
            new_power_state = self._get_power_state(context, instance)
        try:
            instance.power_state = new_power_state
            instance.vm_state = new_vm_state
            instance.task_state = None
            instance.save()
        except exception.InstanceNotFound:
            LOG.warning(_LW("Instance disappeared during reboot"),
                        context=context, instance=instance)

        self._notify_about_instance_usage(context, instance, "reboot.end")

    @delete_image_on_error
    def _do_snapshot_instance(self, context, image_id, instance, rotation):
        if rotation < 0:
            raise exception.RotationRequiredForBackup()
        self._snapshot_instance(context, image_id, instance,
                                task_states.IMAGE_BACKUP)

    @wrap_exception()
    @reverts_task_state
    @wrap_instance_fault
    def backup_instance(self, context, image_id, instance, backup_type,
                        rotation):
        """Backup an instance on this host.

        :param backup_type: daily | weekly
        :param rotation: int representing how many backups to keep around
        """
        self._do_snapshot_instance(context, image_id, instance, rotation)
        self._rotate_backups(context, instance, backup_type, rotation)

    @wrap_exception()
    @reverts_task_state
    @wrap_instance_fault
    @delete_image_on_error
    def snapshot_instance(self, context, image_id, instance):
        """Snapshot an instance on this host.

        :param context: security context
        :param instance: a nova.objects.instance.Instance object
        :param image_id: glance.db.sqlalchemy.models.Image.Id
        """
        # NOTE(dave-mcnally) the task state will already be set by the api
        # but if the compute manager has crashed/been restarted prior to the
        # request getting here the task state may have been cleared so we set
        # it again and things continue normally
        try:
            instance.task_state = task_states.IMAGE_SNAPSHOT
            instance.save(
                expected_task_state=task_states.IMAGE_SNAPSHOT_PENDING)
        except exception.InstanceNotFound:
            # possibility instance no longer exists, no point in continuing
            LOG.debug("Instance not found, could not set state %s "
                      "for instance.",
                      task_states.IMAGE_SNAPSHOT, instance=instance)
            return

        except exception.UnexpectedDeletingTaskStateError:
            LOG.debug("Instance being deleted, snapshot cannot continue",
                      instance=instance)
            return

        self._snapshot_instance(context, image_id, instance,
                                task_states.IMAGE_SNAPSHOT)

    def _snapshot_instance(self, context, image_id, instance,
                           expected_task_state):
        context = context.elevated()

        instance.power_state = self._get_power_state(context, instance)
        try:
            instance.save()

            LOG.audit(_('instance snapshotting'), context=context,
                      instance=instance)

            if instance.power_state != power_state.RUNNING:
                state = instance.power_state
                running = power_state.RUNNING
                LOG.warning(_LW('trying to snapshot a non-running instance: '
                                '(state: %(state)s expected: %(running)s)'),
                            {'state': state, 'running': running},
                            instance=instance)

            self._notify_about_instance_usage(
                context, instance, "snapshot.start")

            def update_task_state(task_state,
                                  expected_state=expected_task_state):
                instance.task_state = task_state
                instance.save(expected_task_state=expected_state)

            self.driver.snapshot(context, instance, image_id,
                                 update_task_state)

            instance.task_state = None
            instance.save(expected_task_state=task_states.IMAGE_UPLOADING)

            self._notify_about_instance_usage(context, instance,
                                              "snapshot.end")
        except (exception.InstanceNotFound,
                exception.UnexpectedDeletingTaskStateError):
            # the instance got deleted during the snapshot
            # Quickly bail out of here
            msg = 'Instance disappeared during snapshot'
            LOG.debug(msg, instance=instance)
            try:
                image_service = glance.get_default_image_service()
                image = image_service.show(context, image_id)
                if image['status'] != 'active':
                    image_service.delete(context, image_id)
            except Exception:
                LOG.warning(_LW("Error while trying to clean up image %s"),
                            image_id, instance=instance)
        except exception.ImageNotFound:
            instance.task_state = None
            instance.save()
            msg = _("Image not found during snapshot")
            LOG.warn(msg, instance=instance)

    def _post_interrupted_snapshot_cleanup(self, context, instance):
        self.driver.post_interrupted_snapshot_cleanup(context, instance)

    @object_compat
    @messaging.expected_exceptions(NotImplementedError)
    def volume_snapshot_create(self, context, instance, volume_id,
                               create_info):
        self.driver.volume_snapshot_create(context, instance, volume_id,
                                           create_info)

    @object_compat
    @messaging.expected_exceptions(NotImplementedError)
    def volume_snapshot_delete(self, context, instance, volume_id,
                               snapshot_id, delete_info):
        self.driver.volume_snapshot_delete(context, instance, volume_id,
                                           snapshot_id, delete_info)

    @wrap_instance_fault
    def _rotate_backups(self, context, instance, backup_type, rotation):
        """Delete excess backups associated to an instance.

        Instances are allowed a fixed number of backups (the rotation
        number); this method deletes the oldest backups that exceed the
        rotation threshold.

        :param context: security context
        :param instance: Instance dict
        :param backup_type: daily | weekly
        :param rotation: int representing how many backups to keep around;
            None if rotation shouldn't be used (as in the case of snapshots)
        """
        filters = {'property-image_type': 'backup',
                   'property-backup_type': backup_type,
                   'property-instance_uuid': instance.uuid}

        images = self.image_api.get_all(context, filters=filters,
                                        sort_key='created_at',
                                        sort_dir='desc')
        num_images = len(images)
        LOG.debug("Found %(num_images)d images (rotation: %(rotation)d)",
                  {'num_images': num_images, 'rotation': rotation},
                  instance=instance)

        if num_images > rotation:
            # NOTE(sirp): this deletes all backups that exceed the rotation
            # limit
            excess = len(images) - rotation
            LOG.debug("Rotating out %d backups", excess,
                      instance=instance)
            for i in xrange(excess):
                image = images.pop()
                image_id = image['id']
                LOG.debug("Deleting image %s", image_id,
                          instance=instance)
                self.image_api.delete(context, image_id)

    @object_compat
    @wrap_exception()
    @reverts_task_state
    @wrap_instance_event
    @wrap_instance_fault
    def set_admin_password(self, context, instance, new_pass):
        """Set the root/admin password for an instance on this host.

        This is generally only called by API password resets after an
        image has been built.

        @param context: Nova auth context.
        @param instance: Nova instance object.
        @param new_pass: The admin password for the instance.
        """
        context = context.elevated()
        if new_pass is None:
            # Generate a random password
            new_pass = utils.generate_password()

        current_power_state = self._get_power_state(context, instance)
        expected_state = power_state.RUNNING

        if current_power_state != expected_state:
            instance.task_state = None
            instance.save(expected_task_state=task_states.UPDATING_PASSWORD)
            _msg = _('instance %s is not running') % instance.uuid
            raise exception.InstancePasswordSetFailed(
                instance=instance.uuid, reason=_msg)

        try:
            self.driver.set_admin_password(instance, new_pass)
            LOG.audit(_("Root password set"), instance=instance)
            instance.task_state = None
            instance.save(
                expected_task_state=task_states.UPDATING_PASSWORD)
        except NotImplementedError:
            LOG.warning(_LW('set_admin_password is not implemented '
                            'by this driver or guest instance.'),
                        instance=instance)
            instance.task_state = None
            instance.save(
                expected_task_state=task_states.UPDATING_PASSWORD)
            raise NotImplementedError(_('set_admin_password is not '
                                        'implemented by this driver or guest '
                                        'instance.'))
        except exception.UnexpectedTaskStateError:
            # interrupted by another (most likely delete) task
            # do not retry
            raise
        except Exception as e:
            # Catch all here because this could be anything.
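            # Anything can arrive here (driver or guest-agent failures), so
            # the instance is pushed to ERROR below and a sanitized
            # exception is raised in place of the original one.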
            LOG.exception(_LE('set_admin_password failed: %s'), e,
                          instance=instance)
            self._set_instance_obj_error_state(context, instance)
            # We create a new exception here so that we won't
            # potentially reveal password information to the
            # API caller.  The real exception is logged above
            _msg = _('error setting admin password')
            raise exception.InstancePasswordSetFailed(
                instance=instance.uuid, reason=_msg)

    @wrap_exception()
    @reverts_task_state
    @wrap_instance_fault
    def inject_file(self, context, path, file_contents, instance):
        """Write a file to the specified path in an instance on this host."""
        # NOTE(russellb) Remove this method, as well as the underlying virt
        # driver methods, when the compute rpc interface is bumped to 4.x
        # as it is no longer used.
        context = context.elevated()
        current_power_state = self._get_power_state(context, instance)
        expected_state = power_state.RUNNING
        if current_power_state != expected_state:
            LOG.warning(_LW('trying to inject a file into a non-running '
                            'instance: (state: %(current_state)s expected: '
                            '%(expected_state)s)'),
                        {'current_state': current_power_state,
                         'expected_state': expected_state},
                        instance=instance)
        LOG.audit(_('injecting file to %s'), path,
                  instance=instance)
        self.driver.inject_file(instance, path, file_contents)

    def _get_rescue_image(self, context, instance, rescue_image_ref=None):
        """Determine what image should be used to boot the rescue VM."""
        # 1. If rescue_image_ref is passed in, use that for rescue.
        # 2. Else, use the base image associated with instance's current
        #    image. The idea here is to provide the customer with a rescue
        #    environment which they are familiar with.
        #    So, if they built their instance off of a Debian image,
        #    their rescue VM will also be Debian.
        # 3. As a last resort, use instance's current image.
        if not rescue_image_ref:
            system_meta = utils.instance_sys_meta(instance)
            rescue_image_ref = system_meta.get('image_base_image_ref')

        if not rescue_image_ref:
            LOG.warning(_LW('Unable to find a different image to use for '
                            'rescue VM, using instance\'s current image'),
                        instance=instance)
            rescue_image_ref = instance.image_ref

        image_meta = compute_utils.get_image_metadata(context, self.image_api,
                                                      rescue_image_ref,
                                                      instance)
        # NOTE(belliott) bug #1227350 - xenapi needs the actual image id
        image_meta['id'] = rescue_image_ref
        return image_meta

    @object_compat
    @wrap_exception()
    @reverts_task_state
    @wrap_instance_event
    @wrap_instance_fault
    def rescue_instance(self, context, instance, rescue_password,
                        rescue_image_ref=None, clean_shutdown=True):
        context = context.elevated()
        LOG.audit(_('Rescuing'), context=context, instance=instance)

        admin_password = (rescue_password if rescue_password else
                          utils.generate_password())

        network_info = self._get_instance_nw_info(context, instance)

        rescue_image_meta = self._get_rescue_image(context, instance,
                                                   rescue_image_ref)

        extra_usage_info = {'rescue_image_name':
                            rescue_image_meta.get('name', '')}
        self._notify_about_instance_usage(context, instance,
                "rescue.start", extra_usage_info=extra_usage_info,
                network_info=network_info)

        try:
            self._power_off_instance(context, instance, clean_shutdown)

            self.driver.rescue(context, instance,
                               network_info,
                               rescue_image_meta, admin_password)
        except Exception as e:
            LOG.exception(_LE("Error trying to Rescue Instance"),
                          instance=instance)
            raise exception.InstanceNotRescuable(
                instance_id=instance.uuid,
                reason=_("Driver Error: %s") % e)

        self.conductor_api.notify_usage_exists(context, instance,
                                               current_period=True)

        instance.vm_state = vm_states.RESCUED
        instance.task_state = None
        instance.power_state = self._get_power_state(context, instance)
        instance.launched_at = timeutils.utcnow()
        instance.save(expected_task_state=task_states.RESCUING)

        self._notify_about_instance_usage(context, instance,
                "rescue.end", extra_usage_info=extra_usage_info,
                network_info=network_info)

    @object_compat
    @wrap_exception()
    @reverts_task_state
    @wrap_instance_event
    @wrap_instance_fault
    def unrescue_instance(self, context, instance):
        context = context.elevated()
        LOG.audit(_('Unrescuing'), context=context, instance=instance)

        network_info = self._get_instance_nw_info(context, instance)
        self._notify_about_instance_usage(context, instance,
                "unrescue.start", network_info=network_info)
        with self._error_out_instance_on_exception(context, instance):
            self.driver.unrescue(instance,
                                 network_info)

        instance.vm_state = vm_states.ACTIVE
        instance.task_state = None
        instance.power_state = self._get_power_state(context, instance)
        instance.save(expected_task_state=task_states.UNRESCUING)

        self._notify_about_instance_usage(context,
                                          instance,
                                          "unrescue.end",
                                          network_info=network_info)

    @object_compat
    @wrap_exception()
    @wrap_instance_fault
    def change_instance_metadata(self, context, diff, instance):
        """Update the metadata published to the instance."""
        LOG.debug("Changing instance metadata according to %r",
                  diff, instance=instance)
        self.driver.change_instance_metadata(context, instance, diff)

    def _cleanup_stored_instance_types(self, instance, restore_old=False):
        """Clean up "old" and "new" instance_type information stored in
        instance's system_metadata. Optionally update the "current"
        instance_type to the saved old one first.

        Returns the updated system_metadata as a dict, the
        post-cleanup current instance type and the to-be dropped
        instance type.
        """
        sys_meta = instance.system_metadata
        if restore_old:
            instance_type = instance.get_flavor('old')
            drop_instance_type = instance.get_flavor()
            instance.set_flavor(instance_type)
        else:
            instance_type = instance.get_flavor()
            drop_instance_type = instance.get_flavor('old')

        instance.delete_flavor('old')
        instance.delete_flavor('new')

        return sys_meta, instance_type, drop_instance_type

    @wrap_exception()
    @wrap_instance_event
    @wrap_instance_fault
    def confirm_resize(self, context, instance, reservations, migration):

        quotas = objects.Quotas.from_reservations(context,
                                                  reservations,
                                                  instance=instance)

        @utils.synchronized(instance['uuid'])
        def do_confirm_resize(context, instance, migration_id):
            # NOTE(wangpan): Get the migration status from db, if it has been
            #                confirmed, we do nothing and return here
            LOG.debug("Going to confirm migration %s", migration_id,
                      context=context, instance=instance)
            try:
                # TODO(russellb) Why are we sending the migration object just
                # to turn around and look it up from the db again?
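                # Re-reading the migration from the DB makes the status
                # checks below authoritative even if the copy sent over RPC
                # is stale.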
                migration = objects.Migration.get_by_id(
                                    context.elevated(), migration_id)
            except exception.MigrationNotFound:
                LOG.error(_LE("Migration %s is not found during "
                              "confirmation"),
                          migration_id, context=context, instance=instance)
                quotas.rollback()
                return

            if migration.status == 'confirmed':
                LOG.info(_LI("Migration %s is already confirmed"),
                         migration_id, context=context, instance=instance)
                quotas.rollback()
                return
            elif migration.status not in ('finished', 'confirming'):
                LOG.warning(_LW("Unexpected confirmation status '%(status)s' "
                                "of migration %(id)s, exit confirmation "
                                "process"),
                            {"status": migration.status, "id": migration_id},
                            context=context, instance=instance)
                quotas.rollback()
                return

            # NOTE(wangpan): Get the instance from db, if it has been
            #                deleted, we do nothing and return here
            expected_attrs = ['metadata', 'system_metadata', 'flavor']
            try:
                instance = objects.Instance.get_by_uuid(
                        context, instance.uuid,
                        expected_attrs=expected_attrs)
            except exception.InstanceNotFound:
                LOG.info(_LI("Instance is not found during confirmation"),
                         context=context, instance=instance)
                quotas.rollback()
                return

            self._confirm_resize(context, instance, quotas,
                                 migration=migration)

        do_confirm_resize(context, instance, migration.id)

    def _confirm_resize(self, context, instance, quotas,
                        migration=None):
        """Destroys the source instance."""
        self._notify_about_instance_usage(context, instance,
                                          "resize.confirm.start")

        with self._error_out_instance_on_exception(context, instance,
                                                   quotas=quotas):
            # NOTE(danms): delete stashed migration information
            sys_meta, instance_type, old_instance_type = (
                self._cleanup_stored_instance_types(instance))
            sys_meta.pop('old_vm_state', None)

            instance.system_metadata = sys_meta
            instance.save()

            # NOTE(tr3buchet): tear down networks on source host
            self.network_api.setup_networks_on_host(context, instance,
                               migration.source_compute, teardown=True)

            network_info = self._get_instance_nw_info(context, instance)
            self.driver.confirm_migration(migration, instance,
                                          network_info)

            migration.status = 'confirmed'
            with migration.obj_as_admin():
                migration.save()

            rt = self._get_resource_tracker(migration.source_node)
            rt.drop_resize_claim(context, instance, old_instance_type)

            # NOTE(mriedem): The old_vm_state could be STOPPED but the user
            # might have manually powered up the instance to confirm the
            # resize/migrate, so we need to check the current power state
            # on the instance and set the vm_state appropriately. We default
            # to ACTIVE because if the power state is not SHUTDOWN, we
            # assume _sync_instance_power_state will clean it up.
            p_state = instance.power_state
            vm_state = None
            if p_state == power_state.SHUTDOWN:
                vm_state = vm_states.STOPPED
                LOG.debug("Resized/migrated instance is powered off. "
                          "Setting vm_state to '%s'.", vm_state,
                          instance=instance)
            else:
                vm_state = vm_states.ACTIVE

            instance.vm_state = vm_state
            instance.task_state = None
            instance.save(expected_task_state=[None, task_states.DELETING])

            self._notify_about_instance_usage(
                context, instance, "resize.confirm.end",
                network_info=network_info)

            quotas.commit()

    @wrap_exception()
    @reverts_task_state
    @wrap_instance_event
    @wrap_instance_fault
    def revert_resize(self, context, instance, migration, reservations):
        """Destroys the new instance on the destination machine.

        Reverts the model changes, and powers on the old instance on the
        source machine.

        """
        quotas = objects.Quotas.from_reservations(context,
                                                  reservations,
                                                  instance=instance)

        # NOTE(comstud): A revert_resize is essentially a resize back to
        #                the old size, so we need to send a usage event here.
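        # The instance.exists event is emitted for the current audit period
        # before any state is torn down on the destination host.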
        self.conductor_api.notify_usage_exists(
                context, instance, current_period=True)

        with self._error_out_instance_on_exception(context, instance,
                                                   quotas=quotas):
            # NOTE(tr3buchet): tear down networks on destination host
            self.network_api.setup_networks_on_host(context, instance,
                                                    teardown=True)

            migration_p = obj_base.obj_to_primitive(migration)
            self.network_api.migrate_instance_start(context,
                                                    instance,
                                                    migration_p)

            network_info = self._get_instance_nw_info(context, instance)
            bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
                    context, instance.uuid)
            block_device_info = self._get_instance_block_device_info(
                                context, instance, bdms=bdms)

            destroy_disks = not self._is_instance_storage_shared(context,
                                                                 instance)
            self.driver.destroy(context, instance, network_info,
                                block_device_info, destroy_disks)

            self._terminate_volume_connections(context, instance, bdms)

            migration.status = 'reverted'
            with migration.obj_as_admin():
                migration.save()

            rt = self._get_resource_tracker(instance.node)
            rt.drop_resize_claim(context, instance)

            self.compute_rpcapi.finish_revert_resize(context, instance,
                    migration, migration.source_compute,
                    quotas.reservations)

    @wrap_exception()
    @reverts_task_state
    @wrap_instance_event
    @wrap_instance_fault
    def finish_revert_resize(self, context, instance, reservations,
                             migration):
        """Finishes the second half of reverting a resize.

        Bring the original source instance state back (active/shutoff) and
        revert the resized attributes in the database.

        """
        quotas = objects.Quotas.from_reservations(context,
                                                  reservations,
                                                  instance=instance)

        with self._error_out_instance_on_exception(context, instance,
                                                   quotas=quotas):
            network_info = self._get_instance_nw_info(context, instance)

            self._notify_about_instance_usage(
                    context, instance, "resize.revert.start")

            sys_meta, instance_type, drop_instance_type = (
                self._cleanup_stored_instance_types(instance, True))

            # NOTE(mriedem): delete stashed old_vm_state information; we
            # default to ACTIVE for backwards compatibility if old_vm_state
            # is not set
            old_vm_state = sys_meta.pop('old_vm_state', vm_states.ACTIVE)

            instance.system_metadata = sys_meta
            instance.memory_mb = instance_type['memory_mb']
            instance.vcpus = instance_type['vcpus']
            instance.root_gb = instance_type['root_gb']
            instance.ephemeral_gb = instance_type['ephemeral_gb']
            instance.instance_type_id = instance_type['id']
            instance.host = migration.source_compute
            instance.node = migration.source_node
            instance.save()

            migration.dest_compute = migration.source_compute
            with migration.obj_as_admin():
                migration.save()

            self.network_api.setup_networks_on_host(context, instance,
                                                    migration.source_compute)

            block_device_info = self._get_instance_block_device_info(
                    context, instance, refresh_conn_info=True)

            power_on = old_vm_state != vm_states.STOPPED
            self.driver.finish_revert_migration(context, instance,
                                                network_info,
                                                block_device_info, power_on)

            instance.launched_at = timeutils.utcnow()
            instance.save(expected_task_state=task_states.RESIZE_REVERTING)

            migration_p = obj_base.obj_to_primitive(migration)
            self.network_api.migrate_instance_finish(context,
                                                     instance,
                                                     migration_p)

            # if the original vm state was STOPPED, set it back to STOPPED
            LOG.info(_LI("Updating instance to original state: '%s'"),
                     old_vm_state)
            if power_on:
                instance.vm_state = vm_states.ACTIVE
                instance.task_state = None
                instance.save()
            else:
                instance.task_state = task_states.POWERING_OFF
                instance.save()
                self.stop_instance(context, instance=instance)

            self._notify_about_instance_usage(
                    context, instance, "resize.revert.end")

            quotas.commit()

    def _prep_resize(self, context, image, instance, instance_type,
                     quotas, request_spec, filter_properties, node,
                     clean_shutdown=True):

        if not filter_properties:
            filter_properties = {}

        if not instance['host']:
            self._set_instance_error_state(context, instance)
            msg = _('Instance has no source host')
            raise exception.MigrationError(reason=msg)

        same_host = instance['host'] == self.host
        if same_host and not CONF.allow_resize_to_same_host:
            self._set_instance_error_state(context, instance)
            msg = _('destination same as source!')
            raise exception.MigrationError(reason=msg)

        # NOTE(danms): Stash the new instance_type to avoid having to
        # look it up in the database later
        instance.set_flavor(instance_type, 'new')
        # NOTE(mriedem): Stash the old vm_state so we can set the
        # resized/reverted instance back to the same state later.
        vm_state = instance['vm_state']
        LOG.debug('Stashing vm_state: %s', vm_state, instance=instance)
        instance.system_metadata['old_vm_state'] = vm_state
        instance.save()

        limits = filter_properties.get('limits', {})
        rt = self._get_resource_tracker(node)
        with rt.resize_claim(context, instance, instance_type,
                             image_meta=image, limits=limits) as claim:
            LOG.audit(_('Migrating'), context=context, instance=instance)
            self.compute_rpcapi.resize_instance(
                    context, instance, claim.migration, image,
                    instance_type, quotas.reservations,
                    clean_shutdown)

    @wrap_exception()
    @reverts_task_state
    @wrap_instance_event
    @wrap_instance_fault
    def prep_resize(self, context, image, instance, instance_type,
                    reservations, request_spec, filter_properties, node,
                    clean_shutdown=True):
        """Initiates the process of moving a running instance to another
        host.

        Possibly changes the RAM and disk size in the process.

        """
        if node is None:
            node = self.driver.get_available_nodes(refresh=True)[0]
            LOG.debug("No node specified, defaulting to %s", node,
                      instance=instance)

        quotas = objects.Quotas.from_reservations(context,
                                                  reservations,
                                                  instance=instance)
        with self._error_out_instance_on_exception(context, instance,
                                                   quotas=quotas):
            self.conductor_api.notify_usage_exists(
                    context, instance, current_period=True)
            self._notify_about_instance_usage(
                    context, instance, "resize.prep.start")
            try:
                self._prep_resize(context, image, instance,
                                  instance_type, quotas,
                                  request_spec, filter_properties,
                                  node, clean_shutdown)
            # NOTE(dgenin): This is thrown in LibvirtDriver when the
            #               instance to be migrated is backed by LVM.
            #               Remove when LVM migration is implemented.
            except exception.MigrationPreCheckError:
                raise
            except Exception:
                # try to re-schedule the resize elsewhere:
                exc_info = sys.exc_info()
                self._reschedule_resize_or_reraise(context, image, instance,
                        exc_info, instance_type, quotas, request_spec,
                        filter_properties)
            finally:
                extra_usage_info = dict(
                        new_instance_type=instance_type['name'],
                        new_instance_type_id=instance_type['id'])

                self._notify_about_instance_usage(
                    context, instance, "resize.prep.end",
                    extra_usage_info=extra_usage_info)

    def _reschedule_resize_or_reraise(self, context, image, instance,
                                      exc_info, instance_type, quotas,
                                      request_spec, filter_properties):
        """Try to re-schedule the resize or re-raise the original error to
        error out the instance.
""" if not request_spec: request_spec = {} if not filter_properties: filter_properties = {} rescheduled = False instance_uuid = instance['uuid'] try: reschedule_method = self.compute_task_api.resize_instance scheduler_hint = dict(filter_properties=filter_properties) method_args = (instance, None, scheduler_hint, instance_type, quotas.reservations) task_state = task_states.RESIZE_PREP rescheduled = self._reschedule(context, request_spec, filter_properties, instance, reschedule_method, method_args, task_state, exc_info) except Exception as error: rescheduled = False LOG.exception(_LE("Error trying to reschedule"), instance_uuid=instance_uuid) compute_utils.add_instance_fault_from_exc(context, instance, error, exc_info=sys.exc_info()) self._notify_about_instance_usage(context, instance, 'resize.error', fault=error) if rescheduled: self._log_original_error(exc_info, instance_uuid) compute_utils.add_instance_fault_from_exc(context, instance, exc_info[1], exc_info=exc_info) self._notify_about_instance_usage(context, instance, 'resize.error', fault=exc_info[1]) else: # not re-scheduling raise exc_info[0], exc_info[1], exc_info[2] @wrap_exception() @reverts_task_state @wrap_instance_event @errors_out_migration @wrap_instance_fault def resize_instance(self, context, instance, image, reservations, migration, instance_type, clean_shutdown=True): """Starts the migration of a running instance to another host.""" quotas = objects.Quotas.from_reservations(context, reservations, instance=instance) with self._error_out_instance_on_exception(context, instance, quotas=quotas): if not instance_type: instance_type = objects.Flavor.get_by_id( context, migration['new_instance_type_id']) network_info = self._get_instance_nw_info(context, instance) migration.status = 'migrating' with migration.obj_as_admin(): migration.save() instance.task_state = task_states.RESIZE_MIGRATING instance.save(expected_task_state=task_states.RESIZE_PREP) self._notify_about_instance_usage( context, instance, "resize.start", network_info=network_info) bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( context, instance.uuid) block_device_info = self._get_instance_block_device_info( context, instance, bdms=bdms) timeout, retry_interval = self._get_power_off_values(context, instance, clean_shutdown) disk_info = self.driver.migrate_disk_and_power_off( context, instance, migration.dest_host, instance_type, network_info, block_device_info, timeout, retry_interval) self._terminate_volume_connections(context, instance, bdms) migration_p = obj_base.obj_to_primitive(migration) self.network_api.migrate_instance_start(context, instance, migration_p) migration.status = 'post-migrating' with migration.obj_as_admin(): migration.save() instance.host = migration.dest_compute instance.node = migration.dest_node instance.task_state = task_states.RESIZE_MIGRATED instance.save(expected_task_state=task_states.RESIZE_MIGRATING) self.compute_rpcapi.finish_resize(context, instance, migration, image, disk_info, migration.dest_compute, reservations=quotas.reservations) self._notify_about_instance_usage(context, instance, "resize.end", network_info=network_info) self.instance_events.clear_events_for_instance(instance) def _terminate_volume_connections(self, context, instance, bdms): connector = self.driver.get_volume_connector(instance) for bdm in bdms: if bdm.is_volume: self.volume_api.terminate_connection(context, bdm.volume_id, connector) @staticmethod def _set_instance_info(instance, instance_type): instance.instance_type_id = instance_type['id'] 
        instance.memory_mb = instance_type['memory_mb']
        instance.vcpus = instance_type['vcpus']
        instance.root_gb = instance_type['root_gb']
        instance.ephemeral_gb = instance_type['ephemeral_gb']
        instance.set_flavor(instance_type)

    def _finish_resize(self, context, instance, migration, disk_info,
                       image):
        resize_instance = False
        old_instance_type_id = migration['old_instance_type_id']
        new_instance_type_id = migration['new_instance_type_id']
        old_instance_type = instance.get_flavor()
        # NOTE(mriedem): Get the old_vm_state so we know if we should
        # power on the instance. If old_vm_state is not set we need to default
        # to ACTIVE for backwards compatibility
        old_vm_state = instance.system_metadata.get('old_vm_state',
                                                    vm_states.ACTIVE)
        instance.set_flavor(old_instance_type, 'old')

        if old_instance_type_id != new_instance_type_id:
            instance_type = instance.get_flavor('new')
            self._set_instance_info(instance, instance_type)
            resize_instance = True

        # NOTE(tr3buchet): setup networks on destination host
        self.network_api.setup_networks_on_host(context, instance,
                                                migration['dest_compute'])

        migration_p = obj_base.obj_to_primitive(migration)
        self.network_api.migrate_instance_finish(context,
                                                 instance,
                                                 migration_p)

        network_info = self._get_instance_nw_info(context, instance)

        instance.task_state = task_states.RESIZE_FINISH
        instance.save(expected_task_state=task_states.RESIZE_MIGRATED)

        self._notify_about_instance_usage(
            context, instance, "finish_resize.start",
            network_info=network_info)

        block_device_info = self._get_instance_block_device_info(
                            context, instance, refresh_conn_info=True)

        # NOTE(mriedem): If the original vm_state was STOPPED, we don't
        # automatically power on the instance after it's migrated
        power_on = old_vm_state != vm_states.STOPPED

        try:
            self.driver.finish_migration(context, migration, instance,
                                         disk_info,
                                         network_info,
                                         image, resize_instance,
                                         block_device_info, power_on)
        except Exception:
            with excutils.save_and_reraise_exception():
                if resize_instance:
                    self._set_instance_info(instance,
                                            old_instance_type)

        migration.status = 'finished'
        with migration.obj_as_admin():
            migration.save()

        instance.vm_state = vm_states.RESIZED
        instance.task_state = None
        instance.launched_at = timeutils.utcnow()
        instance.save(expected_task_state=task_states.RESIZE_FINISH)

        self._notify_about_instance_usage(
            context, instance, "finish_resize.end",
            network_info=network_info)

    @wrap_exception()
    @reverts_task_state
    @wrap_instance_event
    @errors_out_migration
    @wrap_instance_fault
    def finish_resize(self, context, disk_info, image, instance,
                      reservations, migration):
        """Completes the migration process.

        Sets up the newly transferred disk and turns on the instance at its
        new host machine.

        """
        quotas = objects.Quotas.from_reservations(context,
                                                  reservations,
                                                  instance=instance)
        try:
            self._finish_resize(context, instance, migration,
                                disk_info, image)
            quotas.commit()
        except Exception:
            LOG.exception(_LE('Setting instance vm_state to ERROR'),
                          instance=instance)
            with excutils.save_and_reraise_exception():
                try:
                    quotas.rollback()
                except Exception as qr_error:
                    LOG.exception(_LE("Failed to rollback quota for failed "
                                      "finish_resize: %s"),
                                  qr_error, instance=instance)
                self._set_instance_error_state(context, instance)

    @object_compat
    @wrap_exception()
    @wrap_instance_fault
    def add_fixed_ip_to_instance(self, context, network_id, instance):
        """Calls network_api to add new fixed_ip to instance
        then injects the new network info and resets instance networking.
""" self._notify_about_instance_usage( context, instance, "create_ip.start") network_info = self.network_api.add_fixed_ip_to_instance(context, instance, network_id) self._inject_network_info(context, instance, network_info) self.reset_network(context, instance) # NOTE(russellb) We just want to bump updated_at. See bug 1143466. instance.updated_at = timeutils.utcnow() instance.save() self._notify_about_instance_usage( context, instance, "create_ip.end", network_info=network_info) @object_compat @wrap_exception() @wrap_instance_fault def remove_fixed_ip_from_instance(self, context, address, instance): """Calls network_api to remove existing fixed_ip from instance by injecting the altered network info and resetting instance networking. """ self._notify_about_instance_usage( context, instance, "delete_ip.start") network_info = self.network_api.remove_fixed_ip_from_instance(context, instance, address) self._inject_network_info(context, instance, network_info) self.reset_network(context, instance) # NOTE(russellb) We just want to bump updated_at. See bug 1143466. instance.updated_at = timeutils.utcnow() instance.save() self._notify_about_instance_usage( context, instance, "delete_ip.end", network_info=network_info) @wrap_exception() @reverts_task_state @wrap_instance_event @wrap_instance_fault def pause_instance(self, context, instance): """Pause an instance on this host.""" context = context.elevated() LOG.audit(_('Pausing'), context=context, instance=instance) self._notify_about_instance_usage(context, instance, 'pause.start') self.driver.pause(instance) instance.power_state = self._get_power_state(context, instance) instance.vm_state = vm_states.PAUSED instance.task_state = None instance.save(expected_task_state=task_states.PAUSING) self._notify_about_instance_usage(context, instance, 'pause.end') @wrap_exception() @reverts_task_state @wrap_instance_event @wrap_instance_fault def unpause_instance(self, context, instance): """Unpause a paused instance on this host.""" context = context.elevated() LOG.audit(_('Unpausing'), context=context, instance=instance) self._notify_about_instance_usage(context, instance, 'unpause.start') self.driver.unpause(instance) instance.power_state = self._get_power_state(context, instance) instance.vm_state = vm_states.ACTIVE instance.task_state = None instance.save(expected_task_state=task_states.UNPAUSING) self._notify_about_instance_usage(context, instance, 'unpause.end') @wrap_exception() def host_power_action(self, context, action): """Reboots, shuts down or powers up the host.""" return self.driver.host_power_action(action) @wrap_exception() def host_maintenance_mode(self, context, host, mode): """Start/Stop host maintenance window. On start, it triggers guest VMs evacuation. 
""" return self.driver.host_maintenance_mode(host, mode) @wrap_exception() def set_host_enabled(self, context, enabled): """Sets the specified host's ability to accept new instances.""" return self.driver.set_host_enabled(enabled) @wrap_exception() def get_host_uptime(self, context): """Returns the result of calling "uptime" on the target host.""" return self.driver.get_host_uptime() @object_compat @wrap_exception() @wrap_instance_fault def get_diagnostics(self, context, instance): """Retrieve diagnostics for an instance on this host.""" current_power_state = self._get_power_state(context, instance) if current_power_state == power_state.RUNNING: LOG.audit(_("Retrieving diagnostics"), context=context, instance=instance) return self.driver.get_diagnostics(instance) else: raise exception.InstanceInvalidState( attr='power_state', instance_uuid=instance.uuid, state=instance.power_state, method='get_diagnostics') @object_compat @wrap_exception() @wrap_instance_fault def get_instance_diagnostics(self, context, instance): """Retrieve diagnostics for an instance on this host.""" current_power_state = self._get_power_state(context, instance) if current_power_state == power_state.RUNNING: LOG.audit(_("Retrieving diagnostics"), context=context, instance=instance) diags = self.driver.get_instance_diagnostics(instance) return diags.serialize() else: raise exception.InstanceInvalidState( attr='power_state', instance_uuid=instance.uuid, state=instance.power_state, method='get_diagnostics') @wrap_exception() @reverts_task_state @wrap_instance_event @wrap_instance_fault def suspend_instance(self, context, instance): """Suspend the given instance.""" context = context.elevated() # Store the old state instance.system_metadata['old_vm_state'] = instance.vm_state self._notify_about_instance_usage(context, instance, 'suspend.start') with self._error_out_instance_on_exception(context, instance, instance_state=instance.vm_state): self.driver.suspend(instance) instance.power_state = self._get_power_state(context, instance) instance.vm_state = vm_states.SUSPENDED instance.task_state = None instance.save(expected_task_state=task_states.SUSPENDING) self._notify_about_instance_usage(context, instance, 'suspend.end') @wrap_exception() @reverts_task_state @wrap_instance_event @wrap_instance_fault def resume_instance(self, context, instance): """Resume the given suspended instance.""" context = context.elevated() LOG.audit(_('Resuming'), context=context, instance=instance) self._notify_about_instance_usage(context, instance, 'resume.start') network_info = self._get_instance_nw_info(context, instance) block_device_info = self._get_instance_block_device_info( context, instance) with self._error_out_instance_on_exception(context, instance, instance_state=instance.vm_state): self.driver.resume(context, instance, network_info, block_device_info) instance.power_state = self._get_power_state(context, instance) # We default to the ACTIVE state for backwards compatibility instance.vm_state = instance.system_metadata.pop('old_vm_state', vm_states.ACTIVE) instance.task_state = None instance.save(expected_task_state=task_states.RESUMING) self._notify_about_instance_usage(context, instance, 'resume.end') @wrap_exception() @reverts_task_state @wrap_instance_event @wrap_instance_fault def shelve_instance(self, context, instance, image_id, clean_shutdown=True): """Shelve an instance. This should be used when you want to take a snapshot of the instance. 
        It also adds system_metadata that can be used by a periodic task to
        offload the shelved instance after a period of time.

        :param context: request context
        :param instance: an Instance object
        :param image_id: an image id to snapshot to.
        :param clean_shutdown: give the GuestOS a chance to stop
        """
        self.conductor_api.notify_usage_exists(
            context, instance, current_period=True)
        self._notify_about_instance_usage(context, instance, 'shelve.start')

        def update_task_state(task_state,
                              expected_state=task_states.SHELVING):
            shelving_state_map = {
                    task_states.IMAGE_PENDING_UPLOAD:
                        task_states.SHELVING_IMAGE_PENDING_UPLOAD,
                    task_states.IMAGE_UPLOADING:
                        task_states.SHELVING_IMAGE_UPLOADING,
                    task_states.SHELVING: task_states.SHELVING}
            task_state = shelving_state_map[task_state]
            expected_state = shelving_state_map[expected_state]
            instance.task_state = task_state
            instance.save(expected_task_state=expected_state)

        self._power_off_instance(context, instance, clean_shutdown)
        self.driver.snapshot(context, instance, image_id, update_task_state)

        instance.system_metadata['shelved_at'] = timeutils.strtime()
        instance.system_metadata['shelved_image_id'] = image_id
        instance.system_metadata['shelved_host'] = self.host
        instance.vm_state = vm_states.SHELVED
        instance.task_state = None
        if CONF.shelved_offload_time == 0:
            instance.task_state = task_states.SHELVING_OFFLOADING
        instance.power_state = self._get_power_state(context, instance)
        instance.save(expected_task_state=[
                task_states.SHELVING,
                task_states.SHELVING_IMAGE_UPLOADING])

        self._notify_about_instance_usage(context, instance, 'shelve.end')

        if CONF.shelved_offload_time == 0:
            self.shelve_offload_instance(context, instance,
                                         clean_shutdown=False)

    @wrap_exception()
    @reverts_task_state
    @wrap_instance_fault
    def shelve_offload_instance(self, context, instance,
                                clean_shutdown=True):
        """Remove a shelved instance from the hypervisor.

        This frees up those resources for use by other instances, but may
        lead to slower unshelve times for this instance.  This method is used
        by volume backed instances since restoring them doesn't involve the
        potentially large download of an image.

        :param context: request context
        :param instance: nova.objects.instance.Instance
        :param clean_shutdown: give the GuestOS a chance to stop
        """
        self._notify_about_instance_usage(context, instance,
                'shelve_offload.start')

        self._power_off_instance(context, instance, clean_shutdown)
        current_power_state = self._get_power_state(context, instance)

        self.network_api.cleanup_instance_network_on_host(context, instance,
                                                          instance.host)
        network_info = self._get_instance_nw_info(context, instance)
        block_device_info = self._get_instance_block_device_info(context,
                                                                 instance)
        self.driver.destroy(context, instance, network_info,
                block_device_info)

        instance.power_state = current_power_state
        instance.host = None
        instance.node = None
        instance.vm_state = vm_states.SHELVED_OFFLOADED
        instance.task_state = None
        instance.save(expected_task_state=[task_states.SHELVING,
                                           task_states.SHELVING_OFFLOADING])
        self._notify_about_instance_usage(context, instance,
                'shelve_offload.end')

    @wrap_exception()
    @reverts_task_state
    @wrap_instance_event
    @wrap_instance_fault
    def unshelve_instance(self, context, instance, image,
                          filter_properties=None, node=None):
        """Unshelve the instance.

        :param context: request context
        :param instance: a nova.objects.instance.Instance object
        :param image: an image to build from.  If None we assume a
            volume backed instance.
        :param filter_properties: dict containing limits, retry info etc.
:param node: target compute node """ if filter_properties is None: filter_properties = {} @utils.synchronized(instance['uuid']) def do_unshelve_instance(): self._unshelve_instance(context, instance, image, filter_properties, node) do_unshelve_instance() def _unshelve_instance_key_scrub(self, instance): """Remove data from the instance that may cause side effects.""" cleaned_keys = dict( key_data=instance.key_data, auto_disk_config=instance.auto_disk_config) instance.key_data = None instance.auto_disk_config = False return cleaned_keys def _unshelve_instance_key_restore(self, instance, keys): """Restore previously scrubbed keys before saving the instance.""" instance.update(keys) def _unshelve_instance(self, context, instance, image, filter_properties, node): self._notify_about_instance_usage(context, instance, 'unshelve.start') instance.task_state = task_states.SPAWNING instance.save() bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( context, instance.uuid) block_device_info = self._prep_block_device(context, instance, bdms, do_check_attach=False) scrubbed_keys = self._unshelve_instance_key_scrub(instance) if node is None: node = self.driver.get_available_nodes()[0] LOG.debug('No node specified, defaulting to %s', node, instance=instance) rt = self._get_resource_tracker(node) limits = filter_properties.get('limits', {}) if image: shelved_image_ref = instance.image_ref instance.image_ref = image['id'] self.network_api.setup_instance_network_on_host(context, instance, self.host) network_info = self._get_instance_nw_info(context, instance) try: with rt.instance_claim(context, instance, limits): flavor = None if filter_properties is not None: flavor = filter_properties.get('instance_type') self.driver.spawn(context, instance, image, injected_files=[], admin_password=None, network_info=network_info, block_device_info=block_device_info, flavor=flavor) except Exception: with excutils.save_and_reraise_exception(): LOG.exception(_LE('Instance failed to spawn'), instance=instance) if image: instance.image_ref = shelved_image_ref self.image_api.delete(context, image['id']) self._unshelve_instance_key_restore(instance, scrubbed_keys) instance.power_state = self._get_power_state(context, instance) instance.vm_state = vm_states.ACTIVE instance.task_state = None instance.launched_at = timeutils.utcnow() instance.save(expected_task_state=task_states.SPAWNING) self._notify_about_instance_usage(context, instance, 'unshelve.end') @messaging.expected_exceptions(NotImplementedError) @wrap_instance_fault def reset_network(self, context, instance): """Reset networking on the given instance.""" LOG.debug('Reset network', context=context, instance=instance) self.driver.reset_network(instance) def _inject_network_info(self, context, instance, network_info): """Inject network info for the given instance.""" LOG.debug('Inject network info', context=context, instance=instance) LOG.debug('network_info to inject: |%s|', network_info, instance=instance) self.driver.inject_network_info(instance, network_info) @wrap_instance_fault def inject_network_info(self, context, instance): """Inject network info, but don't return the info.""" network_info = self._get_instance_nw_info(context, instance) self._inject_network_info(context, instance, network_info) @object_compat @messaging.expected_exceptions(NotImplementedError, exception.InstanceNotFound) @wrap_exception() @wrap_instance_fault def get_console_output(self, context, instance, tail_length): """Send the console output for the given instance.""" context = 
context.elevated() LOG.audit(_("Get console output"), context=context, instance=instance) output = self.driver.get_console_output(context, instance) if tail_length is not None: output = self._tail_log(output, tail_length) return output.decode('utf-8', 'replace').encode('ascii', 'replace') def _tail_log(self, log, length): try: length = int(length) except ValueError: length = 0 if length == 0: return '' else: return '\n'.join(log.split('\n')[-int(length):]) @messaging.expected_exceptions(exception.ConsoleTypeInvalid, exception.InstanceNotReady, exception.InstanceNotFound, exception.ConsoleTypeUnavailable, NotImplementedError) @object_compat @wrap_exception() @wrap_instance_fault def get_vnc_console(self, context, console_type, instance): """Return connection information for a vnc console.""" context = context.elevated() LOG.debug("Getting vnc console", instance=instance) token = str(uuid.uuid4()) if not CONF.vnc_enabled: raise exception.ConsoleTypeUnavailable(console_type=console_type) if console_type == 'novnc': # For essex, novncproxy_base_url must include the full path # including the html file (like http://myhost/vnc_auto.html) access_url = '%s?token=%s' % (CONF.novncproxy_base_url, token) elif console_type == 'xvpvnc': access_url = '%s?token=%s' % (CONF.xvpvncproxy_base_url, token) else: raise exception.ConsoleTypeInvalid(console_type=console_type) try: # Retrieve connect info from driver, and then decorate with our # access info token console = self.driver.get_vnc_console(context, instance) connect_info = console.get_connection_info(token, access_url) except exception.InstanceNotFound: if instance['vm_state'] != vm_states.BUILDING: raise raise exception.InstanceNotReady(instance_id=instance['uuid']) return connect_info @object_compat @messaging.expected_exceptions(exception.ConsoleTypeInvalid, exception.InstanceNotReady, exception.InstanceNotFound, exception.ConsoleTypeUnavailable) @wrap_exception() @wrap_instance_fault def get_spice_console(self, context, console_type, instance): """Return connection information for a spice console.""" context = context.elevated() LOG.debug("Getting spice console", instance=instance) token = str(uuid.uuid4()) if not CONF.spice.enabled: raise exception.ConsoleTypeUnavailable(console_type=console_type) if console_type == 'spice-html5': # For essex, spicehtml5proxy_base_url must include the full path # including the html file (like http://myhost/spice_auto.html) access_url = '%s?token=%s' % (CONF.spice.html5proxy_base_url, token) else: raise exception.ConsoleTypeInvalid(console_type=console_type) try: # Retrieve connect info from driver, and then decorate with our # access info token console = self.driver.get_spice_console(context, instance) connect_info = console.get_connection_info(token, access_url) except exception.InstanceNotFound: if instance['vm_state'] != vm_states.BUILDING: raise raise exception.InstanceNotReady(instance_id=instance['uuid']) return connect_info @object_compat @messaging.expected_exceptions(exception.ConsoleTypeInvalid, exception.InstanceNotReady, exception.InstanceNotFound, exception.ConsoleTypeUnavailable, NotImplementedError) @wrap_exception() @wrap_instance_fault def get_rdp_console(self, context, console_type, instance): """Return connection information for a RDP console.""" context = context.elevated() LOG.debug("Getting RDP console", instance=instance) token = str(uuid.uuid4()) if not CONF.rdp.enabled: raise exception.ConsoleTypeUnavailable(console_type=console_type) if console_type == 'rdp-html5': access_url = 
'%s?token=%s' % (CONF.rdp.html5_proxy_base_url, token) else: raise exception.ConsoleTypeInvalid(console_type=console_type) try: # Retrieve connect info from driver, and then decorate with our # access info token console = self.driver.get_rdp_console(context, instance) connect_info = console.get_connection_info(token, access_url) except exception.InstanceNotFound: if instance['vm_state'] != vm_states.BUILDING: raise raise exception.InstanceNotReady(instance_id=instance['uuid']) return connect_info @messaging.expected_exceptions( exception.ConsoleTypeInvalid, exception.InstanceNotReady, exception.InstanceNotFound, exception.ConsoleTypeUnavailable, exception.SocketPortRangeExhaustedException, exception.ImageSerialPortNumberInvalid, exception.ImageSerialPortNumberExceedFlavorValue, NotImplementedError) @wrap_exception() @wrap_instance_fault def get_serial_console(self, context, console_type, instance): """Returns connection information for a serial console.""" LOG.debug("Getting serial console", instance=instance) if not CONF.serial_console.enabled: raise exception.ConsoleTypeUnavailable(console_type=console_type) context = context.elevated() token = str(uuid.uuid4()) access_url = '%s?token=%s' % (CONF.serial_console.base_url, token) try: # Retrieve connect info from driver, and then decorate with our # access info token console = self.driver.get_serial_console(context, instance) connect_info = console.get_connection_info(token, access_url) except exception.InstanceNotFound: if instance.vm_state != vm_states.BUILDING: raise raise exception.InstanceNotReady(instance_id=instance['uuid']) return connect_info @messaging.expected_exceptions(exception.ConsoleTypeInvalid, exception.InstanceNotReady, exception.InstanceNotFound) @object_compat @wrap_exception() @wrap_instance_fault def validate_console_port(self, ctxt, instance, port, console_type): if console_type == "spice-html5": console_info = self.driver.get_spice_console(ctxt, instance) elif console_type == "rdp-html5": console_info = self.driver.get_rdp_console(ctxt, instance) elif console_type == "serial": console_info = self.driver.get_serial_console(ctxt, instance) else: console_info = self.driver.get_vnc_console(ctxt, instance) return console_info.port == port @object_compat @wrap_exception() @reverts_task_state @wrap_instance_fault def reserve_block_device_name(self, context, instance, device, volume_id, disk_bus=None, device_type=None, return_bdm_object=False): # NOTE(ndipanov): disk_bus and device_type will be set to None if not # passed (by older clients) and defaulted by the virt driver. Remove # default values on the next major RPC version bump. 
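# NOTE: a stand-alone illustrative sketch (not part of this module) of the
# per-instance serialized reservation pattern used by do_reserve below:
# lock on the instance so two concurrent attach requests cannot pick the
# same device name, and record the reservation before returning it. The
# pick_free_name/record_reservation callables are hypothetical stand-ins
# for the BDM lookup and create calls.
import threading
from collections import defaultdict

_locks = defaultdict(threading.Lock)

def reserve_device_name(instance_uuid, pick_free_name, record_reservation):
    # One lock per instance: concurrent reservations for the same instance
    # are serialized, so each caller sees the previous caller's record.
    with _locks[instance_uuid]:
        name = pick_free_name(instance_uuid)
        record_reservation(instance_uuid, name)
        return name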
@utils.synchronized(instance['uuid']) def do_reserve(): bdms = ( objects.BlockDeviceMappingList.get_by_instance_uuid( context, instance.uuid)) device_name = compute_utils.get_device_name_for_instance( context, instance, bdms, device) # NOTE(vish): create bdm here to avoid race condition bdm = objects.BlockDeviceMapping( context=context, source_type='volume', destination_type='volume', instance_uuid=instance.uuid, volume_id=volume_id or 'reserved', device_name=device_name, disk_bus=disk_bus, device_type=device_type) bdm.create() if return_bdm_object: return bdm else: return device_name return do_reserve() @object_compat @wrap_exception() @reverts_task_state @wrap_instance_fault def attach_volume(self, context, volume_id, mountpoint, instance, bdm=None): """Attach a volume to an instance.""" if not bdm: bdm = objects.BlockDeviceMapping.get_by_volume_id( context, volume_id) driver_bdm = driver_block_device.DriverVolumeBlockDevice(bdm) @utils.synchronized(instance.uuid) def do_attach_volume(context, instance, driver_bdm): try: return self._attach_volume(context, instance, driver_bdm) except Exception: with excutils.save_and_reraise_exception(): bdm.destroy() do_attach_volume(context, instance, driver_bdm) def _attach_volume(self, context, instance, bdm): context = context.elevated() LOG.audit(_('Attaching volume %(volume_id)s to %(mountpoint)s'), {'volume_id': bdm.volume_id, 'mountpoint': bdm['mount_device']}, context=context, instance=instance) try: bdm.attach(context, instance, self.volume_api, self.driver, do_check_attach=False, do_driver_attach=True) except Exception: with excutils.save_and_reraise_exception(): LOG.exception(_LE("Failed to attach %(volume_id)s " "at %(mountpoint)s"), {'volume_id': bdm.volume_id, 'mountpoint': bdm['mount_device']}, context=context, instance=instance) self.volume_api.unreserve_volume(context, bdm.volume_id) info = {'volume_id': bdm.volume_id} self._notify_about_instance_usage( context, instance, "volume.attach", extra_usage_info=info) def _detach_volume(self, context, instance, bdm): """Do the actual driver detach using block device mapping.""" mp = bdm.device_name volume_id = bdm.volume_id LOG.audit(_('Detach volume %(volume_id)s from mountpoint %(mp)s'), {'volume_id': volume_id, 'mp': mp}, context=context, instance=instance) connection_info = jsonutils.loads(bdm.connection_info) # NOTE(vish): We currently don't use the serial when disconnecting, # but added for completeness in case we ever do. 
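# NOTE: a minimal stand-alone restatement (illustrative only, not Nova code)
# of the serial-defaulting the NOTE above describes: make sure a volume's
# connection_info carries a 'serial' key, defaulting it to the volume id so
# a later disconnect can identify the attachment.
def ensure_serial(connection_info, volume_id):
    if connection_info and 'serial' not in connection_info:
        connection_info['serial'] = volume_id
    return connection_info

# ensure_serial({'driver_volume_type': 'iscsi'}, 'vol-1')
# -> {'driver_volume_type': 'iscsi', 'serial': 'vol-1'}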
if connection_info and 'serial' not in connection_info: connection_info['serial'] = volume_id try: if not self.driver.instance_exists(instance): LOG.warning(_LW('Detaching volume from unknown instance'), context=context, instance=instance) encryption = encryptors.get_encryption_metadata( context, self.volume_api, volume_id, connection_info) self.driver.detach_volume(connection_info, instance, mp, encryption=encryption) except exception.DiskNotFound as err: LOG.warning(_LW('Ignoring DiskNotFound exception while detaching ' 'volume %(volume_id)s from %(mp)s: %(err)s'), {'volume_id': volume_id, 'mp': mp, 'err': err}, instance=instance) except Exception: with excutils.save_and_reraise_exception(): LOG.exception(_LE('Failed to detach volume %(volume_id)s ' 'from %(mp)s'), {'volume_id': volume_id, 'mp': mp}, context=context, instance=instance) self.volume_api.roll_detaching(context, volume_id) @object_compat @wrap_exception() @reverts_task_state @wrap_instance_fault def detach_volume(self, context, volume_id, instance): """Detach a volume from an instance.""" bdm = objects.BlockDeviceMapping.get_by_volume_id( context, volume_id) if CONF.volume_usage_poll_interval > 0: vol_stats = [] mp = bdm.device_name # Handle bootable volumes which will not contain /dev/ if '/dev/' in mp: mp = mp[5:] try: vol_stats = self.driver.block_stats(instance, mp) except NotImplementedError: pass if vol_stats: LOG.debug("Updating volume usage cache with totals", instance=instance) rd_req, rd_bytes, wr_req, wr_bytes, flush_ops = vol_stats self.conductor_api.vol_usage_update(context, volume_id, rd_req, rd_bytes, wr_req, wr_bytes, instance, update_totals=True) self._detach_volume(context, instance, bdm) connector = self.driver.get_volume_connector(instance) self.volume_api.terminate_connection(context, volume_id, connector) bdm.destroy() info = dict(volume_id=volume_id) self._notify_about_instance_usage( context, instance, "volume.detach", extra_usage_info=info) self.volume_api.detach(context.elevated(), volume_id) def _init_volume_connection(self, context, new_volume_id, old_volume_id, connector, instance, bdm): new_cinfo = self.volume_api.initialize_connection(context, new_volume_id, connector) old_cinfo = jsonutils.loads(bdm['connection_info']) if old_cinfo and 'serial' not in old_cinfo: old_cinfo['serial'] = old_volume_id new_cinfo['serial'] = old_cinfo['serial'] return (old_cinfo, new_cinfo) def _swap_volume(self, context, instance, bdm, connector, old_volume_id, new_volume_id): mountpoint = bdm['device_name'] failed = False new_cinfo = None resize_to = 0 try: old_cinfo, new_cinfo = self._init_volume_connection(context, new_volume_id, old_volume_id, connector, instance, bdm) old_vol_size = self.volume_api.get(context, old_volume_id)['size'] new_vol_size = self.volume_api.get(context, new_volume_id)['size'] if new_vol_size > old_vol_size: resize_to = new_vol_size self.driver.swap_volume(old_cinfo, new_cinfo, instance, mountpoint, resize_to) except Exception: failed = True with excutils.save_and_reraise_exception(): if new_cinfo: msg = _LE("Failed to swap volume %(old_volume_id)s " "for %(new_volume_id)s") LOG.exception(msg, {'old_volume_id': old_volume_id, 'new_volume_id': new_volume_id}, context=context, instance=instance) else: msg = _LE("Failed to connect to volume %(volume_id)s " "with volume at %(mountpoint)s") LOG.exception(msg, {'volume_id': new_volume_id, 'mountpoint': bdm['device_name']}, context=context, instance=instance) self.volume_api.roll_detaching(context, old_volume_id) 
                self.volume_api.unreserve_volume(context, new_volume_id)
        finally:
            conn_volume = new_volume_id if failed else old_volume_id
            if new_cinfo:
                self.volume_api.terminate_connection(context,
                                                     conn_volume,
                                                     connector)
        # If Cinder initiated the swap, it will keep
        # the original ID
        comp_ret = self.volume_api.migrate_volume_completion(
                                                      context,
                                                      old_volume_id,
                                                      new_volume_id,
                                                      error=failed)
        return (comp_ret, new_cinfo)

    @wrap_exception()
    @reverts_task_state
    @wrap_instance_fault
    def swap_volume(self, context, old_volume_id, new_volume_id, instance):
        """Swap volume for an instance."""
        context = context.elevated()

        bdm = objects.BlockDeviceMapping.get_by_volume_id(
                context, old_volume_id, instance_uuid=instance.uuid)
        connector = self.driver.get_volume_connector(instance)
        comp_ret, new_cinfo = self._swap_volume(context, instance,
                                                bdm, connector,
                                                old_volume_id,
                                                new_volume_id)
        save_volume_id = comp_ret['save_volume_id']

        # Update bdm
        values = {
            'connection_info': jsonutils.dumps(new_cinfo),
            'delete_on_termination': False,
            'source_type': 'volume',
            'destination_type': 'volume',
            'snapshot_id': None,
            'volume_id': save_volume_id,
            'volume_size': None,
            'no_device': None}
        bdm.update(values)
        bdm.save()

    @wrap_exception()
    def remove_volume_connection(self, context, volume_id, instance):
        """Remove a volume connection using the volume api."""
        # NOTE(vish): We don't want to actually mark the volume
        #             detached, or delete the bdm, just remove the
        #             connection from this host.

        # NOTE(PhilDay): Can't use object_compat decorator here as
        #                instance is not the second parameter
        if isinstance(instance, dict):
            metas = ['metadata', 'system_metadata']
            instance = objects.Instance._from_db_object(
                    context, objects.Instance(), instance,
                    expected_attrs=metas)
            instance._context = context
        try:
            bdm = objects.BlockDeviceMapping.get_by_volume_id(
                    context, volume_id)
            self._detach_volume(context, instance, bdm)
            connector = self.driver.get_volume_connector(instance)
            self.volume_api.terminate_connection(context, volume_id,
                                                 connector)
        except exception.NotFound:
            pass

    @object_compat
    @wrap_exception()
    @reverts_task_state
    @wrap_instance_fault
    def attach_interface(self, context, instance, network_id, port_id,
                         requested_ip):
        """Use hotplug to add a network adapter to an instance."""
        network_info = self.network_api.allocate_port_for_instance(
            context, instance, port_id, network_id, requested_ip)
        if len(network_info) != 1:
            LOG.error(_LE('allocate_port_for_instance returned %(ports)s '
                          'ports'), dict(ports=len(network_info)))
            raise exception.InterfaceAttachFailed(
                    instance_uuid=instance.uuid)
        image_ref = instance.get('image_ref')
        image_meta = compute_utils.get_image_metadata(
            context, self.image_api, image_ref, instance)

        self.driver.attach_interface(instance, image_meta, network_info[0])
        return network_info[0]

    @object_compat
    @wrap_exception()
    @reverts_task_state
    @wrap_instance_fault
    def detach_interface(self, context, instance, port_id):
        """Detach a network adapter from an instance."""
        network_info = instance.info_cache.network_info
        condemned = None
        for vif in network_info:
            if vif['id'] == port_id:
                condemned = vif
                break
        if condemned is None:
            raise exception.PortNotFound(_("Port %s is not "
                                           "attached") % port_id)

        self.network_api.deallocate_port_for_instance(context, instance,
                                                      port_id)
        self.driver.detach_interface(instance, condemned)

    def _get_compute_info(self, context, host):
        return objects.ComputeNode.get_first_node_by_host_for_old_compat(
            context, host)

    @wrap_exception()
    def check_instance_shared_storage(self, ctxt, instance, data):
        """Check if the instance files are shared

        :param ctxt: security context
        :param instance: dict of instance data
        :param data: result of driver.check_instance_shared_storage_local

        Returns True if instance disks are located on shared storage and
        False otherwise.
        """
        return self.driver.check_instance_shared_storage_remote(ctxt, data)

    @wrap_exception()
    @wrap_instance_fault
    def check_can_live_migrate_destination(self, ctxt, instance,
                                           block_migration, disk_over_commit):
        """Check if it is possible to execute live migration.

        This runs checks on the destination host, and then calls
        back to the source host to check the results.

        :param ctxt: security context
        :param instance: dict of instance data
        :param block_migration: if true, prepare for block migration
        :param disk_over_commit: if true, allow disk over commit
        :returns: a dict containing migration info
        """
        src_compute_info = obj_base.obj_to_primitive(
            self._get_compute_info(ctxt, instance.host))
        dst_compute_info = obj_base.obj_to_primitive(
            self._get_compute_info(ctxt, CONF.host))
        dest_check_data = self.driver.check_can_live_migrate_destination(ctxt,
            instance, src_compute_info, dst_compute_info,
            block_migration, disk_over_commit)
        migrate_data = {}
        try:
            migrate_data = self.compute_rpcapi.\
                                check_can_live_migrate_source(ctxt, instance,
                                                              dest_check_data)
        finally:
            self.driver.check_can_live_migrate_destination_cleanup(ctxt,
                    dest_check_data)
        if 'migrate_data' in dest_check_data:
            migrate_data.update(dest_check_data['migrate_data'])
        return migrate_data

    @wrap_exception()
    @wrap_instance_fault
    def check_can_live_migrate_source(self, ctxt, instance, dest_check_data):
        """Check if it is possible to execute live migration.

        This checks if the live migration can succeed, based on the
        results from check_can_live_migrate_destination.

        :param ctxt: security context
        :param instance: dict of instance data
        :param dest_check_data: result of check_can_live_migrate_destination
        :returns: a dict containing migration info
        """
        is_volume_backed = self.compute_api.is_volume_backed_instance(ctxt,
                                                                      instance)
        dest_check_data['is_volume_backed'] = is_volume_backed
        block_device_info = self._get_instance_block_device_info(
                            ctxt, instance, refresh_conn_info=True)
        return self.driver.check_can_live_migrate_source(ctxt, instance,
                                                         dest_check_data,
                                                         block_device_info)

    @object_compat
    @wrap_exception()
    @wrap_instance_fault
    def pre_live_migration(self, context, instance, block_migration, disk,
                           migrate_data):
        """Preparations for live migration at dest host.

        :param context: security context
        :param instance: dict of instance data
        :param block_migration: if true, prepare for block migration
        :param disk: instance disk information (used for block migration)
        :param migrate_data: if not None, it is a dict which holds data
                             required for live migration without shared
                             storage.
        """
        block_device_info = self._get_instance_block_device_info(
                            context, instance, refresh_conn_info=True)

        network_info = self._get_instance_nw_info(context, instance)
        self._notify_about_instance_usage(
                     context, instance, "live_migration.pre.start",
                     network_info=network_info)

        pre_live_migration_data = self.driver.pre_live_migration(context,
                                       instance,
                                       block_device_info,
                                       network_info,
                                       disk,
                                       migrate_data)

        # NOTE(tr3buchet): setup networks on destination host
        self.network_api.setup_networks_on_host(context, instance,
                                                self.host)
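# NOTE: a stand-alone sketch (hypothetical objects, not the Nova RPC API)
# of the two-phase live-migration check implemented by
# check_can_live_migrate_destination/_source above: the destination
# validates first, the source then confirms using the destination's
# findings, and the destination cleans up its temporary check data whether
# or not the source check succeeds.
def can_live_migrate(src, dst, instance):
    dest_data = dst.check_destination(instance)
    try:
        migrate_data = src.check_source(instance, dest_data)
    finally:
        dst.cleanup_check(dest_data)
    return migrate_data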
        # Create filters on the hypervisor and firewall. An example is
        # nova-instance-instance-xxx, which is written to libvirt.xml
        # (check "virsh nwfilter-list"). This nwfilter is necessary on the
        # destination host, and this call also creates the filtering rules
        # on the destination host.
        self.driver.ensure_filtering_rules_for_instance(instance,
                                            network_info)

        self._notify_about_instance_usage(
                     context, instance, "live_migration.pre.end",
                     network_info=network_info)

        return pre_live_migration_data

    @wrap_exception()
    @wrap_instance_fault
    def live_migration(self, context, dest, instance, block_migration,
                       migrate_data):
        """Execute live migration.

        :param context: security context
        :param instance: a nova.objects.instance.Instance object
        :param dest: destination host
        :param block_migration: if true, prepare for block migration
        :param migrate_data: implementation specific params
        """
        # NOTE(danms): since instance is not the first parameter, we can't
        # use @object_compat on this method. Since this is the only example,
        # we do this manually instead of complicating the decorator
        if not isinstance(instance, obj_base.NovaObject):
            expected = ['metadata', 'system_metadata',
                        'security_groups', 'info_cache']
            instance = objects.Instance._from_db_object(
                context, objects.Instance(), instance,
                expected_attrs=expected)

        # Create a local copy since we'll be modifying the dictionary
        migrate_data = dict(migrate_data or {})
        try:
            if block_migration:
                block_device_info = self._get_instance_block_device_info(
                    context, instance)
                disk = self.driver.get_instance_disk_info(
                    instance, block_device_info=block_device_info)
            else:
                disk = None

            pre_migration_data = self.compute_rpcapi.pre_live_migration(
                context, instance,
                block_migration, disk, dest, migrate_data)
            migrate_data['pre_live_migration_result'] = pre_migration_data

        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE('Pre live migration failed at %s'),
                              dest, instance=instance)
                self._rollback_live_migration(context, instance, dest,
                                              block_migration, migrate_data)

        # Executing live migration
        # live_migration might raise exceptions, but
        # nothing must be recovered in this version.
        self.driver.live_migration(context, instance, dest,
                                   self._post_live_migration,
                                   self._rollback_live_migration,
                                   block_migration, migrate_data)
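# NOTE: the helper below is a stand-alone restatement (illustrative only)
# of the flag computation in _live_migration_cleanup_flags, defined next;
# it is handy for reasoning about the truth table. With no migrate_data,
# a block migration implies nothing was shared, so everything is cleaned
# up; a non-block migration implies both were shared, so nothing is.
def cleanup_flags(block_migration, migrate_data=None):
    shared_block = not block_migration
    shared_path = not block_migration
    if migrate_data:
        shared_block = migrate_data.get('is_shared_block_storage',
                                        shared_block)
        shared_path = migrate_data.get('is_shared_instance_path',
                                       shared_path)
    do_cleanup = block_migration or not shared_path
    destroy_disks = not shared_block
    return do_cleanup, destroy_disks

# cleanup_flags(True) -> (True, True); cleanup_flags(False) -> (False, False)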
    def _live_migration_cleanup_flags(self, block_migration, migrate_data):
        """Determine whether disks or the instance path need to be cleaned
        up after live migration (at source on success, at destination on
        rollback)

        Block migration needs an empty image at the destination host
        before migration starts, so if any failure occurs, any empty
        images have to be deleted.

        Also Volume backed live migration w/o shared storage needs to delete
        newly created instance-xxx dir on the destination as a part of its
        rollback process

        :param block_migration: if true, it was a block migration
        :param migrate_data: implementation specific data
        :returns: (bool, bool) -- do_cleanup, destroy_disks
        """
        # NOTE(angdraug): block migration wouldn't have been allowed if
        # either block storage or instance path were shared
        is_shared_block_storage = not block_migration
        is_shared_instance_path = not block_migration
        if migrate_data:
            is_shared_block_storage = migrate_data.get(
                    'is_shared_block_storage', is_shared_block_storage)
            is_shared_instance_path = migrate_data.get(
                    'is_shared_instance_path', is_shared_instance_path)

        # No instance booting at the source host, but the instance dir
        # must be deleted when preparing for the next block migration
        # or the next live migration w/o shared storage
        do_cleanup = block_migration or not is_shared_instance_path
        destroy_disks = not is_shared_block_storage

        return (do_cleanup, destroy_disks)

    @wrap_exception()
    @wrap_instance_fault
    def _post_live_migration(self, ctxt, instance,
                             dest, block_migration=False, migrate_data=None):
        """Post operations for live migration.

        This method is called from live_migration and mainly updates the
        database record.

        :param ctxt: security context
        :param instance: instance dict
        :param dest: destination host
        :param block_migration: if true, prepare for block migration
        :param migrate_data: if not None, it is a dict which has data
        required for live migration without shared storage

        """
        LOG.info(_LI('_post_live_migration() is started..'),
                 instance=instance)

        bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
                ctxt, instance['uuid'])

        # Cleanup source host post live-migration
        block_device_info = self._get_instance_block_device_info(
                            ctxt, instance, bdms=bdms)
        self.driver.post_live_migration(ctxt, instance, block_device_info,
                                        migrate_data)

        # Detaching volumes.
        connector = self.driver.get_volume_connector(instance)
        for bdm in bdms:
            # NOTE(vish): We don't want to actually mark the volume
            #             detached, or delete the bdm, just remove the
            #             connection from this host.

            # remove the volume connection without detaching from hypervisor
            # because the instance is not running anymore on the current host
            if bdm.is_volume:
                self.volume_api.terminate_connection(ctxt, bdm.volume_id,
                                                     connector)

        # Releasing vlan.
        # (not necessary in current implementation?)

        network_info = self._get_instance_nw_info(ctxt, instance)

        self._notify_about_instance_usage(ctxt, instance,
                                          "live_migration._post.start",
                                          network_info=network_info)

        # Releasing security group ingress rule.
        self.driver.unfilter_instance(instance,
                                      network_info)

        migration = {'source_compute': self.host,
                     'dest_compute': dest, }
        self.network_api.migrate_instance_start(ctxt,
                                                instance,
                                                migration)

        destroy_vifs = False
        try:
            self.driver.post_live_migration_at_source(ctxt, instance,
                                                      network_info)
        except NotImplementedError as ex:
            LOG.debug(ex, instance=instance)
            # For all hypervisors other than libvirt, there is a possibility
            # they are unplugging networks from source node in the cleanup
            # method
            destroy_vifs = True

        # Define domain at destination host, without doing it,
        # pause/suspend/terminate do not work.
self.compute_rpcapi.post_live_migration_at_destination(ctxt, instance, block_migration, dest) do_cleanup, destroy_disks = self._live_migration_cleanup_flags( block_migration, migrate_data) if do_cleanup: self.driver.cleanup(ctxt, instance, network_info, destroy_disks=destroy_disks, migrate_data=migrate_data, destroy_vifs=destroy_vifs) # NOTE(tr3buchet): tear down networks on source host self.network_api.setup_networks_on_host(ctxt, instance, self.host, teardown=True) self.instance_events.clear_events_for_instance(instance) # NOTE(timello): make sure we update available resources on source # host even before next periodic task. self.update_available_resource(ctxt) self._notify_about_instance_usage(ctxt, instance, "live_migration._post.end", network_info=network_info) LOG.info(_LI('Migrating instance to %s finished successfully.'), dest, instance=instance) LOG.info(_LI("You may see the error \"libvirt: QEMU error: " "Domain not found: no domain with matching name.\" " "This error can be safely ignored."), instance=instance) if CONF.vnc_enabled or CONF.spice.enabled or CONF.rdp.enabled: if CONF.cells.enable: self.cells_rpcapi.consoleauth_delete_tokens(ctxt, instance['uuid']) else: self.consoleauth_rpcapi.delete_tokens_for_instance(ctxt, instance['uuid']) @object_compat @wrap_exception() @wrap_instance_fault def post_live_migration_at_destination(self, context, instance, block_migration): """Post operations for live migration . :param context: security context :param instance: Instance dict :param block_migration: if true, prepare for block migration """ LOG.info(_LI('Post operation of migration started'), instance=instance) # NOTE(tr3buchet): setup networks on destination host # this is called a second time because # multi_host does not create the bridge in # plug_vifs self.network_api.setup_networks_on_host(context, instance, self.host) migration = {'source_compute': instance['host'], 'dest_compute': self.host, } self.network_api.migrate_instance_finish(context, instance, migration) network_info = self._get_instance_nw_info(context, instance) self._notify_about_instance_usage( context, instance, "live_migration.post.dest.start", network_info=network_info) block_device_info = self._get_instance_block_device_info(context, instance) self.driver.post_live_migration_at_destination(context, instance, network_info, block_migration, block_device_info) # Restore instance state current_power_state = self._get_power_state(context, instance) node_name = None try: compute_node = self._get_compute_info(context, self.host) node_name = compute_node.hypervisor_hostname except exception.ComputeHostNotFound: LOG.exception(_LE('Failed to get compute_info for %s'), self.host) finally: instance.host = self.host instance.power_state = current_power_state instance.vm_state = vm_states.ACTIVE instance.task_state = None instance.node = node_name instance.save(expected_task_state=task_states.MIGRATING) # NOTE(vish): this is necessary to update dhcp self.network_api.setup_networks_on_host(context, instance, self.host) self._notify_about_instance_usage( context, instance, "live_migration.post.dest.end", network_info=network_info) @wrap_exception() @wrap_instance_fault def _rollback_live_migration(self, context, instance, dest, block_migration, migrate_data=None): """Recovers Instance/volume state from migrating -> running. :param context: security context :param instance: nova.db.sqlalchemy.models.Instance :param dest: This method is called from live migration src host. This param specifies destination host. 
:param block_migration: if true, prepare for block migration :param migrate_data: if not none, contains implementation specific data. """ instance.vm_state = vm_states.ACTIVE instance.task_state = None instance.save(expected_task_state=[task_states.MIGRATING]) # NOTE(tr3buchet): setup networks on source host (really it's re-setup) self.network_api.setup_networks_on_host(context, instance, self.host) bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( context, instance['uuid']) for bdm in bdms: if bdm.is_volume: self.compute_rpcapi.remove_volume_connection( context, instance, bdm.volume_id, dest) self._notify_about_instance_usage(context, instance, "live_migration._rollback.start") do_cleanup, destroy_disks = self._live_migration_cleanup_flags( block_migration, migrate_data) if do_cleanup: self.compute_rpcapi.rollback_live_migration_at_destination( context, instance, dest, destroy_disks=destroy_disks, migrate_data=migrate_data) self._notify_about_instance_usage(context, instance, "live_migration._rollback.end") @object_compat @wrap_exception() @wrap_instance_fault def rollback_live_migration_at_destination(self, context, instance, destroy_disks=True, migrate_data=None): """Cleaning up image directory that is created pre_live_migration. :param context: security context :param instance: a nova.objects.instance.Instance object sent over rpc """ network_info = self._get_instance_nw_info(context, instance) self._notify_about_instance_usage( context, instance, "live_migration.rollback.dest.start", network_info=network_info) # NOTE(tr3buchet): tear down networks on destination host self.network_api.setup_networks_on_host(context, instance, self.host, teardown=True) # NOTE(vish): The mapping is passed in so the driver can disconnect # from remote volumes if necessary block_device_info = self._get_instance_block_device_info(context, instance) self.driver.rollback_live_migration_at_destination( context, instance, network_info, block_device_info, destroy_disks=destroy_disks, migrate_data=migrate_data) self._notify_about_instance_usage( context, instance, "live_migration.rollback.dest.end", network_info=network_info) @periodic_task.periodic_task( spacing=CONF.heal_instance_info_cache_interval) def _heal_instance_info_cache(self, context): """Called periodically. On every call, try to update the info_cache's network information for another instance by calling to the network manager. This is implemented by keeping a cache of uuids of instances that live on this host. On each call, we pop one off of a list, pull the DB record, and try the call to the network API. If anything errors don't fail, as it's possible the instance has been deleted, etc. """ heal_interval = CONF.heal_instance_info_cache_interval if not heal_interval: return instance_uuids = getattr(self, '_instance_uuids_to_heal', []) instance = None LOG.debug('Starting heal instance info cache') if not instance_uuids: # The list of instances to heal is empty so rebuild it LOG.debug('Rebuilding the list of instances to heal') db_instances = objects.InstanceList.get_by_host( context, self.host, expected_attrs=[], use_slave=True) for inst in db_instances: # We don't want to refresh the cache for instances # which are building or deleting so don't put them # in the list. If they are building they will get # added to the list next time we build it. 
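# NOTE: a stand-alone sketch (not Nova code) of the round-robin healing
# strategy and skip rules described in the comment above: keep a queue of
# instance uuids, refresh one per periodic tick, and rebuild the queue,
# skipping building/deleting instances, when it empties. The two callables
# are hypothetical stand-ins for the DB and network API calls.
class InfoCacheHealer(object):
    def __init__(self, list_instances, refresh_one):
        self.list_instances = list_instances   # -> [(uuid, state), ...]
        self.refresh_one = refresh_one         # refreshes a single uuid
        self.queue = []

    def tick(self):
        if not self.queue:
            # Rebuild, skipping instances that are building or deleting.
            self.queue = [uuid for uuid, state in self.list_instances()
                          if state not in ('building', 'deleting')]
        if self.queue:
            self.refresh_one(self.queue.pop(0))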
                if (inst.vm_state == vm_states.BUILDING):
                    LOG.debug('Skipping network cache update for instance '
                              'because it is Building.', instance=inst)
                    continue
                if (inst.task_state == task_states.DELETING):
                    LOG.debug('Skipping network cache update for instance '
                              'because it is being deleted.', instance=inst)
                    continue

                if not instance:
                    # Save the first one we find so we don't
                    # have to get it again
                    instance = inst
                else:
                    instance_uuids.append(inst['uuid'])

            self._instance_uuids_to_heal = instance_uuids
        else:
            # Find the next valid instance on the list
            while instance_uuids:
                try:
                    inst = objects.Instance.get_by_uuid(
                            context, instance_uuids.pop(0),
                            expected_attrs=['system_metadata', 'info_cache'],
                            use_slave=True)
                except exception.InstanceNotFound:
                    # Instance is gone.  Try to grab another.
                    continue

                # Check the instance hasn't been migrated
                if inst.host != self.host:
                    LOG.debug('Skipping network cache update for instance '
                              'because it has been migrated to another '
                              'host.', instance=inst)
                # Check the instance isn't being deleted
                elif inst.task_state == task_states.DELETING:
                    LOG.debug('Skipping network cache update for instance '
                              'because it is being deleted.', instance=inst)
                else:
                    instance = inst
                    break

        if instance:
            # We have an instance now to refresh
            try:
                # Call to network API to get instance info.. this will
                # force an update to the instance's info_cache
                self._get_instance_nw_info(context, instance, use_slave=True)
                LOG.debug('Updated the network info_cache for instance',
                          instance=instance)
            except exception.InstanceNotFound:
                # Instance is gone.
                LOG.debug('Instance no longer exists. Unable to refresh',
                          instance=instance)
                return
            except Exception:
                LOG.error(_LE('An error occurred while refreshing the network '
                              'cache.'), instance=instance, exc_info=True)
        else:
            LOG.debug("Didn't find any instances for network info cache "
                      "update.")

    @periodic_task.periodic_task
    def _poll_rebooting_instances(self, context):
        if CONF.reboot_timeout > 0:
            filters = {'task_state':
                       [task_states.REBOOTING,
                        task_states.REBOOT_STARTED,
                        task_states.REBOOT_PENDING],
                       'host': self.host}
            rebooting = objects.InstanceList.get_by_filters(
                context, filters, expected_attrs=[], use_slave=True)

            to_poll = []
            for instance in rebooting:
                if timeutils.is_older_than(instance['updated_at'],
                                           CONF.reboot_timeout):
                    to_poll.append(instance)

            self.driver.poll_rebooting_instances(CONF.reboot_timeout, to_poll)

    @periodic_task.periodic_task
    def _poll_rescued_instances(self, context):
        if CONF.rescue_timeout > 0:
            filters = {'vm_state': vm_states.RESCUED,
                       'host': self.host}
            rescued_instances = objects.InstanceList.get_by_filters(
                context, filters, expected_attrs=["system_metadata"],
                use_slave=True)

            to_unrescue = []
            for instance in rescued_instances:
                if timeutils.is_older_than(instance['launched_at'],
                                           CONF.rescue_timeout):
                    to_unrescue.append(instance)

            for instance in to_unrescue:
                self.compute_api.unrescue(context, instance)
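# NOTE: the periodic pollers above share one test: select instances whose
# timestamp is older than a configured timeout. A stand-alone restatement
# (illustrative only; datetime timestamps assumed):
import datetime

def older_than(instances, attr, timeout_seconds, now=None):
    now = now or datetime.datetime.utcnow()
    cutoff = datetime.timedelta(seconds=timeout_seconds)
    return [inst for inst in instances if now - inst[attr] > cutoff]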
    @periodic_task.periodic_task
    def _poll_unconfirmed_resizes(self, context):
        if CONF.resize_confirm_window == 0:
            return

        migrations = objects.MigrationList.get_unconfirmed_by_dest_compute(
                context, CONF.resize_confirm_window, self.host,
                use_slave=True)

        migrations_info = dict(migration_count=len(migrations),
                confirm_window=CONF.resize_confirm_window)

        if migrations_info["migration_count"] > 0:
            LOG.info(_LI("Found %(migration_count)d unconfirmed migrations "
                         "older than %(confirm_window)d seconds"),
                     migrations_info)

        def _set_migration_to_error(migration, reason, **kwargs):
            LOG.warning(_LW("Setting migration %(migration_id)s to error: "
                            "%(reason)s"),
                        {'migration_id': migration['id'], 'reason': reason},
                        **kwargs)
            migration.status = 'error'
            with migration.obj_as_admin():
                migration.save()

        for migration in migrations:
            instance_uuid = migration.instance_uuid
            LOG.info(_LI("Automatically confirming migration "
                         "%(migration_id)s for instance %(instance_uuid)s"),
                     {'migration_id': migration.id,
                      'instance_uuid': instance_uuid})
            expected_attrs = ['metadata', 'system_metadata']
            try:
                instance = objects.Instance.get_by_uuid(context,
                            instance_uuid, expected_attrs=expected_attrs,
                            use_slave=True)
            except exception.InstanceNotFound:
                reason = (_("Instance %s not found") %
                          instance_uuid)
                _set_migration_to_error(migration, reason)
                continue
            if instance.vm_state == vm_states.ERROR:
                reason = _("In ERROR state")
                _set_migration_to_error(migration, reason,
                                        instance=instance)
                continue
            # race condition: an instance in DELETING state should not have
            # its migration set to error, otherwise an instance in RESIZED
            # state that is about to be deleted would never be able to
            # confirm the resize
            if instance.task_state in [task_states.DELETING,
                                       task_states.SOFT_DELETING]:
                msg = ("Instance being deleted or soft deleted during resize "
                       "confirmation. Skipping.")
                LOG.debug(msg, instance=instance)
                continue

            # race condition: This condition is hit when this method is
            # called between the save of the migration record with a status of
            # finished and the save of the instance object with a state of
            # RESIZED. The migration record should not be set to error.
            if instance.task_state == task_states.RESIZE_FINISH:
                msg = ("Instance still resizing during resize "
                       "confirmation. Skipping.")
                LOG.debug(msg, instance=instance)
                continue

            vm_state = instance.vm_state
            task_state = instance.task_state
            if vm_state != vm_states.RESIZED or task_state is not None:
                reason = (_("In states %(vm_state)s/%(task_state)s, not "
                            "RESIZED/None") %
                          {'vm_state': vm_state,
                           'task_state': task_state})
                _set_migration_to_error(migration, reason,
                                        instance=instance)
                continue
            try:
                self.compute_api.confirm_resize(context, instance,
                                                migration=migration)
            except Exception as e:
                LOG.info(_LI("Error auto-confirming resize: %s.
" "Will retry later."), e, instance=instance) @periodic_task.periodic_task(spacing=CONF.shelved_poll_interval) def _poll_shelved_instances(self, context): filters = {'vm_state': vm_states.SHELVED, 'host': self.host} shelved_instances = objects.InstanceList.get_by_filters( context, filters=filters, expected_attrs=['system_metadata'], use_slave=True) to_gc = [] for instance in shelved_instances: sys_meta = instance.system_metadata shelved_at = timeutils.parse_strtime(sys_meta['shelved_at']) if timeutils.is_older_than(shelved_at, CONF.shelved_offload_time): to_gc.append(instance) for instance in to_gc: try: instance.task_state = task_states.SHELVING_OFFLOADING instance.save() self.shelve_offload_instance(context, instance, clean_shutdown=False) except Exception: LOG.exception(_LE('Periodic task failed to offload instance.'), instance=instance) @periodic_task.periodic_task def _instance_usage_audit(self, context): if not CONF.instance_usage_audit: return if compute_utils.has_audit_been_run(context, self.conductor_api, self.host): return begin, end = utils.last_completed_audit_period() instances = objects.InstanceList.get_active_by_window_joined( context, begin, end, host=self.host, expected_attrs=['system_metadata', 'info_cache', 'metadata'], use_slave=True) num_instances = len(instances) errors = 0 successes = 0 LOG.info(_LI("Running instance usage audit for" " host %(host)s from %(begin_time)s to " "%(end_time)s. %(number_instances)s" " instances."), dict(host=self.host, begin_time=begin, end_time=end, number_instances=num_instances)) start_time = time.time() compute_utils.start_instance_usage_audit(context, self.conductor_api, begin, end, self.host, num_instances) for instance in instances: try: self.conductor_api.notify_usage_exists( context, instance, ignore_missing_network_data=False) successes += 1 except Exception: LOG.exception(_LE('Failed to generate usage ' 'audit for instance ' 'on host %s'), self.host, instance=instance) errors += 1 compute_utils.finish_instance_usage_audit(context, self.conductor_api, begin, end, self.host, errors, "Instance usage audit ran " "for host %s, %s instances " "in %s seconds." % ( self.host, num_instances, time.time() - start_time)) @periodic_task.periodic_task(spacing=CONF.bandwidth_poll_interval) def _poll_bandwidth_usage(self, context): if not self._bw_usage_supported: return prev_time, start_time = utils.last_completed_audit_period() curr_time = time.time() if (curr_time - self._last_bw_usage_poll > CONF.bandwidth_poll_interval): self._last_bw_usage_poll = curr_time LOG.info(_LI("Updating bandwidth usage cache")) cells_update_interval = CONF.cells.bandwidth_update_interval if (cells_update_interval > 0 and curr_time - self._last_bw_usage_cell_update > cells_update_interval): self._last_bw_usage_cell_update = curr_time update_cells = True else: update_cells = False instances = objects.InstanceList.get_by_host(context, self.host, use_slave=True) try: bw_counters = self.driver.get_all_bw_counters(instances) except NotImplementedError: # NOTE(mdragon): Not all hypervisors have bandwidth polling # implemented yet. If they don't it doesn't break anything, # they just don't get the info in the usage events. # NOTE(PhilDay): Record that its not supported so we can # skip fast on future calls rather than waste effort getting # the list of instances. 
LOG.warning(_LW("Bandwidth usage not supported by " "hypervisor.")) self._bw_usage_supported = False return refreshed = timeutils.utcnow() for bw_ctr in bw_counters: # Allow switching of greenthreads between queries. greenthread.sleep(0) bw_in = 0 bw_out = 0 last_ctr_in = None last_ctr_out = None usage = objects.BandwidthUsage.get_by_instance_uuid_and_mac( context, bw_ctr['uuid'], bw_ctr['mac_address'], start_period=start_time, use_slave=True) if usage: bw_in = usage.bw_in bw_out = usage.bw_out last_ctr_in = usage.last_ctr_in last_ctr_out = usage.last_ctr_out else: usage = (objects.BandwidthUsage. get_by_instance_uuid_and_mac( context, bw_ctr['uuid'], bw_ctr['mac_address'], start_period=prev_time, use_slave=True)) if usage: last_ctr_in = usage.last_ctr_in last_ctr_out = usage.last_ctr_out if last_ctr_in is not None: if bw_ctr['bw_in'] < last_ctr_in: # counter rollover bw_in += bw_ctr['bw_in'] else: bw_in += (bw_ctr['bw_in'] - last_ctr_in) if last_ctr_out is not None: if bw_ctr['bw_out'] < last_ctr_out: # counter rollover bw_out += bw_ctr['bw_out'] else: bw_out += (bw_ctr['bw_out'] - last_ctr_out) objects.BandwidthUsage(context=context).create( bw_ctr['uuid'], bw_ctr['mac_address'], bw_in, bw_out, bw_ctr['bw_in'], bw_ctr['bw_out'], start_period=start_time, last_refreshed=refreshed, update_cells=update_cells) def _get_host_volume_bdms(self, context, use_slave=False): """Return all block device mappings on a compute host.""" compute_host_bdms = [] instances = objects.InstanceList.get_by_host(context, self.host, use_slave=use_slave) for instance in instances: bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( context, instance.uuid, use_slave=use_slave) instance_bdms = [bdm for bdm in bdms if bdm.is_volume] compute_host_bdms.append(dict(instance=instance, instance_bdms=instance_bdms)) return compute_host_bdms def _update_volume_usage_cache(self, context, vol_usages): """Updates the volume usage cache table with a list of stats.""" for usage in vol_usages: # Allow switching of greenthreads between queries. greenthread.sleep(0) self.conductor_api.vol_usage_update(context, usage['volume'], usage['rd_req'], usage['rd_bytes'], usage['wr_req'], usage['wr_bytes'], usage['instance']) @periodic_task.periodic_task(spacing=CONF.volume_usage_poll_interval) def _poll_volume_usage(self, context, start_time=None): if CONF.volume_usage_poll_interval == 0: return if not start_time: start_time = utils.last_completed_audit_period()[1] compute_host_bdms = self._get_host_volume_bdms(context, use_slave=True) if not compute_host_bdms: return LOG.debug("Updating volume usage cache") try: vol_usages = self.driver.get_all_volume_usage(context, compute_host_bdms) except NotImplementedError: return self._update_volume_usage_cache(context, vol_usages) @periodic_task.periodic_task(spacing=CONF.sync_power_state_interval, run_immediately=True) def _sync_power_states(self, context): """Align power states between the database and the hypervisor. To sync power state data we make a DB call to get the number of virtual machines known by the hypervisor and if the number matches the number of virtual machines known by the database, we proceed in a lazy loop, one database record at a time, checking if the hypervisor has the same power state as is in the database. 
""" db_instances = objects.InstanceList.get_by_host(context, self.host, expected_attrs=[], use_slave=True) num_vm_instances = self.driver.get_num_instances() num_db_instances = len(db_instances) if num_vm_instances != num_db_instances: LOG.warning(_LW("While synchronizing instance power states, found " "%(num_db_instances)s instances in the database " "and %(num_vm_instances)s instances on the " "hypervisor."), {'num_db_instances': num_db_instances, 'num_vm_instances': num_vm_instances}) def _sync(db_instance): # NOTE(melwitt): This must be synchronized as we query state from # two separate sources, the driver and the database. # They are set (in stop_instance) and read, in sync. @utils.synchronized(db_instance.uuid) def query_driver_power_state_and_sync(): self._query_driver_power_state_and_sync(context, db_instance) try: query_driver_power_state_and_sync() except Exception: LOG.exception(_LE("Periodic sync_power_state task had an " "error while processing an instance."), instance=db_instance) self._syncs_in_progress.pop(db_instance.uuid) for db_instance in db_instances: # process syncs asynchronously - don't want instance locking to # block entire periodic task thread uuid = db_instance.uuid if uuid in self._syncs_in_progress: LOG.debug('Sync already in progress for %s' % uuid) else: LOG.debug('Triggering sync for uuid %s' % uuid) self._syncs_in_progress[uuid] = True self._sync_power_pool.spawn_n(_sync, db_instance) def _query_driver_power_state_and_sync(self, context, db_instance): if db_instance.task_state is not None: LOG.info(_LI("During sync_power_state the instance has a " "pending task (%(task)s). Skip."), {'task': db_instance.task_state}, instance=db_instance) return # No pending tasks. Now try to figure out the real vm_power_state. try: vm_instance = self.driver.get_info(db_instance) vm_power_state = vm_instance.state except exception.InstanceNotFound: vm_power_state = power_state.NOSTATE # Note(maoy): the above get_info call might take a long time, # for example, because of a broken libvirt driver. try: self._sync_instance_power_state(context, db_instance, vm_power_state, use_slave=True) except exception.InstanceNotFound: # NOTE(hanlind): If the instance gets deleted during sync, # silently ignore. pass def _sync_instance_power_state(self, context, db_instance, vm_power_state, use_slave=False): """Align instance power state between the database and hypervisor. If the instance is not found on the hypervisor, but is in the database, then a stop() API will be called on the instance. """ # We re-query the DB to get the latest instance info to minimize # (not eliminate) race condition. db_instance.refresh(use_slave=use_slave) db_power_state = db_instance.power_state vm_state = db_instance.vm_state if self.host != db_instance.host: # on the sending end of nova-compute _sync_power_state # may have yielded to the greenthread performing a live # migration; this in turn has changed the resident-host # for the VM; However, the instance is still active, it # is just in the process of migrating to another host. # This implies that the compute source must relinquish # control to the compute destination. 
            LOG.info(_LI("During the sync_power process the "
                         "instance has moved from "
                         "host %(src)s to host %(dst)s"),
                     {'src': db_instance.host,
                      'dst': self.host},
                     instance=db_instance)
            return
        elif db_instance.task_state is not None:
            # on the receiving end of nova-compute, it could happen
            # that the DB instance already reports the new resident
            # but the actual VM has not shown up on the hypervisor
            # yet. In this case, let's allow the loop to continue
            # and run the state sync in a later round
            LOG.info(_LI("During sync_power_state the instance has a "
                         "pending task (%(task)s). Skip."),
                     {'task': db_instance.task_state},
                     instance=db_instance)
            return

        if vm_power_state != db_power_state:
            # power_state is always updated from hypervisor to db
            db_instance.power_state = vm_power_state
            db_instance.save()
            db_power_state = vm_power_state

        # Note(maoy): Now resolve the discrepancy between vm_state and
        # vm_power_state. We go through all possible vm_states.
        if vm_state in (vm_states.BUILDING,
                        vm_states.RESCUED,
                        vm_states.RESIZED,
                        vm_states.SUSPENDED,
                        vm_states.ERROR):
            # TODO(maoy): we ignore these vm_state for now.
            pass
        elif vm_state == vm_states.ACTIVE:
            # The only rational power state should be RUNNING
            if vm_power_state in (power_state.SHUTDOWN,
                                  power_state.CRASHED):
                LOG.warning(_LW("Instance shutdown by itself. Calling the "
                                "stop API. Current vm_state: %(vm_state)s, "
                                "current task_state: %(task_state)s, "
                                "current DB power_state: %(db_power_state)s, "
                                "current VM power_state: %(vm_power_state)s"),
                            {'vm_state': vm_state,
                             'task_state': db_instance.task_state,
                             'db_power_state': db_power_state,
                             'vm_power_state': vm_power_state},
                            instance=db_instance)
                try:
                    # Note(maoy): here we call the API instead of
                    # brutally updating the vm_state in the database
                    # to allow all the hooks and checks to be performed.
                    if db_instance.shutdown_terminate:
                        self.compute_api.delete(context, db_instance)
                    else:
                        self.compute_api.stop(context, db_instance)
                except Exception:
                    # Note(maoy): there is no need to propagate the error
                    # because the same power_state will be retrieved next
                    # time and retried.
                    # For example, there might be another task scheduled.
                    LOG.exception(_LE("error during stop() in "
                                      "sync_power_state."),
                                  instance=db_instance)
            elif vm_power_state == power_state.SUSPENDED:
                LOG.warning(_LW("Instance is suspended unexpectedly. Calling "
                                "the stop API."), instance=db_instance)
                try:
                    self.compute_api.stop(context, db_instance)
                except Exception:
                    LOG.exception(_LE("error during stop() in "
                                      "sync_power_state."),
                                  instance=db_instance)
            elif vm_power_state == power_state.PAUSED:
                # Note(maoy): a VM may get into the paused state not only
                # because the user request via API calls, but also
                # due to (temporary) external instrumentations.
                # Before the virt layer can reliably report the reason,
                # we simply ignore the state discrepancy. In many cases,
                # the VM state will go back to running after the external
                # instrumentation is done. See bug 1097806 for details.
                LOG.warning(_LW("Instance is paused unexpectedly. Ignore."),
                            instance=db_instance)
            elif vm_power_state == power_state.NOSTATE:
                # Occasionally, depending on the status of the hypervisor,
                # which could be restarting for example, an instance may
                # not be found. Therefore just log the condition.
                LOG.warning(_LW("Instance is unexpectedly not found. Ignore."),
                            instance=db_instance)
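# NOTE: a stand-alone condensation (illustrative and deliberately not
# exhaustive) of the reconciliation branches above and below: given the DB
# vm_state and the hypervisor power state, return the corrective action
# the manager takes. The string labels are hypothetical stand-ins for the
# vm_states/power_state constants.
def sync_action(vm_state, power):
    if vm_state == 'active' and power in ('shutdown', 'crashed'):
        return 'stop'        # instance shut itself down; stop via the API
    if vm_state == 'active' and power == 'suspended':
        return 'stop'        # unexpected suspend; stop via the API
    if vm_state == 'stopped' and power not in ('nostate', 'shutdown',
                                               'crashed'):
        return 'force-stop'  # running but should be stopped
    if vm_state == 'paused' and power in ('shutdown', 'crashed'):
        return 'force-stop'  # paused instance shut itself down
    return 'ignore'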
        elif vm_state == vm_states.STOPPED:
            if vm_power_state not in (power_state.NOSTATE,
                                      power_state.SHUTDOWN,
                                      power_state.CRASHED):
                LOG.warning(_LW("Instance is not stopped. Calling "
                                "the stop API. Current vm_state: %(vm_state)s,"
                                " current task_state: %(task_state)s, "
                                "current DB power_state: %(db_power_state)s, "
                                "current VM power_state: %(vm_power_state)s"),
                            {'vm_state': vm_state,
                             'task_state': db_instance.task_state,
                             'db_power_state': db_power_state,
                             'vm_power_state': vm_power_state},
                            instance=db_instance)
                try:
                    # NOTE(russellb) Force the stop, because normally the
                    # compute API would not allow an attempt to stop a stopped
                    # instance.
                    self.compute_api.force_stop(context, db_instance)
                except Exception:
                    LOG.exception(_LE("error during stop() in "
                                      "sync_power_state."),
                                  instance=db_instance)
        elif vm_state == vm_states.PAUSED:
            if vm_power_state in (power_state.SHUTDOWN,
                                  power_state.CRASHED):
                LOG.warning(_LW("Paused instance shutdown by itself. Calling "
                                "the stop API."), instance=db_instance)
                try:
                    self.compute_api.force_stop(context, db_instance)
                except Exception:
                    LOG.exception(_LE("error during stop() in "
                                      "sync_power_state."),
                                  instance=db_instance)
        elif vm_state in (vm_states.SOFT_DELETED,
                          vm_states.DELETED):
            if vm_power_state not in (power_state.NOSTATE,
                                      power_state.SHUTDOWN):
                # Note(maoy): this should be taken care of periodically in
                # _cleanup_running_deleted_instances().
                LOG.warning(_LW("Instance is not (soft-)deleted."),
                            instance=db_instance)

    @periodic_task.periodic_task
    def _reclaim_queued_deletes(self, context):
        """Reclaim instances that are queued for deletion."""
        interval = CONF.reclaim_instance_interval
        if interval <= 0:
            LOG.debug("CONF.reclaim_instance_interval <= 0, skipping...")
            return

        # TODO(comstud, jichenjc): Dummy quota object for now. See bug
        # 1296414. The only case where the quota might be inconsistent is
        # when the compute node died between setting the instance state to
        # SOFT_DELETED and committing the quota to the DB. When the compute
        # node starts again, it will have no idea whether the reservation
        # was committed or has expired. Since it's a rare case, it is
        # marked as a todo.
        quotas = objects.Quotas.from_reservations(context, None)

        filters = {'vm_state': vm_states.SOFT_DELETED,
                   'task_state': None,
                   'host': self.host}
        instances = objects.InstanceList.get_by_filters(
            context, filters,
            expected_attrs=objects.instance.INSTANCE_DEFAULT_FIELDS,
            use_slave=True)
        for instance in instances:
            if self._deleted_old_enough(instance, interval):
                bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
                        context, instance.uuid)
                LOG.info(_LI('Reclaiming deleted instance'), instance=instance)
                try:
                    self._delete_instance(context, instance, bdms, quotas)
                except Exception as e:
                    LOG.warning(_LW("Periodic reclaim failed to delete "
                                    "instance: %s"),
                                e, instance=instance)

    @periodic_task.periodic_task
    def update_available_resource(self, context):
        """See driver.get_available_resource()

        Periodic process that keeps the compute host's understanding of
        resource availability and usage in sync with the underlying
        hypervisor.
:param context: security context """ new_resource_tracker_dict = {} nodenames = set(self.driver.get_available_nodes()) for nodename in nodenames: rt = self._get_resource_tracker(nodename) rt.update_available_resource(context) new_resource_tracker_dict[nodename] = rt # Delete orphan compute node not reported by driver but still in db compute_nodes_in_db = self._get_compute_nodes_in_db(context, use_slave=True) for cn in compute_nodes_in_db: if cn.hypervisor_hostname not in nodenames: LOG.audit(_("Deleting orphan compute node %s") % cn.id) cn.destroy() self._resource_tracker_dict = new_resource_tracker_dict def _get_compute_nodes_in_db(self, context, use_slave=False): try: return objects.ComputeNodeList.get_all_by_host(context, self.host, use_slave=use_slave) except exception.NotFound: LOG.error(_LE("No compute node record for host %s"), self.host) return [] @periodic_task.periodic_task( spacing=CONF.running_deleted_instance_poll_interval) def _cleanup_running_deleted_instances(self, context): """Cleanup any instances which are erroneously still running after having been deleted. Valid actions to take are: 1. noop - do nothing 2. log - log which instances are erroneously running 3. reap - shutdown and cleanup any erroneously running instances 4. shutdown - power off *and disable* any erroneously running instances The use-case for this cleanup task is: for various reasons, it may be possible for the database to show an instance as deleted but for that instance to still be running on a host machine (see bug https://bugs.launchpad.net/nova/+bug/911366). This cleanup task is a cross-hypervisor utility for finding these zombied instances and either logging the discrepancy (likely what you should do in production), or automatically reaping the instances (more appropriate for dev environments). 
""" action = CONF.running_deleted_instance_action if action == "noop": return # NOTE(sirp): admin contexts don't ordinarily return deleted records with utils.temporary_mutation(context, read_deleted="yes"): for instance in self._running_deleted_instances(context): if action == "log": LOG.warning(_LW("Detected instance with name label " "'%s' which is marked as " "DELETED but still present on host."), instance['name'], instance=instance) elif action == 'shutdown': LOG.info(_LI("Powering off instance with name label " "'%s' which is marked as " "DELETED but still present on host."), instance['name'], instance=instance) try: try: # disable starting the instance self.driver.set_bootable(instance, False) except NotImplementedError: LOG.warning(_LW("set_bootable is not implemented " "for the current driver")) # and power it off self.driver.power_off(instance) except Exception: msg = _("Failed to power off instance") LOG.warn(msg, instance=instance, exc_info=True) elif action == 'reap': LOG.info(_LI("Destroying instance with name label " "'%s' which is marked as " "DELETED but still present on host."), instance['name'], instance=instance) bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( context, instance.uuid, use_slave=True) self.instance_events.clear_events_for_instance(instance) try: self._shutdown_instance(context, instance, bdms, notify=False) self._cleanup_volumes(context, instance['uuid'], bdms) except Exception as e: LOG.warning(_LW("Periodic cleanup failed to delete " "instance: %s"), e, instance=instance) else: raise Exception(_("Unrecognized value '%s'" " for CONF.running_deleted_" "instance_action") % action) def _running_deleted_instances(self, context): """Returns a list of instances nova thinks is deleted, but the hypervisor thinks is still running. 
""" timeout = CONF.running_deleted_instance_timeout filters = {'deleted': True, 'soft_deleted': False, 'host': self.host} instances = self._get_instances_on_driver(context, filters) return [i for i in instances if self._deleted_old_enough(i, timeout)] def _deleted_old_enough(self, instance, timeout): deleted_at = instance['deleted_at'] if isinstance(instance, obj_base.NovaObject) and deleted_at: deleted_at = deleted_at.replace(tzinfo=None) return (not deleted_at or timeutils.is_older_than(deleted_at, timeout)) @contextlib.contextmanager def _error_out_instance_on_exception(self, context, instance, quotas=None, instance_state=vm_states.ACTIVE): instance_uuid = instance['uuid'] try: yield except NotImplementedError as error: with excutils.save_and_reraise_exception(): if quotas: quotas.rollback() LOG.info(_LI("Setting instance back to %(state)s after: " "%(error)s"), {'state': instance_state, 'error': error}, instance_uuid=instance_uuid) self._instance_update(context, instance_uuid, vm_state=instance_state, task_state=None) except exception.InstanceFaultRollback as error: if quotas: quotas.rollback() LOG.info(_LI("Setting instance back to ACTIVE after: %s"), error, instance_uuid=instance_uuid) self._instance_update(context, instance_uuid, vm_state=vm_states.ACTIVE, task_state=None) raise error.inner_exception except Exception: LOG.exception(_LE('Setting instance vm_state to ERROR'), instance_uuid=instance_uuid) with excutils.save_and_reraise_exception(): if quotas: quotas.rollback() self._set_instance_error_state(context, instance) @aggregate_object_compat @wrap_exception() def add_aggregate_host(self, context, aggregate, host, slave_info): """Notify hypervisor of change (for hypervisor pools).""" try: self.driver.add_to_aggregate(context, aggregate, host, slave_info=slave_info) except NotImplementedError: LOG.debug('Hypervisor driver does not support ' 'add_aggregate_host') except exception.AggregateError: with excutils.save_and_reraise_exception(): self.driver.undo_aggregate_operation( context, aggregate.delete_host, aggregate, host) @aggregate_object_compat @wrap_exception() def remove_aggregate_host(self, context, host, slave_info, aggregate): """Removes a host from a physical hypervisor pool.""" try: self.driver.remove_from_aggregate(context, aggregate, host, slave_info=slave_info) except NotImplementedError: LOG.debug('Hypervisor driver does not support ' 'remove_aggregate_host') except (exception.AggregateError, exception.InvalidAggregateAction) as e: with excutils.save_and_reraise_exception(): self.driver.undo_aggregate_operation( context, aggregate.add_host, aggregate, host, isinstance(e, exception.AggregateError)) def _process_instance_event(self, instance, event): _event = self.instance_events.pop_instance_event(instance, event) if _event: LOG.debug('Processing event %(event)s', {'event': event.key}, instance=instance) _event.send(event) @wrap_exception() def external_instance_event(self, context, instances, events): # NOTE(danms): Some event types are handled by the manager, such # as when we're asked to update the instance's info_cache. If it's # not one of those, look for some thread(s) waiting for the event and # unblock them if so. 
for event in events: instance = [inst for inst in instances if inst.uuid == event.instance_uuid][0] LOG.debug('Received event %(event)s', {'event': event.key}, instance=instance) if event.name == 'network-changed': self.network_api.get_instance_nw_info(context, instance) else: self._process_instance_event(instance, event) @periodic_task.periodic_task(spacing=CONF.image_cache_manager_interval, external_process_ok=True) def _run_image_cache_manager_pass(self, context): """Run a single pass of the image cache manager.""" if not self.driver.capabilities["has_imagecache"]: return # Determine what other nodes use this storage storage_users.register_storage_use(CONF.instances_path, CONF.host) nodes = storage_users.get_storage_users(CONF.instances_path) # Filter all_instances to only include those nodes which share this # storage path. # TODO(mikal): this should be further refactored so that the cache # cleanup code doesn't know what those instances are, just a remote # count, and then this logic should be pushed up the stack. filters = {'deleted': False, 'soft_deleted': True, 'host': nodes} filtered_instances = objects.InstanceList.get_by_filters(context, filters, expected_attrs=[], use_slave=True) self.driver.manage_image_cache(context, filtered_instances) @periodic_task.periodic_task(spacing=CONF.instance_delete_interval) def _run_pending_deletes(self, context): """Retry any pending instance file deletes.""" LOG.debug('Cleaning up deleted instances') filters = {'deleted': True, 'soft_deleted': False, 'host': CONF.host, 'cleaned': False} attrs = ['info_cache', 'security_groups', 'system_metadata'] with utils.temporary_mutation(context, read_deleted='yes'): instances = objects.InstanceList.get_by_filters( context, filters, expected_attrs=attrs, use_slave=True) LOG.debug('There are %d instances to clean', len(instances)) for instance in instances: attempts = int(instance.system_metadata.get('clean_attempts', '0')) LOG.debug('Instance has had %(attempts)s of %(max)s ' 'cleanup attempts', {'attempts': attempts, 'max': CONF.maximum_instance_delete_attempts}, instance=instance) if attempts < CONF.maximum_instance_delete_attempts: success = self.driver.delete_instance_files(instance) instance.system_metadata['clean_attempts'] = str(attempts + 1) if success: instance.cleaned = True with utils.temporary_mutation(context, read_deleted='yes'): instance.save()
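# -----------------------------------------------------------------------------
# Illustrative sketch (not part of nova): _cleanup_running_deleted_instances()
# above relies on utils.temporary_mutation() to flip the context's
# read_deleted flag for the duration of a block and restore it afterwards.
# A minimal, self-contained version of that pattern, assuming only an object
# with plain attributes, could look like this (the helper name is made up):
@contextlib.contextmanager
def _temporary_mutation_sketch(obj, **kwargs):
    """Temporarily set attributes on obj, restoring the originals on exit."""
    old_values = dict((k, getattr(obj, k)) for k in kwargs)
    for key, value in kwargs.items():
        setattr(obj, key, value)
    try:
        yield obj
    finally:
        # Restore the previous values even if the block raised.
        for key, value in old_values.items():
            setattr(obj, key, value)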
sajeeshcs/nested_quota_final
nova/compute/manager.py
Python
apache-2.0
289,419
#!/usr/bin/env python2.7 # Copyright 2015 Cisco Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. """ Demonstrate how to obtain routing information of one network device. 1. Select a configured device from the inventory. 2. Execute the command and print the output. 3. Print the command syntax and output field descriptions. """ from __future__ import print_function from inspect import cleandoc from logging import log, WARN from nxapi.http import cli_show, connect, disconnect, print_command_reference, session_device_url from nxapi.context import sys_exit, EX_OK, EX_TEMPFAIL from nxapi.render import print_table from example import inventory_config from collections import OrderedDict command = 'sh routing' def demonstrate(session): """ Execute a command, print the output, return 'true' if successful. """ response = cli_show(session, command) for c in response: print('Output for command:', c) output = response[c] table_vrf = output['TABLE_vrf'] display_table = [] rows_vrf = table_vrf['ROW_vrf'] if not isinstance(rows_vrf, list): rows_vrf = [rows_vrf] for row_vrf in rows_vrf: display_vrf = OrderedDict() keys = [k for k in row_vrf if not k.startswith('TABLE')] for k in sorted(keys): display_vrf[k] = row_vrf[k] table_addrf = row_vrf['TABLE_addrf'] rows_addrf = table_addrf['ROW_addrf'] if not isinstance(rows_addrf, list): rows_addrf = [rows_addrf] for row_addrf in rows_addrf: display_addrf = OrderedDict(display_vrf) keys = [k for k in row_addrf if not k.startswith('TABLE')] for k in sorted(keys): display_addrf[k] = row_addrf[k] table_prefix = row_addrf['TABLE_prefix'] rows_prefix = table_prefix['ROW_prefix'] if not isinstance(rows_prefix, list): rows_prefix = [rows_prefix] for row_prefix in rows_prefix: display_prefix = OrderedDict(display_addrf) keys = [k for k in row_prefix if not k.startswith('TABLE')] for k in sorted(keys): display_prefix[k] = row_prefix[k] table_path = row_prefix['TABLE_path'] rows_path = table_path['ROW_path'] if not isinstance(rows_path, list): rows_path = [rows_path] for row_path in rows_path: display_path = OrderedDict(display_prefix) keys = [k for k in row_path if not k.startswith('TABLE')] for k in sorted(keys): display_path[k] = row_path[k] display_table.append(display_path) print_table(display_table) print() return True def main(): """ Oversee the sequence of tasks as per the documentation of this script. """ print(cleandoc(__doc__)) print() print('Select an appropriate device from those available.') print_table(inventory_config) print() for device_config in inventory_config: try: http_session = connect(**device_config) try: print('Connected to', session_device_url(http_session)) print() demonstrate(http_session) return EX_OK if print_command_reference(http_session, command) else EX_TEMPFAIL finally: disconnect(http_session) except IOError: log(WARN, 'Unable to connect to Nexus device %s', str(device_config)) continue print("There are no suitable network devices. Demonstration cancelled.") return EX_TEMPFAIL if __name__ == "__main__": sys_exit(main())
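# -----------------------------------------------------------------------------
# Illustrative sketch (not part of this lab): demonstrate() above repeats the
# same "a ROW_* value may be a single dict or a list of dicts" normalization
# at every nesting level. Assuming the NX-API TABLE_*/ROW_* naming convention
# holds, a generic recursive flattener (the helper name is made up) could
# replace the four hand-written loops; list(_flatten_rows_sketch(output))
# should yield essentially the same rows as display_table.
def _flatten_rows_sketch(row, inherited=None):
    """Yield one flat OrderedDict per innermost row, inheriting outer keys."""
    inherited = OrderedDict() if inherited is None else inherited
    flat = OrderedDict(inherited)
    table_keys = []
    for key in sorted(row):
        if key.startswith('TABLE'):
            table_keys.append(key)
        else:
            flat[key] = row[key]
    if not table_keys:
        # Innermost level: emit the accumulated flat row.
        yield flat
        return
    for table_key in table_keys:
        # 'TABLE_vrf' holds 'ROW_vrf', 'TABLE_addrf' holds 'ROW_addrf', etc.
        rows = row[table_key]['ROW' + table_key[len('TABLE'):]]
        if not isinstance(rows, list):
            rows = [rows]
        for inner_row in rows:
            for result in _flatten_rows_sketch(inner_row, flat):
                yield result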
SivagnanamCiena/nxapi-learning-labs
python/example/show_routing.py
Python
apache-2.0
4,441
# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Main entry point into the EC2 Credentials service. This service allows the creation of access/secret credentials used for the ec2 interop layer of OpenStack. A user can create as many access/secret pairs, each of which is mapped to a specific project. This is required because OpenStack supports a user belonging to multiple projects, whereas the signatures created on ec2-style requests don't allow specification of which project the user wishes to act upon. To complete the cycle, we provide a method that OpenStack services can use to validate a signature and get a corresponding OpenStack token. This token allows method calls to other services within the context the access/secret was created. As an example, Nova requests Keystone to validate the signature of a request, receives a token, and then makes a request to Glance to list images needed to perform the requested task. """ import abc import sys import uuid from keystoneclient.contrib.ec2 import utils as ec2_utils from oslo_serialization import jsonutils import six from six.moves import http_client from keystone.common import controller from keystone.common import dependency from keystone.common import utils from keystone.common import wsgi from keystone import exception from keystone.i18n import _ CRED_TYPE_EC2 = 'ec2' @dependency.requires('assignment_api', 'catalog_api', 'credential_api', 'identity_api', 'resource_api', 'role_api', 'token_provider_api') @six.add_metaclass(abc.ABCMeta) class Ec2ControllerCommon(object): def check_signature(self, creds_ref, credentials): signer = ec2_utils.Ec2Signer(creds_ref['secret']) signature = signer.generate(credentials) # NOTE(davechen): credentials.get('signature') is not guaranteed to # exist, we need check it explicitly. if credentials.get('signature'): if utils.auth_str_equal(credentials['signature'], signature): return True # NOTE(vish): Some client libraries don't use the port when signing # requests, so try again without port. elif ':' in credentials['host']: hostname, _port = credentials['host'].split(':') credentials['host'] = hostname # NOTE(davechen): we need reinitialize 'signer' to avoid # contaminated status of signature, this is similar with # other programming language libraries, JAVA for example. signer = ec2_utils.Ec2Signer(creds_ref['secret']) signature = signer.generate(credentials) if utils.auth_str_equal(credentials['signature'], signature): return True raise exception.Unauthorized( message=_('Invalid EC2 signature.')) else: raise exception.Unauthorized( message=_('EC2 signature not supplied.')) # Raise the exception when credentials.get('signature') is None else: raise exception.Unauthorized( message=_('EC2 signature not supplied.')) @abc.abstractmethod def authenticate(self, context, credentials=None, ec2Credentials=None): """Validate a signed EC2 request and provide a token. Other services (such as Nova) use this **admin** call to determine if a request they signed received is from a valid user. 
If it is a valid signature, an OpenStack token that maps to the user/tenant is returned to the caller, along with all the other details returned from a normal token validation call. The returned token is useful for making calls to other OpenStack services within the context of the request. :param context: standard context :param credentials: dict of ec2 signature :param ec2Credentials: DEPRECATED dict of ec2 signature :returns: token: OpenStack token equivalent to access key along with the corresponding service catalog and roles """ raise exception.NotImplemented() def _authenticate(self, credentials=None, ec2credentials=None): """Common code shared between the V2 and V3 authenticate methods. :returns: user_ref, tenant_ref, roles_ref, catalog_ref """ # FIXME(ja): validate that a service token was used! # NOTE(termie): backwards compat hack if not credentials and ec2credentials: credentials = ec2credentials if 'access' not in credentials: raise exception.Unauthorized( message=_('EC2 signature not supplied.')) creds_ref = self._get_credentials(credentials['access']) self.check_signature(creds_ref, credentials) # TODO(termie): don't create new tokens every time # TODO(termie): this is copied from TokenController.authenticate tenant_ref = self.resource_api.get_project(creds_ref['tenant_id']) user_ref = self.identity_api.get_user(creds_ref['user_id']) # Validate that the auth info is valid and nothing is disabled try: self.identity_api.assert_user_enabled( user_id=user_ref['id'], user=user_ref) self.resource_api.assert_domain_enabled( domain_id=user_ref['domain_id']) self.resource_api.assert_project_enabled( project_id=tenant_ref['id'], project=tenant_ref) except AssertionError as e: six.reraise(exception.Unauthorized, exception.Unauthorized(e), sys.exc_info()[2]) roles = self.assignment_api.get_roles_for_user_and_project( user_ref['id'], tenant_ref['id'] ) if not roles: raise exception.Unauthorized( message=_('User not valid for tenant.')) roles_ref = [self.role_api.get_role(role_id) for role_id in roles] catalog_ref = self.catalog_api.get_catalog( user_ref['id'], tenant_ref['id']) return user_ref, tenant_ref, roles_ref, catalog_ref def create_credential(self, request, user_id, tenant_id): """Create a secret/access pair for use with ec2 style auth. Generates a new set of credentials that map the user/tenant pair. :param request: current request :param user_id: id of user :param tenant_id: id of tenant :returns: credential: dict of ec2 credential """ self.identity_api.get_user(user_id) self.resource_api.get_project(tenant_id) blob = {'access': uuid.uuid4().hex, 'secret': uuid.uuid4().hex, 'trust_id': request.context.trust_id} credential_id = utils.hash_access_key(blob['access']) cred_ref = {'user_id': user_id, 'project_id': tenant_id, 'blob': jsonutils.dumps(blob), 'id': credential_id, 'type': CRED_TYPE_EC2} self.credential_api.create_credential(credential_id, cred_ref) return {'credential': self._convert_v3_to_ec2_credential(cred_ref)} def get_credentials(self, user_id): """List all credentials for a user. :param user_id: id of user :returns: credentials: list of ec2 credential dicts """ self.identity_api.get_user(user_id) credential_refs = self.credential_api.list_credentials_for_user( user_id, type=CRED_TYPE_EC2) return {'credentials': [self._convert_v3_to_ec2_credential(credential) for credential in credential_refs]} def get_credential(self, user_id, credential_id): """Retrieve a user's access/secret pair by the access key. Grab the full access/secret pair for a given access key. 
        :param user_id: id of user
        :param credential_id: access key for credentials
        :returns: credential: dict of ec2 credential
        """
        self.identity_api.get_user(user_id)
        return {'credential': self._get_credentials(credential_id)}

    def delete_credential(self, user_id, credential_id):
        """Delete a user's access/secret pair.

        Used to revoke a user's access/secret pair

        :param user_id: id of user
        :param credential_id: access key for credentials
        :returns: bool: success
        """
        self.identity_api.get_user(user_id)
        self._get_credentials(credential_id)
        ec2_credential_id = utils.hash_access_key(credential_id)
        return self.credential_api.delete_credential(ec2_credential_id)

    @staticmethod
    def _convert_v3_to_ec2_credential(credential):
        # Prior to bug #1259584 fix, blob was stored unserialized
        # but it should be stored as a json string for compatibility
        # with the v3 credentials API. Fall back to the old behavior
        # for backwards compatibility with existing DB contents
        try:
            blob = jsonutils.loads(credential['blob'])
        except TypeError:
            blob = credential['blob']
        return {'user_id': credential.get('user_id'),
                'tenant_id': credential.get('project_id'),
                'access': blob.get('access'),
                'secret': blob.get('secret'),
                'trust_id': blob.get('trust_id')}

    def _get_credentials(self, credential_id):
        """Return credentials from an ID.

        :param credential_id: id of credential
        :raises keystone.exception.Unauthorized: when credential id is invalid
            or when the credential type is not ec2
        :returns: credential: dict of ec2 credential.
        """
        ec2_credential_id = utils.hash_access_key(credential_id)
        cred = self.credential_api.get_credential(ec2_credential_id)
        if not cred or cred['type'] != CRED_TYPE_EC2:
            raise exception.Unauthorized(
                message=_('EC2 access key not found.'))
        return self._convert_v3_to_ec2_credential(cred)

    def render_token_data_response(self, token_id, token_data):
        """Render token data HTTP response.

        Stash token ID into the X-Subject-Token header.
        """
        status = (http_client.OK,
                  http_client.responses[http_client.OK])
        headers = [('X-Subject-Token', token_id)]

        return wsgi.render_response(body=token_data,
                                    status=status,
                                    headers=headers)


@dependency.requires('policy_api', 'token_provider_api')
class Ec2Controller(Ec2ControllerCommon, controller.V2Controller):

    @controller.v2_ec2_deprecated
    def authenticate(self, request, credentials=None, ec2Credentials=None):
        (user_ref, tenant_ref, roles_ref,
         catalog_ref) = self._authenticate(credentials=credentials,
                                           ec2credentials=ec2Credentials)
        # _authenticate() returns only four values; the v2 token data below
        # needs just an empty placeholder dict for metadata.
        metadata_ref = {}

        # NOTE(morganfainberg): Make sure the data is in correct form since it
        # might be consumed external to Keystone and this is a v2.0 controller.
# The token provider does not explicitly care about user_ref version # in this case, but the data is stored in the token itself and should # match the version user_ref = self.v3_to_v2_user(user_ref) auth_token_data = dict(user=user_ref, tenant=tenant_ref, metadata=metadata_ref, id='placeholder') (token_id, token_data) = self.token_provider_api.issue_v2_token( auth_token_data, roles_ref, catalog_ref) return token_data @controller.v2_ec2_deprecated def get_credential(self, request, user_id, credential_id): if not self._is_admin(request): self._assert_identity(request.context_dict, user_id) return super(Ec2Controller, self).get_credential(user_id, credential_id) @controller.v2_ec2_deprecated def get_credentials(self, request, user_id): if not self._is_admin(request): self._assert_identity(request.context_dict, user_id) return super(Ec2Controller, self).get_credentials(user_id) @controller.v2_ec2_deprecated def create_credential(self, request, user_id, tenant_id): if not self._is_admin(request): self._assert_identity(request.context_dict, user_id) return super(Ec2Controller, self).create_credential( request, user_id, tenant_id) @controller.v2_ec2_deprecated def delete_credential(self, request, user_id, credential_id): if not self._is_admin(request): self._assert_identity(request.context_dict, user_id) self._assert_owner(user_id, credential_id) return super(Ec2Controller, self).delete_credential(user_id, credential_id) def _assert_identity(self, context, user_id): """Check that the provided token belongs to the user. :param context: standard context :param user_id: id of user :raises keystone.exception.Forbidden: when token is invalid """ token_ref = utils.get_token_ref(context) if token_ref.user_id != user_id: raise exception.Forbidden(_('Token belongs to another user')) def _is_admin(self, request): """Wrap admin assertion error return statement. :param context: standard context :returns: bool: success """ try: # NOTE(morganfainberg): policy_api is required for assert_admin # to properly perform policy enforcement. self.assert_admin(request) return True except (exception.Forbidden, exception.Unauthorized): return False def _assert_owner(self, user_id, credential_id): """Ensure the provided user owns the credential. :param user_id: expected credential owner :param credential_id: id of credential object :raises keystone.exception.Forbidden: on failure """ ec2_credential_id = utils.hash_access_key(credential_id) cred_ref = self.credential_api.get_credential(ec2_credential_id) if user_id != cred_ref['user_id']: raise exception.Forbidden(_('Credential belongs to another user')) @dependency.requires('policy_api', 'token_provider_api') class Ec2ControllerV3(Ec2ControllerCommon, controller.V3Controller): collection_name = 'credentials' member_name = 'credential' def _check_credential_owner_and_user_id_match(self, request, prep_info, user_id, credential_id): # NOTE(morganfainberg): this method needs to capture the arguments of # the method that is decorated with @controller.protected() (with # exception of the first argument ('context') since the protected # method passes in *args, **kwargs. In this case, it is easier to see # the expected input if the argspec is `user_id` and `credential_id` # explicitly (matching the :class:`.ec2_delete_credential()` method # below). 
ref = {} credential_id = utils.hash_access_key(credential_id) ref['credential'] = self.credential_api.get_credential(credential_id) # NOTE(morganfainberg): policy_api is required for this # check_protection to properly be able to perform policy enforcement. self.check_protection(request, prep_info, ref) def authenticate(self, context, credentials=None, ec2Credentials=None): (user_ref, project_ref, roles_ref, catalog_ref) = self._authenticate( credentials=credentials, ec2credentials=ec2Credentials ) method_names = ['ec2credential'] token_id, token_data = self.token_provider_api.issue_token( user_ref['id'], method_names, project_id=project_ref['id']) return self.render_token_data_response(token_id, token_data) @controller.protected(callback=_check_credential_owner_and_user_id_match) def ec2_get_credential(self, request, user_id, credential_id): ref = super(Ec2ControllerV3, self).get_credential(user_id, credential_id) return Ec2ControllerV3.wrap_member(request.context_dict, ref['credential']) @controller.protected() def ec2_list_credentials(self, request, user_id): refs = super(Ec2ControllerV3, self).get_credentials(user_id) return Ec2ControllerV3.wrap_collection(request.context_dict, refs['credentials']) @controller.protected() def ec2_create_credential(self, request, user_id, tenant_id): ref = super(Ec2ControllerV3, self).create_credential( request, user_id, tenant_id) return Ec2ControllerV3.wrap_member(request.context_dict, ref['credential']) @controller.protected(callback=_check_credential_owner_and_user_id_match) def ec2_delete_credential(self, request, user_id, credential_id): return super(Ec2ControllerV3, self).delete_credential(user_id, credential_id) @classmethod def _add_self_referential_link(cls, context, ref): path = '/users/%(user_id)s/credentials/OS-EC2/%(credential_id)s' url = cls.base_url(context, path) % { 'user_id': ref['user_id'], 'credential_id': ref['access']} ref.setdefault('links', {}) ref['links']['self'] = url
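# ----------------------------------------------------------------------------
# Illustrative sketch (not part of keystone): the shape of the `credentials`
# dict that Ec2ControllerCommon.check_signature() expects. The client signs
# the request with the shared secret using the same Ec2Signer helper the
# controller uses above; the host/verb/path values below are placeholders and
# the function name is made up.
def _build_signed_ec2_credentials_sketch(access, secret):
    """Return a credentials dict that check_signature() should accept."""
    credentials = {
        'access': access,
        'host': 'localhost:5000',
        'verb': 'GET',
        'path': '/',
        'params': {
            'SignatureVersion': '2',
            'SignatureMethod': 'HmacSHA256',
            'AWSAccessKeyId': access,
        },
    }
    # Sign exactly as the server side re-computes it in check_signature().
    signer = ec2_utils.Ec2Signer(secret)
    credentials['signature'] = signer.generate(credentials)
    return credentials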
ilay09/keystone
keystone/contrib/ec2/controllers.py
Python
apache-2.0
18,455
#!/usr/bin/env python import os import sys import time sys.path.append(os.path.join(os.path.dirname(__file__), "../../pox")) import argparse from collections import defaultdict import networkx as nx from pox.lib.packet.ethernet import ethernet from pox.openflow.libopenflow_01 import ofp_flow_mod_command_rev_map from pox.openflow.libopenflow_01 import OFPT_HELLO from pox.openflow.libopenflow_01 import OFPT_FEATURES_REQUEST from pox.openflow.libopenflow_01 import OFPT_FEATURES_REPLY from pox.openflow.libopenflow_01 import OFPT_SET_CONFIG from pox.openflow.libopenflow_01 import OFPFC_DELETE_STRICT from pox.openflow.libopenflow_01 import OFPT_STATS_REQUEST from pox.openflow.libopenflow_01 import OFPT_VENDOR from pox.openflow.libopenflow_01 import OFPT_GET_CONFIG_REQUEST from pox.openflow.libopenflow_01 import OFPT_GET_CONFIG_REPLY from pox.openflow.libopenflow_01 import OFPT_STATS_REPLY from hb_utils import pkt_info from hb_shadow_table import ShadowFlowTable from hb_race_detector import RaceDetector from hb_race_detector import predecessor_types # To make sure all events are registered from hb_json_event import * from hb_events import * from hb_sts_events import * from hb_utils import dfs_edge_filter from hb_utils import just_mid_iter from hb_utils import pretty_match # # Do not import any STS types! We would like to be able to run this offline # from a trace file without having to depend on STS. # OFP_COMMANDS = {v: k for k, v in ofp_flow_mod_command_rev_map.iteritems()} # OF Message types to skip from the trace SKIP_MSGS = [OFPT_HELLO, OFPT_VENDOR, OFPT_FEATURES_REQUEST, OFPT_FEATURES_REPLY, OFPT_SET_CONFIG, OFPT_GET_CONFIG_REQUEST, OFPT_GET_CONFIG_REPLY, OFPT_STATS_REQUEST, OFPT_STATS_REPLY] class HappensBeforeGraph(object): def __init__(self, results_dir=None, add_hb_time=False, rw_delta=5, ww_delta=1, filter_rw=False, ignore_ethertypes=None, no_race=False, alt_barr=False, disable_path_cache=True, data_deps=False, verify_and_minimize_only=False, is_minimized=False): self.results_dir = results_dir self.g = nx.DiGraph() self.disable_path_cache = disable_path_cache self._cached_paths = None self._cached_reverse_paths = None self.events_by_id = dict() self.events_with_reads_writes = list() self.events_by_pid_out = defaultdict(list) self.events_by_mid_out = defaultdict(list) # events that have a mid_in/mid_in and are still looking for a pid_out/mid_out to match self.events_pending_pid_in = defaultdict(list) self.events_pending_mid_in = defaultdict(list) # for barrier pre rule self.events_before_next_barrier = defaultdict(list) # for barrier post rule self.most_recent_barrier = dict() # for races self.race_detector = RaceDetector( self, filter_rw=filter_rw, add_hb_time=add_hb_time, ww_delta=ww_delta, rw_delta=rw_delta) self.ww_delta = ww_delta self.rw_delta = rw_delta # Only mark time edges in the RaceDetetcor self.add_hb_time = False # Just to keep track of how many HB edges where added based on time self._time_hb_rw_edges_counter = 0 self._time_hb_ww_edges_counter = 0 self.ignore_ethertypes = check_list(ignore_ethertypes) self.no_race = no_race self.packet_traces = None self.host_sends = {} # Handled messages from the controller to the switch self.msg_handles = {} # Messages from the switch to the controller self.msgs = {} self.alt_barr = alt_barr self.versions = {} # add read-after-write dependency edges self.data_deps = data_deps self.shadow_tables = dict() self.covered_races = dict() self.verify_and_minimize_only = verify_and_minimize_only self.is_minimized = is_minimized @property def 
events(self): for _, data in self.g.nodes_iter(True): yield data['event'] @property def predecessors(self): """Get predecessor events for all events. """ for eid, data in self.g.nodes(data=True): this_predecessors = set() for pred in self.g.predecessors_iter(eid): this_predecessors.add(self.g.node[pred]['event']) yield (data['event'],this_predecessors) def _add_to_lookup_tables(self, event): if hasattr(event, 'pid_out'): for x in event.pid_out: self.events_by_pid_out[x].append(event) if hasattr(event, 'mid_out'): for x in event.mid_out: self.events_by_mid_out[x].append(event) self.lookup_tables = [ #( field name, # condition to be included, # search key #), (self.events_pending_pid_in, lambda x: hasattr(x, 'pid_in'), lambda x: x.pid_in ), (self.events_pending_mid_in, lambda x: hasattr(x, 'mid_in'), lambda x: x.mid_in ), ] for entry in self.lookup_tables: table, condition, key = entry if condition(event): table[key(event)].append(event) def _update_event_is_linked_pid_in(self, event): if event in self.events_pending_pid_in[event.pid_in]: self.events_pending_pid_in[event.pid_in].remove(event) def _update_event_is_linked_mid_in(self, event): if event in self.events_pending_mid_in[event.mid_in]: self.events_pending_mid_in[event.mid_in].remove(event) def update_path_cache(self): print "Updating has_path path cache..." self._cached_paths = nx.all_pairs_shortest_path_length(self.g) def has_path(self, src_eid, dst_eid, bidirectional=True, use_path_cache=True): if self.disable_path_cache or not use_path_cache: return nx.has_path(self.g, src_eid, dst_eid) or (bidirectional and nx.has_path(self.g, dst_eid, src_eid)) else: if self._cached_paths is None: self.update_path_cache() if dst_eid in self._cached_paths[src_eid]: return True if bidirectional: if src_eid in self._cached_paths[dst_eid]: return True return False def _add_edge(self, before, after, sanity_check=True, update_path_cache=True, **attrs): if sanity_check and before.type not in predecessor_types[after.type]: print "Warning: Not a valid HB edge: "+before.typestr+" ("+str(before.eid)+") < "+after.typestr+" ("+str(after.eid)+")" assert False src, dst = before.eid, after.eid if self.g.has_edge(src, dst): rel = self.g.edge[src][dst]['rel'] # Allow edge to be added multiple times because of the same relation # This is useful for time based edges if rel != attrs['rel']: raise ValueError( "Edge already added %d->%d and relation: %s" % (src, dst, rel)) self.g.add_edge(before.eid, after.eid, attrs) if update_path_cache: # TODO(jm): do incremental update later. But for now, this is sufficient. 
self._cached_paths = None def _rule_01_pid(self, event): # pid_out -> pid_in if hasattr(event, 'pid_in'): if event.pid_in in self.events_by_pid_out: for other in self.events_by_pid_out[event.pid_in]: self._add_edge(other, event, rel='pid') self._update_event_is_linked_pid_in(event) # TODO(jm): remove by reordering first # recheck events added in an order different from the trace order if hasattr(event, 'pid_out'): for pid_out in event.pid_out: if pid_out in self.events_pending_pid_in: for other in self.events_pending_pid_in[pid_out][:]: # copy list [:], so we can remove from it self._add_edge(event, other, rel='pid') self._update_event_is_linked_pid_in(other) def _rule_02_mid(self, event): # mid_out -> mid_in if hasattr(event, 'mid_in'): if event.mid_in in self.events_by_mid_out: for other in self.events_by_mid_out[event.mid_in]: self._add_edge(other, event, rel='mid') self._update_event_is_linked_mid_in(event) # TODO(jm): remove by reordering first # recheck events added in an order different from the trace order (mainly controller events as they are asynchronously logged) if hasattr(event, 'mid_out'): for mid_out in event.mid_out: if mid_out in self.events_pending_mid_in: for other in self.events_pending_mid_in[mid_out][:]: # copy list [:], so we can remove from it self._add_edge(event, other, rel='mid') self._update_event_is_linked_mid_in(other) def _rule_03_barrier_pre(self, event): if event.type == 'HbMessageHandle': if event.msg_type_str == "OFPT_BARRIER_REQUEST": for other in self.events_before_next_barrier[event.dpid]: self._add_edge(other, event, rel='barrier_pre') del self.events_before_next_barrier[event.dpid] else: self.events_before_next_barrier[event.dpid].append(event) def _rule_04_barrier_post(self, event): if event.type == 'HbMessageHandle': if event.msg_type_str == "OFPT_BARRIER_REQUEST": self.most_recent_barrier[event.dpid] = event else: if event.dpid in self.most_recent_barrier: other = self.most_recent_barrier[event.dpid] self._add_edge(other, event, rel='barrier_post') def _find_triggering_HbControllerHandle_for_alternative_barrier(self, event): """ Returns the HbControllerHandle that is responsible for triggering this event event (HbMessageHandle) <- (HbControllerSend) <- trigger (HbControllerHandle) """ preds = self.g.predecessors(event.eid) if len(preds) > 0: candidates = filter(lambda x: self.g.node[x]['event'].type == "HbControllerSend", preds) assert len(candidates) <= 1 # at most one HbControllerSend exists if len(candidates) == 1: send_event_eid = candidates[0] assert self.g.node[send_event_eid]['event'].type == "HbControllerSend" preds = self.g.predecessors(send_event_eid) candidates = filter(lambda x: self.g.node[x]['event'].type == "HbControllerHandle", preds) assert len(candidates) <= 1 # at most one HbControllerHandle exists if len(candidates) == 1: handle_event_eid = candidates[0] assert self.g.node[handle_event_eid]['event'].type == "HbControllerHandle" return handle_event_eid return None def _rule_03b_alternative_barrier_pre(self, event): """ Instead of using the dpid for barriers, this uses the eid of the predecessor HbControllerSend (if it exists). 
""" if event.type == 'HbMessageHandle': ctrl_handle_eid = self._find_triggering_HbControllerHandle_for_alternative_barrier(event) if ctrl_handle_eid is not None: if event.msg_type_str == "OFPT_BARRIER_REQUEST": for other in self.events_before_next_barrier[ctrl_handle_eid]: self._add_edge(other, event, rel='barrier_pre') del self.events_before_next_barrier[ctrl_handle_eid] else: self.events_before_next_barrier[ctrl_handle_eid].append(event) elif event.type == 'HbControllerSend': succ = self.g.successors(event.eid) for i in succ: self._rule_03b_alternative_barrier_pre(self.g.node[i]['event']) self._rule_04b_alternative_barrier_post(self.g.node[i]['event']) def _rule_04b_alternative_barrier_post(self, event): """ Instead of using the dpid for barriers, this uses the eid of the predecessor HbControllerSend (if it exists). """ if event.type == 'HbMessageHandle': ctrl_handle_eid = self._find_triggering_HbControllerHandle_for_alternative_barrier(event) if ctrl_handle_eid is not None: if event.msg_type_str == "OFPT_BARRIER_REQUEST": self.most_recent_barrier[ctrl_handle_eid] = event else: if ctrl_handle_eid in self.most_recent_barrier: other = self.most_recent_barrier[ctrl_handle_eid] self._add_edge(other, event, rel='barrier_post') elif event.type == 'HbControllerSend': succ = self.g.successors(event.eid) for i in succ: self._rule_03b_alternative_barrier_pre(self.g.node[i]['event']) self._rule_04b_alternative_barrier_post(self.g.node[i]['event']) def _rule_05_flow_removed(self, event): if isinstance(event, HbAsyncFlowExpiry): assert len(event.operations) == 1 expiry = event.operations[0] flow_table = expiry.flow_table # the flow table before the removal flow_mod = expiry.flow_mod # the removed entry reason = expiry.reason # Either idle or hard timeout. Deletes are not handled duration = expiry.duration_sec*10^9 + expiry.duration_nsec # TODO(JM): Handle deletes a different way? Currently deletes are recorded # to the trace as async switch events, same as timeouts. This means # that the instrumentation does NOT add a HB edge between the delete # operation itself and the async delete notification to the controller. # We might want to add such an edge, to do this we need the hb_logger # to link the two events already during instrumentation, as this is # almost impossible to do here as we do not have enough information # and the events might be recorded out of order in the trace. # TODO(jm): We should implement read-after-write data dependency edges # also for flow expiry messages, i.e. flows expire *after* they # have been written. This information is already partially # available in the hb_shadow_table module, but not currently # used for flow expiry. # create "dummy" operation that acts as a strict delete class DummyObject(object): pass dummy_event = DummyObject() dummy_op = DummyObject() dummy_event.eid = event.eid dummy_op.flow_mod = ofp_flow_mod(match=flow_mod.match,priority=flow_mod.priority,command=OFPFC_DELETE_STRICT) # Find other write events in the graph. 
for e in self.events: if e == event: continue # Skip none switch event if type(e) != HbMessageHandle: continue kw_ops = [] kr_ops = [] # Find the write ops for op in e.operations: if type(op) == TraceSwitchFlowTableWrite: kw_ops.append(op) elif type(op) == TraceSwitchFlowTableRead: kr_ops.append(op) if (not kw_ops) and (not kr_ops): continue # Make the edge for kw_op in kw_ops: # Skip if events commute anyway if self.race_detector.commutativity_checker.check_commutativity_ww( e, kw_op, dummy_event, dummy_op): continue delta = abs(expiry.t - kw_op.t) if delta > self.ww_delta: self._time_hb_ww_edges_counter += 1 self._add_edge(e, event, sanity_check=False, rel='time') break for kr_op in kr_ops: # Skip if events commute anyway if self.race_detector.commutativity_checker.check_commutativity_rw( e, kr_op, dummy_event, dummy_op): continue delta = abs(expiry.t - kr_op.t) if delta > self.rw_delta: self._time_hb_rw_edges_counter += 1 self._add_edge(e, event, sanity_check=False, rel='time') break def _rule_06_time_rw(self, event): if type(event) not in [HbPacketHandle]: return packet_match = ofp_match.from_packet(event.packet, event.in_port) operations = [] # Get all the read operations in the event # For OF 1.0 should be only one op, but more for OF1.3 for op in event.operations: if type(op) == TraceSwitchFlowTableRead: operations.append(op) for e in self.events: if type(e) != HbMessageHandle: continue for op in e.operations: if type(op) != TraceSwitchFlowTableWrite: continue if not op.flow_mod.match.matches_with_wildcards(packet_match, consider_other_wildcards=False): continue delta = abs(op.t - operations[0].t) if (delta > self.rw_delta): self._time_hb_rw_edges_counter += 1 self._add_edge(e, event, sanity_check=False, rel='time') break def _rule_07_time_ww(self, event): if type(event) not in [HbMessageHandle]: return i_ops = [] # Get all the write operations in the event # For OF 1.0 should be only one op, but more for OF1.3 for op in event.operations: if type(op) == TraceSwitchFlowTableWrite: i_ops.append(op) # No write operations in the event, just skip if not i_ops: return # Find other write events in the graph. for e in self.events: if e == event: continue # Skip none switch event if type(e) != HbMessageHandle: continue k_ops = [] # Find the write ops for op in e.operations: if type(op) == TraceSwitchFlowTableWrite: k_ops.append(op) if not k_ops: continue # Make the edge for i_op in i_ops: for k_op in k_ops: # Skip if events commute anyway if self.race_detector.commutativity_checker.check_commutativity_ww( event, i_op, e, k_op): continue delta = abs(i_op.t - k_op.t) if delta > self.ww_delta: self._time_hb_ww_edges_counter += 1 self._add_edge(e, event, sanity_check=False, rel='time') break def _update_edges(self, event): self._rule_01_pid(event) self._rule_02_mid(event) if self.alt_barr: self._rule_03b_alternative_barrier_pre(event) self._rule_04b_alternative_barrier_post(event) else: self._rule_03_barrier_pre(event) self._rule_04_barrier_post(event) self._rule_05_flow_removed(event) if self.add_hb_time: self._rule_06_time_rw(event) self._rule_07_time_ww(event) def _update_shadow_tables(self, event): if event.dpid not in self.shadow_tables: self.shadow_tables[event.dpid] = ShadowFlowTable(event.dpid, self.is_minimized) self.shadow_tables[event.dpid].apply_event(event) def unpack_line(self, line): # Skip empty lines and the ones start with '#' if not line or line.startswith('#'): return # TODO(jm): I did some tests to see why loading events is so slow. 
# JsonEvent.from_json is the slow part, everything else # (including json.loads()) is blazing fast. # We might want to speed that up a bit. event = JsonEvent.from_json(json.loads(line)) return event def add_line(self, line): event = self.unpack_line(line) if event: self.add_event(event) def add_event(self, event): assert event.eid not in self.events_by_id if self.ignore_ethertypes: packet = None if hasattr(event, 'packet'): packet = event.packet if type(event) == HbMessageHandle and getattr(event.msg, 'data', None): packet = ethernet(event.msg.data) if packet and packet.type in self.ignore_ethertypes: # print "Filtered PKT in ignore_ethertypes" return msg_type = getattr(event, 'msg_type', None) if msg_type in SKIP_MSGS: return self.g.add_node(event.eid, event=event) self.events_by_id[event.eid] = event self._add_to_lookup_tables(event) if hasattr(event, 'operations'): for op in event.operations: if type(op) in [TraceSwitchFlowTableRead, TraceSwitchFlowTableWrite]: # TODO(jm): Add TraceSwitchFlowTableEntryExpiry events here as well. # But before we can do that, we need to assign monotonicially increasing # eids to the expiry events as well in hb_logger self.events_with_reads_writes.append(event.eid) break def _handle_HbAsyncFlowExpiry(event): if self.data_deps: self._update_shadow_tables(event) self._update_edges(event) def _handle_HbPacketHandle(event): if self.data_deps: self._update_shadow_tables(event) self._update_edges(event) def _handle_HbPacketSend(event): self._update_edges(event) def _handle_HbMessageHandle(event): if self.data_deps: self._update_shadow_tables(event) self._update_edges(event) self.msg_handles[event.eid] = event def _handle_HbMessageSend(event): self._update_edges(event) self.msgs[event.eid] = event def _handle_HbHostHandle(event): self._update_edges(event) def _handle_HbHostSend(event): self._update_edges(event) self.host_sends[event.eid] = event def _handle_HbControllerHandle(event): self._update_edges(event) def _handle_HbControllerSend(event): self._update_edges(event) def _handle_default(event): assert False pass handlers = {'HbAsyncFlowExpiry': _handle_HbAsyncFlowExpiry, 'HbPacketHandle': _handle_HbPacketHandle, 'HbPacketSend': _handle_HbPacketSend, 'HbMessageHandle': _handle_HbMessageHandle, 'HbMessageSend': _handle_HbMessageSend, 'HbHostHandle': _handle_HbHostHandle, 'HbHostSend': _handle_HbHostSend, 'HbControllerHandle': _handle_HbControllerHandle, 'HbControllerSend': _handle_HbControllerSend, } handlers.get(event.type, _handle_default)(event) def load_trace(self, filename): self.g = nx.DiGraph() self.events_by_id = dict() unpacked_events = list() with open(filename) as f: for line in f: event = self.unpack_line(line) if event: unpacked_events.append(event) print "Read " + str(len(unpacked_events)) + " events." for event in unpacked_events: self.add_event(event) print "Added " + str(len(list(self.events))) + " events." 
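  # ----------------------------------------------------------------------------
  # Illustrative sketch (not part of the original class): load_trace() above
  # buffers every unpacked event in a list before adding it to the graph. For
  # very large traces the same result can be obtained by streaming each line
  # straight through add_event(), trading the extra pass for lower memory use
  # (the method name is made up; out-of-order events are still handled by the
  # pending lookup tables in add_event()):
  def load_trace_streaming_sketch(self, filename):
    """Stream a trace file into the graph one line at a time."""
    self.g = nx.DiGraph()
    self.events_by_id = dict()
    count = 0
    with open(filename) as f:
      for line in f:
        event = self.unpack_line(line)
        if event is not None:
          self.add_event(event)
          count += 1
    print "Added " + str(count) + " events."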
def verify_and_minimize_trace(self, filename): unpacked_events = 0 outfilename = filename + ".min" with open(filename + ".min", 'w') as fout: with open(filename) as f: for line in f: event = self.unpack_line(line) if event: unpacked_events += 1 has_reads_writes = False if hasattr(event, 'operations'): for op in event.operations: if type(op) in [TraceSwitchFlowTableRead, TraceSwitchFlowTableWrite, TraceSwitchFlowTableEntryExpiry]: has_reads_writes = True break if type(event) in [HbAsyncFlowExpiry, HbPacketHandle, HbMessageHandle]: self._update_shadow_tables(event) # cleanup operations if hasattr(event, 'operations'): for op in event.operations: if hasattr(op, "flow_table"): delattr(op, "flow_table") # cleanup attributes fout.write(str(event.to_json()) + '\n') fout.flush() print "Verified, minimized, and wrote " + str(unpacked_events) + " events to "+str(outfilename) def store_graph(self, filename="hb.dot", print_packets=False): if self.results_dir is not None: filename = os.path.join(self.results_dir,filename) self.prep_draw(self.g, print_packets) nx.write_dot(self.g, os.path.join(self.results_dir, filename)) @staticmethod def prep_draw(g, print_packets, allow_none_event=False): """ Adds proper annotation for the graph to make drawing it more pleasant. """ for eid, data in g.nodes_iter(data=True): event = data.get('event', None) if not event and allow_none_event: label = "N %s" % eid shape = "oval" g.node[eid]['label'] = label g.node[eid]['shape'] = shape continue label = "ID %d \\n %s" % (eid, event.type) if hasattr(event, 'hid'): label += "\\nHID: " + str(event.hid) if hasattr(event, 'dpid'): label += "\\nDPID: " + str(event.dpid) shape = "oval" op = None if hasattr(event, 'operations'): for x in event.operations: if x.type == 'TraceSwitchFlowTableWrite': op = "FlowTableWrite" op += "\\nCMD: " + OFP_COMMANDS[x.flow_mod.command] op += "\\nMatch: " + pretty_match(x.flow_mod.match) op += "\\nActions: " + str(x.flow_mod.actions) label += "\\nt: " + repr(x.t) shape = 'box' g.node[eid]['style'] = 'bold' break if x.type == 'TraceSwitchFlowTableRead': op = "FlowTableRead" label += "\\nt: " + repr(x.t) shape = 'box' break if hasattr(event, 'msg') and getattr(event.msg, 'actions', None): op = "\\nActions: " + str(event.msg.actions) cmd_type = data.get('cmd_type') if cmd_type: label += "\\n%s" % cmd_type if op: label += "\\nOp: %s" % op if hasattr(event, 'msg_type'): label += "\\nMsgType: " + event.msg_type_str if getattr(event, 'msg', None): label += "\\nXID: %d" % event.msg.xid if hasattr(event, 'in_port'): label += "\\nInPort: " + str(event.in_port) if hasattr(event, 'out_port') and not isinstance(event.out_port, basestring): label += "\\nOut Port: " + str(event.out_port) if hasattr(event, 'buffer_id'): label += "\\nBufferId: " + str(event.buffer_id) if print_packets and hasattr(event, 'packet'): pkt = pkt_info(event.packet) label += "\\nPkt: " + pkt if print_packets and getattr(event, 'msg', None): if getattr(event.msg, 'data', None): pkt = pkt_info(ethernet(event.msg.data)) label += "\\nPkt: " + pkt g.node[eid]['label'] = label g.node[eid]['shape'] = shape for src, dst, data in g.edges_iter(data=True): g.edge[src][dst]['label'] = data['rel'] if data['rel'] == 'race': if data['harmful']: g.edge[src][dst]['color'] = 'red' g.edge[src][dst]['style'] = 'bold' else: g.edge[src][dst]['style'] = 'dotted' elif data['rel'] == 'covered': g.edge[src][dst]['color'] = 'blue' g.edge[src][dst]['style'] = 'bold' def extract_traces(self, g): """ Given HB graph g, this method return a list of subgraph starting 
    from a HostSend event and all the subsequent nodes that happened after it.
    This method will exclude all the nodes connected because of time
    and the nodes connected after HostHandle.
    """
    traces = []
    # Sort host sends by eid, this will make the output follow the trace order
    eids = self.host_sends.keys()
    eids = sorted(eids)
    for eid in eids:
      nodes = list(nx.dfs_preorder_nodes(g, eid))
      # Remove other HostSends. Filter into a new list rather than removing
      # from the list while iterating over it (which would skip elements).
      nodes = [node for node in nodes
               if node == eid or
               not isinstance(g.node[node]['event'], HbHostSend)]
      subg = nx.DiGraph(g.subgraph(nodes), host_send=g.node[eid]['event'])
      traces.append(subg)
    for i, subg in enumerate(traces):
      for src, dst, data in subg.edges(data=True):
        if data['rel'] in ['time', 'race']:
          subg.remove_edge(src, dst)
      # Remove disconnected subgraph
      host_send = subg.graph['host_send']
      nodes = nx.dfs_preorder_nodes(subg, host_send.eid)
      traces[i] = nx.DiGraph(subg.subgraph(nodes), host_send=host_send)
    self.packet_traces = traces
    return traces

  def store_traces(self, results_dir, print_packets=True, subgraphs=None):
    if not subgraphs:
      subgraphs = self.extract_traces(self.g)
    for i in range(len(subgraphs)):
      subg = subgraphs[i]
      send = subg.graph['host_send']
      HappensBeforeGraph.prep_draw(subg, print_packets)
      nx.write_dot(subg, "%s/trace_%s_%s_%04d.dot" % (
          results_dir, str(send.packet.src), str(send.packet.dst), send.eid))

  def get_racing_events(self, trace, ignore_other_traces=True):
    """
    For a given packet trace, return all the races that race with its events
    """
    # Set of all events that are part of a harmful race
    all_harmful = set(
        [event.eid for event in self.race_detector.racing_events_harmful])
    # Set of event ids of a packet trace
    eids = set(trace.nodes())
    # All events in packet trace that are also part of a race
    racing_eids = sorted(list(eids.intersection(all_harmful)))
    # Get the actual reported race;
    # will get us the other event that has been part of the race
    rw_races_with_trace = list()
    for race in self.race_detector.races_harmful_with_covered:
      if race.rtype == 'r/w':
        # i_event is read, k_event is write
        if race.i_event.eid in racing_eids or race.k_event.eid in racing_eids:
          # We don't care about writes on the packet trace that race with
          # reads on other packet traces. The other traces will be reported
          # anyway. Logical implication:
          #   ignore_other_traces ==> race.i_event.eid in racing_eids
          if (not ignore_other_traces) or (race.i_event.eid in racing_eids):
            rw_races_with_trace.append(race)
    # make sure the races are sorted first by read, then by write. The default
    # sort on the namedtuple already does this
    return sorted(rw_races_with_trace)

  def get_all_packet_traces_with_races(self):
    """
    Finds all the races related to each packet trace
    """
    races = list()
    for trace in self.packet_traces:
      racing_events = self.get_racing_events(trace, True)
      if len(racing_events) > 0:
        races.append((trace, racing_events,))
    return races

  def summarize_per_packet_inconsistent(self, traces_races):
    """
    If two packets are inconsistent, but they race with the same set of
    writes, then only one will be reported
    """
    # TODO(jm): This does not take into account the order of the writes or the
    #           path the packets took. Do we care?
result = {} removed = defaultdict(list) for trace, races, versions in traces_races: # First get the writes writes = [] for race in races: if isinstance(race.i_op, TraceSwitchFlowTableWrite): writes.append(race.i_op.eid) if isinstance(race.k_op, TraceSwitchFlowTableWrite): writes.append(race.k_op.eid) key = (tuple(sorted(writes))) if key in result: removed[key].append((trace, races, versions)) else: result[key] = (trace, races, versions) return result.values() def print_racing_packet_trace(self, trace, races, label, show_covered=True): """ first is the trace second is the list of races """ host_send = trace.graph['host_send'] g = nx.DiGraph(trace, host_send= host_send) for race in races: if not g.has_node(race.i_event.eid): g.add_node(race.i_event.eid, event=race.i_event) if not g.has_node(race.k_event.eid): g.add_node(race.k_event.eid, event=race.k_event) if show_covered and race in self.covered_races: for path in nx.all_simple_paths(self.g, race.i_event.eid, race.k_event.eid): for src, dst in zip(path, path[1:]): g.node[src] = self.g.node[src] g.node[dst] = self.g.node[dst] g.add_edge(src, dst, self.g.edge[src][dst]) for path in nx.all_simple_paths(self.g, race.k_event.eid, race.i_event.eid): for src, dst in zip(path, path[1:]): g.node[src] = self.g.node[src] g.node[dst] = self.g.node[dst] g.add_edge(src, dst, self.g.edge[src][dst]) g.add_edge(race.i_event.eid, race.k_event.eid, rel='covered', harmful=True) else: #if not g.has_edge(race.i_event.eid, race.k_event.eid) and not \ # g.has_edge(race.k_event.eid, race.i_event.eid): g.add_edge(race.i_event.eid, race.k_event.eid, rel='race', harmful=True) self.prep_draw(g, TraceSwitchPacketUpdateBegin) src = str(host_send.packet.src) dst = str(host_send.packet.dst) name = "%s_%s_%s_%s.dot" %(label, src, dst, host_send.eid) name = os.path.join(self.results_dir, name) print "Storing packet %s for %s->%s in %s " % (label, src, dst, name) nx.write_dot(g, name) def races_graph(self): races = self.race_detector.races_harmful races_graph = nx.DiGraph() for rtype, i_event, i_op, k_event, k_op in races: races_graph.add_node(i_event.eid, event=i_event) races_graph.add_node(k_event.eid, event=k_event) races_graph.add_edge(i_event.eid, k_event.eid, rel='race', harmful=True) return races_graph def save_races_graph(self, print_pkts=True, name=None): if not name: name = "just_races.dot" graph = self.races_graph() self.prep_draw(graph, print_pkts) print "Saving all races graph in", name nx.write_dot(graph, os.path.join(self.results_dir, name)) def find_covered_races(self): """ Go through events in trace order, add a RaW dependency and then check if there are any races that are part of: - the set of predecessors of W, and - the set of successors of R These are now ordered so we can add them to the list. """ if self.covered_races: return self.covered_races covered_races = dict() data_dep_races = set() time_races = set() remaining_harmful_races = set() # remove all races that were already removed due to time based rules for r in self.race_detector.races_harmful_with_covered: if self.has_path(r.i_event.eid, r.k_event.eid, bidirectional=True): # race is not a race anymore time_races.add(r) else: # race is still a race and can become covered when adding data deps remaining_harmful_races.add(r) # check for monotonically increasing eids, i.e. 
the list must be sorted assert all(x <= y for x, y in zip(self.events_with_reads_writes, self.events_with_reads_writes[1:])) for eid in self.events_with_reads_writes: event = self.events_by_id[eid] dpid = event.dpid shadow_table = self.shadow_tables[dpid] if hasattr(event, 'operations'): has_reads = False for op in event.operations: if type(op) in [TraceSwitchFlowTableRead]: has_reads = True if has_reads: # add RaW dependencies (HB edge from event containing W -> event containing R) for write_eid in shadow_table.data_deps[event.eid]: write_event = self.events_by_id[write_eid] if self.g.has_edge(write_event.eid, event.eid): assert self.g.get_edge_data(write_event.eid, event.eid)['rel'] == 'time' else: self._add_edge(write_event, event, sanity_check=False, rel='dep_raw') # Should we check this after adding *all* dependencies or after each. E.g. for events with a read and a write. # includes write_eid itself write_succs = set(nx.dfs_preorder_nodes(self.g, write_eid)) for r in remaining_harmful_races: # TODO(jm): get rid of this loop here, lots of unnecessary looping # is there a path from our write to the the race if r.i_event.eid in write_succs or r.k_event.eid in write_succs: # ignore races that we just removed using the data dep edge. if (r.i_event == event and r.k_event == write_event) or (r.i_event == write_event and r.k_event == event): data_dep_races.add(r) else: # only add a covered race the first time if r not in covered_races and r not in data_dep_races: if self.has_path(r.i_event.eid, r.k_event.eid, bidirectional=True, use_path_cache=False): # race is not a race anymore self.race_detector._races_harmful.remove(r) self.race_detector.covered_races.append(r) covered_races[r] = (eid, write_eid) self.covered_races = covered_races return self.covered_races def _get_versions_for_races(self, races): # assume races is ordered! assert all(races[i] < races[i+1] for i in xrange(len(races)-1)) versions_for_race = defaultdict(set) for race in races: # get versions for each race for version, cmds in self.versions.iteritems(): if race.i_event.eid in cmds or race.k_event.eid in cmds: versions_for_race[race].add(version) return versions_for_race def _is_inconsistent_packet_entry_version(self, trace, race, dpids_affected): trace_nodes = nx.dfs_preorder_nodes(trace, trace.graph['host_send'].eid) trace_dpids = [getattr(self.g.node[node]['event'], 'dpid', None) for node in trace_nodes] racing_dpid = race.i_event.dpid # which switches/nodes does the packet traverse before hitting this 1 uncovered race? none_racing_dpids = set([x for x in trace_dpids[:trace_dpids.index(racing_dpid)] if x is not None]) return not dpids_affected.intersection(none_racing_dpids) def find_per_packet_inconsistent(self, covered_races=None, summarize=True): """ Returns the following sets of packet traces. 
    1) all packet traces that race with a write event
    2) all per-packet TRUE inconsistent traces
    3) Covered packet traces (trace with races cannot happen because of HB)
    4) Packet traces with races with first switch on version update
    5) Summarized traces, after removing covered traces and trimming traces
       that race with the same writes

    all packet traces = TRUE inconsistent traces + covered + entry switch races
    summarized = all per-packet inconsistent traces - repeated per-packet
                 inconsistent traces
    """
    if covered_races is None:
      covered_races = {}
    # list of (trace, races), ordered by trace order
    packet_races = self.get_all_packet_traces_with_races()
    inconsistent_packet_traces = []
    consistent_packet_traces_covered = []
    consistent_packet_entry_version = []
    summarized = []
    dpids_for_version = {}
    for version, cmds in self.versions.iteritems():
      dpids_for_version[version] = set(
          [getattr(self.g.node[cmd]['event'], 'dpid', None) for cmd in cmds])
    for trace, races in packet_races:
      uncovered_races = [race for race in races if race not in covered_races]
      uncovered_races_dpids = list(
          set([race.i_event.dpid for race in uncovered_races]))
      versions_for_race = self._get_versions_for_races(uncovered_races)
      racing_versions = sorted(list(set(versions_for_race.keys())))
      # check if all the races are actually covered
      if not uncovered_races:
        consistent_packet_traces_covered.append(
            (trace, races, racing_versions))
      elif len(uncovered_races_dpids) == 1:
        # check entry
        is_entry = True
        for race in uncovered_races:
          version = list(versions_for_race[race])[0]
          affected_dpids = dpids_for_version[version]
          is_entry = self._is_inconsistent_packet_entry_version(
              trace, race, affected_dpids)
          # If only one of the races is not entry then even though the races
          # are on one switch, one of them makes this trace inconsistent.
          if not is_entry:
            break
        has_covered = len(races) > len(uncovered_races)
        if is_entry:
          if has_covered:
            consistent_packet_traces_covered.append(
                (trace, races, racing_versions))
          else:
            consistent_packet_entry_version.append(
                (trace, races, racing_versions))
        else:
          inconsistent_packet_traces.append((trace, races, racing_versions))
      else:
        inconsistent_packet_traces.append((trace, races, racing_versions))
    if summarize:
      summarized = self.summarize_per_packet_inconsistent(
          inconsistent_packet_traces)
    assert len(packet_races) == len(inconsistent_packet_traces) + \
        len(consistent_packet_entry_version) + \
        len(consistent_packet_traces_covered)
    return packet_races, inconsistent_packet_traces, \
        consistent_packet_traces_covered, \
        consistent_packet_entry_version, summarized

  def find_barrier_replies(self):
    barrier_replies = []
    for eid in self.msgs:
      if self.msgs[eid].msg_type_str != 'OFPT_BARRIER_REPLY':
        continue
      nodes = []
      # TODO(jm): Are we sure just_mid_iter is correct? What about packets sent
      #           out by a PACKET_OUT that then trigger a PACKET_IN -> ...
      # -> BARRIER_REPLY? (see find_barrier_replies)
      edges = dfs_edge_filter(self.g, eid, just_mid_iter)
      for src, dst in edges:
        src_event = self.g.node[src]['event']
        dst_event = self.g.node[dst]['event']
        if isinstance(src_event, HbMessageHandle):
          nodes.append(src_event)
          #self.g.node[src]['cmd_type'] = "Reactive to %d" % eid
        if isinstance(dst_event, HbMessageHandle):
          nodes.append(dst_event)
          #self.g.node[dst]['cmd_type'] = "Reactive to %d" % eid
      # Get unique and sort by time
      nodes = sorted(list(set(nodes)), key=lambda n: n.operations[0].t if n.operations else 0)
      barrier_replies.append((self.msgs[eid], nodes))
    return barrier_replies

  def find_reactive_versions2(self):
    considered = []
    cmds = []
    ordered_msgs = OrderedDict()
    #sorted_msgs = sorted(self.msgs.values(), key=lambda m: m.operations[0].t if getattr(m, 'operations', None) else 0)
    sorted_msgs = sorted(self.msgs.values(), key=lambda m: m.eid)
    for m in sorted_msgs:
      ordered_msgs[m.eid] = m
    for eid in ordered_msgs:
      if self.msgs[eid].msg_type_str == 'OFPT_BARRIER_REPLY':
        continue
      if eid in considered:
        continue
      else:
        considered.append(eid)
      nodes = []
      # TODO(jm): Are we sure just_mid_iter is correct? What about packets sent
      # out by a PACKET_OUT that then trigger a PACKET_IN -> ...
      # -> BARRIER_REPLY? (see find_barrier_replies)
      #edges = dfs_edge_filter(self.g, eid, just_mid_iter, filter_msg_type='OFPT_PACKET_IN')
      edges = dfs_edge_filter(self.g, eid, just_mid_iter)
      for src, dst in edges:
        src_event = self.g.node[src]['event']
        dst_event = self.g.node[dst]['event']
        if isinstance(dst_event, HbMessageSend):
          considered.append(dst_event.eid)
        if isinstance(src_event, HbMessageHandle) and src_event.eid not in considered:
          nodes.append(src_event)
          self.g.node[src]['cmd_type'] = "Reactive to %d" % eid
        if isinstance(dst_event, HbMessageHandle) and dst_event.eid not in considered:
          nodes.append(dst_event)
          self.g.node[dst]['cmd_type'] = "Reactive to %d" % eid
      # Get unique and sort by time
      nodes = sorted(list(set(nodes)), key=lambda n: n.operations[0].t if n.operations else 0)
      for n in nodes:
        considered.append(n.eid)
      cmds.append((self.msgs[eid], nodes))
    for l, (x, i) in enumerate(cmds):
      for k, (y, j) in enumerate(cmds):
        if l == k:
          continue
        assert set(i).intersection(j), "l %s and k %s" % (l, k)
    return cmds

  def find_reactive_versions(self):
    cmds = []
    considered = []
    cv = dict()
    for eid in self.msgs:
      if self.msgs[eid].msg_type_str == 'OFPT_BARRIER_REPLY':
        continue
      nodes = []
      # TODO(jm): Are we sure just_mid_iter is correct? What about packets sent
      # out by a PACKET_OUT that then trigger a PACKET_IN -> ...
      # -> BARRIER_REPLY? (see find_barrier_replies)
      edges = dfs_edge_filter(self.g, eid, just_mid_iter, filter_msg_type=HbMessageSend)
      for src, dst in edges:
        src_event = self.g.node[src]['event']
        dst_event = self.g.node[dst]['event']
        if isinstance(src_event, HbMessageHandle):
          nodes.append(src_event)
          self.g.node[src]['cmd_type'] = "Reactive to %d" % eid
        if isinstance(dst_event, HbMessageHandle):
          nodes.append(dst_event)
          self.g.node[dst]['cmd_type'] = "Reactive to %d" % eid
      # Get unique and sort by time
      nodes = sorted(list(set(nodes)), key=lambda n: n.operations[0].t if n.operations else 0)
      for n in nodes:
        assert n.eid not in considered, \
            "For event %d at eid %d it was considered at %d" % (n.eid, eid, cv[n.eid])
        considered.append(n.eid)
        cv[n.eid] = eid
      cmds.append((self.msgs[eid], nodes))
    for l, (x, i) in enumerate(cmds):
      for k, (y, j) in enumerate(cmds):
        if l == k:
          continue
        assert not set(i).intersection(j), "l %s and k %s" % (l, k)
    return cmds

  def find_proactive_cmds(self, reactive_versions=None):
    """
    Proactive is all the cmds that were not in the reactive set
    """
    # TODO(jm): At the end of the trace, some of the controller instrumentation
    # might not be there, so some of the commands at the very end could be
    # reactive. Cut them off somehow?
    if not reactive_versions:
      reactive_versions = self.find_reactive_versions()
    reactive_cmds = []
    for msgs, cmds in reactive_versions:
      for cmd in cmds:
        reactive_cmds.append(cmd.eid)
    proactive_eid = set(self.msg_handles.keys()).difference(set(reactive_cmds))
    proactive = [self.g.node[eid]['event'] for eid in list(proactive_eid)]
    for cmd in proactive:
      self.g.node[cmd.eid]['cmd_type'] = 'Proactive'
    proactive.sort(key=lambda n: n.operations[0].t)
    return proactive

  def cluster_cmds(self, cmds):
    """
    Cluster the update commands by time.
    """
    # Cluster by time
    from scipy.cluster.hierarchy import fclusterdata
    # TODO(jm): Should we add a setting for the threshold, or use STS rounds
    # instead of time?
    features = [[e.operations[0].t] for e in cmds]
    result = fclusterdata(features, 0.8, criterion="distance")
    clustered = defaultdict(list)
    for i in range(len(cmds)):
      clustered[result[i]].append(cmds[i])
    # just trying to order the versions
    ordered = sorted(clustered.keys(), key=lambda i: clustered[i][0].operations[0].t)
    clustered_ordered = dict()
    for i in range(len(ordered)):
      clustered_ordered[i] = clustered[ordered[i]]
    self.clustered_cmds = clustered_ordered
    return clustered_ordered

  def find_versions(self):
    """
    Find all versions, reactive or proactive
    """
    if self.versions:
      return self.versions
    reactive = self.find_reactive_versions()
    proactive = self.find_proactive_cmds(reactive)
    self.cluster_cmds(proactive)
    # Consider all proactive and reactive versions
    versions = {}
    for version, events in self.clustered_cmds.iteritems():
      versions[version] = list(set([event.eid for event in events]))
    for pktin, events in reactive:
      versions[pktin] = list(set([event.eid for event in events]))
    # Now merge versions if one contains a response to a barrier request
    # from a previous version.
    # TODO(jm): Perhaps we should not just consider barrier replies, but also
    # flow removed messages for explicit deletes? Are there more such replies?
    barrier_replies = self.find_barrier_replies()
    replies_by_xid = {}           # (dpid, xid) -> cmds
    replies_by_xid_versions = {}  # (dpid, xid) -> versions
    requests_by_xid = {}          # (dpid, xid) -> version
    # Sort replies by dpid and xid
    for rep, cmds in barrier_replies:
      key = (rep.dpid, rep.msg.xid)
      replies_by_xid[key] = [event.eid for event in cmds]
      replies_by_xid_versions[key] = []
      reactive_cmds = set(replies_by_xid[key])
      for v, v_cmds in versions.iteritems():
        if reactive_cmds.intersection(v_cmds):
          replies_by_xid_versions[key].append(v)
    # Sort requests by dpid and xid
    for v, v_cmds in versions.iteritems():
      for v_cmd in v_cmds:
        event = self.g.node[v_cmd]['event']
        if event.msg_type_str == 'OFPT_BARRIER_REQUEST':
          requests_by_xid[(event.dpid, event.msg.xid)] = v
    for key, version in requests_by_xid.iteritems():
      if version not in versions:
        continue  # already merged
      if key not in replies_by_xid:
        continue
      new_cmds = versions[version]
      for v in replies_by_xid_versions[key]:
        if v == version:
          continue  # we already considered the first version
        if v not in versions:
          continue  # already merged
        new_cmds += versions[v]
        del versions[v]
    # Sort cmds by time, just to make it nicer
    for version in versions:
      versions[version].sort(key=lambda x: self.g.node[x]['event'].operations[0].t)
    versions = dict([k, v] for k, v in versions.iteritems() if v)
    self.versions = versions
    return versions

  def find_inconsistent_updates(self):
    """Try to find if two versions race with each other"""
    versions = self.find_versions()
    # TODO(jm): Could we check the races directly instead of creating the
    # ww_races variable?
    racing_versions_tuples = []
    racing_versions_dict = {}
    ww_races = defaultdict(list)
    for race in self.race_detector.races_harmful_with_covered:
      if race.rtype == 'w/w':
        ww_races[race.i_event.eid].append(race.k_event.eid)
        ww_races[race.k_event.eid].append(race.i_event.eid)
    racing_events = []
    for version, cmds in versions.iteritems():
      for cmd in cmds:
        if cmd in ww_races:
          for other in ww_races[cmd]:
            if other not in cmds:
              racing_events.append((cmd, other))
    racing_versions = []
    for eid1, eid2 in racing_events:
      v1 = None
      v2 = None
      for version, cmds in versions.iteritems():
        if eid1 in cmds:
          v1 = version
        if eid2 in cmds:
          v2 = version
        if v1 and v2 and v1 != v2:
          break
      racing_versions.append((v1, v2, (eid1, eid2), (versions[v1], versions[v2])))
      if set([v1, v2]) not in racing_versions_tuples:
        racing_versions_tuples.append(set([v1, v2]))
      ordered_versions = (v1, v2)
      er1 = eid1
      er2 = eid2
      if ordered_versions not in racing_versions_dict:
        ordered_versions = (v2, v1)
        er1 = eid2
        er2 = eid1
      if ordered_versions not in racing_versions_dict:
        racing_versions_dict[ordered_versions] = [[], []]
      if er1 not in racing_versions_dict[ordered_versions][0] and \
         er2 not in racing_versions_dict[ordered_versions][1]:
        racing_versions_dict[ordered_versions][0].append(er1)
        racing_versions_dict[ordered_versions][1].append(er2)
    return racing_versions, racing_versions_tuples, racing_versions_dict

  def print_versions(self, versions, selected_versions=[]):
    # Printing versions
    if not selected_versions:
      selected_versions = versions.keys()
    for v, cmds in versions.iteritems():
      if v not in selected_versions:
        continue
      print "IN Version", v
      if isinstance(v, HbMessageSend):
        print "React to Msg: ", v.msg_type_str
      for cmd in cmds:
        node = self.g.node[cmd]['event']
        match = ''
        if getattr(node.msg, 'match', None):
          match = node.msg.show().replace('\n', ' ')
        of_cmd = ''
        if hasattr(node.msg, 'command'):
          of_cmd = OFP_COMMANDS[node.msg.command]
        print "\t eid", node.eid, " dpid:", node.dpid, " xid:", \
              node.msg.xid, \
              " cmd:", node.msg_type_str, of_cmd, ' ', \
              pretty_match(getattr(node.msg, 'match', None)), \
              getattr(node.msg, 'actions', None)

  def print_covered_races(self):
    print "Covered races:"
    eids = []
    race_edges = []
    nodes_on_path = []
    for r, v in self.covered_races.iteritems():
      print "Race (r/w): ", r.rtype, r.i_event.eid, r.k_event.eid, \
            ", covered by data dep w -> r: ", v
      eids.append(r.i_event.eid)
      eids.append(r.k_event.eid)
      race_edges.append((r.i_event.eid, r.k_event.eid))
      eids.append(v[0])
      eids.append(v[1])
      for path in nx.all_simple_paths(self.g, r.i_event.eid, r.k_event.eid):
        nodes_on_path.extend(path)
      for path in nx.all_simple_paths(self.g, r.k_event.eid, r.i_event.eid):
        nodes_on_path.extend(path)
    nodes_on_path = list(set(nodes_on_path))
    sub_nodes = nodes_on_path + eids
    subg = self.g.subgraph(list(set(sub_nodes)))
    for i, k in race_edges:
      subg.add_edge(k, i, rel='covered')
    self.prep_draw(subg, True)
    nx.write_dot(subg, os.path.join(self.results_dir, 'covered_races.dot'))

  def racing_versions_graph(self, v1, cmd1, v2, cmd2):
    nodes = []
    extra_nodes = []
    extra_edges = []
    nodes.extend(cmd1)
    nodes.extend(cmd2)
    if hasattr(v1, 'eid') and self.g.has_node(v1.eid):
      nodes.append(v1.eid)
      for eid in cmd1:
        nodes.append(eid)
        extra_edges.append((v1.eid, eid))
    else:
      vid = 'Proactive%d' % v1
      extra_nodes.append(vid)
      for eid in cmd1:
        extra_edges.append((vid, eid))
    if hasattr(v2, 'eid') and self.g.has_node(v2.eid):
      nodes.append(v2.eid)
      for eid in cmd2:
        nodes.append(eid)
        extra_edges.append((v2.eid, eid))
    else:
      vid = 'Proactive%d' % v2
      extra_nodes.append(vid)
      for eid in cmd2:
        extra_edges.append((vid, eid))
    vg = self.g.subgraph(nodes)
    for n in extra_nodes:
      vg.add_node(n)
    for src, dst in extra_edges:
      vg.add_edge(src, dst, rel='version')
    races = self.race_detector.races_harmful
    for rtype, i_event, i_op, k_event, k_op in races:
      if i_event.eid in nodes and k_event.eid in nodes:
        vg.add_edge(i_event.eid, k_event.eid, rel='race', harmful=True)
        vg.add_edge(k_event.eid, i_event.eid, rel='race', harmful=True)
    self.prep_draw(vg, True, allow_none_event=True)
    return vg


class Main(object):

  def __init__(self, filename, print_pkt, add_hb_time=True, rw_delta=5, ww_delta=5,
               filter_rw=False, ignore_ethertypes=None, no_race=False,
               alt_barr=False, verbose=True, ignore_first=False,
               disable_path_cache=False, data_deps=False, no_dot_files=False,
               verify_and_minimize_only=False, is_minimized=False):
    self.filename = os.path.realpath(filename)
    self.results_dir = os.path.dirname(self.filename)
    self.output_filename = self.results_dir + "/" + "hb.dot"
    self.print_pkt = print_pkt
    self.add_hb_time = add_hb_time
    self.rw_delta = rw_delta
    self.ww_delta = ww_delta
    self.filter_rw = filter_rw
    self.ignore_ethertypes = ignore_ethertypes
    self.no_race = no_race
    self.alt_barr = alt_barr
    self.verbose = verbose
    self.ignore_first = ignore_first
    self.disable_path_cache = disable_path_cache
    self.data_deps = data_deps
    self.no_dot_files = no_dot_files
    self.verify_and_minimize_only = verify_and_minimize_only
    self.is_minimized = is_minimized

  def run(self):
    self.graph = HappensBeforeGraph(results_dir=self.results_dir,
                                    add_hb_time=self.add_hb_time,
                                    rw_delta=self.rw_delta,
                                    ww_delta=self.ww_delta,
                                    filter_rw=self.filter_rw,
                                    ignore_ethertypes=self.ignore_ethertypes,
                                    no_race=self.no_race,
                                    alt_barr=self.alt_barr,
                                    disable_path_cache=self.disable_path_cache,
                                    data_deps=self.data_deps,
                                    verify_and_minimize_only=self.verify_and_minimize_only,
                                    is_minimized=self.is_minimized)
    import resource
    # from guppy import hpy
    # import objgraph
    import gc
    #gc.collect()
    #print 'Memory usage: %s (kb)' % resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
    t0 = time.time()
    if self.verify_and_minimize_only:
      self.graph.verify_and_minimize_trace(self.filename)
      #gc.collect()
      #print 'Memory usage: %s (kb)' % resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
    else:
      self.graph.load_trace(self.filename)
      #gc.collect()
      #print 'Memory usage: %s (kb)' % resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
      t1 = time.time()
      self.graph.race_detector.detect_races(verbose=True)
      #gc.collect()
      #print 'Memory usage: %s (kb)' % resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
      # the race detector doesn't do it, so we do it ourselves.
      self.graph.update_path_cache()
      #gc.collect()
      #print 'Memory usage: %s (kb)' % resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
      self.graph.race_detector.print_races(self.verbose)
      #gc.collect()
      #print 'Memory usage: %s (kb)' % resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
      t2 = time.time()
      packet_traces = self.graph.extract_traces(self.graph.g)
      t3 = time.time()
      reactive_cmds = self.graph.find_reactive_versions()
      t4 = time.time()
      proactive_cmds = self.graph.find_proactive_cmds(reactive_cmds)
      versions = self.graph.find_versions()
      t5 = time.time()
      if self.data_deps:
        covered_races = self.graph.find_covered_races()
      else:
        covered_races = dict()
      t6 = time.time()
      packet_races, inconsistent_packet_traces, \
          inconsistent_packet_traces_covered, \
          inconsistent_packet_entry_version, summarized = \
          self.graph.find_per_packet_inconsistent(covered_races, True)
      t7 = time.time()
      racing_versions, racing_versions_tuples, racing_versions_tuples_dict = \
          self.graph.find_inconsistent_updates()
      t8 = time.time()
      if not self.no_dot_files:
        self.graph.store_traces(self.results_dir, print_packets=True,
                                subgraphs=packet_traces)
        print "Saving HB graph to:", self.output_filename
        self.graph.store_graph(self.output_filename, self.print_pkt)
        # Print traces
        for trace, races in packet_races:
          self.graph.print_racing_packet_trace(trace, races, label='incoherent',
                                               show_covered=False)
        for trace, races, _ in inconsistent_packet_traces:
          self.graph.print_racing_packet_trace(trace, races, label='incoherent_remaining')
        for trace, races, _ in inconsistent_packet_traces_covered:
          self.graph.print_racing_packet_trace(trace, races, label='covered')
        for trace, races, _ in inconsistent_packet_entry_version:
          self.graph.print_racing_packet_trace(trace, races, label='entry')
        for trace, races, _ in summarized:
          #self.graph.print_racing_packet_trace(trace, races, label='summarized')
          pass
        self.graph.save_races_graph(self.print_pkt)
        # self.graph.print_versions(versions)
        # self.graph.print_covered_races()

      num_writes = len(self.graph.race_detector.write_operations)
      num_read = len(self.graph.race_detector.read_operations)
      num_ops = num_writes + num_read
      num_harmful = self.graph.race_detector.total_harmful
      num_commute = self.graph.race_detector.total_commute
      num_races = self.graph.race_detector.total_races
      num_time_filtered_races = self.graph.race_detector.total_time_filtered_races
      num_covered = self.graph.race_detector.total_covered
      num_time_edges = self.graph.race_detector.time_edges_counter
      num_per_pkt_races = len(packet_races)
      num_per_pkt_inconsistent = len(inconsistent_packet_traces)
      num_per_pkt_inconsistent_covered = len(inconsistent_packet_traces_covered)
      num_per_pkt_entry_version_race = len(inconsistent_packet_entry_version)
      num_per_pkt_inconsistent_no_repeat = len(summarized)

      load_time = t1 - t0
      detect_races_time = t2 - t1
      extract_traces_time = t3 - t2
      find_reactive_cmds_time = t4 - t3
      find_proactive_cmds_time = t5 - t4
      find_covered_races_time = t6 - t5
      per_packet_inconsistent_time = t7 - t6
      find_inconsistent_update_time = t8 - t7
      ##### Final time, everything else is just print statements
      t_final = time.time()
      total_time = t_final - t0

      print "\n######## Update isolation violations ########"
      for counter, (v1, v2) in enumerate(racing_versions_tuples_dict):
        if not self.no_dot_files:
          rvg = self.graph.racing_versions_graph(v1, racing_versions_tuples_dict[(v1, v2)][0],
                                                 v2, racing_versions_tuples_dict[(v1, v2)][1])
          rvg_path = os.path.join(self.results_dir, 'isolation_violation_%d.dot' % counter)
          print "Saving update isolation violation graph to %s" % rvg_path
          nx.write_dot(rvg, rvg_path)
        if hasattr(v1, 'eid'):
          pv1 = "React to event %s, %s" % (v1.eid, getattr(v1, 'msg_type_str', ''))
        else:
          pv1 = "Proactive version %d" % v1
        if hasattr(v2, 'eid'):
          pv2 = "React to event %d" % v2.eid
        else:
          pv2 = "Proactive version %d" % v2
        print "V1: {}".format(pv1)
        print "\tEvents racing: {}".format(racing_versions_tuples_dict[(v1, v2)][0])
        print "V2: {}".format(pv2)
        print "\tEvents racing: {}".format(racing_versions_tuples_dict[(v1, v2)][1])
        print ""

      print "\n########## Summary ###########"
      print "* Race analysis *"
      print "\tTotal number of events in the trace:", self.graph.g.number_of_nodes()
      print "\tTotal number of events with read operations:", num_read
      print "\tTotal number of events with write operations:", num_writes
      print "\tTotal number of events with read or write operations:", num_ops
      print "\tTotal number of observed races without any filters:", num_races
      print "\tTotal number of commuting races:", num_commute
      print "\tTotal number of races filtered by Time HB edges:", num_time_filtered_races
      print "\tTotal number of races covered by data dependency:", num_covered
      print "\tRemaining number of races after applying all enabled filters: %d (%.02f%%)" % (
          num_harmful, (num_harmful / float(num_races) * 100))
      print "\n\n"
      print "* Properties analysis *"
      print "\tNumber of observed network updates:", len(versions)
      print "\tNumber of update isolation violations:", len(racing_versions_tuples)
      print ""
      print "\tTotal number of packets in the traces:", len(self.graph.host_sends)
      print "\tNumber of packet coherence violations:", len(packet_races)
      print "\tNumber of packet coherence violations filtered due to covered races: ", \
            len(inconsistent_packet_traces_covered)
      print "\tNumber of packet coherence violations only on the first switch in the update: ", \
            len(inconsistent_packet_entry_version)
      print "\tNumber of packet coherence violations after filtering covered races: ", \
            len(inconsistent_packet_traces)
      #print "\tNumber of packet inconsistencies after trimming repeated races: ", len(summarized)
      #print "\tNumber of packet inconsistent updates: ", len(racing_versions)
      #print "\tNumber of races: ", self.graph.race_detector.total_races
      #print "\tNumber of races filtered by time: ", self.graph.race_detector.total_time_filtered_races
      #print "\tNumber of commuting races: ", len(self.graph.race_detector.races_commute)
      #print "\tNumber of harmful races: ", len(self.graph.race_detector.races_harmful)
      #print "\tNumber of covered races: ", self.graph.race_detector.total_covered
      #print "Number of versions:", len(versions)
      print "* Timing information *"
      print "\tDone. Time elapsed:", total_time, "s"
Time elapsed:",total_time,"s" print "\tload_trace:", load_time, "s" print "\tdetect_races:", detect_races_time, "s" print "\textract_traces_time:", extract_traces_time, "s" print "\tfind_reactive_cmds_time:", find_reactive_cmds_time, "s" print "\tfind_proactive_cmds_time:", find_proactive_cmds_time, "s" print "\tfind_covered_races_time:", find_covered_races_time, "s" print "\tper_packet_inconsistent_time:", per_packet_inconsistent_time, "s" print "\tfind_inconsistent_update_time:", find_inconsistent_update_time, "s" #print "print_races:"+(str(t3-t2))+"s" #print "store_graph:"+(str(t4-t3))+"s" #print "Extracting Packet traces time: "+ (str(t5 - t4)) + "s" #print "Finding inconsistent traces time: "+ (str(t6 - t5)) + "s" # Printing dat file hbt = self.add_hb_time rw_delta = self.rw_delta if self.add_hb_time else 'inf' ww_delta = self.ww_delta if self.add_hb_time else 'inf' file_name = "results_hbt_%s_altbarr_%s_dep_%s_rw_%s_ww_%s.dat" % (hbt, self.alt_barr, self.data_deps, rw_delta, ww_delta) file_name = os.path.join(self.results_dir, file_name) timings_file_name = "timings_hbt_%s_altbarr_%s_dep_%s_rw_%s_ww_%s.dat" % (hbt, self.alt_barr, self.data_deps, rw_delta, ww_delta) timings_file_name = os.path.join(self.results_dir, timings_file_name) def write_general_info_to_file(f): # General info f.write('key,value\n') f.write('rw_delta,%s\n' % rw_delta) f.write('ww_delta,%s\n' % ww_delta) f.write('alt_barr,%s\n' % self.alt_barr) f.write('data_deps,%s\n' % self.data_deps) with open(file_name, 'w') as f: write_general_info_to_file(f) # Operations f.write('num_events,%d\n' % self.graph.g.number_of_nodes()) f.write('num_edges,%d\n' % self.graph.g.number_of_edges()) f.write('num_read,%d\n' % num_read) f.write('num_writes,%d\n' % num_writes) f.write('num_ops,%d\n' % num_ops) # HB time edges f.write('num_time_edges,%d\n' % num_time_edges) # Races info # One last check assert num_races == num_commute + num_covered + num_harmful + num_time_filtered_races f.write('num_races,%d\n' % num_races) f.write('num_harmful,%d\n' % num_harmful) f.write('num_commute,%d\n' % num_commute) f.write('num_time_filtered_races,%d\n' % num_time_filtered_races) f.write('num_covered,%d\n' % num_covered) # Inconsistency f.write('num_pkts,%d\n' % len(self.graph.host_sends)) assert len(self.graph.host_sends) >= num_per_pkt_races assert num_per_pkt_races == num_per_pkt_inconsistent + num_per_pkt_inconsistent_covered + num_per_pkt_entry_version_race f.write('num_per_pkt_races,%d\n' % num_per_pkt_races) f.write('num_per_pkt_inconsistent,%d\n' % num_per_pkt_inconsistent) f.write('num_per_pkt_inconsistent_covered,%d\n' % num_per_pkt_inconsistent_covered) f.write('num_per_pkt_entry_version_race,%d\n' % num_per_pkt_entry_version_race) f.write('num_per_pkt_inconsistent_no_repeat,%d\n' % num_per_pkt_inconsistent_no_repeat) f.write('num_versions,%d\n' % len(versions)) f.write('num_racing_versions,%d\n' % len(racing_versions_tuples)) with open(timings_file_name, 'w') as f: write_general_info_to_file(f) # Times f.write('total_time_sec,%f\n'% total_time) f.write('load_time_sec,%f\n' % load_time ) f.write('detect_races_time_sec,%f\n' % detect_races_time ) f.write('extract_traces_time_sec,%f\n' % extract_traces_time ) f.write('find_reactive_cmds_time_sec,%f\n' % find_reactive_cmds_time ) f.write('find_proactive_cmds_time_sec,%f\n' % find_proactive_cmds_time ) f.write('find_covered_races_time,%f\n' % find_covered_races_time ) f.write('per_packet_inconsistent_time_sec,%f\n' % per_packet_inconsistent_time ) 
        f.write('find_inconsistent_update_time_sec,%f\n' % find_inconsistent_update_time)


def auto_int(x):
  return int(x, 0)


if __name__ == '__main__':
  empty_delta = 1000000
  parser = argparse.ArgumentParser()
  parser.add_argument('trace_file',
                      help='Trace file produced by the instrumented sts, usually "hb.json"')
  parser.add_argument('--no-hbt', dest='no_hbt', action='store_true', default=False,
                      help="Don't add HB edges based on time")
  parser.add_argument('--time-delta', dest='delta', default=2, type=int,
                      help="delta time (in secs) for adding HB edges based on time")
  parser.add_argument('--pkt', dest='print_pkt', action='store_true', default=False,
                      help="Print packet headers in the produced dot files")
  parser.add_argument('--rw_delta', dest='rw_delta', default=2, type=int,
                      help="delta time (in secs) for adding read/write HB edges based on time")
  parser.add_argument('--ww_delta', dest='ww_delta', default=2, type=int,
                      help="delta time (in secs) for adding write/write HB edges based on time")
  parser.add_argument('--filter_rw', dest='filter_rw', action='store_true', default=False,
                      help="Filter Read/Write operations with HB relations")
  parser.add_argument('--ignore-ethertypes', dest='ignore_ethertypes', nargs='*',
                      type=auto_int, default=0,
                      help='Ether types to ignore from the graph')
  parser.add_argument('--no-race', dest='no_race', action='store_true', default=False,
                      help="Don't add edge between racing events in the visualized graph")
  parser.add_argument('--alt-barr', dest='alt_barr', action='store_true', default=False,
                      help="Use alternative barrier rules for purely reactive controllers")
  parser.add_argument('-v', dest='verbose', action='store_true', default=False,
                      help="Print all commute and harmful races")
  parser.add_argument('--ignore-first', dest='ignore_first', action='store_true', default=False,
                      help="Ignore the first race for per-packet consistency check")
  parser.add_argument('--disable-path-cache', dest='disable_path_cache', action='store_true',
                      default=False,
                      help="Disable using all_pairs_shortest_path_length() preprocessing.")
  parser.add_argument('--data-deps', dest='data_deps', action='store_true', default=False,
                      help="Use shadow tables for adding data dependency edges between reads/writes.")
  parser.add_argument('--no-dot-files', dest='no_dot_files', action='store_true', default=False,
                      help="Do not write any .dot files to the disk.")
  parser.add_argument('--verify-and-minimize-only', dest='verify_and_minimize_only',
                      action='store_true', default=False,
                      help="Verify the input trace, then write out a minimized version.")
  parser.add_argument('--is-minimized', dest='is_minimized', action='store_true', default=False,
                      help="Process a minimized trace.")

  # TODO(jm): Make option naming consistent (use _ everywhere, not a mixture of - and _).

  args = parser.parse_args()

  if not args.no_hbt:
    if args.delta == empty_delta:
      assert args.rw_delta == args.ww_delta
    else:
      args.rw_delta = args.ww_delta = args.delta

  main = Main(args.trace_file, print_pkt=args.print_pkt,
              add_hb_time=not args.no_hbt, rw_delta=args.rw_delta,
              ww_delta=args.ww_delta, filter_rw=args.filter_rw,
              ignore_ethertypes=args.ignore_ethertypes, no_race=args.no_race,
              alt_barr=args.alt_barr, verbose=args.verbose,
              ignore_first=args.ignore_first,
              disable_path_cache=args.disable_path_cache,
              data_deps=args.data_deps, no_dot_files=args.no_dot_files,
              verify_and_minimize_only=args.verify_and_minimize_only,
              is_minimized=args.is_minimized)
  main.run()
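The `Main` entry point above can also be driven programmatically. A minimal sketch, assuming a trace file `results/hb.json` produced by the instrumented STS (the path is an example; every keyword argument maps one-to-one onto a CLI flag defined in `__main__`):

    # Hypothetical driver; 'results/hb.json' is an assumed path.
    main = Main('results/hb.json', print_pkt=False, add_hb_time=True,
                rw_delta=2, ww_delta=2, filter_rw=False, ignore_ethertypes=None,
                no_race=False, alt_barr=False, verbose=False, ignore_first=False,
                disable_path_cache=False, data_deps=True, no_dot_files=True,
                verify_and_minimize_only=False, is_minimized=False)
    main.run()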
jmiserez/sts
sts/happensbefore/hb_graph.py
Python
apache-2.0
72,338
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).

from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
                        unicode_literals, with_statement)

import os
import threading
import urlparse
from collections import namedtuple

from six.moves import range

from pants.base.build_environment import get_buildroot
from pants.cache.artifact_cache import ArtifactCacheError
from pants.cache.local_artifact_cache import LocalArtifactCache, TempLocalArtifactCache
from pants.cache.pinger import BestUrlSelector, Pinger
from pants.cache.resolver import NoopResolver, Resolver, RESTfulResolver
from pants.cache.restful_artifact_cache import RESTfulArtifactCache
from pants.subsystem.subsystem import Subsystem


class EmptyCacheSpecError(ArtifactCacheError):
  pass


class LocalCacheSpecRequiredError(ArtifactCacheError):
  pass


class CacheSpecFormatError(ArtifactCacheError):
  pass


class InvalidCacheSpecError(ArtifactCacheError):
  pass


class RemoteCacheSpecRequiredError(ArtifactCacheError):
  pass


class TooManyCacheSpecsError(ArtifactCacheError):
  pass


CacheSpec = namedtuple('CacheSpec', ['local', 'remote'])


class CacheSetup(Subsystem):
  options_scope = 'cache'

  @classmethod
  def register_options(cls, register):
    super(CacheSetup, cls).register_options(register)
    default_cache = [os.path.join(get_buildroot(), '.cache')]
    register('--read', type=bool, default=True,
             help='Read build artifacts from cache, if available.')
    register('--write', type=bool, default=True,
             help='Write build artifacts to cache, if available.')
    register('--overwrite', advanced=True, type=bool,
             help='If writing build artifacts to cache, overwrite existing artifacts '
                  'instead of skipping them.')
    register('--resolver', advanced=True, choices=['none', 'rest'], default='none',
             help='Select which resolver strategy to use for discovering URIs that access '
                  'artifact caches. none: use URIs from static config options, i.e. '
                  '--read-from, --write-to. rest: look up URIs by querying a RESTful '
                  'URL, which is a remote address from --read-from, --write-to.')
    register('--read-from', advanced=True, type=list, default=default_cache,
             help='The URIs of artifact caches to read directly from. Each entry is a URL of '
                  'a RESTful cache, a path of a filesystem cache, or a pipe-separated list of '
                  'alternate caches to choose from. This list is also used as input to '
                  'the resolver. When resolver is \'none\' list is used as is.')
    register('--write-to', advanced=True, type=list, default=default_cache,
             help='The URIs of artifact caches to write directly to. Each entry is a URL of '
                  'a RESTful cache, a path of a filesystem cache, or a pipe-separated list of '
                  'alternate caches to choose from. This list is also used as input to '
                  'the resolver. When resolver is \'none\' list is used as is.')
    register('--compression-level', advanced=True, type=int, default=5,
             help='The gzip compression level (0-9) for created artifacts.')
    register('--max-entries-per-target', advanced=True, type=int, default=8,
             help='Maximum number of old cache files to keep per task target pair')
    register('--pinger-timeout', advanced=True, type=float, default=0.5,
             help='number of seconds before pinger times out')
    register('--pinger-tries', advanced=True, type=int, default=2,
             help='number of times pinger tries a cache')

  @classmethod
  def create_cache_factory_for_task(cls, task, pinger=None, resolver=None):
    return CacheFactory(cls.scoped_instance(task).get_options(),
                        task.context.log, task.stable_name(),
                        pinger=pinger, resolver=resolver)


class CacheFactory(object):

  def __init__(self, options, log, stable_name, pinger=None, resolver=None):
    """Create a cache factory from settings.

    :param options: Task's scoped options.
    :param log: Task's context log.
    :param stable_name: Task's stable name.
    :param pinger: Pinger to choose the best remote artifact cache URL.
    :param resolver: Resolver to look up remote artifact cache URLs.
    :return: cache factory.
    """
    self._options = options
    self._log = log
    self._stable_name = stable_name

    # Created on-demand.
    self._read_cache = None
    self._write_cache = None

    # Protects local filesystem setup, and assignment to the references above.
    self._cache_setup_lock = threading.Lock()

    # Caches are supposed to be close, and we don't want to waste time pinging on no-op builds.
    # So we ping twice with a short timeout.
    # TODO: Make lazy.
    self._pinger = pinger or Pinger(timeout=self._options.pinger_timeout,
                                    tries=self._options.pinger_tries)

    # The resolver is also close, but failing to resolve might have broader impact than
    # a single ping failure; therefore use a higher timeout with more retries.
    if resolver:
      self._resolver = resolver
    elif self._options.resolver == 'rest':
      self._resolver = RESTfulResolver(timeout=1.0, tries=3)
    else:
      self._resolver = NoopResolver()

  def read_cache_available(self):
    return self._options.read and bool(self._options.read_from) and self.get_read_cache()

  def write_cache_available(self):
    return self._options.write and bool(self._options.write_to) and self.get_write_cache()

  def overwrite(self):
    return self._options.overwrite

  def get_read_cache(self):
    """Returns the read cache for this setup, creating it if necessary.

    Returns None if no read cache is configured.
    """
    if self._options.read_from and not self._read_cache:
      cache_spec = self._resolve(self._sanitize_cache_spec(self._options.read_from))
      if cache_spec:
        with self._cache_setup_lock:
          self._read_cache = self._do_create_artifact_cache(cache_spec, 'will read from')
    return self._read_cache

  def get_write_cache(self):
    """Returns the write cache for this setup, creating it if necessary.

    Returns None if no write cache is configured.
    """
""" if self._options.write_to and not self._write_cache: cache_spec = self._resolve(self._sanitize_cache_spec(self._options.write_to)) if cache_spec: with self._cache_setup_lock: self._write_cache = self._do_create_artifact_cache(cache_spec, 'will write to') return self._write_cache # VisibleForTesting def _sanitize_cache_spec(self, spec): if not isinstance(spec, (list, tuple)): raise InvalidCacheSpecError('Invalid artifact cache spec type: {0} ({1})'.format( type(spec), spec)) if not spec: raise EmptyCacheSpecError() if len(spec) > 2: raise TooManyCacheSpecsError('Too many artifact cache specs: ({0})'.format(spec)) local_specs = [s for s in spec if self.is_local(s)] remote_specs = [s for s in spec if self.is_remote(s)] if not local_specs and not remote_specs: raise CacheSpecFormatError('Invalid cache spec: {0}, must be either local or remote' .format(spec)) if len(spec) == 2: if not local_specs: raise LocalCacheSpecRequiredError('One of two cache specs must be a local cache path.') if not remote_specs: raise RemoteCacheSpecRequiredError('One of two cache specs must be a remote spec.') local_spec = local_specs[0] if len(local_specs) > 0 else None remote_spec = remote_specs[0] if len(remote_specs) > 0 else None return CacheSpec(local=local_spec, remote=remote_spec) # VisibleForTesting def _resolve(self, spec): """Attempt resolving cache URIs when a remote spec is provided. """ if not spec.remote: return spec try: resolved_urls = self._resolver.resolve(spec.remote) if resolved_urls: # keep the bar separated list of URLs convention return CacheSpec(local=spec.local, remote='|'.join(resolved_urls)) # no-op return spec except Resolver.ResolverError as e: self._log.warn('Error while resolving from {0}: {1}'.format(spec.remote, str(e))) # If for some reason resolver fails we continue to use local cache if spec.local: return CacheSpec(local=spec.local, remote=None) # resolver fails but there is no local cache return None @staticmethod def is_local(string_spec): return string_spec.startswith('/') or string_spec.startswith('~') @staticmethod def is_remote(string_spec): # both artifact cache and resolver use REST, add new protocols here once they are supported return string_spec.startswith('http://') or string_spec.startswith('https://') def get_available_urls(self, urls): """Return reachable urls sorted by their ping times.""" netloc_to_url = {urlparse.urlparse(url).netloc: url for url in urls} pingtimes = self._pinger.pings(netloc_to_url.keys()) # List of pairs (host, time in ms). self._log.debug('Artifact cache server ping times: {}' .format(', '.join(['{}: {:.6f} secs'.format(*p) for p in pingtimes]))) sorted_pingtimes = sorted(pingtimes, key=lambda x: x[1]) available_urls = [netloc_to_url[netloc] for netloc, pingtime in sorted_pingtimes if pingtime < Pinger.UNREACHABLE] self._log.debug('Available cache servers: {0}'.format(available_urls)) return available_urls def _do_create_artifact_cache(self, spec, action): """Returns an artifact cache for the specified spec. spec can be: - a path to a file-based cache root. - a URL of a RESTful cache root. - a bar-separated list of URLs, where we'll pick the one with the best ping times. 
      - a list or tuple of two specs, local, then remote, each as described above.
    """
    compression = self._options.compression_level
    if compression not in range(10):
      raise ValueError('compression_level must be an integer 0-9: {}'.format(compression))
    artifact_root = self._options.pants_workdir

    def create_local_cache(parent_path):
      path = os.path.join(parent_path, self._stable_name)
      self._log.debug('{0} {1} local artifact cache at {2}'
                      .format(self._stable_name, action, path))
      return LocalArtifactCache(artifact_root, path, compression,
                                self._options.max_entries_per_target)

    def create_remote_cache(remote_spec, local_cache):
      urls = self.get_available_urls(remote_spec.split('|'))

      if len(urls) > 0:
        best_url_selector = BestUrlSelector(['{}/{}'.format(url.rstrip('/'), self._stable_name)
                                             for url in urls])
        local_cache = local_cache or TempLocalArtifactCache(artifact_root, compression)
        return RESTfulArtifactCache(artifact_root, best_url_selector, local_cache)

    local_cache = create_local_cache(spec.local) if spec.local else None
    remote_cache = create_remote_cache(spec.remote, local_cache) if spec.remote else None
    if remote_cache:
      return remote_cache
    return local_cache
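As a hedged illustration of how the pieces above compose (the paths and URLs are invented; `options` and `log` stand in for the objects normally wired up by `CacheSetup.create_cache_factory_for_task`): a two-entry spec splits into a local and a remote half, and the remote half yields a `RESTfulArtifactCache` backed by the local one.

    # Sketch only, not part of the module; options/log are placeholders.
    factory = CacheFactory(options, log, 'compile.zinc')
    spec = factory._sanitize_cache_spec(['/tmp/cache', 'http://c1|http://c2'])
    # spec == CacheSpec(local='/tmp/cache', remote='http://c1|http://c2')
    cache = factory._do_create_artifact_cache(factory._resolve(spec), 'will read from')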
dbentley/pants
src/python/pants/cache/cache_setup.py
Python
apache-2.0
11,380
# -*- coding: utf-8 -*-
'''
The match module allows for match routines to be run and determine target specs
'''

# Import python libs
import logging

# Import salt libs
import salt.minion

__func_alias__ = {
    'list_': 'list'
}

log = logging.getLogger(__name__)


def compound(tgt):
    '''
    Return True if the minion matches the given compound target

    CLI Example:

    .. code-block:: bash

        salt '*' match.compound 'L@cheese,foo and *'
    '''
    __opts__['grains'] = __grains__
    matcher = salt.minion.Matcher(__opts__, __salt__)
    try:
        return matcher.compound_match(tgt)
    except Exception as exc:
        log.exception(exc)
        return False


def ipcidr(tgt):
    '''
    Return True if the minion matches the given ipcidr target

    CLI Example:

    .. code-block:: bash

        salt '*' match.ipcidr '192.168.44.0/24'
    '''
    matcher = salt.minion.Matcher({'grains': __grains__}, __salt__)
    try:
        return matcher.ipcidr_match(tgt)
    except Exception as exc:
        log.exception(exc)
        return False


def pillar(tgt, delim=':'):
    '''
    Return True if the minion matches the given pillar target. The
    ``delim`` argument can be used to specify a different delimiter.

    CLI Example:

    .. code-block:: bash

        salt '*' match.pillar 'cheese:foo'
        salt '*' match.pillar 'clone_url|https://github.com/saltstack/salt.git' delim='|'

    .. versionchanged:: 0.16.4
        ``delim`` argument added
    '''
    matcher = salt.minion.Matcher({'pillar': __pillar__}, __salt__)
    try:
        return matcher.pillar_match(tgt, delim=delim)
    except Exception as exc:
        log.exception(exc)
        return False


def data(tgt):
    '''
    Return True if the minion matches the given data target

    CLI Example:

    .. code-block:: bash

        salt '*' match.data 'spam:eggs'
    '''
    matcher = salt.minion.Matcher(__opts__, __salt__)
    try:
        return matcher.data_match(tgt)
    except Exception as exc:
        log.exception(exc)
        return False


def grain_pcre(tgt, delim=':'):
    '''
    Return True if the minion matches the given grain_pcre target. The
    ``delim`` argument can be used to specify a different delimiter.

    CLI Example:

    .. code-block:: bash

        salt '*' match.grain_pcre 'os:Fedo.*'
        salt '*' match.grain_pcre 'ipv6|2001:.*' delim='|'

    .. versionchanged:: 0.16.4
        ``delim`` argument added
    '''
    matcher = salt.minion.Matcher({'grains': __grains__}, __salt__)
    try:
        return matcher.grain_pcre_match(tgt, delim=delim)
    except Exception as exc:
        log.exception(exc)
        return False


def grain(tgt, delim=':'):
    '''
    Return True if the minion matches the given grain target. The ``delim``
    argument can be used to specify a different delimiter.

    CLI Example:

    .. code-block:: bash

        salt '*' match.grain 'os:Ubuntu'
        salt '*' match.grain 'ipv6|2001:db8::ff00:42:8329' delim='|'

    .. versionchanged:: 0.16.4
        ``delim`` argument added
    '''
    matcher = salt.minion.Matcher({'grains': __grains__}, __salt__)
    try:
        return matcher.grain_match(tgt, delim=delim)
    except Exception as exc:
        log.exception(exc)
        return False


def list_(tgt):
    '''
    Return True if the minion matches the given list target

    CLI Example:

    .. code-block:: bash

        salt '*' match.list 'server1,server2'
    '''
    matcher = salt.minion.Matcher(__opts__, __salt__)
    try:
        return matcher.list_match(tgt)
    except Exception as exc:
        log.exception(exc)
        return False


def pcre(tgt):
    '''
    Return True if the minion matches the given pcre target

    CLI Example:

    .. code-block:: bash

        salt '*' match.pcre '.*'
    '''
    matcher = salt.minion.Matcher(__opts__, __salt__)
    try:
        return matcher.pcre_match(tgt)
    except Exception as exc:
        log.exception(exc)
        return False


def glob(tgt):
    '''
    Return True if the minion matches the given glob target

    CLI Example:
    .. code-block:: bash

        salt '*' match.glob '*'
    '''
    matcher = salt.minion.Matcher(__opts__, __salt__)
    try:
        return matcher.glob_match(tgt)
    except Exception as exc:
        log.exception(exc)
        return False
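For reference, the individual matchers above can also be combined on the command line through the compound matcher; a small hedged example (standard compound syntax, with `G@` selecting on grains):

    salt '*' match.compound 'G@os:Ubuntu and web*'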
victorywang80/Maintenance
saltstack/src/salt/modules/match.py
Python
apache-2.0
4,350
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from struct import pack, unpack


def db(v):
    return pack("<B", v)


def dw(v):
    return pack("<H", v)


def dd(v):
    return pack("<I", v)


def dq(v):
    return pack("<Q", v)


def rb(v):
    return unpack("<B", v[0])[0]


def rw(v):
    return unpack("<H", v[:2])[0]


def rd(v):
    return unpack("<I", v[:4])[0]


def rq(v):
    return unpack("<Q", v[:8])[0]
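A quick round-trip check of these little-endian pack/unpack helpers (illustrative only; the values are arbitrary):

    assert dd(0xdeadbeef) == b'\xef\xbe\xad\xde'  # pack a dword, little-endian
    assert rd(dd(0xdeadbeef)) == 0xdeadbeef       # read back what was packed
    assert rw(dw(0x1234)) == 0x1234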
google/google-ctf
2018/quals/re-basics/src/byteops.py
Python
apache-2.0
922
# coding=utf-8
# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).

from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
                        unicode_literals, with_statement)

import unittest
from contextlib import closing

from pants.engine.exp.fs import Path
from pants.engine.exp.scheduler import StepRequest, StepResult
from pants.engine.exp.storage import Cache, InvalidKeyError, Key, Lmdb, Storage


class StorageTest(unittest.TestCase):
  TEST_KEY = b'hello'
  TEST_VALUE = b'world'

  TEST_PATH = Path('/foo')
  TEST_PATH2 = Path('/bar')

  class SomeException(Exception):
    pass

  def setUp(self):
    self.storage = Storage.create(in_memory=True)
    self.result = StepResult(state='something')
    self.request = StepRequest(step_id=123, node='some node',
                               dependencies={'some dep': 'some state',
                                             'another dep': 'another state'},
                               project_tree='some project tree')

  def test_lmdb_key_value_store(self):
    lmdb = Lmdb.create()[0]
    with closing(lmdb) as kvs:
      # Initially the key does not exist.
      self.assertFalse(kvs.get(self.TEST_KEY))

      # Now write a key value pair and read back.
      written = kvs.put(self.TEST_KEY, self.TEST_VALUE)
      self.assertTrue(written)
      self.assertEquals(self.TEST_VALUE, kvs.get(self.TEST_KEY).getvalue())

      # Writing the same key again will not overwrite.
      self.assertFalse(kvs.put(self.TEST_KEY, self.TEST_VALUE))

  def test_storage(self):
    with closing(self.storage) as storage:
      key = storage.put(self.TEST_PATH)
      self.assertEquals(self.TEST_PATH, storage.get(key))
      # The deserialized blob is equal to, but not the same as, the input data.
      self.assertFalse(storage.get(key) is self.TEST_PATH)

      # Any other keys won't exist in the subjects.
      self.assertNotEqual(self.TEST_KEY, key)

      with self.assertRaises(InvalidKeyError):
        self.assertFalse(storage.get(self.TEST_KEY))

      # Verify key and value's types must match.
      key._type = str
      with self.assertRaises(ValueError):
        storage.get(key)

  def test_storage_key_mappings(self):
    with closing(self.storage) as storage:
      key1 = storage.put(self.TEST_PATH)
      key2 = storage.put(self.TEST_PATH2)

      storage.add_mapping(key1, key2)
      self.assertEquals(key2, storage.get_mapping(key1))

      # key2 isn't mapped to any other key.
      self.assertIsNone(storage.get_mapping(key2))

  def test_key_for_request(self):
    with closing(self.storage) as storage:
      keyed_request = storage.key_for_request(self.request)
      for dep, dep_state in keyed_request.dependencies.items():
        self.assertEquals(Key, type(dep))
        self.assertEquals(Key, type(dep_state))
      self.assertIs(self.request.node, keyed_request.node)
      self.assertIs(self.request.project_tree, keyed_request.project_tree)

      self.assertEquals(keyed_request, storage.key_for_request(keyed_request))

  def test_resolve_request(self):
    with closing(self.storage) as storage:
      keyed_request = storage.key_for_request(self.request)
      resolved_request = storage.resolve_request(keyed_request)
      self.assertEquals(self.request, resolved_request)
      self.assertIsNot(self.request, resolved_request)

      self.assertEquals(resolved_request, self.storage.resolve_request(resolved_request))

  def test_key_for_result(self):
    with closing(self.storage) as storage:
      keyed_result = storage.key_for_result(self.result)
      self.assertEquals(Key, type(keyed_result.state))

      self.assertEquals(keyed_result, storage.key_for_result(keyed_result))

  def test_resolve_result(self):
    with closing(self.storage) as storage:
      keyed_result = storage.key_for_result(self.result)
      resolved_result = storage.resolve_result(keyed_result)
      self.assertEquals(self.result, resolved_result)
      self.assertIsNot(self.result, resolved_result)

      self.assertEquals(resolved_result, self.storage.resolve_result(resolved_result))


class CacheTest(unittest.TestCase):

  def setUp(self):
    """Set up the cache as well as a request and a result."""
    self.storage = Storage.create(in_memory=True)
    self.cache = Cache.create(storage=self.storage)

    request = StepRequest(step_id=123, node='some node',
                          dependencies={'some dep': 'some state',
                                        'another dep': 'another state'},
                          project_tree='some project tree')
    self.result = StepResult(state='something')
    self.keyed_request = self.storage.key_for_request(request)

  def test_cache(self):
    """Verify get and put."""
    with closing(self.cache):
      self.assertIsNone(self.cache.get(self.keyed_request))
      self._assert_hits_misses(hits=0, misses=1)

      self.cache.put(self.keyed_request, self.result)
      self.assertEquals(self.result, self.cache.get(self.keyed_request))
      self.assertIsNot(self.result, self.cache.get(self.keyed_request))
      self._assert_hits_misses(hits=2, misses=1)

  def test_failure_to_update_mapping(self):
    """Verify we can access a cached result only if both the result and the key mapping are saved."""
    with closing(self.cache):
      # This places the result into the main storage without saving the key mapping. It
      # simulates an error that might happen when saving the key mapping after the
      # result was successfully saved.
      self.cache._storage.put(self.result)

      self.assertIsNone(self.cache.get(self.keyed_request))
      self._assert_hits_misses(hits=0, misses=1)

  def _assert_hits_misses(self, hits, misses):
    self.assertEquals(hits, self.cache.get_stats().hits)
    self.assertEquals(misses, self.cache.get_stats().misses)
    self.assertEquals(hits + misses, self.cache.get_stats().total)
dbentley/pants
tests/python/pants_test/engine/exp/test_storage.py
Python
apache-2.0
5,956
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for ListContexts
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.

# To install the latest published package dependency, execute the following:
#   python3 -m pip install google-cloud-dialogflow


# [START dialogflow_generated_dialogflow_v2_Contexts_ListContexts_async]
from google.cloud import dialogflow_v2


async def sample_list_contexts():
    # Create a client
    client = dialogflow_v2.ContextsAsyncClient()

    # Initialize request argument(s)
    request = dialogflow_v2.ListContextsRequest(
        parent="parent_value",
    )

    # Make the request (the async client's list call is a coroutine)
    page_result = await client.list_contexts(request=request)

    # Handle the response
    async for response in page_result:
        print(response)

# [END dialogflow_generated_dialogflow_v2_Contexts_ListContexts_async]
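Because the sample defines a coroutine, actually running it requires an event loop; a minimal sketch (the asyncio wrapper is not part of the generated snippet):

    import asyncio

    asyncio.run(sample_list_contexts())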
googleapis/python-dialogflow
samples/generated_samples/dialogflow_generated_dialogflow_v2_contexts_list_contexts_async.py
Python
apache-2.0
1,522
""" Django settings for mysite project. Generated by 'django-admin startproject' using Django 1.10.4. For more information on this file, see https://docs.djangoproject.com/en/1.10/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.10/ref/settings/ """ import os # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = 'e9p$k7urj^zl)s-!nmprv1#8z-@m@d6a76j=m9z03#gb!%lf6=' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'haystack', 'blog', ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'mysite.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [ os.path.join(BASE_DIR, 'mysite/templates'), os.path.join(BASE_DIR, 'blog/templates'), ], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'mysite.wsgi.application' # Database # https://docs.djangoproject.com/en/1.10/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Password validation # https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/1.10/topics/i18n/ LANGUAGE_CODE = 'zh-hans' TIME_ZONE = 'Asia/Shanghai' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.10/howto/static-files/ STATIC_URL = '/static/' STATICFILES_DIRS = (BASE_DIR,"static") LOGIN_REDIRECT_URL = '/' HAYSTACK_CONNECTIONS={ 'default': { 'ENGINE': 'blog.whoosh_cn_backend.WhooshEngine', 'PATH': os.path.join(BASE_DIR, 'whoosh_index'), } } HAYSTACK_SIGNAL_PROCESSOR = 'haystack.signals.RealtimeSignalProcessor'
kleinzh/PythonBlog
mysite/mysite/settings.py
Python
apache-2.0
3,558
class CheckPriceYDPage:

    def __init__(self, driver):
        self.driver = driver

    def get_page_yellow_duck(self):
        self.driver.find_element_by_xpath(
            "//a[@href='http://localhost/litecart/en/rubber-ducks-c-1/subcategory-c-2/yellow-duck-p-1']").click()

    def get_compaign_price_yd(self):
        p2 = self.driver.find_element_by_xpath(
            "//div[@class='price-wrapper']/strong[@class='campaign-price']").text
        return p2

    def get_regular_price_yd(self):
        pr2 = self.driver.find_element_by_xpath(
            "//div[@class='price-wrapper']/s[@class='regular-price']").text
        return pr2
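A hedged usage sketch (assumes a configured Selenium WebDriver and the litecart demo app running on localhost; `driver` would come from the test fixture):

    page = CheckPriceYDPage(driver)
    page.get_page_yellow_duck()
    regular = page.get_regular_price_yd()
    campaign = page.get_compaign_price_yd()  # method name kept as defined above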
skostya64/Selenium_tasks
pages/check_price_yd_page.py
Python
apache-2.0
622
def brancher(  # noqa: E302
    self, branches=None, all_branches=False, tags=None, all_tags=False
):
    """Generator that iterates over specified revisions.

    Args:
        branches (list): a list of branches to iterate over.
        all_branches (bool): iterate over all available branches.
        tags (list): a list of tags to iterate over.
        all_tags (bool): iterate over all available tags.

    Yields:
        str: the display name for the currently selected tree, it could be:
            - a git revision identifier
            - an empty string if there are no branches to iterate over
            - "Working Tree" if there are uncommitted changes in the SCM repo
    """
    if not any([branches, all_branches, tags, all_tags]):
        yield ""
        return

    saved_tree = self.tree
    revs = []

    scm = self.scm

    if self.scm.is_dirty():
        from dvc.scm.tree import WorkingTree

        self.tree = WorkingTree()
        yield "Working Tree"

    if all_branches:
        branches = scm.list_branches()

    if all_tags:
        tags = scm.list_tags()

    if branches is None:
        revs.extend([scm.active_branch()])
    else:
        revs.extend(branches)

    if tags is not None:
        revs.extend(tags)

    # NOTE: it might be a good idea to wrap this loop in a try/finally block
    # so as not to leave the tree on some unexpected branch after the
    # `brancher()`, but this could cause problems for exception handling
    # code which might expect the tree on which the exception was raised to
    # stay in place. This behavior is subject to change.
    for rev in revs:
        self.tree = scm.get_tree(rev)
        yield rev

    self.tree = saved_tree
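A hedged usage sketch (the `repo` object and its SCM state are assumed; this function is attached to the repo class, so `self` becomes the repo):

    # Visit every branch and tag; "Working Tree" is yielded first when the
    # repo has uncommitted changes, and the original tree is restored afterwards.
    for rev in repo.brancher(all_branches=True, all_tags=True):
        print(rev, repo.tree)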
dataversioncontrol/dvc
dvc/repo/brancher.py
Python
apache-2.0
1,700
# Copyright 2010 OpenStack Foundation # Copyright 2012 University Of Minho # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import contextlib import copy import datetime import errno import glob import os import random import re import shutil import signal import threading import time import uuid import eventlet from eventlet import greenthread import fixtures from lxml import etree import mock from mox3 import mox from os_brick.initiator import connector from oslo_concurrency import lockutils from oslo_concurrency import processutils from oslo_config import cfg from oslo_serialization import jsonutils from oslo_service import loopingcall from oslo_utils import encodeutils from oslo_utils import fileutils from oslo_utils import importutils from oslo_utils import timeutils from oslo_utils import units from oslo_utils import uuidutils import six from six.moves import builtins from six.moves import range from nova.api.metadata import base as instance_metadata from nova.compute import arch from nova.compute import cpumodel from nova.compute import manager from nova.compute import power_state from nova.compute import task_states from nova.compute import vm_mode from nova.compute import vm_states from nova import context from nova import db from nova import exception from nova.network import model as network_model from nova import objects from nova.objects import fields from nova.pci import manager as pci_manager from nova import test from nova.tests.unit import fake_block_device from nova.tests.unit import fake_instance from nova.tests.unit import fake_network import nova.tests.unit.image.fake from nova.tests.unit import matchers from nova.tests.unit.objects import test_pci_device from nova.tests.unit.objects import test_vcpu_model from nova.tests.unit.virt.libvirt import fake_imagebackend from nova.tests.unit.virt.libvirt import fake_libvirt_utils from nova.tests.unit.virt.libvirt import fakelibvirt from nova import utils from nova import version from nova.virt import block_device as driver_block_device from nova.virt import configdrive from nova.virt.disk import api as disk from nova.virt import driver from nova.virt import fake from nova.virt import firewall as base_firewall from nova.virt import hardware from nova.virt.image import model as imgmodel from nova.virt.libvirt import blockinfo from nova.virt.libvirt import config as vconfig from nova.virt.libvirt import driver as libvirt_driver from nova.virt.libvirt import firewall from nova.virt.libvirt import guest as libvirt_guest from nova.virt.libvirt import host from nova.virt.libvirt import imagebackend from nova.virt.libvirt.storage import dmcrypt from nova.virt.libvirt.storage import lvm from nova.virt.libvirt.storage import rbd_utils from nova.virt.libvirt import utils as libvirt_utils from nova.virt.libvirt.volume import volume as volume_drivers libvirt_driver.libvirt = fakelibvirt host.libvirt = fakelibvirt libvirt_guest.libvirt = fakelibvirt CONF = cfg.CONF CONF.import_opt('compute_manager', 'nova.service') CONF.import_opt('host', 'nova.netconf') 
CONF.import_opt('my_ip', 'nova.netconf') CONF.import_opt('image_cache_subdirectory_name', 'nova.virt.imagecache') CONF.import_opt('instances_path', 'nova.compute.manager') _fake_network_info = fake_network.fake_get_instance_nw_info _fake_NodeDevXml = \ {"pci_0000_04_00_3": """ <device> <name>pci_0000_04_00_3</name> <parent>pci_0000_00_01_1</parent> <driver> <name>igb</name> </driver> <capability type='pci'> <domain>0</domain> <bus>4</bus> <slot>0</slot> <function>3</function> <product id='0x1521'>I350 Gigabit Network Connection</product> <vendor id='0x8086'>Intel Corporation</vendor> <capability type='virt_functions'> <address domain='0x0000' bus='0x04' slot='0x10' function='0x3'/> <address domain='0x0000' bus='0x04' slot='0x10' function='0x7'/> <address domain='0x0000' bus='0x04' slot='0x11' function='0x3'/> <address domain='0x0000' bus='0x04' slot='0x11' function='0x7'/> </capability> </capability> </device>""", "pci_0000_04_10_7": """ <device> <name>pci_0000_04_10_7</name> <parent>pci_0000_00_01_1</parent> <driver> <name>igbvf</name> </driver> <capability type='pci'> <domain>0</domain> <bus>4</bus> <slot>16</slot> <function>7</function> <product id='0x1520'>I350 Ethernet Controller Virtual Function </product> <vendor id='0x8086'>Intel Corporation</vendor> <capability type='phys_function'> <address domain='0x0000' bus='0x04' slot='0x00' function='0x3'/> </capability> <capability type='virt_functions'> </capability> </capability> </device>""", "pci_0000_04_11_7": """ <device> <name>pci_0000_04_11_7</name> <parent>pci_0000_00_01_1</parent> <driver> <name>igbvf</name> </driver> <capability type='pci'> <domain>0</domain> <bus>4</bus> <slot>17</slot> <function>7</function> <product id='0x1520'>I350 Ethernet Controller Virtual Function </product> <vendor id='0x8086'>Intel Corporation</vendor> <numa node='0'/> <capability type='phys_function'> <address domain='0x0000' bus='0x04' slot='0x00' function='0x3'/> </capability> <capability type='virt_functions'> </capability> </capability> </device>"""} _fake_cpu_info = { "arch": "test_arch", "model": "test_model", "vendor": "test_vendor", "topology": { "sockets": 1, "cores": 8, "threads": 16 }, "features": ["feature1", "feature2"] } def _concurrency(signal, wait, done, target, is_block_dev=False): signal.send() wait.wait() done.send() class FakeVirDomainSnapshot(object): def __init__(self, dom=None): self.dom = dom def delete(self, flags): pass class FakeVirtDomain(object): def __init__(self, fake_xml=None, uuidstr=None, id=None, name=None): if uuidstr is None: uuidstr = str(uuid.uuid4()) self.uuidstr = uuidstr self.id = id self.domname = name self._info = [power_state.RUNNING, 2048 * units.Mi, 1234 * units.Mi, None, None] if fake_xml: self._fake_dom_xml = fake_xml else: self._fake_dom_xml = """ <domain type='kvm'> <devices> <disk type='file'> <source file='filename'/> </disk> </devices> </domain> """ def name(self): if self.domname is None: return "fake-domain %s" % self else: return self.domname def ID(self): return self.id def info(self): return self._info def create(self): pass def managedSave(self, *args): pass def createWithFlags(self, launch_flags): pass def XMLDesc(self, flags): return self._fake_dom_xml def UUIDString(self): return self.uuidstr def attachDeviceFlags(self, xml, flags): pass def attachDevice(self, xml): pass def detachDeviceFlags(self, xml, flags): pass def snapshotCreateXML(self, xml, flags): pass def blockCommit(self, disk, base, top, bandwidth=0, flags=0): pass def blockRebase(self, disk, base, bandwidth=0, flags=0): pass 
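
    # NOTE: blockCommit, blockRebase and blockJobInfo (below) mirror the
    # libvirt virDomain block-job API as no-ops, so snapshot and volume
    # tests can exercise the driver without a real hypervisor.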
    def blockJobInfo(self, path, flags):
        pass

    def resume(self):
        pass

    def destroy(self):
        pass

    def fsFreeze(self, disks=None, flags=0):
        pass

    def fsThaw(self, disks=None, flags=0):
        pass


class CacheConcurrencyTestCase(test.NoDBTestCase):

    def setUp(self):
        super(CacheConcurrencyTestCase, self).setUp()

        self.flags(instances_path=self.useFixture(fixtures.TempDir()).path)

        # utils.synchronized() will create the lock_path for us if it
        # doesn't already exist. It will also delete it when it's done,
        # which can cause race conditions with the multiple threads we
        # use for tests. So, create the path here so utils.synchronized()
        # won't delete it out from under one of the threads.
        self.lock_path = os.path.join(CONF.instances_path, 'locks')
        fileutils.ensure_tree(self.lock_path)

        def fake_exists(fname):
            basedir = os.path.join(CONF.instances_path,
                                   CONF.image_cache_subdirectory_name)
            if fname == basedir or fname == self.lock_path:
                return True
            return False

        def fake_execute(*args, **kwargs):
            pass

        def fake_extend(image, size, use_cow=False):
            pass

        self.stubs.Set(os.path, 'exists', fake_exists)
        self.stubs.Set(utils, 'execute', fake_execute)
        self.stubs.Set(imagebackend.disk, 'extend', fake_extend)
        self.useFixture(fixtures.MonkeyPatch(
            'nova.virt.libvirt.imagebackend.libvirt_utils',
            fake_libvirt_utils))

    def _fake_instance(self, uuid):
        return objects.Instance(id=1, uuid=uuid)

    def test_same_fname_concurrency(self):
        # Ensures that the same fname cache runs sequentially.
        uuid = uuidutils.generate_uuid()

        backend = imagebackend.Backend(False)
        wait1 = eventlet.event.Event()
        done1 = eventlet.event.Event()
        sig1 = eventlet.event.Event()
        thr1 = eventlet.spawn(backend.image(self._fake_instance(uuid),
                                            'name').cache,
                              _concurrency, 'fname', None,
                              signal=sig1, wait=wait1, done=done1)
        eventlet.sleep(0)
        # Thread 1 should run before thread 2.
        sig1.wait()

        wait2 = eventlet.event.Event()
        done2 = eventlet.event.Event()
        sig2 = eventlet.event.Event()
        thr2 = eventlet.spawn(backend.image(self._fake_instance(uuid),
                                            'name').cache,
                              _concurrency, 'fname', None,
                              signal=sig2, wait=wait2, done=done2)

        wait2.send()
        eventlet.sleep(0)
        try:
            self.assertFalse(done2.ready())
        finally:
            wait1.send()
        done1.wait()
        eventlet.sleep(0)
        self.assertTrue(done2.ready())
        # Wait on greenthreads to assert they didn't raise exceptions
        # during execution
        thr1.wait()
        thr2.wait()

    def test_different_fname_concurrency(self):
        # Ensures that two different fname caches are concurrent.
        uuid = uuidutils.generate_uuid()

        backend = imagebackend.Backend(False)
        wait1 = eventlet.event.Event()
        done1 = eventlet.event.Event()
        sig1 = eventlet.event.Event()
        thr1 = eventlet.spawn(backend.image(self._fake_instance(uuid),
                                            'name').cache,
                              _concurrency, 'fname2', None,
                              signal=sig1, wait=wait1, done=done1)
        eventlet.sleep(0)
        # Thread 1 should run before thread 2.
        sig1.wait()

        wait2 = eventlet.event.Event()
        done2 = eventlet.event.Event()
        sig2 = eventlet.event.Event()
        thr2 = eventlet.spawn(backend.image(self._fake_instance(uuid),
                                            'name').cache,
                              _concurrency, 'fname1', None,
                              signal=sig2, wait=wait2, done=done2)
        eventlet.sleep(0)
        # Wait for thread 2 to start.
sig2.wait() wait2.send() tries = 0 while not done2.ready() and tries < 10: eventlet.sleep(0) tries += 1 try: self.assertTrue(done2.ready()) finally: wait1.send() eventlet.sleep(0) # Wait on greenthreads to assert they didn't raise exceptions # during execution thr1.wait() thr2.wait() class FakeVolumeDriver(object): def __init__(self, *args, **kwargs): pass def attach_volume(self, *args): pass def detach_volume(self, *args): pass def get_xml(self, *args): return "" def get_config(self, *args): """Connect the volume to a fake device.""" conf = vconfig.LibvirtConfigGuestDisk() conf.source_type = "network" conf.source_protocol = "fake" conf.source_name = "fake" conf.target_dev = "fake" conf.target_bus = "fake" return conf def connect_volume(self, *args): """Connect the volume to a fake device.""" return self.get_config() class FakeConfigGuestDisk(object): def __init__(self, *args, **kwargs): self.source_type = None self.driver_cache = None class FakeConfigGuest(object): def __init__(self, *args, **kwargs): self.driver_cache = None class FakeNodeDevice(object): def __init__(self, fakexml): self.xml = fakexml def XMLDesc(self, flags): return self.xml def _create_test_instance(): flavor = objects.Flavor(memory_mb=2048, swap=0, vcpu_weight=None, root_gb=1, id=2, name=u'm1.small', ephemeral_gb=0, rxtx_factor=1.0, flavorid=u'1', vcpus=1, extra_specs={}) return { 'id': 1, 'uuid': '32dfcb37-5af1-552b-357c-be8c3aa38310', 'memory_kb': '1024000', 'basepath': '/some/path', 'bridge_name': 'br100', 'display_name': "Acme webserver", 'vcpus': 2, 'project_id': 'fake', 'bridge': 'br101', 'image_ref': '155d900f-4e14-4e4c-a73d-069cbf4541e6', 'root_gb': 10, 'ephemeral_gb': 20, 'instance_type_id': '5', # m1.small 'extra_specs': {}, 'system_metadata': { 'image_disk_format': 'raw', }, 'flavor': flavor, 'new_flavor': None, 'old_flavor': None, 'pci_devices': objects.PciDeviceList(), 'numa_topology': None, 'config_drive': None, 'vm_mode': None, 'kernel_id': None, 'ramdisk_id': None, 'os_type': 'linux', 'user_id': '838a72b0-0d54-4827-8fd6-fb1227633ceb', 'ephemeral_key_uuid': None, 'vcpu_model': None, 'host': 'fake-host', } class LibvirtConnTestCase(test.NoDBTestCase): REQUIRES_LOCKING = True _EPHEMERAL_20_DEFAULT = ('ephemeral_20_%s' % utils.get_hash_str(disk._DEFAULT_FILE_SYSTEM)[:7]) def setUp(self): super(LibvirtConnTestCase, self).setUp() self.flags(fake_call=True) self.user_id = 'fake' self.project_id = 'fake' self.context = context.get_admin_context() temp_dir = self.useFixture(fixtures.TempDir()).path self.flags(instances_path=temp_dir) self.flags(snapshots_directory=temp_dir, group='libvirt') self.useFixture(fixtures.MonkeyPatch( 'nova.virt.libvirt.driver.libvirt_utils', fake_libvirt_utils)) self.flags(sysinfo_serial="hardware", group="libvirt") self.useFixture(fixtures.MonkeyPatch( 'nova.virt.libvirt.imagebackend.libvirt_utils', fake_libvirt_utils)) def fake_extend(image, size, use_cow=False): pass self.stubs.Set(libvirt_driver.disk, 'extend', fake_extend) self.stubs.Set(imagebackend.Image, 'resolve_driver_format', imagebackend.Image._get_driver_format) self.useFixture(fakelibvirt.FakeLibvirtFixture()) self.test_instance = _create_test_instance() self.test_image_meta = { "disk_format": "raw", } self.image_service = nova.tests.unit.image.fake.stub_out_image_service( self.stubs) self.device_xml_tmpl = """ <domain type='kvm'> <devices> <disk type='block' device='disk'> <driver name='qemu' type='raw' cache='none'/> <source dev='{device_path}'/> <target bus='virtio' dev='vdb'/> 
<serial>58a84f6d-3f0c-4e19-a0af-eb657b790657</serial> <address type='pci' domain='0x0' bus='0x0' slot='0x04' \ function='0x0'/> </disk> </devices> </domain> """ def relpath(self, path): return os.path.relpath(path, CONF.instances_path) def tearDown(self): nova.tests.unit.image.fake.FakeImageService_reset() super(LibvirtConnTestCase, self).tearDown() def test_driver_capabilities(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.assertTrue(drvr.capabilities['has_imagecache'], 'Driver capabilities for \'has_imagecache\' ' 'is invalid') self.assertTrue(drvr.capabilities['supports_recreate'], 'Driver capabilities for \'supports_recreate\' ' 'is invalid') self.assertFalse(drvr.capabilities['supports_migrate_to_same_host'], 'Driver capabilities for ' '\'supports_migrate_to_same_host\' is invalid') def create_fake_libvirt_mock(self, **kwargs): """Defining mocks for LibvirtDriver(libvirt is not used).""" # A fake libvirt.virConnect class FakeLibvirtDriver(object): def defineXML(self, xml): return FakeVirtDomain() # Creating mocks volume_driver = ['iscsi=nova.tests.unit.virt.libvirt.test_driver' '.FakeVolumeDriver'] fake = FakeLibvirtDriver() # Customizing above fake if necessary for key, val in kwargs.items(): fake.__setattr__(key, val) self.stubs.Set(libvirt_driver.LibvirtDriver, '_conn', fake) self.stubs.Set(libvirt_driver.LibvirtDriver, '_get_volume_drivers', lambda x: volume_driver) self.stubs.Set(host.Host, 'get_connection', lambda x: fake) def fake_lookup(self, instance_name): return FakeVirtDomain() def fake_execute(self, *args, **kwargs): open(args[-1], "a").close() def _create_service(self, **kwargs): service_ref = {'host': kwargs.get('host', 'dummy'), 'disabled': kwargs.get('disabled', False), 'binary': 'nova-compute', 'topic': 'compute', 'report_count': 0} return objects.Service(**service_ref) def _get_pause_flag(self, drvr, network_info, power_on=True, vifs_already_plugged=False): timeout = CONF.vif_plugging_timeout events = [] if (drvr._conn_supports_start_paused and utils.is_neutron() and not vifs_already_plugged and power_on and timeout): events = drvr._get_neutron_events(network_info) return bool(events) def test_public_api_signatures(self): baseinst = driver.ComputeDriver(None) inst = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) self.assertPublicAPISignatures(baseinst, inst) def test_legacy_block_device_info(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) self.assertFalse(drvr.need_legacy_block_device_info) @mock.patch.object(host.Host, "has_min_version") def test_min_version_start_ok(self, mock_version): mock_version.return_value = True drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) drvr.init_host("dummyhost") @mock.patch.object(host.Host, "has_min_version") def test_min_version_start_abort(self, mock_version): mock_version.return_value = False drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) self.assertRaises(exception.NovaException, drvr.init_host, "dummyhost") @mock.patch.object(fakelibvirt.Connection, 'getLibVersion', return_value=utils.convert_version_to_int( libvirt_driver.NEXT_MIN_LIBVIRT_VERSION) - 1) @mock.patch.object(libvirt_driver.LOG, 'warning') def test_next_min_version_deprecation_warning(self, mock_warning, mock_get_libversion): # Test that a warning is logged if the libvirt version is less than # the next required minimum version. 
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) drvr.init_host("dummyhost") # assert that the next min version is in a warning message expected_arg = {'version': '0.10.2'} version_arg_found = False for call in mock_warning.call_args_list: if call[0][1] == expected_arg: version_arg_found = True break self.assertTrue(version_arg_found) @mock.patch.object(fakelibvirt.Connection, 'getLibVersion', return_value=utils.convert_version_to_int( libvirt_driver.NEXT_MIN_LIBVIRT_VERSION)) @mock.patch.object(libvirt_driver.LOG, 'warning') def test_next_min_version_ok(self, mock_warning, mock_get_libversion): # Test that a warning is not logged if the libvirt version is greater # than or equal to NEXT_MIN_LIBVIRT_VERSION. drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) drvr.init_host("dummyhost") # assert that the next min version is in a warning message expected_arg = {'version': '0.10.2'} version_arg_found = False for call in mock_warning.call_args_list: if call[0][1] == expected_arg: version_arg_found = True break self.assertFalse(version_arg_found) @mock.patch.object(fakelibvirt.Connection, 'getLibVersion', return_value=utils.convert_version_to_int( libvirt_driver.MIN_LIBVIRT_KVM_S390_VERSION) - 1) @mock.patch.object(fakelibvirt.Connection, 'getVersion', return_value=utils.convert_version_to_int( libvirt_driver.MIN_QEMU_S390_VERSION)) @mock.patch.object(arch, "from_host", return_value=arch.S390X) def test_min_version_s390_old_libvirt(self, mock_arch, mock_qemu_version, mock_lv_version): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) self.assertRaises(exception.NovaException, drvr.init_host, "dummyhost") @mock.patch.object(fakelibvirt.Connection, 'getLibVersion', return_value=utils.convert_version_to_int( libvirt_driver.MIN_LIBVIRT_KVM_S390_VERSION)) @mock.patch.object(fakelibvirt.Connection, 'getVersion', return_value=utils.convert_version_to_int( libvirt_driver.MIN_QEMU_S390_VERSION) - 1) @mock.patch.object(arch, "from_host", return_value=arch.S390X) def test_min_version_s390_old_qemu(self, mock_arch, mock_qemu_version, mock_lv_version): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) self.assertRaises(exception.NovaException, drvr.init_host, "dummyhost") @mock.patch.object(fakelibvirt.Connection, 'getLibVersion', return_value=utils.convert_version_to_int( libvirt_driver.MIN_LIBVIRT_KVM_S390_VERSION)) @mock.patch.object(fakelibvirt.Connection, 'getVersion', return_value=utils.convert_version_to_int( libvirt_driver.MIN_QEMU_S390_VERSION)) @mock.patch.object(arch, "from_host", return_value=arch.S390X) def test_min_version_s390_ok(self, mock_arch, mock_qemu_version, mock_lv_version): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) drvr.init_host("dummyhost") @mock.patch('nova.utils.get_image_from_system_metadata') @mock.patch.object(host.Host, 'has_min_version', return_value=True) @mock.patch('nova.virt.libvirt.host.Host.get_guest') def test_set_admin_password(self, mock_get_guest, ver, mock_image): self.flags(virt_type='kvm', group='libvirt') instance = objects.Instance(**self.test_instance) mock_image.return_value = {"properties": { "hw_qemu_guest_agent": "yes"}} mock_guest = mock.Mock(spec=libvirt_guest.Guest) mock_get_guest.return_value = mock_guest drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) drvr.set_admin_password(instance, "123") mock_guest.set_user_password.assert_called_once_with("root", "123") @mock.patch('nova.utils.get_image_from_system_metadata') @mock.patch.object(host.Host, 'has_min_version', 
return_value=True) @mock.patch('nova.virt.libvirt.host.Host.get_guest') def test_set_admin_password_windows(self, mock_get_guest, ver, mock_image): self.flags(virt_type='kvm', group='libvirt') instance = objects.Instance(**self.test_instance) instance.os_type = "windows" mock_image.return_value = {"properties": { "hw_qemu_guest_agent": "yes"}} mock_guest = mock.Mock(spec=libvirt_guest.Guest) mock_get_guest.return_value = mock_guest drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) drvr.set_admin_password(instance, "123") mock_guest.set_user_password.assert_called_once_with( "Administrator", "123") @mock.patch('nova.utils.get_image_from_system_metadata') @mock.patch.object(host.Host, 'has_min_version', return_value=True) @mock.patch('nova.virt.libvirt.host.Host.get_guest') def test_set_admin_password_image(self, mock_get_guest, ver, mock_image): self.flags(virt_type='kvm', group='libvirt') instance = objects.Instance(**self.test_instance) mock_image.return_value = {"properties": { "hw_qemu_guest_agent": "yes", "os_admin_user": "foo" }} mock_guest = mock.Mock(spec=libvirt_guest.Guest) mock_get_guest.return_value = mock_guest drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) drvr.set_admin_password(instance, "123") mock_guest.set_user_password.assert_called_once_with("foo", "123") @mock.patch('nova.utils.get_image_from_system_metadata') @mock.patch.object(host.Host, 'has_min_version', return_value=False) def test_set_admin_password_bad_version(self, mock_svc, mock_image): self.flags(virt_type='kvm', group='libvirt') instance = objects.Instance(**self.test_instance) mock_image.return_value = {"properties": { "hw_qemu_guest_agent": "yes"}} drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) self.assertRaises(exception.SetAdminPasswdNotSupported, drvr.set_admin_password, instance, "123") @mock.patch('nova.utils.get_image_from_system_metadata') @mock.patch.object(host.Host, 'has_min_version', return_value=True) def test_set_admin_password_bad_hyp(self, mock_svc, mock_image): self.flags(virt_type='foo', group='libvirt') instance = objects.Instance(**self.test_instance) mock_image.return_value = {"properties": { "hw_qemu_guest_agent": "yes"}} drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) self.assertRaises(exception.SetAdminPasswdNotSupported, drvr.set_admin_password, instance, "123") @mock.patch.object(host.Host, 'has_min_version', return_value=True) def test_set_admin_password_guest_agent_not_running(self, mock_svc): self.flags(virt_type='kvm', group='libvirt') instance = objects.Instance(**self.test_instance) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) self.assertRaises(exception.QemuGuestAgentNotEnabled, drvr.set_admin_password, instance, "123") @mock.patch('nova.utils.get_image_from_system_metadata') @mock.patch.object(host.Host, 'has_min_version', return_value=True) @mock.patch('nova.virt.libvirt.host.Host.get_guest') def test_set_admin_password_error(self, mock_get_guest, ver, mock_image): self.flags(virt_type='kvm', group='libvirt') instance = objects.Instance(**self.test_instance) mock_image.return_value = {"properties": { "hw_qemu_guest_agent": "yes"}} mock_guest = mock.Mock(spec=libvirt_guest.Guest) mock_guest.set_user_password.side_effect = ( fakelibvirt.libvirtError("error")) mock_get_guest.return_value = mock_guest drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) self.assertRaises(exception.NovaException, drvr.set_admin_password, instance, "123") @mock.patch.object(objects.Service, 'get_by_compute_host') 
    def test_set_host_enabled_with_disable(self, mock_svc):
        # Tests disabling an enabled host.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        svc = self._create_service(host='fake-mini')
        mock_svc.return_value = svc
        drvr._set_host_enabled(False)
        self.assertTrue(svc.disabled)

    @mock.patch.object(objects.Service, 'get_by_compute_host')
    def test_set_host_enabled_with_enable(self, mock_svc):
        # Tests enabling a disabled host.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        svc = self._create_service(disabled=True, host='fake-mini')
        mock_svc.return_value = svc
        drvr._set_host_enabled(True)
        # The service was not auto-disabled by the driver (its
        # disabled_reason lacks the driver's 'AUTO:' prefix), so
        # _set_host_enabled must leave it disabled.
        self.assertTrue(svc.disabled)

    @mock.patch.object(objects.Service, 'get_by_compute_host')
    def test_set_host_enabled_with_enable_state_enabled(self, mock_svc):
        # Tests enabling an enabled host.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        svc = self._create_service(disabled=False, host='fake-mini')
        mock_svc.return_value = svc
        drvr._set_host_enabled(True)
        self.assertFalse(svc.disabled)

    @mock.patch.object(objects.Service, 'get_by_compute_host')
    def test_set_host_enabled_with_disable_state_disabled(self, mock_svc):
        # Tests disabling a disabled host.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        svc = self._create_service(disabled=True, host='fake-mini')
        mock_svc.return_value = svc
        drvr._set_host_enabled(False)
        self.assertTrue(svc.disabled)

    def test_set_host_enabled_swallows_exceptions(self):
        # Tests that set_host_enabled will swallow exceptions coming from the
        # db_api code so they don't break anything calling it, e.g. the
        # _get_new_connection method.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        with mock.patch.object(db, 'service_get_by_compute_host') as db_mock:
            # Make db.service_get_by_compute_host raise NovaException; this
            # is more robust than just raising ComputeHostNotFound.
db_mock.side_effect = exception.NovaException drvr._set_host_enabled(False) @mock.patch.object(fakelibvirt.virConnect, "nodeDeviceLookupByName") def test_prepare_pci_device(self, mock_lookup): pci_devices = [dict(hypervisor_name='xxx')] self.flags(virt_type='xen', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) conn = drvr._host.get_connection() mock_lookup.side_effect = lambda x: fakelibvirt.NodeDevice(conn) drvr._prepare_pci_devices_for_use(pci_devices) @mock.patch.object(fakelibvirt.virConnect, "nodeDeviceLookupByName") @mock.patch.object(fakelibvirt.virNodeDevice, "dettach") def test_prepare_pci_device_exception(self, mock_detach, mock_lookup): pci_devices = [dict(hypervisor_name='xxx', id='id1', instance_uuid='uuid')] self.flags(virt_type='xen', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) conn = drvr._host.get_connection() mock_lookup.side_effect = lambda x: fakelibvirt.NodeDevice(conn) mock_detach.side_effect = fakelibvirt.libvirtError("xxxx") self.assertRaises(exception.PciDevicePrepareFailed, drvr._prepare_pci_devices_for_use, pci_devices) def test_detach_pci_devices_exception(self): pci_devices = [dict(hypervisor_name='xxx', id='id1', instance_uuid='uuid')] drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) self.mox.StubOutWithMock(host.Host, 'has_min_version') host.Host.has_min_version = lambda x, y: False self.assertRaises(exception.PciDeviceDetachFailed, drvr._detach_pci_devices, None, pci_devices) def test_detach_pci_devices(self): fake_domXML1 =\ """<domain> <devices> <disk type='file' device='disk'> <driver name='qemu' type='qcow2' cache='none'/> <source file='xxx'/> <target dev='vda' bus='virtio'/> <alias name='virtio-disk0'/> <address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/> </disk> <hostdev mode="subsystem" type="pci" managed="yes"> <source> <address function="0x1" slot="0x10" domain="0x0000" bus="0x04"/> </source> </hostdev></devices></domain>""" pci_devices = [dict(hypervisor_name='xxx', id='id1', instance_uuid='uuid', address="0001:04:10:1")] drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) self.mox.StubOutWithMock(host.Host, 'has_min_version') host.Host.has_min_version = lambda x, y: True self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_get_guest_pci_device') class FakeDev(object): def to_xml(self): pass libvirt_driver.LibvirtDriver._get_guest_pci_device =\ lambda x, y: FakeDev() class FakeDomain(object): def detachDeviceFlags(self, xml, flags): pci_devices[0]['hypervisor_name'] = 'marked' pass def XMLDesc(self, flags): return fake_domXML1 guest = libvirt_guest.Guest(FakeDomain()) drvr._detach_pci_devices(guest, pci_devices) self.assertEqual(pci_devices[0]['hypervisor_name'], 'marked') def test_detach_pci_devices_timeout(self): fake_domXML1 =\ """<domain> <devices> <hostdev mode="subsystem" type="pci" managed="yes"> <source> <address function="0x1" slot="0x10" domain="0x0000" bus="0x04"/> </source> </hostdev> </devices> </domain>""" pci_devices = [dict(hypervisor_name='xxx', id='id1', instance_uuid='uuid', address="0000:04:10:1")] drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) self.mox.StubOutWithMock(host.Host, 'has_min_version') host.Host.has_min_version = lambda x, y: True self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_get_guest_pci_device') class FakeDev(object): def to_xml(self): pass libvirt_driver.LibvirtDriver._get_guest_pci_device =\ lambda x, y: FakeDev() class FakeDomain(object): def detachDeviceFlags(self, 
xml, flags): pass def XMLDesc(self, flags): return fake_domXML1 guest = libvirt_guest.Guest(FakeDomain()) self.assertRaises(exception.PciDeviceDetachFailed, drvr._detach_pci_devices, guest, pci_devices) @mock.patch.object(connector, 'get_connector_properties') def test_get_connector(self, fake_get_connector): initiator = 'fake.initiator.iqn' ip = 'fakeip' host = 'fakehost' wwpns = ['100010604b019419'] wwnns = ['200010604b019419'] self.flags(my_ip=ip) self.flags(host=host) expected = { 'ip': ip, 'initiator': initiator, 'host': host, 'wwpns': wwpns, 'wwnns': wwnns } volume = { 'id': 'fake' } # TODO(walter-boring) add the fake in os-brick fake_get_connector.return_value = expected drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) result = drvr.get_volume_connector(volume) self.assertThat(expected, matchers.DictMatches(result)) @mock.patch.object(connector, 'get_connector_properties') def test_get_connector_storage_ip(self, fake_get_connector): ip = '100.100.100.100' storage_ip = '101.101.101.101' self.flags(my_block_storage_ip=storage_ip, my_ip=ip) volume = { 'id': 'fake' } expected = { 'ip': storage_ip } # TODO(walter-boring) add the fake in os-brick fake_get_connector.return_value = expected drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) result = drvr.get_volume_connector(volume) self.assertEqual(storage_ip, result['ip']) def test_lifecycle_event_registration(self): calls = [] def fake_registerErrorHandler(*args, **kwargs): calls.append('fake_registerErrorHandler') def fake_get_host_capabilities(**args): cpu = vconfig.LibvirtConfigGuestCPU() cpu.arch = arch.ARMV7 caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = cpu calls.append('fake_get_host_capabilities') return caps @mock.patch.object(fakelibvirt, 'registerErrorHandler', side_effect=fake_registerErrorHandler) @mock.patch.object(host.Host, "get_capabilities", side_effect=fake_get_host_capabilities) def test_init_host(get_host_capabilities, register_error_handler): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) drvr.init_host("test_host") test_init_host() # NOTE(dkliban): Will fail if get_host_capabilities is called before # registerErrorHandler self.assertEqual(['fake_registerErrorHandler', 'fake_get_host_capabilities'], calls) def test_sanitize_log_to_xml(self): # setup fake data data = {'auth_password': 'scrubme'} bdm = [{'connection_info': {'data': data}}] bdi = {'block_device_mapping': bdm} # Tests that the parameters to the _get_guest_xml method # are sanitized for passwords when logged. 
def fake_debug(*args, **kwargs): if 'auth_password' in args[0]: self.assertNotIn('scrubme', args[0]) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) conf = mock.Mock() with contextlib.nested( mock.patch.object(libvirt_driver.LOG, 'debug', side_effect=fake_debug), mock.patch.object(drvr, '_get_guest_config', return_value=conf) ) as ( debug_mock, conf_mock ): drvr._get_guest_xml(self.context, self.test_instance, network_info={}, disk_info={}, image_meta={}, block_device_info=bdi) # we don't care what the log message is, we just want to make sure # our stub method is called which asserts the password is scrubbed self.assertTrue(debug_mock.called) @mock.patch.object(time, "time") def test_get_guest_config(self, time_mock): time_mock.return_value = 1234567.89 drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) test_instance = copy.deepcopy(self.test_instance) test_instance["display_name"] = "purple tomatoes" ctxt = context.RequestContext(project_id=123, project_name="aubergine", user_id=456, user_name="pie") flavor = objects.Flavor(name='m1.small', memory_mb=6, vcpus=28, root_gb=496, ephemeral_gb=8128, swap=33550336, extra_specs={}) instance_ref = objects.Instance(**test_instance) instance_ref.flavor = flavor image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, _fake_network_info(self.stubs, 1), image_meta, disk_info, context=ctxt) self.assertEqual(cfg.uuid, instance_ref["uuid"]) self.assertEqual(2, len(cfg.features)) self.assertIsInstance(cfg.features[0], vconfig.LibvirtConfigGuestFeatureACPI) self.assertIsInstance(cfg.features[1], vconfig.LibvirtConfigGuestFeatureAPIC) self.assertEqual(cfg.memory, 6 * units.Ki) self.assertEqual(cfg.vcpus, 28) self.assertEqual(cfg.os_type, vm_mode.HVM) self.assertEqual(cfg.os_boot_dev, ["hd"]) self.assertIsNone(cfg.os_root) self.assertEqual(len(cfg.devices), 10) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[3], vconfig.LibvirtConfigGuestInterface) self.assertIsInstance(cfg.devices[4], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[5], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[6], vconfig.LibvirtConfigGuestInput) self.assertIsInstance(cfg.devices[7], vconfig.LibvirtConfigGuestGraphics) self.assertIsInstance(cfg.devices[8], vconfig.LibvirtConfigGuestVideo) self.assertIsInstance(cfg.devices[9], vconfig.LibvirtConfigMemoryBalloon) self.assertEqual(len(cfg.metadata), 1) self.assertIsInstance(cfg.metadata[0], vconfig.LibvirtConfigGuestMetaNovaInstance) self.assertEqual(version.version_string_with_package(), cfg.metadata[0].package) self.assertEqual("purple tomatoes", cfg.metadata[0].name) self.assertEqual(1234567.89, cfg.metadata[0].creationTime) self.assertEqual("image", cfg.metadata[0].roottype) self.assertEqual(str(instance_ref["image_ref"]), cfg.metadata[0].rootid) self.assertIsInstance(cfg.metadata[0].owner, vconfig.LibvirtConfigGuestMetaNovaOwner) self.assertEqual(456, cfg.metadata[0].owner.userid) self.assertEqual("pie", cfg.metadata[0].owner.username) self.assertEqual(123, cfg.metadata[0].owner.projectid) self.assertEqual("aubergine", cfg.metadata[0].owner.projectname) self.assertIsInstance(cfg.metadata[0].flavor, vconfig.LibvirtConfigGuestMetaNovaFlavor) 
self.assertEqual("m1.small", cfg.metadata[0].flavor.name) self.assertEqual(6, cfg.metadata[0].flavor.memory) self.assertEqual(28, cfg.metadata[0].flavor.vcpus) self.assertEqual(496, cfg.metadata[0].flavor.disk) self.assertEqual(8128, cfg.metadata[0].flavor.ephemeral) self.assertEqual(33550336, cfg.metadata[0].flavor.swap) def test_get_guest_config_lxc(self): self.flags(virt_type='lxc', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) cfg = drvr._get_guest_config(instance_ref, _fake_network_info(self.stubs, 1), image_meta, {'mapping': {}}) self.assertEqual(instance_ref["uuid"], cfg.uuid) self.assertEqual(2 * units.Mi, cfg.memory) self.assertEqual(1, cfg.vcpus) self.assertEqual(vm_mode.EXE, cfg.os_type) self.assertEqual("/sbin/init", cfg.os_init_path) self.assertEqual("console=tty0 console=ttyS0", cfg.os_cmdline) self.assertIsNone(cfg.os_root) self.assertEqual(3, len(cfg.devices)) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestFilesys) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestInterface) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestConsole) def test_get_guest_config_lxc_with_id_maps(self): self.flags(virt_type='lxc', group='libvirt') self.flags(uid_maps=['0:1000:100'], group='libvirt') self.flags(gid_maps=['0:1000:100'], group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) cfg = drvr._get_guest_config(instance_ref, _fake_network_info(self.stubs, 1), image_meta, {'mapping': {}}) self.assertEqual(instance_ref["uuid"], cfg.uuid) self.assertEqual(2 * units.Mi, cfg.memory) self.assertEqual(1, cfg.vcpus) self.assertEqual(vm_mode.EXE, cfg.os_type) self.assertEqual("/sbin/init", cfg.os_init_path) self.assertEqual("console=tty0 console=ttyS0", cfg.os_cmdline) self.assertIsNone(cfg.os_root) self.assertEqual(3, len(cfg.devices)) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestFilesys) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestInterface) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestConsole) self.assertEqual(len(cfg.idmaps), 2) self.assertIsInstance(cfg.idmaps[0], vconfig.LibvirtConfigGuestUIDMap) self.assertIsInstance(cfg.idmaps[1], vconfig.LibvirtConfigGuestGIDMap) def test_get_guest_config_numa_host_instance_fits(self): instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) flavor = objects.Flavor(memory_mb=1, vcpus=2, root_gb=496, ephemeral_gb=8128, swap=33550336, name='fake', extra_specs={}) instance_ref.flavor = flavor caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = vconfig.LibvirtConfigCPU() caps.host.cpu.arch = "x86_64" caps.host.topology = self._fake_caps_numa_topology() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) with contextlib.nested( mock.patch.object(host.Host, 'has_min_version', return_value=True), mock.patch.object(host.Host, "get_capabilities", return_value=caps)): cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertIsNone(cfg.cpuset) self.assertEqual(0, len(cfg.cputune.vcpupin)) self.assertIsNone(cfg.cpu.numa) def 
test_get_guest_config_numa_host_instance_no_fit(self): instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) flavor = objects.Flavor(memory_mb=4096, vcpus=4, root_gb=496, ephemeral_gb=8128, swap=33550336, name='fake', extra_specs={}) instance_ref.flavor = flavor caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = vconfig.LibvirtConfigCPU() caps.host.cpu.arch = "x86_64" caps.host.topology = self._fake_caps_numa_topology() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) with contextlib.nested( mock.patch.object(host.Host, "get_capabilities", return_value=caps), mock.patch.object( hardware, 'get_vcpu_pin_set', return_value=set([3])), mock.patch.object(random, 'choice') ) as (get_host_cap_mock, get_vcpu_pin_set_mock, choice_mock): cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertFalse(choice_mock.called) self.assertEqual(set([3]), cfg.cpuset) self.assertEqual(0, len(cfg.cputune.vcpupin)) self.assertIsNone(cfg.cpu.numa) def _test_get_guest_memory_backing_config( self, host_topology, inst_topology, numatune): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) with mock.patch.object( drvr, "_get_host_numa_topology", return_value=host_topology): return drvr._get_guest_memory_backing_config( inst_topology, numatune) @mock.patch.object(host.Host, 'has_min_version', return_value=True) def test_get_guest_memory_backing_config_large_success(self, mock_version): host_topology = objects.NUMATopology( cells=[ objects.NUMACell( id=3, cpuset=set([1]), memory=1024, mempages=[ objects.NUMAPagesTopology(size_kb=4, total=2000, used=0), objects.NUMAPagesTopology(size_kb=2048, total=512, used=0), objects.NUMAPagesTopology(size_kb=1048576, total=0, used=0), ])]) inst_topology = objects.InstanceNUMATopology(cells=[ objects.InstanceNUMACell( id=3, cpuset=set([0, 1]), memory=1024, pagesize=2048)]) numa_tune = vconfig.LibvirtConfigGuestNUMATune() numa_tune.memnodes = [vconfig.LibvirtConfigGuestNUMATuneMemNode()] numa_tune.memnodes[0].cellid = 0 numa_tune.memnodes[0].nodeset = [3] result = self._test_get_guest_memory_backing_config( host_topology, inst_topology, numa_tune) self.assertEqual(1, len(result.hugepages)) self.assertEqual(2048, result.hugepages[0].size_kb) self.assertEqual([0], result.hugepages[0].nodeset) @mock.patch.object(host.Host, 'has_min_version', return_value=True) def test_get_guest_memory_backing_config_smallest(self, mock_version): host_topology = objects.NUMATopology( cells=[ objects.NUMACell( id=3, cpuset=set([1]), memory=1024, mempages=[ objects.NUMAPagesTopology(size_kb=4, total=2000, used=0), objects.NUMAPagesTopology(size_kb=2048, total=512, used=0), objects.NUMAPagesTopology(size_kb=1048576, total=0, used=0), ])]) inst_topology = objects.InstanceNUMATopology(cells=[ objects.InstanceNUMACell( id=3, cpuset=set([0, 1]), memory=1024, pagesize=4)]) numa_tune = vconfig.LibvirtConfigGuestNUMATune() numa_tune.memnodes = [vconfig.LibvirtConfigGuestNUMATuneMemNode()] numa_tune.memnodes[0].cellid = 0 numa_tune.memnodes[0].nodeset = [3] result = self._test_get_guest_memory_backing_config( host_topology, inst_topology, numa_tune) self.assertIsNone(result) def test_get_guest_config_numa_host_instance_pci_no_numa_info(self): instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) flavor = 
objects.Flavor(memory_mb=1, vcpus=2, root_gb=496, ephemeral_gb=8128, swap=33550336, name='fake', extra_specs={}) instance_ref.flavor = flavor caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = vconfig.LibvirtConfigCPU() caps.host.cpu.arch = "x86_64" caps.host.topology = self._fake_caps_numa_topology() conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) pci_device_info = dict(test_pci_device.fake_db_dev) pci_device_info.update(compute_node_id=1, label='fake', status=fields.PciDeviceStatus.AVAILABLE, address='0000:00:00.1', instance_uuid=None, request_id=None, extra_info={}, numa_node=None) pci_device = objects.PciDevice(**pci_device_info) with contextlib.nested( mock.patch.object(host.Host, 'has_min_version', return_value=True), mock.patch.object( host.Host, "get_capabilities", return_value=caps), mock.patch.object( hardware, 'get_vcpu_pin_set', return_value=set([3])), mock.patch.object(host.Host, 'get_online_cpus', return_value=set(range(8))), mock.patch.object(pci_manager, "get_instance_pci_devs", return_value=[pci_device])): cfg = conn._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertEqual(set([3]), cfg.cpuset) self.assertEqual(0, len(cfg.cputune.vcpupin)) self.assertIsNone(cfg.cpu.numa) def test_get_guest_config_numa_host_instance_2pci_no_fit(self): instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) flavor = objects.Flavor(memory_mb=4096, vcpus=4, root_gb=496, ephemeral_gb=8128, swap=33550336, name='fake', extra_specs={}) instance_ref.flavor = flavor caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = vconfig.LibvirtConfigCPU() caps.host.cpu.arch = "x86_64" caps.host.topology = self._fake_caps_numa_topology() conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) pci_device_info = dict(test_pci_device.fake_db_dev) pci_device_info.update(compute_node_id=1, label='fake', status=fields.PciDeviceStatus.AVAILABLE, address='0000:00:00.1', instance_uuid=None, request_id=None, extra_info={}, numa_node=1) pci_device = objects.PciDevice(**pci_device_info) pci_device_info.update(numa_node=0, address='0000:00:00.2') pci_device2 = objects.PciDevice(**pci_device_info) with contextlib.nested( mock.patch.object( host.Host, "get_capabilities", return_value=caps), mock.patch.object( hardware, 'get_vcpu_pin_set', return_value=set([3])), mock.patch.object(random, 'choice'), mock.patch.object(pci_manager, "get_instance_pci_devs", return_value=[pci_device, pci_device2]) ) as (get_host_cap_mock, get_vcpu_pin_set_mock, choice_mock, pci_mock): cfg = conn._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertFalse(choice_mock.called) self.assertEqual(set([3]), cfg.cpuset) self.assertEqual(0, len(cfg.cputune.vcpupin)) self.assertIsNone(cfg.cpu.numa) @mock.patch.object(fakelibvirt.Connection, 'getType') @mock.patch.object(fakelibvirt.Connection, 'getVersion') @mock.patch.object(fakelibvirt.Connection, 'getLibVersion') @mock.patch.object(host.Host, 'get_capabilities') @mock.patch.object(libvirt_driver.LibvirtDriver, '_set_host_enabled') def _test_get_guest_config_numa_unsupported(self, fake_lib_version, fake_version, fake_type, fake_arch, exception_class, pagesize, mock_host, mock_caps, mock_lib_version, mock_version, mock_type): 
        instance_topology = objects.InstanceNUMATopology(
            cells=[objects.InstanceNUMACell(
                id=0, cpuset=set([0]),
                memory=1024, pagesize=pagesize)])
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.numa_topology = instance_topology
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        flavor = objects.Flavor(memory_mb=1, vcpus=2, root_gb=496,
                                ephemeral_gb=8128, swap=33550336, name='fake',
                                extra_specs={})
        instance_ref.flavor = flavor

        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = vconfig.LibvirtConfigCPU()
        caps.host.cpu.arch = fake_arch
        caps.host.topology = self._fake_caps_numa_topology()

        mock_type.return_value = fake_type
        mock_version.return_value = fake_version
        mock_lib_version.return_value = fake_lib_version
        mock_caps.return_value = caps

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)

        self.assertRaises(exception_class,
                          drvr._get_guest_config,
                          instance_ref, [], image_meta, disk_info)

    def test_get_guest_config_numa_old_version_libvirt(self):
        self.flags(virt_type='kvm', group='libvirt')

        self._test_get_guest_config_numa_unsupported(
            utils.convert_version_to_int(
                libvirt_driver.MIN_LIBVIRT_NUMA_VERSION) - 1,
            utils.convert_version_to_int(
                libvirt_driver.MIN_QEMU_NUMA_HUGEPAGE_VERSION),
            host.HV_DRIVER_QEMU,
            arch.X86_64,
            exception.NUMATopologyUnsupported,
            None)

    def test_get_guest_config_numa_bad_version_libvirt(self):
        self.flags(virt_type='kvm', group='libvirt')

        self._test_get_guest_config_numa_unsupported(
            utils.convert_version_to_int(
                libvirt_driver.BAD_LIBVIRT_NUMA_VERSIONS[0]),
            utils.convert_version_to_int(
                libvirt_driver.MIN_QEMU_NUMA_HUGEPAGE_VERSION),
            host.HV_DRIVER_QEMU,
            arch.X86_64,
            exception.NUMATopologyUnsupported,
            None)

    @mock.patch.object(libvirt_driver.LOG, 'warn')
    def test_has_numa_support_bad_version_libvirt_log(self, mock_warn):
        # Tests that a warning is logged once, and only once, when a libvirt
        # version listed in BAD_LIBVIRT_NUMA_VERSIONS is detected.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.assertFalse(hasattr(drvr, '_bad_libvirt_numa_version_warn'))
        with mock.patch.object(drvr._host, 'has_version', return_value=True):
            for i in range(2):
                self.assertFalse(drvr._has_numa_support())
        self.assertTrue(drvr._bad_libvirt_numa_version_warn)
        self.assertEqual(1, mock_warn.call_count)
        # assert the version is logged properly
        self.assertEqual('1.2.9.2', mock_warn.call_args[0][1])

    def test_get_guest_config_numa_old_version_qemu(self):
        self.flags(virt_type='kvm', group='libvirt')

        self._test_get_guest_config_numa_unsupported(
            utils.convert_version_to_int(
                libvirt_driver.MIN_LIBVIRT_NUMA_VERSION),
            utils.convert_version_to_int(
                libvirt_driver.MIN_QEMU_NUMA_HUGEPAGE_VERSION) - 1,
            host.HV_DRIVER_QEMU,
            arch.X86_64,
            exception.NUMATopologyUnsupported,
            None)

    def test_get_guest_config_numa_other_arch_qemu(self):
        self.flags(virt_type='kvm', group='libvirt')

        self._test_get_guest_config_numa_unsupported(
            utils.convert_version_to_int(
                libvirt_driver.MIN_LIBVIRT_NUMA_VERSION),
            utils.convert_version_to_int(
                libvirt_driver.MIN_QEMU_NUMA_HUGEPAGE_VERSION),
            host.HV_DRIVER_QEMU,
            arch.PPC64,
            exception.NUMATopologyUnsupported,
            None)

    def test_get_guest_config_numa_xen(self):
        self.flags(virt_type='xen', group='libvirt')

        self._test_get_guest_config_numa_unsupported(
            utils.convert_version_to_int(
                libvirt_driver.MIN_LIBVIRT_NUMA_VERSION),
            utils.convert_version_to_int((4, 5, 0)),
            'XEN',
            arch.X86_64,
            exception.NUMATopologyUnsupported,
            None)

    def test_get_guest_config_numa_old_pages_libvirt(self):
        self.flags(virt_type='kvm', group='libvirt')

        self._test_get_guest_config_numa_unsupported(
            utils.convert_version_to_int(
                libvirt_driver.MIN_LIBVIRT_HUGEPAGE_VERSION) - 1,
            utils.convert_version_to_int(
                libvirt_driver.MIN_QEMU_NUMA_HUGEPAGE_VERSION),
            host.HV_DRIVER_QEMU,
            arch.X86_64,
            exception.MemoryPagesUnsupported,
            2048)

    def test_get_guest_config_numa_old_pages_qemu(self):
        self.flags(virt_type='kvm', group='libvirt')

        self._test_get_guest_config_numa_unsupported(
            utils.convert_version_to_int(
                libvirt_driver.MIN_LIBVIRT_HUGEPAGE_VERSION),
            utils.convert_version_to_int(
                libvirt_driver.MIN_QEMU_NUMA_HUGEPAGE_VERSION) - 1,
            host.HV_DRIVER_QEMU,
            arch.X86_64,
            exception.NUMATopologyUnsupported,
            2048)

    def test_get_guest_config_numa_host_instance_fit_w_cpu_pinset(self):
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        flavor = objects.Flavor(memory_mb=1024, vcpus=2, root_gb=496,
                                ephemeral_gb=8128, swap=33550336, name='fake',
                                extra_specs={})
        instance_ref.flavor = flavor

        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = vconfig.LibvirtConfigCPU()
        caps.host.cpu.arch = "x86_64"
        caps.host.topology = self._fake_caps_numa_topology(kb_mem=4194304)

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)

        with contextlib.nested(
                mock.patch.object(host.Host, 'has_min_version',
                                  return_value=True),
                mock.patch.object(host.Host, "get_capabilities",
                                  return_value=caps),
                mock.patch.object(
                    hardware, 'get_vcpu_pin_set', return_value=set([2, 3])),
                mock.patch.object(host.Host, 'get_online_cpus',
                                  return_value=set(range(8)))
                ) as (has_min_version_mock, get_host_cap_mock,
                      get_vcpu_pin_set_mock, get_online_cpus_mock):
            cfg = drvr._get_guest_config(instance_ref, [],
                                         image_meta, disk_info)
            # NOTE(ndipanov): we make sure that pin_set was taken into account
            # when choosing viable cells
self.assertEqual(set([2, 3]), cfg.cpuset) self.assertEqual(0, len(cfg.cputune.vcpupin)) self.assertIsNone(cfg.cpu.numa) def test_get_guest_config_non_numa_host_instance_topo(self): instance_topology = objects.InstanceNUMATopology( cells=[objects.InstanceNUMACell( id=0, cpuset=set([0]), memory=1024), objects.InstanceNUMACell( id=1, cpuset=set([2]), memory=1024)]) instance_ref = objects.Instance(**self.test_instance) instance_ref.numa_topology = instance_topology image_meta = objects.ImageMeta.from_dict(self.test_image_meta) flavor = objects.Flavor(memory_mb=2048, vcpus=2, root_gb=496, ephemeral_gb=8128, swap=33550336, name='fake', extra_specs={}) instance_ref.flavor = flavor caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = vconfig.LibvirtConfigCPU() caps.host.cpu.arch = "x86_64" caps.host.topology = None drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) with contextlib.nested( mock.patch.object( objects.InstanceNUMATopology, "get_by_instance_uuid", return_value=instance_topology), mock.patch.object(host.Host, 'has_min_version', return_value=True), mock.patch.object(host.Host, "get_capabilities", return_value=caps)): cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertIsNone(cfg.cpuset) self.assertEqual(0, len(cfg.cputune.vcpupin)) self.assertIsNone(cfg.numatune) self.assertIsNotNone(cfg.cpu.numa) for instance_cell, numa_cfg_cell in zip( instance_topology.cells, cfg.cpu.numa.cells): self.assertEqual(instance_cell.id, numa_cfg_cell.id) self.assertEqual(instance_cell.cpuset, numa_cfg_cell.cpus) self.assertEqual(instance_cell.memory * units.Ki, numa_cfg_cell.memory) def test_get_guest_config_numa_host_instance_topo(self): instance_topology = objects.InstanceNUMATopology( cells=[objects.InstanceNUMACell( id=1, cpuset=set([0, 1]), memory=1024, pagesize=None), objects.InstanceNUMACell( id=2, cpuset=set([2, 3]), memory=1024, pagesize=None)]) instance_ref = objects.Instance(**self.test_instance) instance_ref.numa_topology = instance_topology image_meta = objects.ImageMeta.from_dict(self.test_image_meta) flavor = objects.Flavor(memory_mb=2048, vcpus=4, root_gb=496, ephemeral_gb=8128, swap=33550336, name='fake', extra_specs={}) instance_ref.flavor = flavor caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = vconfig.LibvirtConfigCPU() caps.host.cpu.arch = "x86_64" caps.host.topology = self._fake_caps_numa_topology() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) with contextlib.nested( mock.patch.object( objects.InstanceNUMATopology, "get_by_instance_uuid", return_value=instance_topology), mock.patch.object(host.Host, 'has_min_version', return_value=True), mock.patch.object(host.Host, "get_capabilities", return_value=caps), mock.patch.object( hardware, 'get_vcpu_pin_set', return_value=set([2, 3, 4, 5])), mock.patch.object(host.Host, 'get_online_cpus', return_value=set(range(8))), ): cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertIsNone(cfg.cpuset) # Test that the pinning is correct and limited to allowed only self.assertEqual(0, cfg.cputune.vcpupin[0].id) self.assertEqual(set([2, 3]), cfg.cputune.vcpupin[0].cpuset) self.assertEqual(1, cfg.cputune.vcpupin[1].id) self.assertEqual(set([2, 3]), cfg.cputune.vcpupin[1].cpuset) self.assertEqual(2, 
cfg.cputune.vcpupin[2].id) self.assertEqual(set([4, 5]), cfg.cputune.vcpupin[2].cpuset) self.assertEqual(3, cfg.cputune.vcpupin[3].id) self.assertEqual(set([4, 5]), cfg.cputune.vcpupin[3].cpuset) self.assertIsNotNone(cfg.cpu.numa) self.assertIsInstance(cfg.cputune.emulatorpin, vconfig.LibvirtConfigGuestCPUTuneEmulatorPin) self.assertEqual(set([2, 3, 4, 5]), cfg.cputune.emulatorpin.cpuset) for instance_cell, numa_cfg_cell, index in zip( instance_topology.cells, cfg.cpu.numa.cells, range(len(instance_topology.cells))): self.assertEqual(index, numa_cfg_cell.id) self.assertEqual(instance_cell.cpuset, numa_cfg_cell.cpus) self.assertEqual(instance_cell.memory * units.Ki, numa_cfg_cell.memory) allnodes = [cell.id for cell in instance_topology.cells] self.assertEqual(allnodes, cfg.numatune.memory.nodeset) self.assertEqual("strict", cfg.numatune.memory.mode) for instance_cell, memnode, index in zip( instance_topology.cells, cfg.numatune.memnodes, range(len(instance_topology.cells))): self.assertEqual(index, memnode.cellid) self.assertEqual([instance_cell.id], memnode.nodeset) self.assertEqual("strict", memnode.mode) def test_get_guest_config_numa_host_instance_topo_reordered(self): instance_topology = objects.InstanceNUMATopology( cells=[objects.InstanceNUMACell( id=3, cpuset=set([0, 1]), memory=1024), objects.InstanceNUMACell( id=0, cpuset=set([2, 3]), memory=1024)]) instance_ref = objects.Instance(**self.test_instance) instance_ref.numa_topology = instance_topology image_meta = objects.ImageMeta.from_dict(self.test_image_meta) flavor = objects.Flavor(memory_mb=2048, vcpus=4, root_gb=496, ephemeral_gb=8128, swap=33550336, name='fake', extra_specs={}) instance_ref.flavor = flavor caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = vconfig.LibvirtConfigCPU() caps.host.cpu.arch = "x86_64" caps.host.topology = self._fake_caps_numa_topology() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) with contextlib.nested( mock.patch.object( objects.InstanceNUMATopology, "get_by_instance_uuid", return_value=instance_topology), mock.patch.object(host.Host, 'has_min_version', return_value=True), mock.patch.object(host.Host, "get_capabilities", return_value=caps), mock.patch.object(host.Host, 'get_online_cpus', return_value=set(range(8))), ): cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertIsNone(cfg.cpuset) # Test that the pinning is correct and limited to allowed only self.assertEqual(0, cfg.cputune.vcpupin[0].id) self.assertEqual(set([6, 7]), cfg.cputune.vcpupin[0].cpuset) self.assertEqual(1, cfg.cputune.vcpupin[1].id) self.assertEqual(set([6, 7]), cfg.cputune.vcpupin[1].cpuset) self.assertEqual(2, cfg.cputune.vcpupin[2].id) self.assertEqual(set([0, 1]), cfg.cputune.vcpupin[2].cpuset) self.assertEqual(3, cfg.cputune.vcpupin[3].id) self.assertEqual(set([0, 1]), cfg.cputune.vcpupin[3].cpuset) self.assertIsNotNone(cfg.cpu.numa) self.assertIsInstance(cfg.cputune.emulatorpin, vconfig.LibvirtConfigGuestCPUTuneEmulatorPin) self.assertEqual(set([0, 1, 6, 7]), cfg.cputune.emulatorpin.cpuset) for index, (instance_cell, numa_cfg_cell) in enumerate(zip( instance_topology.cells, cfg.cpu.numa.cells)): self.assertEqual(index, numa_cfg_cell.id) self.assertEqual(instance_cell.cpuset, numa_cfg_cell.cpus) self.assertEqual(instance_cell.memory * units.Ki, numa_cfg_cell.memory) self.assertIsNone(numa_cfg_cell.memAccess) allnodes = set([cell.id for cell in 
instance_topology.cells]) self.assertEqual(allnodes, set(cfg.numatune.memory.nodeset)) self.assertEqual("strict", cfg.numatune.memory.mode) for index, (instance_cell, memnode) in enumerate(zip( instance_topology.cells, cfg.numatune.memnodes)): self.assertEqual(index, memnode.cellid) self.assertEqual([instance_cell.id], memnode.nodeset) self.assertEqual("strict", memnode.mode) def test_get_guest_config_numa_host_instance_topo_cpu_pinning(self): instance_topology = objects.InstanceNUMATopology( cells=[objects.InstanceNUMACell( id=1, cpuset=set([0, 1]), memory=1024, cpu_pinning={0: 24, 1: 25}), objects.InstanceNUMACell( id=0, cpuset=set([2, 3]), memory=1024, cpu_pinning={2: 0, 3: 1})]) instance_ref = objects.Instance(**self.test_instance) instance_ref.numa_topology = instance_topology image_meta = objects.ImageMeta.from_dict(self.test_image_meta) flavor = objects.Flavor(memory_mb=2048, vcpus=2, root_gb=496, ephemeral_gb=8128, swap=33550336, name='fake', extra_specs={}) instance_ref.flavor = flavor caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = vconfig.LibvirtConfigCPU() caps.host.cpu.arch = "x86_64" caps.host.topology = self._fake_caps_numa_topology( sockets_per_cell=4, cores_per_socket=3, threads_per_core=2) conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) with contextlib.nested( mock.patch.object( objects.InstanceNUMATopology, "get_by_instance_uuid", return_value=instance_topology), mock.patch.object(host.Host, 'has_min_version', return_value=True), mock.patch.object(host.Host, "get_capabilities", return_value=caps), mock.patch.object(host.Host, 'get_online_cpus', return_value=set(range(8))), ): cfg = conn._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertIsNone(cfg.cpuset) # Test that the pinning is correct and limited to allowed only self.assertEqual(0, cfg.cputune.vcpupin[0].id) self.assertEqual(set([24]), cfg.cputune.vcpupin[0].cpuset) self.assertEqual(1, cfg.cputune.vcpupin[1].id) self.assertEqual(set([25]), cfg.cputune.vcpupin[1].cpuset) self.assertEqual(2, cfg.cputune.vcpupin[2].id) self.assertEqual(set([0]), cfg.cputune.vcpupin[2].cpuset) self.assertEqual(3, cfg.cputune.vcpupin[3].id) self.assertEqual(set([1]), cfg.cputune.vcpupin[3].cpuset) self.assertIsNotNone(cfg.cpu.numa) # Emulator must be pinned to union of cfg.cputune.vcpupin[*].cpuset self.assertIsInstance(cfg.cputune.emulatorpin, vconfig.LibvirtConfigGuestCPUTuneEmulatorPin) self.assertEqual(set([0, 1, 24, 25]), cfg.cputune.emulatorpin.cpuset) for i, (instance_cell, numa_cfg_cell) in enumerate(zip( instance_topology.cells, cfg.cpu.numa.cells)): self.assertEqual(i, numa_cfg_cell.id) self.assertEqual(instance_cell.cpuset, numa_cfg_cell.cpus) self.assertEqual(instance_cell.memory * units.Ki, numa_cfg_cell.memory) self.assertIsNone(numa_cfg_cell.memAccess) allnodes = set([cell.id for cell in instance_topology.cells]) self.assertEqual(allnodes, set(cfg.numatune.memory.nodeset)) self.assertEqual("strict", cfg.numatune.memory.mode) for i, (instance_cell, memnode) in enumerate(zip( instance_topology.cells, cfg.numatune.memnodes)): self.assertEqual(i, memnode.cellid) self.assertEqual([instance_cell.id], memnode.nodeset) self.assertEqual("strict", memnode.mode) def test_get_guest_config_numa_host_mempages_shared(self): instance_topology = objects.InstanceNUMATopology( cells=[ objects.InstanceNUMACell( id=1, cpuset=set([0, 1]), memory=1024, pagesize=2048), 
    def test_get_cpu_numa_config_from_instance(self):
        topology = objects.InstanceNUMATopology(cells=[
            objects.InstanceNUMACell(id=0, cpuset=set([1, 2]), memory=128),
            objects.InstanceNUMACell(id=1, cpuset=set([3, 4]), memory=128),
        ])

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        conf = drvr._get_cpu_numa_config_from_instance(topology, True)
        self.assertIsInstance(conf, vconfig.LibvirtConfigGuestCPUNUMA)
        self.assertEqual(0, conf.cells[0].id)
        self.assertEqual(set([1, 2]), conf.cells[0].cpus)
        self.assertEqual(131072, conf.cells[0].memory)
        self.assertEqual("shared", conf.cells[0].memAccess)
        self.assertEqual(1, conf.cells[1].id)
        self.assertEqual(set([3, 4]), conf.cells[1].cpus)
        self.assertEqual(131072, conf.cells[1].memory)
        self.assertEqual("shared", conf.cells[1].memAccess)

    def test_get_cpu_numa_config_from_instance_none(self):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        conf = drvr._get_cpu_numa_config_from_instance(None, False)
        self.assertIsNone(conf)

    @mock.patch.object(host.Host, 'has_version', return_value=True)
    def test_has_cpu_policy_support(self, mock_has_version):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.assertRaises(exception.CPUPinningNotSupported,
                          drvr._has_cpu_policy_support)
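    # NOTE: _wants_hugepages() should only report True when both a host
    # NUMA topology is available and the instance requests a large page
    # size (2048 KiB below); the 4 KiB small-page case is asserted False
    # for every host/instance combination.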
    @mock.patch.object(libvirt_driver.LibvirtDriver, "_has_numa_support",
                       return_value=True)
    @mock.patch.object(libvirt_driver.LibvirtDriver, "_has_hugepage_support",
                       return_value=True)
    @mock.patch.object(host.Host, "get_capabilities")
    def test_does_not_want_hugepages(self, mock_caps, mock_numa, mock_hp):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_topology = objects.InstanceNUMATopology(
            cells=[
                objects.InstanceNUMACell(
                    id=1, cpuset=set([0, 1]),
                    memory=1024, pagesize=4),
                objects.InstanceNUMACell(
                    id=2, cpuset=set([2, 3]),
                    memory=1024, pagesize=4)])

        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = vconfig.LibvirtConfigCPU()
        caps.host.cpu.arch = "x86_64"
        caps.host.topology = self._fake_caps_numa_topology()

        mock_caps.return_value = caps

        host_topology = drvr._get_host_numa_topology()

        self.assertFalse(drvr._wants_hugepages(None, None))
        self.assertFalse(drvr._wants_hugepages(host_topology, None))
        self.assertFalse(drvr._wants_hugepages(None, instance_topology))
        self.assertFalse(drvr._wants_hugepages(host_topology,
                                               instance_topology))

    @mock.patch.object(libvirt_driver.LibvirtDriver, "_has_numa_support",
                       return_value=True)
    @mock.patch.object(libvirt_driver.LibvirtDriver, "_has_hugepage_support",
                       return_value=True)
    @mock.patch.object(host.Host, "get_capabilities")
    def test_does_want_hugepages(self, mock_caps, mock_numa, mock_hp):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_topology = objects.InstanceNUMATopology(
            cells=[
                objects.InstanceNUMACell(
                    id=1, cpuset=set([0, 1]),
                    memory=1024, pagesize=2048),
                objects.InstanceNUMACell(
                    id=2, cpuset=set([2, 3]),
                    memory=1024, pagesize=2048)])

        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = vconfig.LibvirtConfigCPU()
        caps.host.cpu.arch = "x86_64"
        caps.host.topology = self._fake_caps_numa_topology()

        mock_caps.return_value = caps

        host_topology = drvr._get_host_numa_topology()

        self.assertTrue(drvr._wants_hugepages(host_topology,
                                              instance_topology))

    def test_get_guest_config_clock(self):
        self.flags(virt_type='kvm', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        hpet_map = {
            arch.X86_64: True,
            arch.I686: True,
            arch.PPC: False,
            arch.PPC64: False,
            arch.ARMV7: False,
            arch.AARCH64: False,
            }

        for guestarch, expect_hpet in hpet_map.items():
            with mock.patch.object(libvirt_driver.libvirt_utils,
                                   'get_arch',
                                   return_value=guestarch):
                cfg = drvr._get_guest_config(instance_ref, [],
                                             image_meta, disk_info)
                self.assertIsInstance(cfg.clock,
                                      vconfig.LibvirtConfigGuestClock)
                self.assertEqual(cfg.clock.offset, "utc")
                self.assertIsInstance(cfg.clock.timers[0],
                                      vconfig.LibvirtConfigGuestTimer)
                self.assertIsInstance(cfg.clock.timers[1],
                                      vconfig.LibvirtConfigGuestTimer)
                self.assertEqual(cfg.clock.timers[0].name, "pit")
                self.assertEqual(cfg.clock.timers[0].tickpolicy, "delay")
                self.assertEqual(cfg.clock.timers[1].name, "rtc")
                self.assertEqual(cfg.clock.timers[1].tickpolicy, "catchup")
                if expect_hpet:
                    self.assertEqual(3, len(cfg.clock.timers))
                    self.assertIsInstance(cfg.clock.timers[2],
                                          vconfig.LibvirtConfigGuestTimer)
                    self.assertEqual('hpet', cfg.clock.timers[2].name)
                    self.assertFalse(cfg.clock.timers[2].present)
                else:
                    self.assertEqual(2, len(cfg.clock.timers))
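    # NOTE: Windows guests get a "localtime" clock offset plus an hpet
    # timer entry that is explicitly marked not-present; with a new enough
    # libvirt (has_min_version() returning True) the hypervclock timer is
    # added as well, as the next two tests verify.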
    @mock.patch.object(libvirt_utils, 'get_arch')
    @mock.patch.object(host.Host, 'has_min_version')
    def test_get_guest_config_windows(self, mock_version, mock_get_arch):
        mock_version.return_value = False
        mock_get_arch.return_value = arch.I686
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref['os_type'] = 'windows'
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref,
                                     _fake_network_info(self.stubs, 1),
                                     image_meta, disk_info)

        self.assertIsInstance(cfg.clock,
                              vconfig.LibvirtConfigGuestClock)
        self.assertEqual(cfg.clock.offset, "localtime")

        self.assertEqual(3, len(cfg.clock.timers), cfg.clock.timers)
        self.assertEqual("pit", cfg.clock.timers[0].name)
        self.assertEqual("rtc", cfg.clock.timers[1].name)
        self.assertEqual("hpet", cfg.clock.timers[2].name)
        self.assertFalse(cfg.clock.timers[2].present)

    @mock.patch.object(libvirt_utils, 'get_arch')
    @mock.patch.object(host.Host, 'has_min_version')
    def test_get_guest_config_windows_timer(self, mock_version,
                                            mock_get_arch):
        mock_version.return_value = True
        mock_get_arch.return_value = arch.I686
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref['os_type'] = 'windows'
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref,
                                     _fake_network_info(self.stubs, 1),
                                     image_meta, disk_info)

        self.assertIsInstance(cfg.clock,
                              vconfig.LibvirtConfigGuestClock)
        self.assertEqual(cfg.clock.offset, "localtime")

        self.assertEqual(4, len(cfg.clock.timers), cfg.clock.timers)
        self.assertEqual("pit", cfg.clock.timers[0].name)
        self.assertEqual("rtc", cfg.clock.timers[1].name)
        self.assertEqual("hpet", cfg.clock.timers[2].name)
        self.assertFalse(cfg.clock.timers[2].present)
        self.assertEqual("hypervclock", cfg.clock.timers[3].name)
        self.assertTrue(cfg.clock.timers[3].present)

        self.assertEqual(3, len(cfg.features))
        self.assertIsInstance(cfg.features[0],
                              vconfig.LibvirtConfigGuestFeatureACPI)
        self.assertIsInstance(cfg.features[1],
                              vconfig.LibvirtConfigGuestFeatureAPIC)
        self.assertIsInstance(cfg.features[2],
                              vconfig.LibvirtConfigGuestFeatureHyperV)
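    # NOTE: the HyperV enlightenments (relaxed/spinlocks/vapic) depend on
    # the libvirt/QEMU versions reported by has_min_version(); feature1
    # checks the minimal set (relaxed only), feature2 the full set with
    # 8191 spinlock retries.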
    @mock.patch.object(host.Host, 'has_min_version')
    def test_get_guest_config_windows_hyperv_feature1(self, mock_version):
        def fake_version(lv_ver=None, hv_ver=None, hv_type=None):
            if lv_ver == (1, 0, 0) and hv_ver == (1, 1, 0):
                return True
            return False

        mock_version.side_effect = fake_version
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref['os_type'] = 'windows'
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref,
                                     _fake_network_info(self.stubs, 1),
                                     image_meta, disk_info)

        self.assertIsInstance(cfg.clock,
                              vconfig.LibvirtConfigGuestClock)
        self.assertEqual(cfg.clock.offset, "localtime")

        self.assertEqual(3, len(cfg.features))
        self.assertIsInstance(cfg.features[0],
                              vconfig.LibvirtConfigGuestFeatureACPI)
        self.assertIsInstance(cfg.features[1],
                              vconfig.LibvirtConfigGuestFeatureAPIC)
        self.assertIsInstance(cfg.features[2],
                              vconfig.LibvirtConfigGuestFeatureHyperV)

        self.assertTrue(cfg.features[2].relaxed)
        self.assertFalse(cfg.features[2].spinlocks)
        self.assertFalse(cfg.features[2].vapic)

    @mock.patch.object(host.Host, 'has_min_version')
    def test_get_guest_config_windows_hyperv_feature2(self, mock_version):
        mock_version.return_value = True
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref['os_type'] = 'windows'
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref,
                                     _fake_network_info(self.stubs, 1),
                                     image_meta, disk_info)

        self.assertIsInstance(cfg.clock,
                              vconfig.LibvirtConfigGuestClock)
        self.assertEqual(cfg.clock.offset, "localtime")

        self.assertEqual(3, len(cfg.features))
        self.assertIsInstance(cfg.features[0],
                              vconfig.LibvirtConfigGuestFeatureACPI)
        self.assertIsInstance(cfg.features[1],
                              vconfig.LibvirtConfigGuestFeatureAPIC)
        self.assertIsInstance(cfg.features[2],
                              vconfig.LibvirtConfigGuestFeatureHyperV)

        self.assertTrue(cfg.features[2].relaxed)
        self.assertTrue(cfg.features[2].spinlocks)
        self.assertEqual(8191, cfg.features[2].spinlock_retries)
        self.assertTrue(cfg.features[2].vapic)

    def test_get_guest_config_with_two_nics(self):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref,
                                     _fake_network_info(self.stubs, 2),
                                     image_meta, disk_info)
        self.assertEqual(2, len(cfg.features))
        self.assertIsInstance(cfg.features[0],
                              vconfig.LibvirtConfigGuestFeatureACPI)
        self.assertIsInstance(cfg.features[1],
                              vconfig.LibvirtConfigGuestFeatureAPIC)
        self.assertEqual(cfg.memory, 2 * units.Mi)
        self.assertEqual(cfg.vcpus, 1)
        self.assertEqual(cfg.os_type, vm_mode.HVM)
        self.assertEqual(cfg.os_boot_dev, ["hd"])
        self.assertIsNone(cfg.os_root)
        self.assertEqual(len(cfg.devices), 10)
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestInterface)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestInterface)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[6],
                              vconfig.LibvirtConfigGuestInput)
        self.assertIsInstance(cfg.devices[7],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[8],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[9],
                              vconfig.LibvirtConfigMemoryBalloon)
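    # NOTE: the bug 1118829 regression test below boots a UML guest with an
    # empty block_device_info; the driver must still derive
    # instance.root_device_name ('/dev/vda') from the disk mapping, while
    # the following test shows an explicit root_device_name taking effect.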
    def test_get_guest_config_bug_1118829(self):
        self.flags(virt_type='uml', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)

        disk_info = {'disk_bus': 'virtio',
                     'cdrom_bus': 'ide',
                     'mapping': {u'vda': {'bus': 'virtio',
                                          'type': 'disk',
                                          'dev': u'vda'},
                                 'root': {'bus': 'virtio',
                                          'type': 'disk',
                                          'dev': 'vda'}}}

        # NOTE(jdg): For this specific test leave this blank
        # This will exercise the failed code path still,
        # and won't require fakes and stubs of the iscsi discovery
        block_device_info = {}
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        drvr._get_guest_config(instance_ref, [], image_meta, disk_info,
                               None, block_device_info)
        self.assertEqual(instance_ref['root_device_name'], '/dev/vda')

    def test_get_guest_config_with_root_device_name(self):
        self.flags(virt_type='uml', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        block_device_info = {'root_device_name': '/dev/vdb'}

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta,
                                            block_device_info)
        cfg = drvr._get_guest_config(instance_ref, [], image_meta,
                                     disk_info, None, block_device_info)
        self.assertEqual(0, len(cfg.features))
        self.assertEqual(cfg.memory, 2 * units.Mi)
        self.assertEqual(cfg.vcpus, 1)
        self.assertEqual(cfg.os_type, "uml")
        self.assertEqual(cfg.os_boot_dev, [])
        self.assertEqual(cfg.os_root, '/dev/vdb')
        self.assertEqual(len(cfg.devices), 3)
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestConsole)
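    # NOTE: the block-device tests below hand pre-built BDM dicts to the
    # driver and check that each volume shows up as a
    # LibvirtConfigGuestDisk with the requested target device, and that
    # the BDM save() hook is invoked.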
    def test_get_guest_config_with_block_device(self):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        conn_info = {'driver_volume_type': 'fake'}
        info = {'block_device_mapping': driver_block_device.convert_volumes([
                    fake_block_device.FakeDbBlockDeviceDict(
                        {'id': 1,
                         'source_type': 'volume', 'destination_type': 'volume',
                         'device_name': '/dev/vdc'}),
                    fake_block_device.FakeDbBlockDeviceDict(
                        {'id': 2,
                         'source_type': 'volume', 'destination_type': 'volume',
                         'device_name': '/dev/vdd'}),
                ])}
        info['block_device_mapping'][0]['connection_info'] = conn_info
        info['block_device_mapping'][1]['connection_info'] = conn_info

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta,
                                            info)
        with mock.patch.object(
                driver_block_device.DriverVolumeBlockDevice, 'save'
        ) as mock_save:
            cfg = drvr._get_guest_config(instance_ref, [],
                                         image_meta, disk_info,
                                         None, info)
            self.assertIsInstance(cfg.devices[2],
                                  vconfig.LibvirtConfigGuestDisk)
            self.assertEqual(cfg.devices[2].target_dev, 'vdc')
            self.assertIsInstance(cfg.devices[3],
                                  vconfig.LibvirtConfigGuestDisk)
            self.assertEqual(cfg.devices[3].target_dev, 'vdd')
            mock_save.assert_called_with()

    def test_get_guest_config_lxc_with_attached_volume(self):
        self.flags(virt_type='lxc', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        conn_info = {'driver_volume_type': 'fake'}
        info = {'block_device_mapping': driver_block_device.convert_volumes([
                    fake_block_device.FakeDbBlockDeviceDict(
                        {'id': 1,
                         'source_type': 'volume', 'destination_type': 'volume',
                         'boot_index': 0}),
                    fake_block_device.FakeDbBlockDeviceDict(
                        {'id': 2,
                         'source_type': 'volume', 'destination_type': 'volume',
                        }),
                    fake_block_device.FakeDbBlockDeviceDict(
                        {'id': 3,
                         'source_type': 'volume', 'destination_type': 'volume',
                        }),
                ])}

        info['block_device_mapping'][0]['connection_info'] = conn_info
        info['block_device_mapping'][1]['connection_info'] = conn_info
        info['block_device_mapping'][2]['connection_info'] = conn_info
        info['block_device_mapping'][0]['mount_device'] = '/dev/vda'
        info['block_device_mapping'][1]['mount_device'] = '/dev/vdc'
        info['block_device_mapping'][2]['mount_device'] = '/dev/vdd'
        with mock.patch.object(
                driver_block_device.DriverVolumeBlockDevice, 'save'
        ) as mock_save:
            disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                                instance_ref,
                                                image_meta,
                                                info)
            cfg = drvr._get_guest_config(instance_ref, [],
                                         image_meta, disk_info,
                                         None, info)
            self.assertIsInstance(cfg.devices[1],
                                  vconfig.LibvirtConfigGuestDisk)
            self.assertEqual(cfg.devices[1].target_dev, 'vdc')
            self.assertIsInstance(cfg.devices[2],
                                  vconfig.LibvirtConfigGuestDisk)
            self.assertEqual(cfg.devices[2].target_dev, 'vdd')
            mock_save.assert_called_with()

    def test_get_guest_config_with_configdrive(self):
        # It's necessary to check if the architecture is power, because
        # power doesn't have support to ide, and so libvirt translate
        # all ide calls to scsi
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        # make configdrive.required_by() return True
        instance_ref['config_drive'] = True

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info)

        # The last device is selected for this. on x86 is the last ide
        # device (hdd). Since power only support scsi, the last device
        # is sdz
        expect = {"ppc": "sdz", "ppc64": "sdz"}
        disk = expect.get(blockinfo.libvirt_utils.get_arch({}), "hdd")
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertEqual(cfg.devices[2].target_dev, disk)

    def test_get_guest_config_with_virtio_scsi_bus(self):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

        image_meta = objects.ImageMeta.from_dict({
            "disk_format": "raw",
            "properties": {"hw_scsi_model": "virtio-scsi"}})
        instance_ref = objects.Instance(**self.test_instance)

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta,
                                            [])
        cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info)
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestController)
        self.assertEqual(cfg.devices[2].model, 'virtio-scsi')

    def test_get_guest_config_with_virtio_scsi_bus_bdm(self):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

        image_meta = objects.ImageMeta.from_dict({
            "disk_format": "raw",
            "properties": {"hw_scsi_model": "virtio-scsi"}})
        instance_ref = objects.Instance(**self.test_instance)
        conn_info = {'driver_volume_type': 'fake'}
        bd_info = {
            'block_device_mapping': driver_block_device.convert_volumes([
                    fake_block_device.FakeDbBlockDeviceDict(
                        {'id': 1,
                         'source_type': 'volume', 'destination_type': 'volume',
                         'device_name': '/dev/sdc', 'disk_bus': 'scsi'}),
                    fake_block_device.FakeDbBlockDeviceDict(
                        {'id': 2,
                         'source_type': 'volume', 'destination_type': 'volume',
                         'device_name': '/dev/sdd', 'disk_bus': 'scsi'}),
                ])}
        bd_info['block_device_mapping'][0]['connection_info'] = conn_info
        bd_info['block_device_mapping'][1]['connection_info'] = conn_info

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta,
                                            bd_info)
        with mock.patch.object(
                driver_block_device.DriverVolumeBlockDevice, 'save'
        ) as mock_save:
            cfg = drvr._get_guest_config(instance_ref, [], image_meta,
                                         disk_info, [], bd_info)
            self.assertIsInstance(cfg.devices[2],
                                  vconfig.LibvirtConfigGuestDisk)
            self.assertEqual(cfg.devices[2].target_dev, 'sdc')
            self.assertEqual(cfg.devices[2].target_bus, 'scsi')
            self.assertIsInstance(cfg.devices[3],
                                  vconfig.LibvirtConfigGuestDisk)
            self.assertEqual(cfg.devices[3].target_dev, 'sdd')
            self.assertEqual(cfg.devices[3].target_bus, 'scsi')
            self.assertIsInstance(cfg.devices[4],
                                  vconfig.LibvirtConfigGuestController)
            self.assertEqual(cfg.devices[4].model, 'virtio-scsi')
            mock_save.assert_called_with()
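    # NOTE: the graphics tests below toggle the vnc/spice flags together
    # with the use_usb_tablet option; the expected device list grows by one
    # Input (tablet) device whenever a graphical console plus tablet is
    # enabled, and the Graphics device type tracks the enabled console.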
    def test_get_guest_config_with_vnc(self):
        self.flags(enabled=True, group='vnc')
        self.flags(virt_type='kvm',
                   use_usb_tablet=False,
                   group='libvirt')
        self.flags(enabled=False, group='spice')

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref, [],
                                     image_meta, disk_info)
        self.assertEqual(len(cfg.devices), 7)
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[6],
                              vconfig.LibvirtConfigMemoryBalloon)

        self.assertEqual(cfg.devices[4].type, "vnc")

    def test_get_guest_config_with_vnc_and_tablet(self):
        self.flags(enabled=True, group='vnc')
        self.flags(virt_type='kvm',
                   use_usb_tablet=True,
                   group='libvirt')
        self.flags(enabled=False, group='spice')

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref, [],
                                     image_meta, disk_info)
        self.assertEqual(len(cfg.devices), 8)
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestInput)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[6],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[7],
                              vconfig.LibvirtConfigMemoryBalloon)

        self.assertEqual(cfg.devices[4].type, "tablet")
        self.assertEqual(cfg.devices[5].type, "vnc")

    def test_get_guest_config_with_spice_and_tablet(self):
        self.flags(enabled=False, group='vnc')
        self.flags(virt_type='kvm',
                   use_usb_tablet=True,
                   group='libvirt')
        self.flags(enabled=True,
                   agent_enabled=False,
                   group='spice')

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref, [],
                                     image_meta, disk_info)
        self.assertEqual(len(cfg.devices), 8)
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestInput)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[6],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[7],
                              vconfig.LibvirtConfigMemoryBalloon)

        self.assertEqual(cfg.devices[4].type, "tablet")
        self.assertEqual(cfg.devices[5].type, "spice")
    def test_get_guest_config_with_spice_and_agent(self):
        self.flags(enabled=False, group='vnc')
        self.flags(virt_type='kvm',
                   use_usb_tablet=True,
                   group='libvirt')
        self.flags(enabled=True,
                   agent_enabled=True,
                   group='spice')

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref, [],
                                     image_meta, disk_info)
        self.assertEqual(len(cfg.devices), 8)
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestChannel)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[6],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[7],
                              vconfig.LibvirtConfigMemoryBalloon)

        self.assertEqual(cfg.devices[4].target_name, "com.redhat.spice.0")
        self.assertEqual(cfg.devices[5].type, "spice")
        self.assertEqual(cfg.devices[6].type, "qxl")

    @mock.patch('nova.console.serial.acquire_port')
    @mock.patch('nova.virt.hardware.get_number_of_serial_ports',
                return_value=1)
    @mock.patch.object(libvirt_driver.libvirt_utils, 'get_arch',)
    def test_create_serial_console_devices_based_on_arch(self,
                                                         mock_get_arch,
                                                         mock_get_port_number,
                                                         mock_acquire_port):
        self.flags(enabled=True, group='serial_console')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

        expected = {arch.X86_64: vconfig.LibvirtConfigGuestSerial,
                    arch.S390: vconfig.LibvirtConfigGuestConsole,
                    arch.S390X: vconfig.LibvirtConfigGuestConsole}

        for guest_arch, device_type in expected.items():
            mock_get_arch.return_value = guest_arch
            guest = vconfig.LibvirtConfigGuest()

            drvr._create_serial_console_devices(guest, instance=None,
                                                flavor={}, image_meta={})
            self.assertEqual(1, len(guest.devices))
            console_device = guest.devices[0]
            self.assertIsInstance(console_device, device_type)
            self.assertEqual("tcp", console_device.type)

    @mock.patch('nova.console.serial.acquire_port')
    def test_get_guest_config_serial_console(self, acquire_port):
        self.flags(enabled=True, group='serial_console')

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)

        acquire_port.return_value = 11111

        cfg = drvr._get_guest_config(instance_ref, [],
                                     image_meta, disk_info)
        self.assertEqual(8, len(cfg.devices))
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestInput)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[6],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[7],
                              vconfig.LibvirtConfigMemoryBalloon)

        self.assertEqual("tcp", cfg.devices[2].type)
        self.assertEqual(11111, cfg.devices[2].listen_port)
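    # NOTE: hw:serial_port_count (flavor extra spec) and
    # hw_serial_port_count (image property) control how many tcp serial
    # devices are created; a non-integer value must raise
    # ImageSerialPortNumberInvalid, as the tests below assert.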
    def test_get_guest_config_serial_console_through_flavor(self):
        self.flags(enabled=True, group='serial_console')

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.flavor.extra_specs = {'hw:serial_port_count': 3}
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)

        cfg = drvr._get_guest_config(instance_ref, [],
                                     image_meta, disk_info)
        self.assertEqual(10, len(cfg.devices))
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[6],
                              vconfig.LibvirtConfigGuestInput)
        self.assertIsInstance(cfg.devices[7],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[8],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[9],
                              vconfig.LibvirtConfigMemoryBalloon)

        self.assertEqual("tcp", cfg.devices[2].type)
        self.assertEqual("tcp", cfg.devices[3].type)
        self.assertEqual("tcp", cfg.devices[4].type)

    def test_get_guest_config_serial_console_invalid_flavor(self):
        self.flags(enabled=True, group='serial_console')

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.flavor.extra_specs = {'hw:serial_port_count': "a"}
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)

        self.assertRaises(
            exception.ImageSerialPortNumberInvalid,
            drvr._get_guest_config, instance_ref, [],
            image_meta, disk_info)

    def test_get_guest_config_serial_console_image_and_flavor(self):
        self.flags(enabled=True, group='serial_console')

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

        image_meta = objects.ImageMeta.from_dict({
            "disk_format": "raw",
            "properties": {"hw_serial_port_count": "3"}})
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.flavor.extra_specs = {'hw:serial_port_count': 4}
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref, [], image_meta,
                                     disk_info)
        self.assertEqual(10, len(cfg.devices), cfg.devices)
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[6],
                              vconfig.LibvirtConfigGuestInput)
        self.assertIsInstance(cfg.devices[7],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[8],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[9],
                              vconfig.LibvirtConfigMemoryBalloon)

        self.assertEqual("tcp", cfg.devices[2].type)
        self.assertEqual("tcp", cfg.devices[3].type)
        self.assertEqual("tcp", cfg.devices[4].type)
    @mock.patch('nova.console.serial.acquire_port')
    def test_get_guest_config_serial_console_through_port_rng_exhausted(
            self, acquire_port):
        self.flags(enabled=True, group='serial_console')

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)

        acquire_port.side_effect = exception.SocketPortRangeExhaustedException(
            '127.0.0.1')
        self.assertRaises(
            exception.SocketPortRangeExhaustedException,
            drvr._get_guest_config, instance_ref, [],
            image_meta, disk_info)

    @mock.patch.object(libvirt_guest.Guest, "get_xml_desc")
    def test_get_serial_ports_from_guest(self, mock_get_xml_desc):
        i = self._test_get_serial_ports_from_guest(None,
                                                   mock_get_xml_desc)
        self.assertEqual([
            ('127.0.0.1', 100),
            ('127.0.0.1', 101),
            ('127.0.0.2', 100),
            ('127.0.0.2', 101)], list(i))

    @mock.patch.object(libvirt_guest.Guest, "get_xml_desc")
    def test_get_serial_ports_from_guest_bind_only(self, mock_get_xml_desc):
        i = self._test_get_serial_ports_from_guest('bind',
                                                   mock_get_xml_desc)
        self.assertEqual([
            ('127.0.0.1', 101),
            ('127.0.0.2', 100)], list(i))

    @mock.patch.object(libvirt_guest.Guest, "get_xml_desc")
    def test_get_serial_ports_from_guest_connect_only(self,
                                                      mock_get_xml_desc):
        i = self._test_get_serial_ports_from_guest('connect',
                                                   mock_get_xml_desc)
        self.assertEqual([
            ('127.0.0.1', 100),
            ('127.0.0.2', 101)], list(i))

    @mock.patch.object(libvirt_guest.Guest, "get_xml_desc")
    def test_get_serial_ports_from_guest_on_s390(self, mock_get_xml_desc):
        i = self._test_get_serial_ports_from_guest(None,
                                                   mock_get_xml_desc,
                                                   'console')
        self.assertEqual([
            ('127.0.0.1', 100),
            ('127.0.0.1', 101),
            ('127.0.0.2', 100),
            ('127.0.0.2', 101)], list(i))

    def _test_get_serial_ports_from_guest(self, mode, mock_get_xml_desc,
                                          dev_name='serial'):
        xml = """
        <domain type='kvm'>
          <devices>
            <%(dev_name)s type="tcp">
              <source host="127.0.0.1" service="100" mode="connect"/>
            </%(dev_name)s>
            <%(dev_name)s type="tcp">
              <source host="127.0.0.1" service="101" mode="bind"/>
            </%(dev_name)s>
            <%(dev_name)s type="tcp">
              <source host="127.0.0.2" service="100" mode="bind"/>
            </%(dev_name)s>
            <%(dev_name)s type="tcp">
              <source host="127.0.0.2" service="101" mode="connect"/>
            </%(dev_name)s>
          </devices>
        </domain>""" % {'dev_name': dev_name}

        mock_get_xml_desc.return_value = xml

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        guest = libvirt_guest.Guest(FakeVirtDomain())
        return drvr._get_serial_ports_from_guest(guest, mode=mode)

    def test_get_guest_config_with_type_xen(self):
        self.flags(enabled=True, group='vnc')
        self.flags(virt_type='xen',
                   use_usb_tablet=False,
                   group='libvirt')
        self.flags(enabled=False,
                   group='spice')

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref, [],
                                     image_meta, disk_info)
        self.assertEqual(len(cfg.devices), 6)
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestConsole)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigMemoryBalloon)

        self.assertEqual(cfg.devices[3].type, "vnc")
        self.assertEqual(cfg.devices[4].type, "xen")
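    # NOTE: on s390/s390x the driver emits <console> devices with sclp and
    # sclplm targets instead of PC-style serial ports, and the machine type
    # defaults to s390-ccw-virtio, as the next test verifies.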
    @mock.patch.object(libvirt_driver.libvirt_utils, 'get_arch',
                       return_value=arch.S390X)
    def test_get_guest_config_with_type_kvm_on_s390(self, mock_get_arch):
        self.flags(enabled=False, group='vnc')
        self.flags(virt_type='kvm',
                   use_usb_tablet=False,
                   group='libvirt')

        self._stub_host_capabilities_cpu_arch(arch.S390X)

        instance_ref = objects.Instance(**self.test_instance)

        cfg = self._get_guest_config_via_fake_api(instance_ref)

        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        log_file_device = cfg.devices[2]
        self.assertIsInstance(log_file_device,
                              vconfig.LibvirtConfigGuestConsole)
        self.assertEqual("sclplm", log_file_device.target_type)
        self.assertEqual("file", log_file_device.type)
        terminal_device = cfg.devices[3]
        self.assertIsInstance(terminal_device,
                              vconfig.LibvirtConfigGuestConsole)
        self.assertEqual("sclp", terminal_device.target_type)
        self.assertEqual("pty", terminal_device.type)
        self.assertEqual("s390-ccw-virtio", cfg.os_mach_type)

    def _stub_host_capabilities_cpu_arch(self, cpu_arch):
        def get_host_capabilities_stub(self):
            cpu = vconfig.LibvirtConfigGuestCPU()
            cpu.arch = cpu_arch

            caps = vconfig.LibvirtConfigCaps()
            caps.host = vconfig.LibvirtConfigCapsHost()
            caps.host.cpu = cpu
            return caps

        self.stubs.Set(host.Host, "get_capabilities",
                       get_host_capabilities_stub)

    def _get_guest_config_via_fake_api(self, instance):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance,
                                            image_meta)
        return drvr._get_guest_config(instance, [],
                                      image_meta, disk_info)

    def test_get_guest_config_with_type_xen_pae_hvm(self):
        self.flags(enabled=True, group='vnc')
        self.flags(virt_type='xen',
                   use_usb_tablet=False,
                   group='libvirt')
        self.flags(enabled=False,
                   group='spice')

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref['vm_mode'] = vm_mode.HVM
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)

        cfg = drvr._get_guest_config(instance_ref, [],
                                     image_meta, disk_info)

        self.assertEqual(cfg.os_type, vm_mode.HVM)
        self.assertEqual(cfg.os_loader, CONF.libvirt.xen_hvmloader_path)
        self.assertEqual(3, len(cfg.features))
        self.assertIsInstance(cfg.features[0],
                              vconfig.LibvirtConfigGuestFeaturePAE)
        self.assertIsInstance(cfg.features[1],
                              vconfig.LibvirtConfigGuestFeatureACPI)
        self.assertIsInstance(cfg.features[2],
                              vconfig.LibvirtConfigGuestFeatureAPIC)

    def test_get_guest_config_with_type_xen_pae_pvm(self):
        self.flags(enabled=True, group='vnc')
        self.flags(virt_type='xen',
                   use_usb_tablet=False,
                   group='libvirt')
        self.flags(enabled=False,
                   group='spice')

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)

        cfg = drvr._get_guest_config(instance_ref, [],
                                     image_meta, disk_info)

        self.assertEqual(cfg.os_type, vm_mode.XEN)
        self.assertEqual(1, len(cfg.features))
        self.assertIsInstance(cfg.features[0],
                              vconfig.LibvirtConfigGuestFeaturePAE)
    def test_get_guest_config_with_vnc_and_spice(self):
        self.flags(enabled=True, group='vnc')
        self.flags(virt_type='kvm',
                   use_usb_tablet=True,
                   group='libvirt')
        self.flags(enabled=True,
                   agent_enabled=True,
                   group='spice')

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref, [],
                                     image_meta, disk_info)
        self.assertEqual(len(cfg.devices), 10)
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestInput)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestChannel)
        self.assertIsInstance(cfg.devices[6],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[7],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[8],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[9],
                              vconfig.LibvirtConfigMemoryBalloon)

        self.assertEqual(cfg.devices[4].type, "tablet")
        self.assertEqual(cfg.devices[5].target_name, "com.redhat.spice.0")
        self.assertEqual(cfg.devices[6].type, "vnc")
        self.assertEqual(cfg.devices[7].type, "spice")

    def test_get_guest_config_with_watchdog_action_image_meta(self):
        self.flags(virt_type='kvm', group='libvirt')

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict({
            "disk_format": "raw",
            "properties": {"hw_watchdog_action": "none"}})

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref, [],
                                     image_meta, disk_info)
        self.assertEqual(len(cfg.devices), 9)
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestInput)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[6],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[7],
                              vconfig.LibvirtConfigGuestWatchdog)
        self.assertIsInstance(cfg.devices[8],
                              vconfig.LibvirtConfigMemoryBalloon)

        self.assertEqual("none", cfg.devices[7].action)

    def _test_get_guest_usb_tablet(self, vnc_enabled, spice_enabled, os_type,
                                   agent_enabled=False):
        self.flags(enabled=vnc_enabled, group='vnc')
        self.flags(enabled=spice_enabled,
                   agent_enabled=agent_enabled, group='spice')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        return drvr._get_guest_usb_tablet(os_type)

    def test_get_guest_usb_tablet_wipe(self):
        self.flags(use_usb_tablet=True, group='libvirt')

        tablet = self._test_get_guest_usb_tablet(True, True, vm_mode.HVM)
        self.assertIsNotNone(tablet)

        tablet = self._test_get_guest_usb_tablet(True, False, vm_mode.HVM)
        self.assertIsNotNone(tablet)

        tablet = self._test_get_guest_usb_tablet(False, True, vm_mode.HVM)
        self.assertIsNotNone(tablet)

        tablet = self._test_get_guest_usb_tablet(False, False, vm_mode.HVM)
        self.assertIsNone(tablet)

        tablet = self._test_get_guest_usb_tablet(True, True, "foo")
        self.assertIsNone(tablet)

        tablet = self._test_get_guest_usb_tablet(
            False, True, vm_mode.HVM, True)
        self.assertIsNone(tablet)
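    # NOTE: the watchdog action can come from the flavor (the scoped
    # hw:watchdog_action spec or the legacy unscoped spelling) or from
    # image properties; the overrides test confirms an image-supplied
    # action ("pause") wins over the flavor one ("none").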
    def _test_get_guest_config_with_watchdog_action_flavor(
            self, hw_watchdog_action="hw:watchdog_action"):
        self.flags(virt_type='kvm', group='libvirt')

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.flavor.extra_specs = {hw_watchdog_action: 'none'}
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)

        cfg = drvr._get_guest_config(instance_ref, [],
                                     image_meta, disk_info)

        self.assertEqual(9, len(cfg.devices))
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestInput)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[6],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[7],
                              vconfig.LibvirtConfigGuestWatchdog)
        self.assertIsInstance(cfg.devices[8],
                              vconfig.LibvirtConfigMemoryBalloon)

        self.assertEqual("none", cfg.devices[7].action)

    def test_get_guest_config_with_watchdog_action_through_flavor(self):
        self._test_get_guest_config_with_watchdog_action_flavor()

    # TODO(pkholkin): the test accepting old property name
    # 'hw_watchdog_action' should be removed in the next release
    def test_get_guest_config_with_watchdog_action_through_flavor_no_scope(
            self):
        self._test_get_guest_config_with_watchdog_action_flavor(
            hw_watchdog_action="hw_watchdog_action")

    def test_get_guest_config_with_watchdog_overrides_flavor(self):
        self.flags(virt_type='kvm', group='libvirt')

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.flavor.extra_specs = {'hw_watchdog_action': 'none'}
        image_meta = objects.ImageMeta.from_dict({
            "disk_format": "raw",
            "properties": {"hw_watchdog_action": "pause"}})

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)

        cfg = drvr._get_guest_config(instance_ref, [],
                                     image_meta, disk_info)

        self.assertEqual(9, len(cfg.devices))
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestInput)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[6],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[7],
                              vconfig.LibvirtConfigGuestWatchdog)
        self.assertIsInstance(cfg.devices[8],
                              vconfig.LibvirtConfigMemoryBalloon)

        self.assertEqual("pause", cfg.devices[7].action)
    def test_get_guest_config_with_video_driver_image_meta(self):
        self.flags(virt_type='kvm', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict({
            "disk_format": "raw",
            "properties": {"hw_video_model": "vmvga"}})

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref, [],
                                     image_meta, disk_info)
        self.assertEqual(len(cfg.devices), 8)
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestInput)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[6],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[7],
                              vconfig.LibvirtConfigMemoryBalloon)

        self.assertEqual(cfg.devices[5].type, "vnc")
        self.assertEqual(cfg.devices[6].type, "vmvga")

    def test_get_guest_config_with_qga_through_image_meta(self):
        self.flags(virt_type='kvm', group='libvirt')

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict({
            "disk_format": "raw",
            "properties": {"hw_qemu_guest_agent": "yes"}})

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref, [],
                                     image_meta, disk_info)
        self.assertEqual(len(cfg.devices), 9)
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestInput)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[6],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[7],
                              vconfig.LibvirtConfigGuestChannel)
        self.assertIsInstance(cfg.devices[8],
                              vconfig.LibvirtConfigMemoryBalloon)

        self.assertEqual(cfg.devices[4].type, "tablet")
        self.assertEqual(cfg.devices[5].type, "vnc")
        self.assertEqual(cfg.devices[7].type, "unix")
        self.assertEqual(cfg.devices[7].target_name, "org.qemu.guest_agent.0")

    def test_get_guest_config_with_video_driver_vram(self):
        self.flags(enabled=False, group='vnc')
        self.flags(virt_type='kvm', group='libvirt')
        self.flags(enabled=True,
                   agent_enabled=True,
                   group='spice')

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.flavor.extra_specs = {'hw_video:ram_max_mb': "100"}
        image_meta = objects.ImageMeta.from_dict({
            "disk_format": "raw",
            "properties": {"hw_video_model": "qxl",
                           "hw_video_ram": "64"}})

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref, [],
                                     image_meta, disk_info)
        self.assertEqual(len(cfg.devices), 8)
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestChannel)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[6],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[7],
                              vconfig.LibvirtConfigMemoryBalloon)

        self.assertEqual(cfg.devices[5].type, "spice")
        self.assertEqual(cfg.devices[6].type, "qxl")
        self.assertEqual(cfg.devices[6].vram, 64 * units.Mi / units.Ki)
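    # NOTE: the LXC test below simulates defineXML() blowing up after the
    # rootfs was mounted; teardown_container() must still be called so the
    # host is left clean for any later lvremove operations.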
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.get_info')
    @mock.patch('nova.virt.disk.api.setup_container')
    @mock.patch('oslo_utils.fileutils.ensure_tree')
    @mock.patch.object(fake_libvirt_utils, 'get_instance_path')
    def test_unmount_fs_if_error_during_lxc_create_domain(self,
            mock_get_inst_path, mock_ensure_tree, mock_setup_container,
            mock_get_info, mock_teardown):
        """If we hit an error during a `_create_domain` call to `libvirt+lxc`
        we need to ensure the guest FS is unmounted from the host so that any
        future `lvremove` calls will work.
        """
        self.flags(virt_type='lxc', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        mock_instance = mock.MagicMock()
        mock_get_inst_path.return_value = '/tmp/'
        mock_image_backend = mock.MagicMock()
        drvr.image_backend = mock_image_backend
        mock_image = mock.MagicMock()
        mock_image.path = '/tmp/test.img'
        drvr.image_backend.image.return_value = mock_image
        mock_setup_container.return_value = '/dev/nbd0'
        mock_get_info.side_effect = exception.InstanceNotFound(
                                                   instance_id='foo')
        drvr._conn.defineXML = mock.Mock()
        drvr._conn.defineXML.side_effect = ValueError('somethingbad')
        with contextlib.nested(
                mock.patch.object(drvr, '_is_booted_from_volume',
                                  return_value=False),
                mock.patch.object(drvr, 'plug_vifs'),
                mock.patch.object(drvr, 'firewall_driver'),
                mock.patch.object(drvr, 'cleanup')):
            self.assertRaises(ValueError,
                              drvr._create_domain_and_network,
                              self.context,
                              'xml',
                              mock_instance, None, None)

            mock_teardown.assert_called_with(container_dir='/tmp/rootfs')

    def test_video_driver_flavor_limit_not_set(self):
        self.flags(virt_type='kvm', group='libvirt')
        self.flags(enabled=True,
                   agent_enabled=True,
                   group='spice')

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict({
            "disk_format": "raw",
            "properties": {"hw_video_model": "qxl",
                           "hw_video_ram": "64"}})

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        with mock.patch.object(objects.Instance, 'save'):
            self.assertRaises(exception.RequestedVRamTooHigh,
                              drvr._get_guest_config,
                              instance_ref,
                              [],
                              image_meta,
                              disk_info)

    def test_video_driver_ram_above_flavor_limit(self):
        self.flags(virt_type='kvm', group='libvirt')
        self.flags(enabled=True,
                   agent_enabled=True,
                   group='spice')

        instance_ref = objects.Instance(**self.test_instance)
        instance_type = instance_ref.get_flavor()
        instance_type.extra_specs = {'hw_video:ram_max_mb': "50"}
        image_meta = objects.ImageMeta.from_dict({
            "disk_format": "raw",
            "properties": {"hw_video_model": "qxl",
                           "hw_video_ram": "64"}})

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        with mock.patch.object(objects.Instance, 'save'):
            self.assertRaises(exception.RequestedVRamTooHigh,
                              drvr._get_guest_config,
                              instance_ref,
                              [],
                              image_meta,
                              disk_info)
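    # NOTE: hw_qemu_guest_agent=no must not add the guest-agent channel;
    # the rng tests that follow require both hw_rng_model=virtio in the
    # image and hw_rng:allowed=True in the flavor before a Rng device is
    # emitted.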
    def test_get_guest_config_without_qga_through_image_meta(self):
        self.flags(virt_type='kvm', group='libvirt')

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict({
            "disk_format": "raw",
            "properties": {"hw_qemu_guest_agent": "no"}})

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref, [],
                                     image_meta, disk_info)
        self.assertEqual(len(cfg.devices), 8)
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestInput)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[6],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[7],
                              vconfig.LibvirtConfigMemoryBalloon)

        self.assertEqual(cfg.devices[4].type, "tablet")
        self.assertEqual(cfg.devices[5].type, "vnc")

    def test_get_guest_config_with_rng_device(self):
        self.flags(virt_type='kvm',
                   use_usb_tablet=False,
                   group='libvirt')

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.flavor.extra_specs = {'hw_rng:allowed': 'True'}
        image_meta = objects.ImageMeta.from_dict({
            "disk_format": "raw",
            "properties": {"hw_rng_model": "virtio"}})

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref, [],
                                     image_meta, disk_info)
        self.assertEqual(len(cfg.devices), 8)
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[6],
                              vconfig.LibvirtConfigGuestRng)
        self.assertIsInstance(cfg.devices[7],
                              vconfig.LibvirtConfigMemoryBalloon)

        self.assertEqual(cfg.devices[6].model, 'random')
        self.assertIsNone(cfg.devices[6].backend)
        self.assertIsNone(cfg.devices[6].rate_bytes)
        self.assertIsNone(cfg.devices[6].rate_period)

    def test_get_guest_config_with_rng_not_allowed(self):
        self.flags(virt_type='kvm',
                   use_usb_tablet=False,
                   group='libvirt')

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict({
            "disk_format": "raw",
            "properties": {"hw_rng_model": "virtio"}})

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref, [],
                                     image_meta, disk_info)
        self.assertEqual(len(cfg.devices), 7)
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[6],
                              vconfig.LibvirtConfigMemoryBalloon)
    def test_get_guest_config_with_rng_limits(self):
        self.flags(virt_type='kvm',
                   use_usb_tablet=False,
                   group='libvirt')

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.flavor.extra_specs = {'hw_rng:allowed': 'True',
                                           'hw_rng:rate_bytes': '1024',
                                           'hw_rng:rate_period': '2'}
        image_meta = objects.ImageMeta.from_dict({
            "disk_format": "raw",
            "properties": {"hw_rng_model": "virtio"}})

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref, [],
                                     image_meta, disk_info)
        self.assertEqual(len(cfg.devices), 8)
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[6],
                              vconfig.LibvirtConfigGuestRng)
        self.assertIsInstance(cfg.devices[7],
                              vconfig.LibvirtConfigMemoryBalloon)

        self.assertEqual(cfg.devices[6].model, 'random')
        self.assertIsNone(cfg.devices[6].backend)
        self.assertEqual(cfg.devices[6].rate_bytes, 1024)
        self.assertEqual(cfg.devices[6].rate_period, 2)

    @mock.patch('nova.virt.libvirt.driver.os.path.exists')
    def test_get_guest_config_with_rng_backend(self, mock_path):
        self.flags(virt_type='kvm',
                   use_usb_tablet=False,
                   rng_dev_path='/dev/hw_rng',
                   group='libvirt')
        mock_path.return_value = True

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.flavor.extra_specs = {'hw_rng:allowed': 'True'}
        image_meta = objects.ImageMeta.from_dict({
            "disk_format": "raw",
            "properties": {"hw_rng_model": "virtio"}})

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref, [],
                                     image_meta, disk_info)
        self.assertEqual(len(cfg.devices), 8)
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[6],
                              vconfig.LibvirtConfigGuestRng)
        self.assertIsInstance(cfg.devices[7],
                              vconfig.LibvirtConfigMemoryBalloon)

        self.assertEqual(cfg.devices[6].model, 'random')
        self.assertEqual(cfg.devices[6].backend, '/dev/hw_rng')
        self.assertIsNone(cfg.devices[6].rate_bytes)
        self.assertIsNone(cfg.devices[6].rate_period)

    @mock.patch('nova.virt.libvirt.driver.os.path.exists')
    def test_get_guest_config_with_rng_dev_not_present(self, mock_path):
        self.flags(virt_type='kvm',
                   use_usb_tablet=False,
                   rng_dev_path='/dev/hw_rng',
                   group='libvirt')
        mock_path.return_value = False

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.flavor.extra_specs = {'hw_rng:allowed': 'True'}
        image_meta = objects.ImageMeta.from_dict({
            "disk_format": "raw",
            "properties": {"hw_rng_model": "virtio"}})

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        self.assertRaises(exception.RngDeviceNotExist,
                          drvr._get_guest_config,
                          instance_ref,
                          [],
                          image_meta, disk_info)
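    # NOTE: the cputune shares value scales with the vCPU count (4 vCPUs
    # -> 4096 below); quota:cpu_* flavor extra specs override the defaults,
    # and a non-numeric quota:cpu_shares must raise ValueError.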
        self.assertEqual(4096, cfg.cputune.shares)

    def test_get_guest_config_with_cpu_quota(self):
        self.flags(virt_type='kvm', group='libvirt')

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.flavor.extra_specs = {'quota:cpu_shares': '10000',
                                           'quota:cpu_period': '20000'}
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref, image_meta)
        cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info)

        self.assertEqual(10000, cfg.cputune.shares)
        self.assertEqual(20000, cfg.cputune.period)

    def test_get_guest_config_with_bogus_cpu_quota(self):
        self.flags(virt_type='kvm', group='libvirt')

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.flavor.extra_specs = {'quota:cpu_shares': 'fishfood',
                                           'quota:cpu_period': '20000'}
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref, image_meta)
        self.assertRaises(ValueError,
                          drvr._get_guest_config,
                          instance_ref, [], image_meta, disk_info)

    def _test_get_guest_config_sysinfo_serial(self, expected_serial):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)

        cfg = drvr._get_guest_config_sysinfo(instance_ref)

        self.assertIsInstance(cfg, vconfig.LibvirtConfigGuestSysinfo)
        self.assertEqual(version.vendor_string(),
                         cfg.system_manufacturer)
        self.assertEqual(version.product_string(),
                         cfg.system_product)
        self.assertEqual(version.version_string_with_package(),
                         cfg.system_version)
        self.assertEqual(expected_serial,
                         cfg.system_serial)
        self.assertEqual(instance_ref['uuid'],
                         cfg.system_uuid)
        self.assertEqual("Virtual Machine",
                         cfg.system_family)

    def test_get_guest_config_sysinfo_serial_none(self):
        self.flags(sysinfo_serial="none", group="libvirt")
        self._test_get_guest_config_sysinfo_serial(None)

    @mock.patch.object(libvirt_driver.LibvirtDriver,
                       "_get_host_sysinfo_serial_hardware")
    def test_get_guest_config_sysinfo_serial_hardware(self, mock_uuid):
        self.flags(sysinfo_serial="hardware", group="libvirt")

        theuuid = "56b40135-a973-4eb3-87bb-a2382a3e6dbc"
        mock_uuid.return_value = theuuid

        self._test_get_guest_config_sysinfo_serial(theuuid)

    @contextlib.contextmanager
    def patch_exists(self, result):
        real_exists = os.path.exists

        def fake_exists(filename):
            if filename == "/etc/machine-id":
                return result
            return real_exists(filename)

        with mock.patch.object(os.path, "exists") as mock_exists:
            mock_exists.side_effect = fake_exists
            yield mock_exists

    def test_get_guest_config_sysinfo_serial_os(self):
        self.flags(sysinfo_serial="os", group="libvirt")
        theuuid = "56b40135-a973-4eb3-87bb-a2382a3e6dbc"
        with contextlib.nested(
                mock.patch('__builtin__.open',
                           mock.mock_open(read_data=theuuid)),
                self.patch_exists(True)):
            self._test_get_guest_config_sysinfo_serial(theuuid)

    def test_get_guest_config_sysinfo_serial_os_empty_machine_id(self):
        self.flags(sysinfo_serial="os", group="libvirt")
        with contextlib.nested(
                mock.patch('__builtin__.open',
                           mock.mock_open(read_data="")),
                self.patch_exists(True)):
            self.assertRaises(exception.NovaException,
                              self._test_get_guest_config_sysinfo_serial,
                              None)

    def test_get_guest_config_sysinfo_serial_os_no_machine_id_file(self):
        self.flags(sysinfo_serial="os", group="libvirt")
        with self.patch_exists(False):
            self.assertRaises(exception.NovaException,
                              self._test_get_guest_config_sysinfo_serial,
                              None)

    def test_get_guest_config_sysinfo_serial_auto_hardware(self):
        self.flags(sysinfo_serial="auto", group="libvirt")

        real_exists = os.path.exists
        with contextlib.nested(
                mock.patch.object(os.path, "exists"),
                mock.patch.object(libvirt_driver.LibvirtDriver,
                                  "_get_host_sysinfo_serial_hardware")
        ) as (mock_exists, mock_uuid):
            def fake_exists(filename):
                if filename == "/etc/machine-id":
                    return False
                return real_exists(filename)

            mock_exists.side_effect = fake_exists

            theuuid = "56b40135-a973-4eb3-87bb-a2382a3e6dbc"
            mock_uuid.return_value = theuuid

            self._test_get_guest_config_sysinfo_serial(theuuid)

    def test_get_guest_config_sysinfo_serial_auto_os(self):
        self.flags(sysinfo_serial="auto", group="libvirt")

        real_exists = os.path.exists
        real_open = builtins.open
        with contextlib.nested(
                mock.patch.object(os.path, "exists"),
                mock.patch.object(builtins, "open"),
        ) as (mock_exists, mock_open):
            def fake_exists(filename):
                if filename == "/etc/machine-id":
                    return True
                return real_exists(filename)

            mock_exists.side_effect = fake_exists

            theuuid = "56b40135-a973-4eb3-87bb-a2382a3e6dbc"

            def fake_open(filename, *args, **kwargs):
                if filename == "/etc/machine-id":
                    h = mock.MagicMock()
                    h.read.return_value = theuuid
                    h.__enter__.return_value = h
                    return h
                return real_open(filename, *args, **kwargs)

            mock_open.side_effect = fake_open

            self._test_get_guest_config_sysinfo_serial(theuuid)

    def _create_fake_service_compute(self):
        service_info = {
            'id': 1729,
            'host': 'fake',
            'report_count': 0
        }
        service_ref = objects.Service(**service_info)

        compute_info = {
            'id': 1729,
            'vcpus': 2,
            'memory_mb': 1024,
            'local_gb': 2048,
            'vcpus_used': 0,
            'memory_mb_used': 0,
            'local_gb_used': 0,
            'free_ram_mb': 1024,
            'free_disk_gb': 2048,
            'hypervisor_type': 'xen',
            'hypervisor_version': 1,
            'running_vms': 0,
            'cpu_info': '',
            'current_workload': 0,
            'service_id': service_ref['id'],
            'host': service_ref['host']
        }
        compute_ref = objects.ComputeNode(**compute_info)
        return (service_ref, compute_ref)

    def test_get_guest_config_with_pci_passthrough_kvm(self):
        self.flags(virt_type='kvm', group='libvirt')
        service_ref, compute_ref = self._create_fake_service_compute()

        instance = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        pci_device_info = dict(test_pci_device.fake_db_dev)
        pci_device_info.update(compute_node_id=1,
                               label='fake',
                               status=fields.PciDeviceStatus.ALLOCATED,
                               address='0000:00:00.1',
                               compute_id=compute_ref['id'],
                               instance_uuid=instance.uuid,
                               request_id=None,
                               extra_info={})
        pci_device = objects.PciDevice(**pci_device_info)
        pci_list = objects.PciDeviceList()
        pci_list.objects.append(pci_device)
        instance.pci_devices = pci_list

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance, image_meta)
        cfg = drvr._get_guest_config(instance, [], image_meta, disk_info)

        had_pci = 0
        # care only about the PCI devices
        for dev in cfg.devices:
            if type(dev) == vconfig.LibvirtConfigGuestHostdevPCI:
                had_pci += 1
                self.assertEqual(dev.type, 'pci')
                self.assertEqual(dev.managed, 'yes')
                self.assertEqual(dev.mode, 'subsystem')

                self.assertEqual(dev.domain, "0000")
                self.assertEqual(dev.bus, "00")
                self.assertEqual(dev.slot, "00")
                self.assertEqual(dev.function, "1")
        self.assertEqual(had_pci, 1)

    def test_get_guest_config_with_pci_passthrough_xen(self):
        self.flags(virt_type='xen', group='libvirt')
        service_ref, compute_ref = self._create_fake_service_compute()

        instance = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        pci_device_info = dict(test_pci_device.fake_db_dev)
        pci_device_info.update(compute_node_id=1,
                               label='fake',
                               status=fields.PciDeviceStatus.ALLOCATED,
                               address='0000:00:00.2',
                               compute_id=compute_ref['id'],
                               instance_uuid=instance.uuid,
                               request_id=None,
                               extra_info={})
        pci_device = objects.PciDevice(**pci_device_info)
        pci_list = objects.PciDeviceList()
        pci_list.objects.append(pci_device)
        instance.pci_devices = pci_list

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance, image_meta)
        cfg = drvr._get_guest_config(instance, [], image_meta, disk_info)

        had_pci = 0
        # care only about the PCI devices
        for dev in cfg.devices:
            if type(dev) == vconfig.LibvirtConfigGuestHostdevPCI:
                had_pci += 1
                self.assertEqual(dev.type, 'pci')
                self.assertEqual(dev.managed, 'no')
                self.assertEqual(dev.mode, 'subsystem')

                self.assertEqual(dev.domain, "0000")
                self.assertEqual(dev.bus, "00")
                self.assertEqual(dev.slot, "00")
                self.assertEqual(dev.function, "2")
        self.assertEqual(had_pci, 1)

    def test_get_guest_config_os_command_line_through_image_meta(self):
        self.flags(virt_type="kvm",
                   cpu_mode=None,
                   group='libvirt')

        self.test_instance['kernel_id'] = "fake_kernel_id"

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict({
            "disk_format": "raw",
            "properties": {"os_command_line": "fake_os_command_line"}})

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref, image_meta)

        cfg = drvr._get_guest_config(instance_ref,
                                     _fake_network_info(self.stubs, 1),
                                     image_meta, disk_info)
        self.assertEqual(cfg.os_cmdline, "fake_os_command_line")

    def test_get_guest_config_os_command_line_without_kernel_id(self):
        self.flags(virt_type="kvm",
                   cpu_mode=None,
                   group='libvirt')

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict({
            "disk_format": "raw",
            "properties": {"os_command_line": "fake_os_command_line"}})

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref, image_meta)

        cfg = drvr._get_guest_config(instance_ref,
                                     _fake_network_info(self.stubs, 1),
                                     image_meta, disk_info)
        self.assertIsNone(cfg.os_cmdline)

    def test_get_guest_config_os_command_empty(self):
        self.flags(virt_type="kvm",
                   cpu_mode=None,
                   group='libvirt')

        self.test_instance['kernel_id'] = "fake_kernel_id"

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict({
            "disk_format": "raw",
            "properties": {"os_command_line": ""}})

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref, image_meta)

        # the instance has 'root=/dev/vda console=tty0 console=ttyS0' set by
        # default, so testing an empty string and None value in the
        # os_command_line image property must pass
        cfg = drvr._get_guest_config(instance_ref,
                                     _fake_network_info(self.stubs, 1),
                                     image_meta, disk_info)
        self.assertNotEqual(cfg.os_cmdline, "")

    def test_get_guest_config_armv7(self):
        def get_host_capabilities_stub(self):
            cpu = vconfig.LibvirtConfigGuestCPU()
            cpu.arch = arch.ARMV7

            caps = vconfig.LibvirtConfigCaps()
            caps.host = vconfig.LibvirtConfigCapsHost()
            caps.host.cpu = cpu
            return caps

        self.flags(virt_type="kvm",
                   group="libvirt")

        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref, image_meta)

        self.stubs.Set(host.Host, "get_capabilities",
                       get_host_capabilities_stub)

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        cfg = drvr._get_guest_config(instance_ref,
                                     _fake_network_info(self.stubs, 1),
                                     image_meta, disk_info)
        self.assertEqual(cfg.os_mach_type, "vexpress-a15")

    def test_get_guest_config_aarch64(self):
        def get_host_capabilities_stub(self):
            cpu = vconfig.LibvirtConfigGuestCPU()
            cpu.arch = arch.AARCH64

            caps = vconfig.LibvirtConfigCaps()
            caps.host = vconfig.LibvirtConfigCapsHost()
            caps.host.cpu = cpu
            return caps

        self.flags(virt_type="kvm",
                   group="libvirt")

        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref, image_meta)

        self.stubs.Set(host.Host, "get_capabilities",
                       get_host_capabilities_stub)

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        cfg = drvr._get_guest_config(instance_ref,
                                     _fake_network_info(self.stubs, 1),
                                     image_meta, disk_info)
        self.assertEqual(cfg.os_mach_type, "virt")

    def test_get_guest_config_machine_type_s390(self):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = vconfig.LibvirtConfigGuestCPU()

        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        host_cpu_archs = (arch.S390, arch.S390X)
        for host_cpu_arch in host_cpu_archs:
            caps.host.cpu.arch = host_cpu_arch
            os_mach_type = drvr._get_machine_type(image_meta, caps)
            self.assertEqual('s390-ccw-virtio', os_mach_type)

    def test_get_guest_config_machine_type_through_image_meta(self):
        self.flags(virt_type="kvm",
                   group='libvirt')

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict({
            "disk_format": "raw",
            "properties": {"hw_machine_type": "fake_machine_type"}})

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref, image_meta)

        cfg = drvr._get_guest_config(instance_ref,
                                     _fake_network_info(self.stubs, 1),
                                     image_meta, disk_info)
        self.assertEqual(cfg.os_mach_type, "fake_machine_type")

    def test_get_guest_config_machine_type_from_config(self):
        self.flags(virt_type='kvm', group='libvirt')
        self.flags(hw_machine_type=['x86_64=fake_machine_type'],
                   group='libvirt')

        def fake_getCapabilities():
            return """
            <capabilities>
                <host>
                    <uuid>cef19ce0-0ca2-11df-855d-b19fbce37686</uuid>
                    <cpu>
                        <arch>x86_64</arch>
                        <model>Penryn</model>
                        <vendor>Intel</vendor>
                        <topology sockets='1' cores='2' threads='1'/>
                        <feature name='xtpr'/>
                    </cpu>
                </host>
            </capabilities>
            """

        def fake_baselineCPU(cpu, flag):
            return """<cpu mode='custom' match='exact'>
                        <model fallback='allow'>Penryn</model>
                        <vendor>Intel</vendor>
                        <feature policy='require' name='xtpr'/>
                      </cpu>
                   """

        # Make sure the host arch is mocked as x86_64
        self.create_fake_libvirt_mock(getCapabilities=fake_getCapabilities,
                                      baselineCPU=fake_baselineCPU,
                                      getVersion=lambda: 1005001)

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref, image_meta)

        cfg = drvr._get_guest_config(instance_ref,
                                     _fake_network_info(self.stubs, 1),
                                     image_meta, disk_info)
        self.assertEqual(cfg.os_mach_type, "fake_machine_type")

    def _test_get_guest_config_ppc64(self, device_index):
        """Test for nova.virt.libvirt.driver.LibvirtDriver._get_guest_config.
        """
        self.flags(virt_type='kvm', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref, image_meta)
        expected = (arch.PPC64, arch.PPC)
        for guestarch in expected:
            with mock.patch.object(libvirt_driver.libvirt_utils,
                                   'get_arch',
                                   return_value=guestarch):
                cfg = drvr._get_guest_config(instance_ref, [],
                                             image_meta, disk_info)
                self.assertIsInstance(cfg.devices[device_index],
                                      vconfig.LibvirtConfigGuestVideo)
                self.assertEqual(cfg.devices[device_index].type, 'vga')

    def test_get_guest_config_ppc64_through_image_meta_vnc_enabled(self):
        self.flags(enabled=True, group='vnc')
        self._test_get_guest_config_ppc64(6)

    def test_get_guest_config_ppc64_through_image_meta_spice_enabled(self):
        self.flags(enabled=True,
                   agent_enabled=True,
                   group='spice')
        self._test_get_guest_config_ppc64(8)

    def _test_get_guest_config_bootmenu(self, image_meta, extra_specs):
        self.flags(virt_type='kvm', group='libvirt')
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.flavor.extra_specs = extra_specs
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref, image_meta)
        conf = conn._get_guest_config(instance_ref, [], image_meta, disk_info)
        self.assertTrue(conf.os_bootmenu)

    def test_get_guest_config_bootmenu_via_image_meta(self):
        image_meta = objects.ImageMeta.from_dict(
            {"disk_format": "raw",
             "properties": {"hw_boot_menu": "True"}})
        self._test_get_guest_config_bootmenu(image_meta, {})

    def test_get_guest_config_bootmenu_via_extra_specs(self):
        image_meta = objects.ImageMeta.from_dict(
            self.test_image_meta)
        self._test_get_guest_config_bootmenu(image_meta,
                                             {'hw:boot_menu': 'True'})

    def test_get_guest_cpu_config_none(self):
        self.flags(cpu_mode="none", group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref, image_meta)
        conf = drvr._get_guest_config(instance_ref,
                                      _fake_network_info(self.stubs, 1),
                                      image_meta, disk_info)
        self.assertIsInstance(conf.cpu,
                              vconfig.LibvirtConfigGuestCPU)
        self.assertIsNone(conf.cpu.mode)
        self.assertIsNone(conf.cpu.model)
        self.assertEqual(conf.cpu.sockets, 1)
        self.assertEqual(conf.cpu.cores, 1)
        self.assertEqual(conf.cpu.threads, 1)

    def test_get_guest_cpu_config_default_kvm(self):
        self.flags(virt_type="kvm",
                   cpu_mode=None,
                   group='libvirt')

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref, image_meta)
        conf = drvr._get_guest_config(instance_ref,
                                      _fake_network_info(self.stubs, 1),
                                      image_meta, disk_info)
        self.assertIsInstance(conf.cpu,
                              vconfig.LibvirtConfigGuestCPU)
        self.assertEqual(conf.cpu.mode, "host-model")
        self.assertIsNone(conf.cpu.model)
        self.assertEqual(conf.cpu.sockets, 1)
        self.assertEqual(conf.cpu.cores, 1)
        self.assertEqual(conf.cpu.threads, 1)

    def test_get_guest_cpu_config_default_uml(self):
        self.flags(virt_type="uml",
                   cpu_mode=None,
                   group='libvirt')

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref, image_meta)
        conf = drvr._get_guest_config(instance_ref,
                                      _fake_network_info(self.stubs, 1),
                                      image_meta, disk_info)
        self.assertIsNone(conf.cpu)

    def test_get_guest_cpu_config_default_lxc(self):
        self.flags(virt_type="lxc",
                   cpu_mode=None,
                   group='libvirt')

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref, image_meta)
        conf = drvr._get_guest_config(instance_ref,
                                      _fake_network_info(self.stubs, 1),
                                      image_meta, disk_info)
        self.assertIsNone(conf.cpu)

    def test_get_guest_cpu_config_host_passthrough(self):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        self.flags(cpu_mode="host-passthrough", group='libvirt')
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref, image_meta)
        conf = drvr._get_guest_config(instance_ref,
                                      _fake_network_info(self.stubs, 1),
                                      image_meta, disk_info)
        self.assertIsInstance(conf.cpu,
                              vconfig.LibvirtConfigGuestCPU)
        self.assertEqual(conf.cpu.mode, "host-passthrough")
        self.assertIsNone(conf.cpu.model)
        self.assertEqual(conf.cpu.sockets, 1)
        self.assertEqual(conf.cpu.cores, 1)
        self.assertEqual(conf.cpu.threads, 1)

    def test_get_guest_cpu_config_host_model(self):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        self.flags(cpu_mode="host-model", group='libvirt')
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref, image_meta)
        conf = drvr._get_guest_config(instance_ref,
                                      _fake_network_info(self.stubs, 1),
                                      image_meta, disk_info)
        self.assertIsInstance(conf.cpu,
                              vconfig.LibvirtConfigGuestCPU)
        self.assertEqual(conf.cpu.mode, "host-model")
        self.assertIsNone(conf.cpu.model)
        self.assertEqual(conf.cpu.sockets, 1)
        self.assertEqual(conf.cpu.cores, 1)
        self.assertEqual(conf.cpu.threads, 1)

    def test_get_guest_cpu_config_custom(self):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        self.flags(cpu_mode="custom",
                   cpu_model="Penryn",
                   group='libvirt')
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref, image_meta)
        conf = drvr._get_guest_config(instance_ref,
                                      _fake_network_info(self.stubs, 1),
                                      image_meta, disk_info)
        self.assertIsInstance(conf.cpu,
                              vconfig.LibvirtConfigGuestCPU)
        self.assertEqual(conf.cpu.mode, "custom")
        self.assertEqual(conf.cpu.model, "Penryn")
        self.assertEqual(conf.cpu.sockets, 1)
        self.assertEqual(conf.cpu.cores, 1)
        self.assertEqual(conf.cpu.threads, 1)

    @mock.patch.object(host.Host, "has_min_version", return_value=True)
    def test_get_guest_cpu_config_numa_topology(self,
                                                mock_has_min_version):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.flavor.vcpus = 2
        instance_ref.numa_topology = objects.InstanceNUMATopology(cells=[
            objects.InstanceNUMACell(
                id=0,
                cpuset=set([0, 1]),
                memory=1024,
                cpu_pinning={})])
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref, image_meta)

        self.assertIsNone(instance_ref.numa_topology.cells[0].cpu_topology)

        drvr._get_guest_config(instance_ref,
                               _fake_network_info(self.stubs, 1),
                               image_meta, disk_info)

        topo = instance_ref.numa_topology.cells[0].cpu_topology
        self.assertIsNotNone(topo)
        self.assertEqual(topo.cores * topo.sockets * topo.threads,
                         instance_ref.flavor.vcpus)

    def test_get_guest_cpu_topology(self):
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.flavor.vcpus = 8
        instance_ref.flavor.extra_specs = {'hw:cpu_max_sockets': '4'}

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref, image_meta)
        conf = drvr._get_guest_config(instance_ref,
                                      _fake_network_info(self.stubs, 1),
                                      image_meta, disk_info)
        self.assertIsInstance(conf.cpu,
                              vconfig.LibvirtConfigGuestCPU)
        self.assertEqual(conf.cpu.mode, "host-model")
        self.assertEqual(conf.cpu.sockets, 4)
        self.assertEqual(conf.cpu.cores, 2)
        self.assertEqual(conf.cpu.threads, 1)

    def test_get_guest_memory_balloon_config_by_default(self):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref, image_meta)
        cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info)
        for device in cfg.devices:
            if device.root_name == 'memballoon':
                self.assertIsInstance(device,
                                      vconfig.LibvirtConfigMemoryBalloon)
                self.assertEqual('virtio', device.model)
                self.assertEqual(10, device.period)

    def test_get_guest_memory_balloon_config_disable(self):
        self.flags(mem_stats_period_seconds=0, group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref, image_meta)
        cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info)
        no_exist = True
        for device in cfg.devices:
            if device.root_name == 'memballoon':
                no_exist = False
                break
        self.assertTrue(no_exist)

    def test_get_guest_memory_balloon_config_period_value(self):
        self.flags(mem_stats_period_seconds=21, group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref, image_meta)
        cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info)
        for device in cfg.devices:
            if device.root_name == 'memballoon':
                self.assertIsInstance(device,
                                      vconfig.LibvirtConfigMemoryBalloon)
                self.assertEqual('virtio', device.model)
                self.assertEqual(21, device.period)

    def test_get_guest_memory_balloon_config_qemu(self):
        self.flags(virt_type='qemu', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref, image_meta)
        cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info)
        for device in cfg.devices:
            if device.root_name == 'memballoon':
                self.assertIsInstance(device,
                                      vconfig.LibvirtConfigMemoryBalloon)
                self.assertEqual('virtio', device.model)
                self.assertEqual(10, device.period)

    def test_get_guest_memory_balloon_config_xen(self):
        self.flags(virt_type='xen', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref, image_meta)
        cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info)
        for device in cfg.devices:
            if device.root_name == 'memballoon':
                self.assertIsInstance(device,
                                      vconfig.LibvirtConfigMemoryBalloon)
                self.assertEqual('xen', device.model)
                self.assertEqual(10, device.period)

    def test_get_guest_memory_balloon_config_lxc(self):
        self.flags(virt_type='lxc', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref, image_meta)
        cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info)
        no_exist = True
        for device in cfg.devices:
            if device.root_name == 'memballoon':
                no_exist = False
                break
        self.assertTrue(no_exist)

    def test_xml_and_uri_no_ramdisk_no_kernel(self):
        instance_data = dict(self.test_instance)
        self._check_xml_and_uri(instance_data,
                                expect_kernel=False, expect_ramdisk=False)

    def test_xml_and_uri_no_ramdisk_no_kernel_xen_hvm(self):
        instance_data = dict(self.test_instance)
        instance_data.update({'vm_mode': vm_mode.HVM})
        self._check_xml_and_uri(instance_data, expect_kernel=False,
                                expect_ramdisk=False, expect_xen_hvm=True)

    def test_xml_and_uri_no_ramdisk_no_kernel_xen_pv(self):
        instance_data = dict(self.test_instance)
        instance_data.update({'vm_mode': vm_mode.XEN})
        self._check_xml_and_uri(instance_data, expect_kernel=False,
                                expect_ramdisk=False, expect_xen_hvm=False,
                                xen_only=True)

    def test_xml_and_uri_no_ramdisk(self):
        instance_data = dict(self.test_instance)
        instance_data['kernel_id'] = 'aki-deadbeef'
        self._check_xml_and_uri(instance_data,
                                expect_kernel=True, expect_ramdisk=False)

    def test_xml_and_uri_no_kernel(self):
        instance_data = dict(self.test_instance)
        instance_data['ramdisk_id'] = 'ari-deadbeef'
        self._check_xml_and_uri(instance_data,
                                expect_kernel=False, expect_ramdisk=False)

    def test_xml_and_uri(self):
        instance_data = dict(self.test_instance)
        instance_data['ramdisk_id'] = 'ari-deadbeef'
        instance_data['kernel_id'] = 'aki-deadbeef'
        self._check_xml_and_uri(instance_data,
                                expect_kernel=True, expect_ramdisk=True)

    def test_xml_and_uri_rescue(self):
        instance_data = dict(self.test_instance)
        instance_data['ramdisk_id'] = 'ari-deadbeef'
        instance_data['kernel_id'] = 'aki-deadbeef'
        self._check_xml_and_uri(instance_data, expect_kernel=True,
                                expect_ramdisk=True, rescue=instance_data)

    def test_xml_and_uri_rescue_no_kernel_no_ramdisk(self):
        instance_data = dict(self.test_instance)
        self._check_xml_and_uri(instance_data, expect_kernel=False,
                                expect_ramdisk=False, rescue=instance_data)

    def test_xml_and_uri_rescue_no_kernel(self):
        instance_data = dict(self.test_instance)
        instance_data['ramdisk_id'] = 'aki-deadbeef'
        self._check_xml_and_uri(instance_data, expect_kernel=False,
                                expect_ramdisk=True, rescue=instance_data)

    def test_xml_and_uri_rescue_no_ramdisk(self):
        instance_data = dict(self.test_instance)
        instance_data['kernel_id'] = 'aki-deadbeef'
        self._check_xml_and_uri(instance_data, expect_kernel=True,
                                expect_ramdisk=False, rescue=instance_data)

    def test_xml_uuid(self):
        self._check_xml_and_uuid(self.test_image_meta)

    def test_lxc_container_and_uri(self):
        instance_data = dict(self.test_instance)
        self._check_xml_and_container(instance_data)

    def test_xml_disk_prefix(self):
        instance_data = dict(self.test_instance)
        self._check_xml_and_disk_prefix(instance_data, None)

    def test_xml_user_specified_disk_prefix(self):
        instance_data = dict(self.test_instance)
        self._check_xml_and_disk_prefix(instance_data, 'sd')

    def test_xml_disk_driver(self):
        instance_data = dict(self.test_instance)
        self._check_xml_and_disk_driver(instance_data)

    def test_xml_disk_bus_virtio(self):
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        self._check_xml_and_disk_bus(image_meta,
                                     None,
                                     (("disk", "virtio", "vda"),))

    def test_xml_disk_bus_ide(self):
        # It's necessary to check whether the architecture is Power,
        # because Power has no IDE support, so libvirt translates all
        # IDE calls to SCSI
        expected = {arch.PPC: ("cdrom", "scsi", "sda"),
                    arch.PPC64: ("cdrom", "scsi", "sda")}

        expec_val = expected.get(blockinfo.libvirt_utils.get_arch({}),
                                 ("cdrom", "ide", "hda"))
        image_meta = objects.ImageMeta.from_dict({
            "disk_format": "iso"})
        self._check_xml_and_disk_bus(image_meta,
                                     None,
                                     (expec_val,))

    def test_xml_disk_bus_ide_and_virtio(self):
        # It's necessary to check whether the architecture is Power,
        # because Power has no IDE support, so libvirt translates all
        # IDE calls to SCSI
        expected = {arch.PPC: ("cdrom", "scsi", "sda"),
                    arch.PPC64: ("cdrom", "scsi", "sda")}

        swap = {'device_name': '/dev/vdc',
                'swap_size': 1}
        ephemerals = [{'device_type': 'disk',
                       'disk_bus': 'virtio',
                       'device_name': '/dev/vdb',
                       'size': 1}]
        block_device_info = {
                'swap': swap,
                'ephemerals': ephemerals}
        expec_val = expected.get(blockinfo.libvirt_utils.get_arch({}),
                                 ("cdrom", "ide", "hda"))
        image_meta = objects.ImageMeta.from_dict({
            "disk_format": "iso"})
        self._check_xml_and_disk_bus(image_meta,
                                     block_device_info,
                                     (expec_val,
                                      ("disk", "virtio", "vdb"),
                                      ("disk", "virtio", "vdc")))

    @mock.patch.object(host.Host, "list_instance_domains")
    def test_list_instances(self, mock_list):
        vm1 = FakeVirtDomain(id=3, name="instance00000001")
        vm2 = FakeVirtDomain(id=17, name="instance00000002")
        vm3 = FakeVirtDomain(name="instance00000003")
        vm4 = FakeVirtDomain(name="instance00000004")

        mock_list.return_value = [vm1, vm2, vm3, vm4]

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        names = drvr.list_instances()
        self.assertEqual(names[0], vm1.name())
        self.assertEqual(names[1], vm2.name())
        self.assertEqual(names[2], vm3.name())
        self.assertEqual(names[3], vm4.name())
        mock_list.assert_called_with(only_running=False)

    @mock.patch.object(host.Host, "list_instance_domains")
    def test_list_instance_uuids(self, mock_list):
        vm1 = FakeVirtDomain(id=3, name="instance00000001")
        vm2 = FakeVirtDomain(id=17, name="instance00000002")
        vm3 = FakeVirtDomain(name="instance00000003")
        vm4 = FakeVirtDomain(name="instance00000004")

        mock_list.return_value = [vm1, vm2, vm3, vm4]
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        uuids = drvr.list_instance_uuids()
        self.assertEqual(len(uuids), 4)
        self.assertEqual(uuids[0], vm1.UUIDString())
        self.assertEqual(uuids[1], vm2.UUIDString())
        self.assertEqual(uuids[2], vm3.UUIDString())
        self.assertEqual(uuids[3], vm4.UUIDString())
        mock_list.assert_called_with(only_running=False)

    @mock.patch.object(host.Host, "list_instance_domains")
    def test_get_all_block_devices(self, mock_list):
        xml = [
            """
                <domain type='kvm'>
                    <devices>
                        <disk type='file'>
                            <source file='filename'/>
                        </disk>
                        <disk type='block'>
                            <source dev='/path/to/dev/1'/>
                        </disk>
                    </devices>
                </domain>
            """,
            """
                <domain type='kvm'>
                    <devices>
                        <disk type='file'>
                            <source file='filename'/>
                        </disk>
                    </devices>
                </domain>
            """,
            """
                <domain type='kvm'>
                    <devices>
                        <disk type='file'>
                            <source file='filename'/>
                        </disk>
                        <disk type='block'>
                            <source dev='/path/to/dev/3'/>
                        </disk>
                    </devices>
                </domain>
            """,
        ]

        mock_list.return_value = [
            FakeVirtDomain(xml[0], id=3, name="instance00000001"),
            FakeVirtDomain(xml[1], id=1, name="instance00000002"),
            FakeVirtDomain(xml[2], id=5, name="instance00000003")]

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        devices = drvr._get_all_block_devices()
        self.assertEqual(devices, ['/path/to/dev/1', '/path/to/dev/3'])
        mock_list.assert_called_with()

    @mock.patch('nova.virt.libvirt.host.Host.get_online_cpus')
    def test_get_host_vcpus(self, get_online_cpus):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.flags(vcpu_pin_set="4-5")
        get_online_cpus.return_value = set([4, 5, 6])
        expected_vcpus = 2
        vcpus = drvr._get_vcpu_total()
        self.assertEqual(expected_vcpus, vcpus)

    @mock.patch('nova.virt.libvirt.host.Host.get_online_cpus')
    def test_get_host_vcpus_out_of_range(self, get_online_cpus):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.flags(vcpu_pin_set="4-6")
        get_online_cpus.return_value = set([4, 5])
        self.assertRaises(exception.Invalid, drvr._get_vcpu_total)

    @mock.patch('nova.virt.libvirt.host.Host.get_online_cpus')
    def test_get_host_vcpus_libvirt_error(self, get_online_cpus):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        not_supported_exc = fakelibvirt.make_libvirtError(
            fakelibvirt.libvirtError,
            'this function is not supported by the connection driver:'
            ' virNodeNumOfDevices',
            error_code=fakelibvirt.VIR_ERR_NO_SUPPORT)
        self.flags(vcpu_pin_set="4-6")
        get_online_cpus.side_effect = not_supported_exc
        self.assertRaises(exception.Invalid, drvr._get_vcpu_total)

    @mock.patch('nova.virt.libvirt.host.Host.get_online_cpus')
    def test_get_host_vcpus_libvirt_error_success(self, get_online_cpus):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        not_supported_exc = fakelibvirt.make_libvirtError(
            fakelibvirt.libvirtError,
            'this function is not supported by the connection driver:'
            ' virNodeNumOfDevices',
            error_code=fakelibvirt.VIR_ERR_NO_SUPPORT)
        self.flags(vcpu_pin_set="1")
        get_online_cpus.side_effect = not_supported_exc
        expected_vcpus = 1
        vcpus = drvr._get_vcpu_total()
        self.assertEqual(expected_vcpus, vcpus)

    @mock.patch('nova.virt.libvirt.host.Host.get_cpu_count')
    def test_get_host_vcpus_after_hotplug(self, get_cpu_count):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        get_cpu_count.return_value = 2
        expected_vcpus = 2
        vcpus = drvr._get_vcpu_total()
        self.assertEqual(expected_vcpus, vcpus)
        get_cpu_count.return_value = 3
        expected_vcpus = 3
        vcpus = drvr._get_vcpu_total()
        self.assertEqual(expected_vcpus, vcpus)

    @mock.patch.object(host.Host, "has_min_version", return_value=True)
    def test_quiesce(self, mock_has_min_version):
        self.create_fake_libvirt_mock(lookupByName=self.fake_lookup)
        with mock.patch.object(FakeVirtDomain, "fsFreeze") as mock_fsfreeze:
            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
            instance = objects.Instance(**self.test_instance)
            img_meta = {"properties": {"hw_qemu_guest_agent": "yes",
                                       "os_require_quiesce": "yes"}}
            self.assertIsNone(drvr.quiesce(self.context, instance, img_meta))
            mock_fsfreeze.assert_called_once_with()

    def test_quiesce_not_supported(self):
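        # Without the minimum libvirt version / guest agent support in
        # place, quiesce is expected to raise rather than silently skip
        # the filesystem freeze.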
        self.create_fake_libvirt_mock()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
        instance = objects.Instance(**self.test_instance)
        self.assertRaises(exception.InstanceQuiesceNotSupported,
                          drvr.quiesce, self.context, instance, None)

    @mock.patch.object(host.Host, "has_min_version", return_value=True)
    def test_unquiesce(self, mock_has_min_version):
        self.create_fake_libvirt_mock(getLibVersion=lambda: 1002005,
                                      lookupByName=self.fake_lookup)
        with mock.patch.object(FakeVirtDomain, "fsThaw") as mock_fsthaw:
            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
            instance = objects.Instance(**self.test_instance)
            img_meta = {"properties": {"hw_qemu_guest_agent": "yes",
                                       "os_require_quiesce": "yes"}}
            self.assertIsNone(drvr.unquiesce(self.context, instance,
                                             img_meta))
            mock_fsthaw.assert_called_once_with()

    def test_create_snapshot_metadata(self):
        base = objects.ImageMeta.from_dict(
            {'disk_format': 'raw'})
        instance_data = {'kernel_id': 'kernel',
                         'project_id': 'prj_id',
                         'ramdisk_id': 'ram_id',
                         'os_type': None}
        instance = objects.Instance(**instance_data)
        img_fmt = 'raw'
        snp_name = 'snapshot_name'
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        ret = drvr._create_snapshot_metadata(base, instance, img_fmt,
                                             snp_name)
        expected = {'is_public': False,
                    'status': 'active',
                    'name': snp_name,
                    'properties': {
                        'kernel_id': instance['kernel_id'],
                        'image_location': 'snapshot',
                        'image_state': 'available',
                        'owner_id': instance['project_id'],
                        'ramdisk_id': instance['ramdisk_id'],
                    },
                    'disk_format': img_fmt,
                    'container_format': 'bare',
                    }
        self.assertEqual(ret, expected)

        # simulate an instance with os_type field defined
        # disk format equals to ami
        # container format not equals to bare
        instance['os_type'] = 'linux'
        base = objects.ImageMeta.from_dict(
            {'disk_format': 'ami',
             'container_format': 'test_container'})
        expected['properties']['os_type'] = instance['os_type']
        expected['disk_format'] = base.disk_format
        expected['container_format'] = base.container_format
        ret = drvr._create_snapshot_metadata(base, instance, img_fmt,
                                             snp_name)
        self.assertEqual(ret, expected)

    def test_get_volume_driver(self):
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        connection_info = {'driver_volume_type': 'fake',
                           'data': {'device_path': '/fake',
                                    'access_mode': 'rw'}}
        driver = conn._get_volume_driver(connection_info)
        result = isinstance(driver, volume_drivers.LibvirtFakeVolumeDriver)
        self.assertTrue(result)

    def test_get_volume_driver_unknown(self):
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        connection_info = {'driver_volume_type': 'unknown',
                           'data': {'device_path': '/fake',
                                    'access_mode': 'rw'}}
        self.assertRaises(
            exception.VolumeDriverNotFound,
            conn._get_volume_driver,
            connection_info
        )

    @mock.patch.object(volume_drivers.LibvirtFakeVolumeDriver,
                       'connect_volume')
    @mock.patch.object(volume_drivers.LibvirtFakeVolumeDriver, 'get_config')
    def test_get_volume_config(self, get_config, connect_volume):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        connection_info = {'driver_volume_type': 'fake',
                           'data': {'device_path': '/fake',
                                    'access_mode': 'rw'}}
        bdm = {'device_name': 'vdb',
               'disk_bus': 'fake-bus', 'device_type': 'fake-type'}
        disk_info = {'bus': bdm['disk_bus'], 'type': bdm['device_type'],
                     'dev': 'vdb'}
        mock_config = mock.MagicMock()

        get_config.return_value = mock_config
        config = drvr._get_volume_config(connection_info, disk_info)
        get_config.assert_called_once_with(connection_info, disk_info)
        self.assertEqual(mock_config, config)

    def test_attach_invalid_volume_type(self):
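        # An unknown driver_volume_type should fail fast with
        # VolumeDriverNotFound, before any libvirt attach call is made.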
        self.create_fake_libvirt_mock()
        libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
        instance = objects.Instance(**self.test_instance)
        self.mox.ReplayAll()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.assertRaises(exception.VolumeDriverNotFound,
                          drvr.attach_volume,
                          None, {"driver_volume_type": "badtype"},
                          instance,
                          "/dev/sda")

    def test_attach_blockio_invalid_hypervisor(self):
        self.flags(virt_type='fake_type', group='libvirt')
        self.create_fake_libvirt_mock()
        libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
        instance = objects.Instance(**self.test_instance)
        self.mox.ReplayAll()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.assertRaises(exception.InvalidHypervisorType,
                          drvr.attach_volume,
                          None,
                          {"driver_volume_type": "fake",
                           "data": {"logical_block_size": "4096",
                                    "physical_block_size": "4096"}
                           },
                          instance,
                          "/dev/sda")

    @mock.patch.object(fakelibvirt.virConnect, "getLibVersion")
    def test_attach_blockio_invalid_version(self, mock_version):
        mock_version.return_value = (0 * 1000 * 1000) + (9 * 1000) + 8
        self.flags(virt_type='qemu', group='libvirt')
        self.create_fake_libvirt_mock()
        libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
        instance = objects.Instance(**self.test_instance)
        self.mox.ReplayAll()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.assertRaises(exception.Invalid,
                          drvr.attach_volume,
                          None,
                          {"driver_volume_type": "fake",
                           "data": {"logical_block_size": "4096",
                                    "physical_block_size": "4096"}
                           },
                          instance,
                          "/dev/sda")

    @mock.patch('nova.utils.get_image_from_system_metadata')
    @mock.patch('nova.virt.libvirt.blockinfo.get_info_from_bdm')
    @mock.patch('nova.virt.libvirt.host.Host.get_domain')
    def test_attach_volume_with_vir_domain_affect_live_flag(self,
            mock_get_domain, mock_get_info, get_image):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        image_meta = {}
        get_image.return_value = image_meta
        mock_dom = mock.MagicMock()
        mock_get_domain.return_value = mock_dom

        connection_info = {"driver_volume_type": "fake",
                           "data": {"device_path": "/fake",
                                    "access_mode": "rw"}}
        bdm = {'device_name': 'vdb',
               'disk_bus': 'fake-bus', 'device_type': 'fake-type'}
        disk_info = {'bus': bdm['disk_bus'], 'type': bdm['device_type'],
                     'dev': 'vdb'}
        mock_get_info.return_value = disk_info
        mock_conf = mock.MagicMock()
        flags = (fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG |
                 fakelibvirt.VIR_DOMAIN_AFFECT_LIVE)

        with contextlib.nested(
            mock.patch.object(drvr, '_connect_volume'),
            mock.patch.object(drvr, '_get_volume_config',
                              return_value=mock_conf),
            mock.patch.object(drvr, '_set_cache_mode')
        ) as (mock_connect_volume, mock_get_volume_config,
              mock_set_cache_mode):
            for state in (power_state.RUNNING, power_state.PAUSED):
                mock_dom.info.return_value = [state, 512, 512, 2, 1234, 5678]

                drvr.attach_volume(self.context, connection_info, instance,
                                   "/dev/vdb", disk_bus=bdm['disk_bus'],
                                   device_type=bdm['device_type'])

                mock_get_domain.assert_called_with(instance)
                mock_get_info.assert_called_with(
                    instance, CONF.libvirt.virt_type,
                    test.MatchType(objects.ImageMeta), bdm)
                mock_connect_volume.assert_called_with(
                    connection_info, disk_info)
                mock_get_volume_config.assert_called_with(
                    connection_info, disk_info)
                mock_set_cache_mode.assert_called_with(mock_conf)
                mock_dom.attachDeviceFlags.assert_called_with(
                    mock_conf.to_xml(), flags=flags)

    @mock.patch('nova.virt.libvirt.host.Host.get_domain')
    def test_detach_volume_with_vir_domain_affect_live_flag(self,
            mock_get_domain):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        mock_xml = """<domain>
  <devices>
    <disk type='file'>
      <source file='/path/to/fake-volume'/>
      <target dev='vdc' bus='virtio'/>
    </disk>
  </devices>
</domain>"""
        mock_dom = mock.MagicMock()
        mock_dom.XMLDesc.return_value = mock_xml

        connection_info = {"driver_volume_type": "fake",
                           "data": {"device_path": "/fake",
                                    "access_mode": "rw"}}
        flags = (fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG |
                 fakelibvirt.VIR_DOMAIN_AFFECT_LIVE)

        with mock.patch.object(drvr, '_disconnect_volume') as \
                mock_disconnect_volume:
            for state in (power_state.RUNNING, power_state.PAUSED):
                mock_dom.info.return_value = [state, 512, 512, 2, 1234, 5678]
                mock_get_domain.return_value = mock_dom
                drvr.detach_volume(connection_info, instance, '/dev/vdc')

                mock_get_domain.assert_called_with(instance)
                mock_dom.detachDeviceFlags.assert_called_with(
                    """<disk type="file" device="disk">
  <source file="/path/to/fake-volume"/>
  <target bus="virtio" dev="vdc"/>
</disk>
""", flags=flags)
                mock_disconnect_volume.assert_called_with(
                    connection_info, 'vdc')

    def test_multi_nic(self):
        network_info = _fake_network_info(self.stubs, 2)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref, image_meta)
        xml = drvr._get_guest_xml(self.context, instance_ref,
                                  network_info, disk_info, image_meta)
        tree = etree.fromstring(xml)
        interfaces = tree.findall("./devices/interface")
        self.assertEqual(len(interfaces), 2)
        self.assertEqual(interfaces[0].get('type'), 'bridge')

    def _behave_supports_direct_io(self, raise_open=False, raise_write=False,
                                   exc=ValueError()):
        open_behavior = os.open(os.path.join('.', '.directio.test'),
                                os.O_CREAT | os.O_WRONLY | os.O_DIRECT)
        if raise_open:
            open_behavior.AndRaise(exc)
        else:
            open_behavior.AndReturn(3)
            write_behavior = os.write(3, mox.IgnoreArg())
            if raise_write:
                write_behavior.AndRaise(exc)
            else:
                os.close(3)
                os.unlink(3)

    def test_supports_direct_io(self):
        # O_DIRECT is not supported on all Python runtimes, so on platforms
        # where it's not supported (e.g. Mac), we can still test the code-path
        # by stubbing out the value.
        if not hasattr(os, 'O_DIRECT'):
            # `mock` seems to have trouble stubbing an attr that doesn't
            # originally exist, so falling back to stubbing out the attribute
            # directly.
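            # 16384 (0o40000) matches the O_DIRECT value on x86 Linux; the
            # exact value is irrelevant here, since os.open is stubbed out
            # with mox below.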
            os.O_DIRECT = 16384
            self.addCleanup(delattr, os, 'O_DIRECT')

        einval = OSError()
        einval.errno = errno.EINVAL
        self.mox.StubOutWithMock(os, 'open')
        self.mox.StubOutWithMock(os, 'write')
        self.mox.StubOutWithMock(os, 'close')
        self.mox.StubOutWithMock(os, 'unlink')
        _supports_direct_io = libvirt_driver.LibvirtDriver._supports_direct_io

        self._behave_supports_direct_io()
        self._behave_supports_direct_io(raise_write=True)
        self._behave_supports_direct_io(raise_open=True)
        self._behave_supports_direct_io(raise_write=True, exc=einval)
        self._behave_supports_direct_io(raise_open=True, exc=einval)

        self.mox.ReplayAll()
        self.assertTrue(_supports_direct_io('.'))
        self.assertRaises(ValueError, _supports_direct_io, '.')
        self.assertRaises(ValueError, _supports_direct_io, '.')
        self.assertFalse(_supports_direct_io('.'))
        self.assertFalse(_supports_direct_io('.'))
        self.mox.VerifyAll()

    def _check_xml_and_container(self, instance):
        instance_ref = objects.Instance(**instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        self.flags(virt_type='lxc', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

        self.assertEqual(drvr._uri(), 'lxc:///')

        network_info = _fake_network_info(self.stubs, 1)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref, image_meta)
        xml = drvr._get_guest_xml(self.context, instance_ref,
                                  network_info, disk_info, image_meta)
        tree = etree.fromstring(xml)

        check = [
            (lambda t: t.find('.').get('type'), 'lxc'),
            (lambda t: t.find('./os/type').text, 'exe'),
            (lambda t: t.find('./devices/filesystem/target').get('dir'), '/')]

        for i, (check, expected_result) in enumerate(check):
            self.assertEqual(check(tree),
                             expected_result,
                             '%s failed common check %d' % (xml, i))

        target = tree.find('./devices/filesystem/source').get('dir')
        self.assertTrue(len(target) > 0)

    def _check_xml_and_disk_prefix(self, instance, prefix):
        instance_ref = objects.Instance(**instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        def _get_prefix(p, default):
            if p:
                return p + 'a'
            return default

        type_disk_map = {
            'qemu': [
                (lambda t: t.find('.').get('type'), 'qemu'),
                (lambda t: t.find('./devices/disk/target').get('dev'),
                 _get_prefix(prefix, 'vda'))],
            'xen': [
                (lambda t: t.find('.').get('type'), 'xen'),
                (lambda t: t.find('./devices/disk/target').get('dev'),
                 _get_prefix(prefix, 'xvda'))],
            'kvm': [
                (lambda t: t.find('.').get('type'), 'kvm'),
                (lambda t: t.find('./devices/disk/target').get('dev'),
                 _get_prefix(prefix, 'vda'))],
            'uml': [
                (lambda t: t.find('.').get('type'), 'uml'),
                (lambda t: t.find('./devices/disk/target').get('dev'),
                 _get_prefix(prefix, 'ubda'))]
            }

        for (virt_type, checks) in six.iteritems(type_disk_map):
            self.flags(virt_type=virt_type, group='libvirt')
            if prefix:
                self.flags(disk_prefix=prefix, group='libvirt')
            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

            network_info = _fake_network_info(self.stubs, 1)
            disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                                instance_ref, image_meta)
            xml = drvr._get_guest_xml(self.context, instance_ref,
                                      network_info, disk_info, image_meta)
            tree = etree.fromstring(xml)

            for i, (check, expected_result) in enumerate(checks):
                self.assertEqual(check(tree),
                                 expected_result,
                                 '%s != %s failed check %d' %
                                 (check(tree), expected_result, i))

    def _check_xml_and_disk_driver(self, image_meta):
        os_open = os.open
        directio_supported = True

        def os_open_stub(path, flags, *args, **kwargs):
            if flags & os.O_DIRECT:
                if not directio_supported:
                    raise OSError(errno.EINVAL,
                                  '%s: %s' % (os.strerror(errno.EINVAL),
                                              path))
                flags &= ~os.O_DIRECT
            return os_open(path, flags, *args, **kwargs)

        self.stubs.Set(os, 'open', os_open_stub)

        @staticmethod
        def connection_supports_direct_io_stub(dirpath):
            return directio_supported

        self.stubs.Set(libvirt_driver.LibvirtDriver,
                       '_supports_direct_io',
                       connection_supports_direct_io_stub)

        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        network_info = _fake_network_info(self.stubs, 1)

        drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref, image_meta)
        xml = drv._get_guest_xml(self.context, instance_ref,
                                 network_info, disk_info, image_meta)
        tree = etree.fromstring(xml)
        disks = tree.findall('./devices/disk/driver')
        for guest_disk in disks:
            self.assertEqual(guest_disk.get("cache"), "none")

        directio_supported = False

        # The O_DIRECT availability is cached on first use in
        # LibvirtDriver, hence we re-create it here
        drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref, image_meta)
        xml = drv._get_guest_xml(self.context, instance_ref,
                                 network_info, disk_info, image_meta)
        tree = etree.fromstring(xml)
        disks = tree.findall('./devices/disk/driver')
        for guest_disk in disks:
            self.assertEqual(guest_disk.get("cache"), "writethrough")

    def _check_xml_and_disk_bus(self, image_meta,
                                block_device_info, wantConfig):
        instance_ref = objects.Instance(**self.test_instance)
        network_info = _fake_network_info(self.stubs, 1)

        drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref, image_meta,
                                            block_device_info)
        xml = drv._get_guest_xml(self.context, instance_ref,
                                 network_info, disk_info, image_meta,
                                 block_device_info=block_device_info)
        tree = etree.fromstring(xml)

        got_disks = tree.findall('./devices/disk')
        got_disk_targets = tree.findall('./devices/disk/target')
        for i in range(len(wantConfig)):
            want_device_type = wantConfig[i][0]
            want_device_bus = wantConfig[i][1]
            want_device_dev = wantConfig[i][2]

            got_device_type = got_disks[i].get('device')
            got_device_bus = got_disk_targets[i].get('bus')
            got_device_dev = got_disk_targets[i].get('dev')

            self.assertEqual(got_device_type, want_device_type)
            self.assertEqual(got_device_bus, want_device_bus)
            self.assertEqual(got_device_dev, want_device_dev)

    def _check_xml_and_uuid(self, image_meta):
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        network_info = _fake_network_info(self.stubs, 1)

        drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref, image_meta)
        xml = drv._get_guest_xml(self.context, instance_ref,
                                 network_info, disk_info, image_meta)
        tree = etree.fromstring(xml)
        self.assertEqual(tree.find('./uuid').text,
                         instance_ref['uuid'])

    @mock.patch.object(libvirt_driver.LibvirtDriver,
                       "_get_host_sysinfo_serial_hardware",)
    def _check_xml_and_uri(self, instance, mock_serial,
                           expect_ramdisk=False, expect_kernel=False,
                           rescue=None, expect_xen_hvm=False,
                           xen_only=False):
        mock_serial.return_value = "cef19ce0-0ca2-11df-855d-b19fbce37686"
        instance_ref = objects.Instance(**instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        xen_vm_mode = vm_mode.XEN
        if expect_xen_hvm:
            xen_vm_mode = vm_mode.HVM

        type_uri_map = {'qemu': ('qemu:///system',
                                 [(lambda t: t.find('.').get('type'), 'qemu'),
                                  (lambda t: t.find('./os/type').text,
                                   vm_mode.HVM),
                                  (lambda t: t.find('./devices/emulator'),
                                   None)]),
                        'kvm': ('qemu:///system',
                                [(lambda t: t.find('.').get('type'), 'kvm'),
                                 (lambda t: t.find('./os/type').text,
                                  vm_mode.HVM),
                                 (lambda t: t.find('./devices/emulator'),
                                  None)]),
                        'uml': ('uml:///system',
                                [(lambda t: t.find('.').get('type'), 'uml'),
                                 (lambda t: t.find('./os/type').text,
                                  vm_mode.UML)]),
                        'xen': ('xen:///',
                                [(lambda t: t.find('.').get('type'), 'xen'),
                                 (lambda t: t.find('./os/type').text,
                                  xen_vm_mode)])}

        if expect_xen_hvm or xen_only:
            hypervisors_to_check = ['xen']
        else:
            hypervisors_to_check = ['qemu', 'kvm', 'xen']

        for hypervisor_type in hypervisors_to_check:
            check_list = type_uri_map[hypervisor_type][1]

            if rescue:
                suffix = '.rescue'
            else:
                suffix = ''
            if expect_kernel:
                check = (lambda t: self.relpath(t.find('./os/kernel').text).
                         split('/')[1], 'kernel' + suffix)
            else:
                check = (lambda t: t.find('./os/kernel'), None)
            check_list.append(check)

            if expect_kernel:
                check = (lambda t: "no_timer_check" in t.find('./os/cmdline').
                         text, hypervisor_type == "qemu")
                check_list.append(check)
            # Hypervisors that only support vm_mode.HVM and Xen
            # should not produce configuration that results in kernel
            # arguments
            if not expect_kernel and (hypervisor_type in
                                      ['qemu', 'kvm', 'xen']):
                check = (lambda t: t.find('./os/root'), None)
                check_list.append(check)
                check = (lambda t: t.find('./os/cmdline'), None)
                check_list.append(check)

            if expect_ramdisk:
                check = (lambda t: self.relpath(t.find('./os/initrd').text).
                         split('/')[1], 'ramdisk' + suffix)
            else:
                check = (lambda t: t.find('./os/initrd'), None)
            check_list.append(check)

            if hypervisor_type in ['qemu', 'kvm']:
                xpath = "./sysinfo/system/entry"
                check = (lambda t: t.findall(xpath)[0].get("name"),
                         "manufacturer")
                check_list.append(check)
                check = (lambda t: t.findall(xpath)[0].text,
                         version.vendor_string())
                check_list.append(check)

                check = (lambda t: t.findall(xpath)[1].get("name"),
                         "product")
                check_list.append(check)
                check = (lambda t: t.findall(xpath)[1].text,
                         version.product_string())
                check_list.append(check)

                check = (lambda t: t.findall(xpath)[2].get("name"),
                         "version")
                check_list.append(check)
                # NOTE(sirp): empty strings don't roundtrip in lxml (they are
                # converted to None), so we need an `or ''` to correct for
                # that
                check = (lambda t: t.findall(xpath)[2].text or '',
                         version.version_string_with_package())
                check_list.append(check)

                check = (lambda t: t.findall(xpath)[3].get("name"),
                         "serial")
                check_list.append(check)
                check = (lambda t: t.findall(xpath)[3].text,
                         "cef19ce0-0ca2-11df-855d-b19fbce37686")
                check_list.append(check)

                check = (lambda t: t.findall(xpath)[4].get("name"),
                         "uuid")
                check_list.append(check)
                check = (lambda t: t.findall(xpath)[4].text,
                         instance['uuid'])
                check_list.append(check)

            if hypervisor_type in ['qemu', 'kvm']:
                check = (lambda t: t.findall('./devices/serial')[0].get(
                    'type'), 'file')
                check_list.append(check)
                check = (lambda t: t.findall('./devices/serial')[1].get(
                    'type'), 'pty')
                check_list.append(check)
                check = (lambda t: self.relpath(t.findall(
                    './devices/serial/source')[0].get('path')).
                    split('/')[1], 'console.log')
                check_list.append(check)
            else:
                check = (lambda t: t.find('./devices/console').get(
                    'type'), 'pty')
                check_list.append(check)

        common_checks = [
            (lambda t: t.find('.').tag, 'domain'),
            (lambda t: t.find('./memory').text, '2097152')]
        if rescue:
            common_checks += [
                (lambda t: self.relpath(t.findall('./devices/disk/source')[0].
                                        get('file')).split('/')[1],
                 'disk.rescue'),
                (lambda t: self.relpath(t.findall('./devices/disk/source')[1].
                                        get('file')).split('/')[1], 'disk')]
        else:
            common_checks += [(lambda t: self.relpath(t.findall(
                './devices/disk/source')[0].get('file')).split('/')[1],
                'disk')]
            common_checks += [(lambda t: self.relpath(t.findall(
                './devices/disk/source')[1].get('file')).split('/')[1],
                'disk.local')]

        for virt_type in hypervisors_to_check:
            expected_uri = type_uri_map[virt_type][0]
            checks = type_uri_map[virt_type][1]
            self.flags(virt_type=virt_type, group='libvirt')

            with mock.patch('nova.virt.libvirt.driver.libvirt') as old_virt:
                del old_virt.VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES

                drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
                self.assertEqual(drvr._uri(), expected_uri)

                network_info = _fake_network_info(self.stubs, 1)
                disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                                    instance_ref,
                                                    image_meta,
                                                    rescue=rescue)
                xml = drvr._get_guest_xml(self.context, instance_ref,
                                          network_info, disk_info,
                                          image_meta,
                                          rescue=rescue)
                tree = etree.fromstring(xml)
                for i, (check, expected_result) in enumerate(checks):
                    self.assertEqual(check(tree),
                                     expected_result,
                                     '%s != %s failed check %d' %
                                     (check(tree), expected_result, i))

                for i, (check, expected_result) in enumerate(common_checks):
                    self.assertEqual(check(tree),
                                     expected_result,
                                     '%s != %s failed common check %d' %
                                     (check(tree), expected_result, i))

                filterref = './devices/interface/filterref'
                vif = network_info[0]
                nic_id = vif['address'].replace(':', '')
                fw = firewall.NWFilterFirewall(fake.FakeVirtAPI(), drvr)
                instance_filter_name = fw._instance_filter_name(instance_ref,
                                                                nic_id)
                self.assertEqual(tree.find(filterref).get('filter'),
                                 instance_filter_name)

        # This test is supposed to make sure we don't
        # override a specifically set uri
        #
        # Deliberately not just assigning this string to CONF.connection_uri
        # and checking against that later on. This way we make sure the
        # implementation doesn't fiddle around with the CONF.
        testuri = 'something completely different'
        self.flags(connection_uri=testuri, group='libvirt')
        for (virt_type, (expected_uri, checks)) in \
                six.iteritems(type_uri_map):
            self.flags(virt_type=virt_type, group='libvirt')
            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
            self.assertEqual(drvr._uri(), testuri)

    def test_ensure_filtering_rules_for_instance_timeout(self):
        # ensure_filtering_rules_for_instance() finishes with timeout.
        # Preparing mocks
        def fake_none(self, *args):
            return

        class FakeTime(object):
            def __init__(self):
                self.counter = 0

            def sleep(self, t):
                self.counter += t

        fake_timer = FakeTime()

        def fake_sleep(t):
            fake_timer.sleep(t)

        # _fake_network_info must be called before create_fake_libvirt_mock(),
        # as _fake_network_info calls importutils.import_class() and
        # create_fake_libvirt_mock() mocks importutils.import_class().
        network_info = _fake_network_info(self.stubs, 1)
        self.create_fake_libvirt_mock()
        instance_ref = objects.Instance(**self.test_instance)

        # Start test
        self.mox.ReplayAll()
        try:
            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
            self.stubs.Set(drvr.firewall_driver,
                           'setup_basic_filtering',
                           fake_none)
            self.stubs.Set(drvr.firewall_driver,
                           'prepare_instance_filter',
                           fake_none)
            self.stubs.Set(drvr.firewall_driver,
                           'instance_filter_exists',
                           fake_none)
            self.stubs.Set(greenthread,
                           'sleep',
                           fake_sleep)
            drvr.ensure_filtering_rules_for_instance(instance_ref,
                                                     network_info)
        except exception.NovaException as e:
            msg = ('The firewall filter for %s does not exist' %
                   instance_ref['name'])
            c1 = (0 <= six.text_type(e).find(msg))
            self.assertTrue(c1)

        self.assertEqual(29, fake_timer.counter, "Didn't wait the expected "
                                                 "amount of time")

    @mock.patch.object(libvirt_driver.LibvirtDriver,
                       '_create_shared_storage_test_file')
    @mock.patch.object(fakelibvirt.Connection, 'compareCPU')
    def test_check_can_live_migrate_dest_all_pass_with_block_migration(
            self, mock_cpu, mock_test_file):
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.vcpu_model = test_vcpu_model.fake_vcpumodel
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        compute_info = {'disk_available_least': 400,
                        'cpu_info': 'asdf',
                        }
        filename = "file"

        # _check_cpu_match
        mock_cpu.return_value = 1

        # mounted_on_same_shared_storage
        mock_test_file.return_value = filename

        # No need for the src_compute_info
        return_value = drvr.check_can_live_migrate_destination(
            self.context, instance_ref, None, compute_info, True)
        self.assertThat({"filename": "file",
                         'image_type': 'default',
                         'disk_available_mb': 409600,
                         "disk_over_commit": False,
                         "block_migration": True},
                        matchers.DictMatches(return_value))

    @mock.patch.object(libvirt_driver.LibvirtDriver,
                       '_create_shared_storage_test_file')
    @mock.patch.object(fakelibvirt.Connection, 'compareCPU')
    def test_check_can_live_migrate_dest_all_pass_no_block_migration(
            self, mock_cpu, mock_test_file):
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.vcpu_model = test_vcpu_model.fake_vcpumodel
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        compute_info = {'disk_available_least': 400,
                        'cpu_info': 'asdf',
                        }
        filename = "file"

        # _check_cpu_match
        mock_cpu.return_value = 1

        # mounted_on_same_shared_storage
        mock_test_file.return_value = filename

        # No need for the src_compute_info
        return_value = drvr.check_can_live_migrate_destination(
            self.context, instance_ref, None, compute_info, False)
        self.assertThat({"filename": "file",
                         "image_type": 'default',
                         "block_migration": False,
                         "disk_over_commit": False,
                         "disk_available_mb": None},
                        matchers.DictMatches(return_value))

    @mock.patch.object(libvirt_driver.LibvirtDriver,
                       '_create_shared_storage_test_file',
                       return_value='fake')
    @mock.patch.object(libvirt_driver.LibvirtDriver, '_compare_cpu')
    def test_check_can_live_migrate_guest_cpu_none_model(
            self, mock_cpu, mock_test_file):
        # Tests that when instance.vcpu_model.model is None, the host cpu
        # model is used for live migration.
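        # That is, _compare_cpu should receive None for the guest CPU and
        # fall back to the host's cpu_info ('asdf' here), which the
        # mock_cpu.assert_called_once_with(None, 'asdf') check below
        # verifies.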
instance_ref = objects.Instance(**self.test_instance) instance_ref.vcpu_model = test_vcpu_model.fake_vcpumodel instance_ref.vcpu_model.model = None drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) compute_info = {'cpu_info': 'asdf'} result = drvr.check_can_live_migrate_destination( self.context, instance_ref, compute_info, compute_info) mock_cpu.assert_called_once_with(None, 'asdf') expected_result = {"filename": 'fake', "image_type": CONF.libvirt.images_type, "block_migration": False, "disk_over_commit": False, "disk_available_mb": None} self.assertDictEqual(expected_result, result) @mock.patch.object(libvirt_driver.LibvirtDriver, '_create_shared_storage_test_file') @mock.patch.object(fakelibvirt.Connection, 'compareCPU') def test_check_can_live_migrate_dest_no_instance_cpu_info( self, mock_cpu, mock_test_file): instance_ref = objects.Instance(**self.test_instance) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) compute_info = {'cpu_info': jsonutils.dumps({ "vendor": "AMD", "arch": arch.I686, "features": ["sse3"], "model": "Opteron_G3", "topology": {"cores": 2, "threads": 1, "sockets": 4} })} filename = "file" # _check_cpu_match mock_cpu.return_value = 1 # mounted_on_same_shared_storage mock_test_file.return_value = filename return_value = drvr.check_can_live_migrate_destination(self.context, instance_ref, compute_info, compute_info, False) self.assertThat({"filename": "file", "image_type": 'default', "block_migration": False, "disk_over_commit": False, "disk_available_mb": None}, matchers.DictMatches(return_value)) @mock.patch.object(fakelibvirt.Connection, 'compareCPU') def test_check_can_live_migrate_dest_incompatible_cpu_raises( self, mock_cpu): instance_ref = objects.Instance(**self.test_instance) instance_ref.vcpu_model = test_vcpu_model.fake_vcpumodel drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) compute_info = {'cpu_info': 'asdf'} mock_cpu.side_effect = exception.InvalidCPUInfo(reason='foo') self.assertRaises(exception.InvalidCPUInfo, drvr.check_can_live_migrate_destination, self.context, instance_ref, compute_info, compute_info, False) @mock.patch.object(host.Host, 'compare_cpu') @mock.patch.object(nova.virt.libvirt, 'config') def test_compare_cpu_compatible_host_cpu(self, mock_vconfig, mock_compare): mock_compare.return_value = 5 conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) ret = conn._compare_cpu(None, jsonutils.dumps(_fake_cpu_info)) self.assertIsNone(ret) @mock.patch.object(host.Host, 'compare_cpu') @mock.patch.object(nova.virt.libvirt, 'config') def test_compare_cpu_handles_not_supported_error_gracefully(self, mock_vconfig, mock_compare): not_supported_exc = fakelibvirt.make_libvirtError( fakelibvirt.libvirtError, 'this function is not supported by the connection driver:' ' virCompareCPU', error_code=fakelibvirt.VIR_ERR_NO_SUPPORT) mock_compare.side_effect = not_supported_exc conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) ret = conn._compare_cpu(None, jsonutils.dumps(_fake_cpu_info)) self.assertIsNone(ret) @mock.patch.object(host.Host, 'compare_cpu') @mock.patch.object(nova.virt.libvirt.LibvirtDriver, '_vcpu_model_to_cpu_config') def test_compare_cpu_compatible_guest_cpu(self, mock_vcpu_to_cpu, mock_compare): mock_compare.return_value = 6 conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) ret = conn._compare_cpu(jsonutils.dumps(_fake_cpu_info), None) self.assertIsNone(ret) def test_compare_cpu_virt_type_xen(self): self.flags(virt_type='xen', group='libvirt') conn = 
libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) ret = conn._compare_cpu(None, None) self.assertIsNone(ret) @mock.patch.object(host.Host, 'compare_cpu') @mock.patch.object(nova.virt.libvirt, 'config') def test_compare_cpu_invalid_cpuinfo_raises(self, mock_vconfig, mock_compare): mock_compare.return_value = 0 conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.assertRaises(exception.InvalidCPUInfo, conn._compare_cpu, None, jsonutils.dumps(_fake_cpu_info)) @mock.patch.object(host.Host, 'compare_cpu') @mock.patch.object(nova.virt.libvirt, 'config') def test_compare_cpu_incompatible_cpu_raises(self, mock_vconfig, mock_compare): mock_compare.side_effect = fakelibvirt.libvirtError('cpu') conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.assertRaises(exception.MigrationPreCheckError, conn._compare_cpu, None, jsonutils.dumps(_fake_cpu_info)) def test_check_can_live_migrate_dest_cleanup_works_correctly(self): objects.Instance(**self.test_instance) dest_check_data = {"filename": "file", "block_migration": True, "disk_over_commit": False, "disk_available_mb": 1024} drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.mox.StubOutWithMock(drvr, '_cleanup_shared_storage_test_file') drvr._cleanup_shared_storage_test_file("file") self.mox.ReplayAll() drvr.check_can_live_migrate_destination_cleanup(self.context, dest_check_data) def _mock_can_live_migrate_source(self, block_migration=False, is_shared_block_storage=False, is_shared_instance_path=False, is_booted_from_volume=False, disk_available_mb=1024, block_device_info=None, block_device_text=None): instance = objects.Instance(**self.test_instance) dest_check_data = {'filename': 'file', 'image_type': 'default', 'block_migration': block_migration, 'disk_over_commit': False, 'disk_available_mb': disk_available_mb} drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.mox.StubOutWithMock(drvr, '_is_shared_block_storage') drvr._is_shared_block_storage(instance, dest_check_data, block_device_info).AndReturn(is_shared_block_storage) self.mox.StubOutWithMock(drvr, '_check_shared_storage_test_file') drvr._check_shared_storage_test_file('file').AndReturn( is_shared_instance_path) self.mox.StubOutWithMock(drvr, "get_instance_disk_info") drvr.get_instance_disk_info(instance, block_device_info=block_device_info).\ AndReturn(block_device_text) self.mox.StubOutWithMock(drvr, '_is_booted_from_volume') drvr._is_booted_from_volume(instance, block_device_text).AndReturn( is_booted_from_volume) return (instance, dest_check_data, drvr) def test_check_can_live_migrate_source_block_migration(self): instance, dest_check_data, drvr = self._mock_can_live_migrate_source( block_migration=True) self.mox.StubOutWithMock(drvr, "_assert_dest_node_has_enough_disk") drvr._assert_dest_node_has_enough_disk( self.context, instance, dest_check_data['disk_available_mb'], False, None) self.mox.ReplayAll() ret = drvr.check_can_live_migrate_source(self.context, instance, dest_check_data) self.assertIsInstance(ret, dict) self.assertIn('is_shared_block_storage', ret) self.assertIn('is_shared_instance_path', ret) self.assertEqual(ret['is_shared_instance_path'], ret['is_shared_storage']) def test_check_can_live_migrate_source_shared_block_storage(self): instance, dest_check_data, drvr = self._mock_can_live_migrate_source( is_shared_block_storage=True) self.mox.ReplayAll() drvr.check_can_live_migrate_source(self.context, instance, dest_check_data) def test_check_can_live_migrate_source_shared_instance_path(self): instance, 
dest_check_data, drvr = self._mock_can_live_migrate_source(
                is_shared_instance_path=True)
        self.mox.ReplayAll()
        drvr.check_can_live_migrate_source(self.context, instance,
                                           dest_check_data)

    def test_check_can_live_migrate_source_non_shared_fails(self):
        instance, dest_check_data, drvr = self._mock_can_live_migrate_source()
        self.mox.ReplayAll()
        self.assertRaises(exception.InvalidSharedStorage,
                          drvr.check_can_live_migrate_source,
                          self.context, instance, dest_check_data)

    def test_check_can_live_migrate_source_shared_block_migration_fails(self):
        instance, dest_check_data, drvr = self._mock_can_live_migrate_source(
                block_migration=True,
                is_shared_block_storage=True)

        self.mox.ReplayAll()
        self.assertRaises(exception.InvalidLocalStorage,
                          drvr.check_can_live_migrate_source,
                          self.context, instance, dest_check_data)

    def test_check_can_live_migrate_shared_path_block_migration_fails(self):
        instance, dest_check_data, drvr = self._mock_can_live_migrate_source(
                block_migration=True,
                is_shared_instance_path=True)

        self.mox.ReplayAll()
        self.assertRaises(exception.InvalidLocalStorage,
                          drvr.check_can_live_migrate_source,
                          self.context, instance, dest_check_data, None)

    def test_check_can_live_migrate_non_shared_non_block_migration_fails(self):
        instance, dest_check_data, drvr = self._mock_can_live_migrate_source()
        self.mox.ReplayAll()
        self.assertRaises(exception.InvalidSharedStorage,
                          drvr.check_can_live_migrate_source,
                          self.context, instance, dest_check_data)

    def test_check_can_live_migrate_source_with_dest_not_enough_disk(self):
        instance, dest_check_data, drvr = self._mock_can_live_migrate_source(
                block_migration=True,
                disk_available_mb=0)

        drvr.get_instance_disk_info(instance,
                                    block_device_info=None).AndReturn(
                                        '[{"virt_disk_size":2}]')

        self.mox.ReplayAll()
        self.assertRaises(exception.MigrationError,
                          drvr.check_can_live_migrate_source,
                          self.context, instance, dest_check_data)

    def test_check_can_live_migrate_source_booted_from_volume(self):
        instance, dest_check_data, drvr = self._mock_can_live_migrate_source(
                is_booted_from_volume=True,
                block_device_text='[]')
        self.mox.ReplayAll()
        drvr.check_can_live_migrate_source(self.context, instance,
                                           dest_check_data)

    def test_check_can_live_migrate_source_booted_from_volume_with_swap(self):
        instance, dest_check_data, drvr = self._mock_can_live_migrate_source(
                is_booted_from_volume=True,
                block_device_text='[{"path":"disk.swap"}]')
        self.mox.ReplayAll()
        self.assertRaises(exception.InvalidSharedStorage,
                          drvr.check_can_live_migrate_source,
                          self.context, instance, dest_check_data)

    def _is_shared_block_storage_test_create_mocks(self, disks):
        # Test data
        instance_xml = ("<domain type='kvm'><name>instance-0000000a</name>"
                        "<devices>{}</devices></domain>")
        disks_xml = ''
        for dsk in disks:
            if dsk['type'] != 'network':
                disks_xml = ''.join([disks_xml,
                                "<disk type='{type}'>"
                                "<driver name='qemu' type='{driver}'/>"
                                "<source {source}='{source_path}'/>"
                                "<target dev='{target_dev}' bus='virtio'/>"
                                "</disk>".format(**dsk)])
            else:
                disks_xml = ''.join([disks_xml,
                                "<disk type='{type}'>"
                                "<driver name='qemu' type='{driver}'/>"
                                "<source protocol='{source_proto}' "
                                "name='{source_image}' >"
                                "<host name='hostname' port='7000'/>"
                                "<config file='/path/to/file'/>"
                                "</source>"
                                "<target dev='{target_dev}' "
                                "bus='ide'/></disk>".format(**dsk)])

        # Preparing mocks
        mock_virDomain = mock.Mock(fakelibvirt.virDomain)
        mock_virDomain.XMLDesc = mock.Mock()
        mock_virDomain.XMLDesc.return_value = (instance_xml.format(disks_xml))

        mock_lookup = mock.Mock()

        def mock_lookup_side_effect(name):
            return mock_virDomain
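        # mock_lookup stands in for host.Host.get_domain(): whichever
        # instance is passed in, it hands back the canned virDomain whose
        # XMLDesc() embeds the disk elements built above, so the driver's
        # disk-enumeration logic can be exercised without a real libvirt
        # connection.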
mock_lookup.side_effect = mock_lookup_side_effect mock_getsize = mock.Mock() mock_getsize.return_value = "10737418240" return (mock_getsize, mock_lookup) def test_is_shared_block_storage_rbd(self): self.flags(images_type='rbd', group='libvirt') bdi = {'block_device_mapping': []} instance = objects.Instance(**self.test_instance) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) mock_get_instance_disk_info = mock.Mock() with mock.patch.object(drvr, 'get_instance_disk_info', mock_get_instance_disk_info): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.assertTrue(drvr._is_shared_block_storage(instance, {'image_type': 'rbd'}, block_device_info=bdi)) self.assertEqual(0, mock_get_instance_disk_info.call_count) def test_is_shared_block_storage_lvm(self): self.flags(images_type='lvm', group='libvirt') bdi = {'block_device_mapping': []} instance = objects.Instance(**self.test_instance) mock_get_instance_disk_info = mock.Mock() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) with mock.patch.object(drvr, 'get_instance_disk_info', mock_get_instance_disk_info): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.assertFalse(drvr._is_shared_block_storage( instance, {'image_type': 'lvm'}, block_device_info=bdi)) self.assertEqual(0, mock_get_instance_disk_info.call_count) def test_is_shared_block_storage_qcow2(self): self.flags(images_type='qcow2', group='libvirt') bdi = {'block_device_mapping': []} instance = objects.Instance(**self.test_instance) mock_get_instance_disk_info = mock.Mock() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) with mock.patch.object(drvr, 'get_instance_disk_info', mock_get_instance_disk_info): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.assertFalse(drvr._is_shared_block_storage( instance, {'image_type': 'qcow2'}, block_device_info=bdi)) self.assertEqual(0, mock_get_instance_disk_info.call_count) def test_is_shared_block_storage_rbd_only_source(self): self.flags(images_type='rbd', group='libvirt') bdi = {'block_device_mapping': []} instance = objects.Instance(**self.test_instance) mock_get_instance_disk_info = mock.Mock() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) with mock.patch.object(drvr, 'get_instance_disk_info', mock_get_instance_disk_info): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.assertFalse(drvr._is_shared_block_storage( instance, {'is_shared_instance_path': False}, block_device_info=bdi)) self.assertEqual(0, mock_get_instance_disk_info.call_count) def test_is_shared_block_storage_rbd_only_dest(self): bdi = {'block_device_mapping': []} instance = objects.Instance(**self.test_instance) mock_get_instance_disk_info = mock.Mock() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) with mock.patch.object(drvr, 'get_instance_disk_info', mock_get_instance_disk_info): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.assertFalse(drvr._is_shared_block_storage( instance, {'image_type': 'rbd', 'is_shared_instance_path': False}, block_device_info=bdi)) self.assertEqual(0, mock_get_instance_disk_info.call_count) def test_is_shared_block_storage_volume_backed(self): disks = [{'type': 'block', 'driver': 'raw', 'source': 'dev', 'source_path': '/dev/disk', 'target_dev': 'vda'}] bdi = {'block_device_mapping': [ {'connection_info': 'info', 'mount_device': '/dev/vda'}]} instance = objects.Instance(**self.test_instance) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) (mock_getsize, mock_lookup) =\ 
self._is_shared_block_storage_test_create_mocks(disks)
        with mock.patch.object(host.Host, 'get_domain', mock_lookup):
            self.assertTrue(drvr._is_shared_block_storage(instance,
                                  {'is_volume_backed': True,
                                   'is_shared_instance_path': False},
                                  block_device_info=bdi))
        mock_lookup.assert_called_once_with(instance)

    def test_is_shared_block_storage_volume_backed_with_disk(self):
        disks = [{'type': 'block',
                 'driver': 'raw',
                 'source': 'dev',
                 'source_path': '/dev/disk',
                 'target_dev': 'vda'},
                {'type': 'file',
                 'driver': 'raw',
                 'source': 'file',
                 'source_path': '/instance/disk.local',
                 'target_dev': 'vdb'}]
        bdi = {'block_device_mapping': [
                  {'connection_info': 'info', 'mount_device': '/dev/vda'}]}
        instance = objects.Instance(**self.test_instance)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        (mock_getsize, mock_lookup) =\
            self._is_shared_block_storage_test_create_mocks(disks)
        with contextlib.nested(
                mock.patch.object(os.path, 'getsize', mock_getsize),
                mock.patch.object(host.Host, 'get_domain', mock_lookup)):
            self.assertFalse(drvr._is_shared_block_storage(
                                    instance,
                                    {'is_volume_backed': True,
                                     'is_shared_instance_path': False},
                                    block_device_info=bdi))
        mock_getsize.assert_called_once_with('/instance/disk.local')
        mock_lookup.assert_called_once_with(instance)

    def test_is_shared_block_storage_nfs(self):
        bdi = {'block_device_mapping': []}
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        mock_image_backend = mock.MagicMock()
        drvr.image_backend = mock_image_backend
        mock_backend = mock.MagicMock()
        mock_image_backend.backend.return_value = mock_backend
        mock_backend.is_file_in_instance_path.return_value = True
        mock_get_instance_disk_info = mock.Mock()
        with mock.patch.object(drvr, 'get_instance_disk_info',
                               mock_get_instance_disk_info):
            self.assertTrue(drvr._is_shared_block_storage(
                'instance', {'is_shared_instance_path': True},
                block_device_info=bdi))
        self.assertEqual(0, mock_get_instance_disk_info.call_count)

    def test_live_migration_update_graphics_xml(self):
        self.compute = importutils.import_object(CONF.compute_manager)
        instance_dict = dict(self.test_instance)
        instance_dict.update({'host': 'fake',
                              'power_state': power_state.RUNNING,
                              'vm_state': vm_states.ACTIVE})
        instance_ref = objects.Instance(**instance_dict)

        xml_tmpl = ("<domain type='kvm'>"
                    "<devices>"
                    "<graphics type='vnc' listen='{vnc}'>"
                    "<listen address='{vnc}'/>"
                    "</graphics>"
                    "<graphics type='spice' listen='{spice}'>"
                    "<listen address='{spice}'/>"
                    "</graphics>"
                    "</devices>"
                    "</domain>")

        initial_xml = xml_tmpl.format(vnc='1.2.3.4',
                                      spice='5.6.7.8')

        target_xml = xml_tmpl.format(vnc='10.0.0.1',
                                     spice='10.0.0.2')
        target_xml = etree.tostring(etree.fromstring(target_xml))

        # Preparing mocks
        vdmock = self.mox.CreateMock(fakelibvirt.virDomain)
        self.mox.StubOutWithMock(vdmock, "migrateToURI2")
        _bandwidth = CONF.libvirt.live_migration_bandwidth
        vdmock.XMLDesc(flags=fakelibvirt.VIR_DOMAIN_XML_MIGRATABLE).AndReturn(
                initial_xml)
        vdmock.migrateToURI2(CONF.libvirt.live_migration_uri % 'dest',
                             None,
                             target_xml,
                             mox.IgnoreArg(),
                             None,
                             _bandwidth).AndRaise(
                                 fakelibvirt.libvirtError("ERR"))

        # start test
        migrate_data = {'pre_live_migration_result':
                {'graphics_listen_addrs':
                    {'vnc': '10.0.0.1', 'spice': '10.0.0.2'}}}
        self.mox.ReplayAll()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.assertRaises(fakelibvirt.libvirtError,
                          drvr._live_migration_operation,
                          self.context, instance_ref, 'dest',
                          False, migrate_data, vdmock)

    def test_live_migration_update_volume_xml(self):
        self.compute = importutils.import_object(CONF.compute_manager)
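        # The migrate_data built below mirrors what pre_live_migration() on
        # the destination reports back: per-volume connection_info plus
        # disk_info keyed by volume serial. _live_migration_operation() is
        # expected to splice that into the guest XML (via _update_xml) so
        # the migrated domain points at the destination's device paths.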
instance_dict = dict(self.test_instance) instance_dict.update({'host': 'fake', 'power_state': power_state.RUNNING, 'vm_state': vm_states.ACTIVE}) instance_ref = objects.Instance(**instance_dict) target_xml = self.device_xml_tmpl.format( device_path='/dev/disk/by-path/' 'ip-1.2.3.4:3260-iqn.' 'cde.67890.opst-lun-Z') # start test migrate_data = {'pre_live_migration_result': {'volume': {u'58a84f6d-3f0c-4e19-a0af-eb657b790657': {'connection_info': {u'driver_volume_type': u'iscsi', 'serial': u'58a84f6d-3f0c-4e19-a0af-eb657b790657', u'data': {u'access_mode': u'rw', u'target_discovered': False, u'target_iqn': u'ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z', u'volume_id': u'58a84f6d-3f0c-4e19-a0af-eb657b790657', 'device_path': u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z'}}, 'disk_info': {'bus': u'virtio', 'type': u'disk', 'dev': u'vdb'}}}}, 'graphics_listen_addrs': {}} pre_live_migrate_data = ((migrate_data or {}). get('pre_live_migration_result', {})) volume = pre_live_migrate_data.get('volume') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) test_mock = mock.MagicMock() with mock.patch.object(libvirt_driver.LibvirtDriver, 'get_info') as \ mget_info,\ mock.patch.object(drvr._host, 'get_domain') as mget_domain,\ mock.patch.object(fakelibvirt.virDomain, 'migrateToURI2'),\ mock.patch.object(drvr, '_update_xml') as mupdate: mget_info.side_effect = exception.InstanceNotFound( instance_id='foo') mget_domain.return_value = test_mock test_mock.XMLDesc.return_value = target_xml self.assertFalse(drvr._live_migration_operation( self.context, instance_ref, 'dest', False, migrate_data, test_mock)) mupdate.assert_called_once_with(target_xml, volume, None, None) def test_update_volume_xml(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) initial_xml = self.device_xml_tmpl.format( device_path='/dev/disk/by-path/' 'ip-1.2.3.4:3260-iqn.' 'abc.12345.opst-lun-X') target_xml = self.device_xml_tmpl.format( device_path='/dev/disk/by-path/' 'ip-1.2.3.4:3260-iqn.' 
'cde.67890.opst-lun-Z') target_xml = etree.tostring(etree.fromstring(target_xml)) serial = "58a84f6d-3f0c-4e19-a0af-eb657b790657" volume_xml = {'volume': {}} volume_xml['volume'][serial] = {'connection_info': {}, 'disk_info': {}} volume_xml['volume'][serial]['connection_info'] = \ {u'driver_volume_type': u'iscsi', 'serial': u'58a84f6d-3f0c-4e19-a0af-eb657b790657', u'data': {u'access_mode': u'rw', u'target_discovered': False, u'target_iqn': u'ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z', u'volume_id': u'58a84f6d-3f0c-4e19-a0af-eb657b790657', 'device_path': u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z'}} volume_xml['volume'][serial]['disk_info'] = {'bus': u'virtio', 'type': u'disk', 'dev': u'vdb'} connection_info = volume_xml['volume'][serial]['connection_info'] disk_info = volume_xml['volume'][serial]['disk_info'] conf = vconfig.LibvirtConfigGuestDisk() conf.source_device = disk_info['type'] conf.driver_name = "qemu" conf.driver_format = "raw" conf.driver_cache = "none" conf.target_dev = disk_info['dev'] conf.target_bus = disk_info['bus'] conf.serial = connection_info.get('serial') conf.source_type = "block" conf.source_path = connection_info['data'].get('device_path') with mock.patch.object(drvr, '_get_volume_config', return_value=conf): parser = etree.XMLParser(remove_blank_text=True) xml_doc = etree.fromstring(initial_xml, parser) config = drvr._update_volume_xml(xml_doc, volume_xml['volume']) xml_doc = etree.fromstring(target_xml, parser) self.assertEqual(etree.tostring(xml_doc), etree.tostring(config)) def test_update_volume_xml_no_serial(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) xml_tmpl = """ <domain type='kvm'> <devices> <disk type='block' device='disk'> <driver name='qemu' type='raw' cache='none'/> <source dev='{device_path}'/> <target bus='virtio' dev='vdb'/> <serial></serial> <address type='pci' domain='0x0' bus='0x0' slot='0x04' \ function='0x0'/> </disk> </devices> </domain> """ initial_xml = xml_tmpl.format(device_path='/dev/disk/by-path/' 'ip-1.2.3.4:3260-iqn.' 'abc.12345.opst-lun-X') target_xml = xml_tmpl.format(device_path='/dev/disk/by-path/' 'ip-1.2.3.4:3260-iqn.' 
'abc.12345.opst-lun-X') target_xml = etree.tostring(etree.fromstring(target_xml)) serial = "58a84f6d-3f0c-4e19-a0af-eb657b790657" volume_xml = {'volume': {}} volume_xml['volume'][serial] = {'connection_info': {}, 'disk_info': {}} volume_xml['volume'][serial]['connection_info'] = \ {u'driver_volume_type': u'iscsi', 'serial': u'58a84f6d-3f0c-4e19-a0af-eb657b790657', u'data': {u'access_mode': u'rw', u'target_discovered': False, u'target_iqn': u'ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z', u'volume_id': u'58a84f6d-3f0c-4e19-a0af-eb657b790657', 'device_path': u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z'}} volume_xml['volume'][serial]['disk_info'] = {'bus': u'virtio', 'type': u'disk', 'dev': u'vdb'} connection_info = volume_xml['volume'][serial]['connection_info'] disk_info = volume_xml['volume'][serial]['disk_info'] conf = vconfig.LibvirtConfigGuestDisk() conf.source_device = disk_info['type'] conf.driver_name = "qemu" conf.driver_format = "raw" conf.driver_cache = "none" conf.target_dev = disk_info['dev'] conf.target_bus = disk_info['bus'] conf.serial = connection_info.get('serial') conf.source_type = "block" conf.source_path = connection_info['data'].get('device_path') with mock.patch.object(drvr, '_get_volume_config', return_value=conf): xml_doc = etree.fromstring(initial_xml) config = drvr._update_volume_xml(xml_doc, volume_xml['volume']) self.assertEqual(target_xml, etree.tostring(config)) def test_update_volume_xml_no_connection_info(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) initial_xml = self.device_xml_tmpl.format( device_path='/dev/disk/by-path/' 'ip-1.2.3.4:3260-iqn.' 'abc.12345.opst-lun-X') target_xml = self.device_xml_tmpl.format( device_path='/dev/disk/by-path/' 'ip-1.2.3.4:3260-iqn.' 'abc.12345.opst-lun-X') target_xml = etree.tostring(etree.fromstring(target_xml)) serial = "58a84f6d-3f0c-4e19-a0af-eb657b790657" volume_xml = {'volume': {}} volume_xml['volume'][serial] = {'info1': {}, 'info2': {}} conf = vconfig.LibvirtConfigGuestDisk() with mock.patch.object(drvr, '_get_volume_config', return_value=conf): xml_doc = etree.fromstring(initial_xml) config = drvr._update_volume_xml(xml_doc, volume_xml['volume']) self.assertEqual(target_xml, etree.tostring(config)) @mock.patch.object(fakelibvirt.virDomain, "migrateToURI2") @mock.patch.object(fakelibvirt.virDomain, "XMLDesc") def test_live_migration_update_serial_console_xml(self, mock_xml, mock_migrate): self.compute = importutils.import_object(CONF.compute_manager) instance_ref = self.test_instance xml_tmpl = ("<domain type='kvm'>" "<devices>" "<console type='tcp'>" "<source mode='bind' host='{addr}' service='10000'/>" "</console>" "</devices>" "</domain>") initial_xml = xml_tmpl.format(addr='9.0.0.1') target_xml = xml_tmpl.format(addr='9.0.0.12') target_xml = etree.tostring(etree.fromstring(target_xml)) # Preparing mocks mock_xml.return_value = initial_xml mock_migrate.side_effect = fakelibvirt.libvirtError("ERR") # start test bandwidth = CONF.libvirt.live_migration_bandwidth migrate_data = {'pre_live_migration_result': {'graphics_listen_addrs': {'vnc': '10.0.0.1', 'spice': '10.0.0.2'}, 'serial_listen_addr': '9.0.0.12'}} dom = fakelibvirt.virDomain drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.assertRaises(fakelibvirt.libvirtError, drvr._live_migration_operation, self.context, instance_ref, 'dest', False, migrate_data, dom) mock_xml.assert_called_once_with( flags=fakelibvirt.VIR_DOMAIN_XML_MIGRATABLE) mock_migrate.assert_called_once_with( CONF.libvirt.live_migration_uri % 
'dest', None, target_xml, mock.ANY, None, bandwidth) @mock.patch.object(fakelibvirt, 'VIR_DOMAIN_XML_MIGRATABLE', None, create=True) def test_live_migration_fails_with_serial_console_without_migratable(self): self.compute = importutils.import_object(CONF.compute_manager) instance_ref = self.test_instance CONF.set_override("enabled", True, "serial_console") dom = fakelibvirt.virDomain drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.assertRaises(exception.MigrationError, drvr._live_migration_operation, self.context, instance_ref, 'dest', False, None, dom) @mock.patch.object(fakelibvirt, 'VIR_DOMAIN_XML_MIGRATABLE', None, create=True) def test_live_migration_uses_migrateToURI_without_migratable_flag(self): self.compute = importutils.import_object(CONF.compute_manager) instance_dict = dict(self.test_instance) instance_dict.update({'host': 'fake', 'power_state': power_state.RUNNING, 'vm_state': vm_states.ACTIVE}) instance_ref = objects.Instance(**instance_dict) # Preparing mocks vdmock = self.mox.CreateMock(fakelibvirt.virDomain) self.mox.StubOutWithMock(vdmock, "migrateToURI") _bandwidth = CONF.libvirt.live_migration_bandwidth vdmock.migrateToURI(CONF.libvirt.live_migration_uri % 'dest', mox.IgnoreArg(), None, _bandwidth).AndRaise( fakelibvirt.libvirtError("ERR")) # start test migrate_data = {'pre_live_migration_result': {'graphics_listen_addrs': {'vnc': '0.0.0.0', 'spice': '0.0.0.0'}}} self.mox.ReplayAll() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.assertRaises(fakelibvirt.libvirtError, drvr._live_migration_operation, self.context, instance_ref, 'dest', False, migrate_data, vdmock) def test_live_migration_uses_migrateToURI_without_dest_listen_addrs(self): self.compute = importutils.import_object(CONF.compute_manager) instance_dict = dict(self.test_instance) instance_dict.update({'host': 'fake', 'power_state': power_state.RUNNING, 'vm_state': vm_states.ACTIVE}) instance_ref = objects.Instance(**instance_dict) # Preparing mocks vdmock = self.mox.CreateMock(fakelibvirt.virDomain) self.mox.StubOutWithMock(vdmock, "migrateToURI") _bandwidth = CONF.libvirt.live_migration_bandwidth vdmock.migrateToURI(CONF.libvirt.live_migration_uri % 'dest', mox.IgnoreArg(), None, _bandwidth).AndRaise( fakelibvirt.libvirtError("ERR")) # start test migrate_data = {} self.mox.ReplayAll() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.assertRaises(fakelibvirt.libvirtError, drvr._live_migration_operation, self.context, instance_ref, 'dest', False, migrate_data, vdmock) @mock.patch.object(fakelibvirt, 'VIR_DOMAIN_XML_MIGRATABLE', None, create=True) def test_live_migration_fails_without_migratable_flag_or_0_addr(self): self.flags(enabled=True, vncserver_listen='1.2.3.4', group='vnc') self.compute = importutils.import_object(CONF.compute_manager) instance_dict = dict(self.test_instance) instance_dict.update({'host': 'fake', 'power_state': power_state.RUNNING, 'vm_state': vm_states.ACTIVE}) instance_ref = objects.Instance(**instance_dict) # Preparing mocks vdmock = self.mox.CreateMock(fakelibvirt.virDomain) self.mox.StubOutWithMock(vdmock, "migrateToURI") # start test migrate_data = {'pre_live_migration_result': {'graphics_listen_addrs': {'vnc': '1.2.3.4', 'spice': '1.2.3.4'}}} self.mox.ReplayAll() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.assertRaises(exception.MigrationError, drvr._live_migration_operation, self.context, instance_ref, 'dest', False, migrate_data, vdmock) def test_live_migration_raises_exception(self): # Confirms 
recover method is called when exceptions are raised. # Preparing data self.compute = importutils.import_object(CONF.compute_manager) instance_dict = dict(self.test_instance) instance_dict.update({'host': 'fake', 'power_state': power_state.RUNNING, 'vm_state': vm_states.ACTIVE}) instance_ref = objects.Instance(**instance_dict) # Preparing mocks vdmock = self.mox.CreateMock(fakelibvirt.virDomain) self.mox.StubOutWithMock(vdmock, "migrateToURI2") _bandwidth = CONF.libvirt.live_migration_bandwidth if getattr(fakelibvirt, 'VIR_DOMAIN_XML_MIGRATABLE', None) is None: vdmock.migrateToURI(CONF.libvirt.live_migration_uri % 'dest', mox.IgnoreArg(), None, _bandwidth).AndRaise( fakelibvirt.libvirtError('ERR')) else: vdmock.XMLDesc(flags=fakelibvirt.VIR_DOMAIN_XML_MIGRATABLE ).AndReturn(FakeVirtDomain().XMLDesc(flags=0)) vdmock.migrateToURI2(CONF.libvirt.live_migration_uri % 'dest', None, mox.IgnoreArg(), mox.IgnoreArg(), None, _bandwidth).AndRaise( fakelibvirt.libvirtError('ERR')) # start test migrate_data = {'pre_live_migration_result': {'graphics_listen_addrs': {'vnc': '127.0.0.1', 'spice': '127.0.0.1'}}} self.mox.ReplayAll() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.assertRaises(fakelibvirt.libvirtError, drvr._live_migration_operation, self.context, instance_ref, 'dest', False, migrate_data, vdmock) self.assertEqual(vm_states.ACTIVE, instance_ref.vm_state) self.assertEqual(power_state.RUNNING, instance_ref.power_state) def test_live_migration_raises_unsupported_config_exception(self): # Tests that when migrateToURI2 fails with VIR_ERR_CONFIG_UNSUPPORTED, # migrateToURI is used instead. # Preparing data instance_ref = objects.Instance(**self.test_instance) # Preparing mocks vdmock = self.mox.CreateMock(fakelibvirt.virDomain) self.mox.StubOutWithMock(vdmock, 'migrateToURI2') self.mox.StubOutWithMock(vdmock, 'migrateToURI') _bandwidth = CONF.libvirt.live_migration_bandwidth vdmock.XMLDesc(flags=fakelibvirt.VIR_DOMAIN_XML_MIGRATABLE).AndReturn( FakeVirtDomain().XMLDesc(flags=0)) unsupported_config_error = fakelibvirt.libvirtError('ERR') unsupported_config_error.err = ( fakelibvirt.VIR_ERR_CONFIG_UNSUPPORTED,) # This is the first error we hit but since the error code is # VIR_ERR_CONFIG_UNSUPPORTED we'll try migrateToURI. vdmock.migrateToURI2(CONF.libvirt.live_migration_uri % 'dest', None, mox.IgnoreArg(), mox.IgnoreArg(), None, _bandwidth).AndRaise(unsupported_config_error) # This is the second and final error that will actually kill the run, # we use TestingException to make sure it's not the same libvirtError # above. 
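        # Roughly the driver behaviour being exercised (a paraphrase, not
        # the exact implementation):
        #
        #     try:
        #         dom.migrateToURI2(...)
        #     except libvirt.libvirtError as ex:
        #         if ex.get_error_code() == VIR_ERR_CONFIG_UNSUPPORTED:
        #             dom.migrateToURI(...)  # older API, no XML rewrite
        #         else:
        #             raise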
vdmock.migrateToURI(CONF.libvirt.live_migration_uri % 'dest', mox.IgnoreArg(), None, _bandwidth).AndRaise(test.TestingException('oops')) graphics_listen_addrs = {'vnc': '0.0.0.0', 'spice': '127.0.0.1'} migrate_data = {'pre_live_migration_result': {'graphics_listen_addrs': graphics_listen_addrs}} drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.mox.StubOutWithMock( drvr, '_check_graphics_addresses_can_live_migrate') drvr._check_graphics_addresses_can_live_migrate(graphics_listen_addrs) self.mox.ReplayAll() # start test self.assertRaises(test.TestingException, drvr._live_migration_operation, self.context, instance_ref, 'dest', False, migrate_data, vdmock) @mock.patch('shutil.rmtree') @mock.patch('os.path.exists', return_value=True) @mock.patch('nova.virt.libvirt.utils.get_instance_path_at_destination') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.destroy') def test_rollback_live_migration_at_dest_not_shared(self, mock_destroy, mock_get_instance_path, mock_exist, mock_shutil ): # destroy method may raise InstanceTerminationFailure or # InstancePowerOffFailure, here use their base class Invalid. mock_destroy.side_effect = exception.Invalid(reason='just test') fake_instance_path = os.path.join(cfg.CONF.instances_path, '/fake_instance_uuid') mock_get_instance_path.return_value = fake_instance_path drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) migrate_data = {'is_shared_instance_path': False} self.assertRaises(exception.Invalid, drvr.rollback_live_migration_at_destination, "context", "instance", [], None, True, migrate_data) mock_exist.assert_called_once_with(fake_instance_path) mock_shutil.assert_called_once_with(fake_instance_path) @mock.patch('shutil.rmtree') @mock.patch('os.path.exists') @mock.patch('nova.virt.libvirt.utils.get_instance_path_at_destination') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.destroy') def test_rollback_live_migration_at_dest_shared(self, mock_destroy, mock_get_instance_path, mock_exist, mock_shutil ): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) migrate_data = {'is_shared_instance_path': True} drvr.rollback_live_migration_at_destination("context", "instance", [], None, True, migrate_data) mock_destroy.assert_called_once_with("context", "instance", [], None, True, migrate_data) self.assertFalse(mock_get_instance_path.called) self.assertFalse(mock_exist.called) self.assertFalse(mock_shutil.called) @mock.patch.object(fakelibvirt.Domain, "XMLDesc") def test_live_migration_copy_disk_paths(self, mock_xml): xml = """ <domain> <name>dummy</name> <uuid>d4e13113-918e-42fe-9fc9-861693ffd432</uuid> <devices> <disk type="file"> <source file="/var/lib/nova/instance/123/disk.root"/> </disk> <disk type="file"> <source file="/var/lib/nova/instance/123/disk.shared"/> <shareable/> </disk> <disk type="file"> <source file="/var/lib/nova/instance/123/disk.config"/> <readonly/> </disk> <disk type="block"> <source dev="/dev/mapper/somevol"/> </disk> <disk type="network"> <source protocol="https" name="url_path"> <host name="hostname" port="443"/> </source> </disk> </devices> </domain>""" mock_xml.return_value = xml drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) dom = fakelibvirt.Domain(drvr._get_connection(), xml, False) guest = libvirt_guest.Guest(dom) paths = drvr._live_migration_copy_disk_paths(guest) self.assertEqual(["/var/lib/nova/instance/123/disk.root", "/dev/mapper/somevol"], paths) @mock.patch.object(libvirt_driver.LibvirtDriver, "_live_migration_copy_disk_paths") def 
test_live_migration_data_gb_plain(self, mock_paths):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        dom = fakelibvirt.Domain(drvr._get_connection(), "<domain/>", False)
        guest = libvirt_guest.Guest(dom)
        instance = objects.Instance(**self.test_instance)

        data_gb = drvr._live_migration_data_gb(instance, guest, False)
        self.assertEqual(2, data_gb)
        self.assertEqual(0, mock_paths.call_count)

    @mock.patch.object(libvirt_driver.LibvirtDriver,
                       "_live_migration_copy_disk_paths")
    def test_live_migration_data_gb_block(self, mock_paths):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        dom = fakelibvirt.Domain(drvr._get_connection(), "<domain/>", False)
        guest = libvirt_guest.Guest(dom)
        instance = objects.Instance(**self.test_instance)

        def fake_stat(path):
            class StatResult(object):
                def __init__(self, size):
                    self._size = size

                @property
                def st_size(self):
                    return self._size

            if path == "/var/lib/nova/instance/123/disk.root":
                return StatResult(10 * units.Gi)
            elif path == "/dev/mapper/somevol":
                return StatResult(1.5 * units.Gi)
            else:
                raise Exception("Should not be reached")

        mock_paths.return_value = ["/var/lib/nova/instance/123/disk.root",
                                   "/dev/mapper/somevol"]
        with mock.patch.object(os, "stat") as mock_stat:
            mock_stat.side_effect = fake_stat
            data_gb = drvr._live_migration_data_gb(instance, guest, True)
            # Expecting 2 GB for RAM, plus 10 GB for disk.root
            # and 1.5 GB rounded to 2 GB for somevol, so 14 GB
            self.assertEqual(14, data_gb)
            self.assertEqual(1, mock_paths.call_count)

    EXPECT_SUCCESS = 1
    EXPECT_FAILURE = 2
    EXPECT_ABORT = 3

    @mock.patch.object(time, "time")
    @mock.patch.object(time, "sleep",
                       side_effect=lambda x: eventlet.sleep(0))
    @mock.patch.object(host.DomainJobInfo, "for_domain")
    @mock.patch.object(objects.Instance, "save")
    @mock.patch.object(fakelibvirt.Connection, "_mark_running")
    @mock.patch.object(fakelibvirt.virDomain, "abortJob")
    def _test_live_migration_monitoring(self,
                                        job_info_records,
                                        time_records,
                                        expect_result,
                                        mock_abort,
                                        mock_running,
                                        mock_save,
                                        mock_job_info,
                                        mock_sleep,
                                        mock_time):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        dom = fakelibvirt.Domain(drvr._get_connection(), "<domain/>", True)
        guest = libvirt_guest.Guest(dom)
        finish_event = eventlet.event.Event()

        def fake_job_info(hostself):
            while True:
                self.assertGreater(len(job_info_records), 0)
                rec = job_info_records.pop(0)

                if isinstance(rec, str):
                    if rec == "thread-finish":
                        finish_event.send()
                    elif rec == "domain-stop":
                        dom.destroy()
                else:
                    if len(time_records) > 0:
                        time_records.pop(0)
                    return rec

        def fake_time():
            if len(time_records) > 0:
                return time_records[0]
            else:
                return int(
                    datetime.datetime(2001, 1, 20, 20, 1, 0)
                    .strftime('%s'))

        mock_job_info.side_effect = fake_job_info
        mock_time.side_effect = fake_time

        dest = mock.sentinel.migrate_dest
        migrate_data = mock.sentinel.migrate_data

        fake_post_method = mock.MagicMock()
        fake_recover_method = mock.MagicMock()
        drvr._live_migration_monitor(self.context, instance,
                                     guest, dest,
                                     fake_post_method,
                                     fake_recover_method,
                                     False, migrate_data,
                                     dom, finish_event)

        if expect_result == self.EXPECT_SUCCESS:
            self.assertFalse(fake_recover_method.called,
                             'Recover method called when success expected')
            self.assertFalse(mock_abort.called,
                             'abortJob not called when success expected')
            fake_post_method.assert_called_once_with(
                self.context, instance, dest, False, migrate_data)
        else:
            if expect_result == self.EXPECT_ABORT:
                self.assertTrue(mock_abort.called,
                                'abortJob called when abort expected')
            else:
self.assertFalse(mock_abort.called, 'abortJob not called when failure expected') self.assertFalse(fake_post_method.called, 'Post method called when success not expected') fake_recover_method.assert_called_once_with( self.context, instance, dest, False, migrate_data) def test_live_migration_monitor_success(self): # A normal sequence where see all the normal job states domain_info_records = [ host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_NONE), host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), "thread-finish", "domain-stop", host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_COMPLETED), ] self._test_live_migration_monitoring(domain_info_records, [], self.EXPECT_SUCCESS) def test_live_migration_monitor_success_race(self): # A normalish sequence but we're too slow to see the # completed job state domain_info_records = [ host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_NONE), host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), "thread-finish", "domain-stop", host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_NONE), ] self._test_live_migration_monitoring(domain_info_records, [], self.EXPECT_SUCCESS) def test_live_migration_monitor_failed(self): # A failed sequence where we see all the expected events domain_info_records = [ host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_NONE), host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), "thread-finish", host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_FAILED), ] self._test_live_migration_monitoring(domain_info_records, [], self.EXPECT_FAILURE) def test_live_migration_monitor_failed_race(self): # A failed sequence where we are too slow to see the # failed event domain_info_records = [ host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_NONE), host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), "thread-finish", host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_NONE), ] self._test_live_migration_monitoring(domain_info_records, [], self.EXPECT_FAILURE) def test_live_migration_monitor_cancelled(self): # A cancelled sequence where we see all the events domain_info_records = [ host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_NONE), host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), "thread-finish", "domain-stop", host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_CANCELLED), ] self._test_live_migration_monitoring(domain_info_records, [], self.EXPECT_FAILURE) @mock.patch.object(fakelibvirt.virDomain, "migrateSetMaxDowntime") @mock.patch.object(libvirt_driver.LibvirtDriver, "_migration_downtime_steps") def test_live_migration_monitor_downtime(self, mock_downtime_steps, mock_set_downtime): self.flags(live_migration_completion_timeout=1000000, live_migration_progress_timeout=1000000, group='libvirt') # We've setup 4 fake downtime steps - first value is the # time delay, second is the downtime value downtime_steps = [ (90, 10), (180, 50), (270, 200), 
(500, 300), ] mock_downtime_steps.return_value = downtime_steps # Each one of these fake times is used for time.time() # when a new domain_info_records entry is consumed. # Times are chosen so that only the first 3 downtime # steps are needed. fake_times = [0, 1, 30, 95, 150, 200, 300] # A normal sequence where see all the normal job states domain_info_records = [ host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_NONE), host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), "thread-finish", "domain-stop", host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_COMPLETED), ] self._test_live_migration_monitoring(domain_info_records, fake_times, self.EXPECT_SUCCESS) mock_set_downtime.assert_has_calls([mock.call(10), mock.call(50), mock.call(200)]) def test_live_migration_monitor_completion(self): self.flags(live_migration_completion_timeout=100, live_migration_progress_timeout=1000000, group='libvirt') # Each one of these fake times is used for time.time() # when a new domain_info_records entry is consumed. fake_times = [0, 40, 80, 120, 160, 200, 240, 280, 320] # A normal sequence where see all the normal job states domain_info_records = [ host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_NONE), host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), "thread-finish", "domain-stop", host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_CANCELLED), ] self._test_live_migration_monitoring(domain_info_records, fake_times, self.EXPECT_ABORT) def test_live_migration_monitor_progress(self): self.flags(live_migration_completion_timeout=1000000, live_migration_progress_timeout=150, group='libvirt') # Each one of these fake times is used for time.time() # when a new domain_info_records entry is consumed. 
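        # With live_migration_progress_timeout=150 and the fake clock below
        # advancing in 40-second steps while the job reports no progress,
        # the monitor is expected to give up and abort the job partway
        # through the sequence, hence EXPECT_ABORT.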
fake_times = [0, 40, 80, 120, 160, 200, 240, 280, 320] # A normal sequence where see all the normal job states domain_info_records = [ host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_NONE), host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), "thread-finish", "domain-stop", host.DomainJobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_CANCELLED), ] self._test_live_migration_monitoring(domain_info_records, fake_times, self.EXPECT_ABORT) def test_live_migration_downtime_steps(self): self.flags(live_migration_downtime=400, group='libvirt') self.flags(live_migration_downtime_steps=10, group='libvirt') self.flags(live_migration_downtime_delay=30, group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) steps = drvr._migration_downtime_steps(3.0) self.assertEqual([ (0, 37), (90, 38), (180, 39), (270, 42), (360, 46), (450, 55), (540, 70), (630, 98), (720, 148), (810, 238), (900, 400), ], list(steps)) @mock.patch.object(utils, "spawn") @mock.patch.object(libvirt_driver.LibvirtDriver, "_live_migration_monitor") @mock.patch.object(host.Host, "get_guest") @mock.patch.object(fakelibvirt.Connection, "_mark_running") def test_live_migration_main(self, mock_running, mock_guest, mock_monitor, mock_thread): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) instance = objects.Instance(**self.test_instance) dom = fakelibvirt.Domain(drvr._get_connection(), "<domain><name>demo</name></domain>", True) guest = libvirt_guest.Guest(dom) migrate_data = {} mock_guest.return_value = guest def fake_post(): pass def fake_recover(): pass drvr._live_migration(self.context, instance, "fakehost", fake_post, fake_recover, False, migrate_data) class AnyEventletEvent(object): def __eq__(self, other): return type(other) == eventlet.event.Event mock_thread.assert_called_once_with( drvr._live_migration_operation, self.context, instance, "fakehost", False, migrate_data, dom) mock_monitor.assert_called_once_with( self.context, instance, guest, "fakehost", fake_post, fake_recover, False, migrate_data, dom, AnyEventletEvent()) def _do_test_create_images_and_backing(self, disk_type): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.mox.StubOutWithMock(drvr, '_fetch_instance_kernel_ramdisk') self.mox.StubOutWithMock(libvirt_driver.libvirt_utils, 'create_image') disk_info = {'path': 'foo', 'type': disk_type, 'disk_size': 1 * 1024 ** 3, 'virt_disk_size': 20 * 1024 ** 3, 'backing_file': None} libvirt_driver.libvirt_utils.create_image( disk_info['type'], mox.IgnoreArg(), disk_info['virt_disk_size']) drvr._fetch_instance_kernel_ramdisk(self.context, self.test_instance, fallback_from_host=None) self.mox.ReplayAll() self.stubs.Set(os.path, 'exists', lambda *args: False) drvr._create_images_and_backing(self.context, self.test_instance, "/fake/instance/dir", [disk_info]) def test_create_images_and_backing_qcow2(self): self._do_test_create_images_and_backing('qcow2') def test_create_images_and_backing_raw(self): self._do_test_create_images_and_backing('raw') def test_create_images_and_backing_images_not_exist_no_fallback(self): conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) disk_info = [ {u'backing_file': u'fake_image_backing_file', u'disk_size': 10747904, u'path': u'disk_path', u'type': u'qcow2', u'virt_disk_size': 
25165824}] self.test_instance.update({'user_id': 'fake-user', 'os_type': None, 'project_id': 'fake-project'}) instance = objects.Instance(**self.test_instance) with mock.patch.object(libvirt_driver.libvirt_utils, 'fetch_image', side_effect=exception.ImageNotFound( image_id="fake_id")): self.assertRaises(exception.ImageNotFound, conn._create_images_and_backing, self.context, instance, "/fake/instance/dir", disk_info) def test_create_images_and_backing_images_not_exist_fallback(self): conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) disk_info = [ {u'backing_file': u'fake_image_backing_file', u'disk_size': 10747904, u'path': u'disk_path', u'type': u'qcow2', u'virt_disk_size': 25165824}] base_dir = os.path.join(CONF.instances_path, CONF.image_cache_subdirectory_name) self.test_instance.update({'user_id': 'fake-user', 'os_type': None, 'kernel_id': 'fake_kernel_id', 'ramdisk_id': 'fake_ramdisk_id', 'project_id': 'fake-project'}) instance = objects.Instance(**self.test_instance) with contextlib.nested( mock.patch.object(libvirt_driver.libvirt_utils, 'copy_image'), mock.patch.object(libvirt_driver.libvirt_utils, 'fetch_image', side_effect=exception.ImageNotFound( image_id="fake_id")), ) as (copy_image_mock, fetch_image_mock): conn._create_images_and_backing(self.context, instance, "/fake/instance/dir", disk_info, fallback_from_host="fake_host") backfile_path = os.path.join(base_dir, 'fake_image_backing_file') kernel_path = os.path.join(CONF.instances_path, self.test_instance['uuid'], 'kernel') ramdisk_path = os.path.join(CONF.instances_path, self.test_instance['uuid'], 'ramdisk') copy_image_mock.assert_has_calls([ mock.call(dest=backfile_path, src=backfile_path, host='fake_host', receive=True), mock.call(dest=kernel_path, src=kernel_path, host='fake_host', receive=True), mock.call(dest=ramdisk_path, src=ramdisk_path, host='fake_host', receive=True) ]) fetch_image_mock.assert_has_calls([ mock.call(context=self.context, target=backfile_path, image_id=self.test_instance['image_ref'], user_id=self.test_instance['user_id'], project_id=self.test_instance['project_id'], max_size=25165824), mock.call(self.context, kernel_path, self.test_instance['kernel_id'], self.test_instance['user_id'], self.test_instance['project_id']), mock.call(self.context, ramdisk_path, self.test_instance['ramdisk_id'], self.test_instance['user_id'], self.test_instance['project_id']), ]) @mock.patch.object(libvirt_driver.libvirt_utils, 'fetch_image') @mock.patch.object(os.path, 'exists', return_value=True) def test_create_images_and_backing_images_exist(self, mock_exists, mock_fetch_image): conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) disk_info = [ {u'backing_file': u'fake_image_backing_file', u'disk_size': 10747904, u'path': u'disk_path', u'type': u'qcow2', u'virt_disk_size': 25165824}] self.test_instance.update({'user_id': 'fake-user', 'os_type': None, 'kernel_id': 'fake_kernel_id', 'ramdisk_id': 'fake_ramdisk_id', 'project_id': 'fake-project'}) instance = objects.Instance(**self.test_instance) conn._create_images_and_backing(self.context, instance, '/fake/instance/dir', disk_info) self.assertFalse(mock_fetch_image.called) def test_create_images_and_backing_ephemeral_gets_created(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) disk_info = [ {u'backing_file': u'fake_image_backing_file', u'disk_size': 10747904, u'path': u'disk_path', u'type': u'qcow2', u'virt_disk_size': 25165824}, {u'backing_file': u'ephemeral_1_default', u'disk_size': 393216, u'over_committed_disk_size': 
                     1073348608,
                     u'path': u'disk_eph_path',
                     u'type': u'qcow2',
                     u'virt_disk_size': 1073741824}]

        base_dir = os.path.join(CONF.instances_path,
                                CONF.image_cache_subdirectory_name)
        instance = objects.Instance(**self.test_instance)
        with contextlib.nested(
            mock.patch.object(drvr, '_fetch_instance_kernel_ramdisk'),
            mock.patch.object(libvirt_driver.libvirt_utils, 'fetch_image'),
            mock.patch.object(drvr, '_create_ephemeral'),
            mock.patch.object(imagebackend.Image, 'verify_base_size')
        ) as (fetch_kernel_ramdisk_mock, fetch_image_mock,
              create_ephemeral_mock, verify_base_size_mock):
            drvr._create_images_and_backing(self.context, instance,
                                            "/fake/instance/dir", disk_info)

        self.assertEqual(len(create_ephemeral_mock.call_args_list), 1)
        m_args, m_kwargs = create_ephemeral_mock.call_args_list[0]
        self.assertEqual(
                os.path.join(base_dir, 'ephemeral_1_default'),
                m_kwargs['target'])
        self.assertEqual(len(fetch_image_mock.call_args_list), 1)
        m_args, m_kwargs = fetch_image_mock.call_args_list[0]
        self.assertEqual(
                os.path.join(base_dir, 'fake_image_backing_file'),
                m_kwargs['target'])
        verify_base_size_mock.assert_has_calls([
            mock.call(os.path.join(base_dir, 'fake_image_backing_file'),
                      25165824),
            mock.call(os.path.join(base_dir, 'ephemeral_1_default'),
                      1073741824)
        ])

    def test_create_images_and_backing_disk_info_none(self):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.mox.StubOutWithMock(drvr, '_fetch_instance_kernel_ramdisk')

        drvr._fetch_instance_kernel_ramdisk(self.context,
                                            self.test_instance,
                                            fallback_from_host=None)
        self.mox.ReplayAll()

        drvr._create_images_and_backing(self.context, self.test_instance,
                                        "/fake/instance/dir", None)

    def test_pre_live_migration_works_correctly_mocked(self):
        # Creating testdata
        vol = {'block_device_mapping': [
            {'connection_info': {'serial': '12345', u'data':
                {'device_path':
                 u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.abc.12345.opst-lun-X'}},
             'mount_device': '/dev/sda'},
            {'connection_info': {'serial': '67890', u'data':
                {'device_path':
                 u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z'}},
             'mount_device': '/dev/sdb'}]}
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        class FakeNetworkInfo(object):
            def fixed_ips(self):
                return ["test_ip_addr"]

        def fake_none(*args, **kwargs):
            return

        self.stubs.Set(drvr, '_create_images_and_backing', fake_none)

        instance = objects.Instance(**self.test_instance)
        c = context.get_admin_context()
        nw_info = FakeNetworkInfo()

        # Creating mocks
        self.mox.StubOutWithMock(driver, "block_device_info_get_mapping")
        driver.block_device_info_get_mapping(vol
            ).AndReturn(vol['block_device_mapping'])
        self.mox.StubOutWithMock(drvr, "_connect_volume")
        for v in vol['block_device_mapping']:
            disk_info = {
                'bus': "scsi",
                'dev': v['mount_device'].rpartition("/")[2],
                'type': "disk"
                }
            drvr._connect_volume(v['connection_info'], disk_info)
        self.mox.StubOutWithMock(drvr, 'plug_vifs')
        drvr.plug_vifs(mox.IsA(instance), nw_info)
        self.mox.ReplayAll()
        result = drvr.pre_live_migration(
            c, instance, vol, nw_info, None,
            migrate_data={"block_migration": False})

        target_ret = {
            'graphics_listen_addrs': {'spice': '127.0.0.1',
                                      'vnc': '127.0.0.1'},
            'serial_listen_addr': '127.0.0.1',
            'volume': {
                '12345': {'connection_info': {u'data': {'device_path':
                          u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.abc.12345.opst-lun-X'},
                          'serial': '12345'},
                          'disk_info': {'bus': 'scsi',
                                        'dev': 'sda',
                                        'type': 'disk'}},
                '67890': {'connection_info': {u'data': {'device_path':
                          u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z'},
                          'serial': '67890'},
                          'disk_info': {'bus': 'scsi',
                                        'dev': 'sdb',
                                        'type': 'disk'}}}}
        self.assertEqual(result, target_ret)

    def test_pre_live_migration_block_with_config_drive_mocked(self):
        # Creating testdata
        vol = {'block_device_mapping': [
            {'connection_info': 'dummy', 'mount_device': '/dev/sda'},
            {'connection_info': 'dummy', 'mount_device': '/dev/sdb'}]}
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        def fake_true(*args, **kwargs):
            return True

        self.stubs.Set(configdrive, 'required_by', fake_true)

        instance = objects.Instance(**self.test_instance)
        c = context.get_admin_context()

        self.assertRaises(exception.NoLiveMigrationForConfigDriveInLibVirt,
                          drvr.pre_live_migration, c, instance, vol,
                          None, None,
                          {'is_shared_instance_path': False,
                           'is_shared_block_storage': False})

    @mock.patch('nova.virt.driver.block_device_info_get_mapping',
                return_value=())
    @mock.patch('nova.virt.configdrive.required_by',
                return_value=True)
    def test_pre_live_migration_block_with_config_drive_mocked_with_vfat(
            self, mock_required_by, block_device_info_get_mapping):
        self.flags(config_drive_format='vfat')
        # Creating testdata
        vol = {'block_device_mapping': [
            {'connection_info': 'dummy', 'mount_device': '/dev/sda'},
            {'connection_info': 'dummy', 'mount_device': '/dev/sdb'}]}
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)

        res_data = drvr.pre_live_migration(
            self.context, instance, vol, [], None,
            {'is_shared_instance_path': False,
             'is_shared_block_storage': False})
        block_device_info_get_mapping.assert_called_once_with(
            {'block_device_mapping': [
                {'connection_info': 'dummy', 'mount_device': '/dev/sda'},
                {'connection_info': 'dummy', 'mount_device': '/dev/sdb'}
            ]}
        )
        self.assertEqual({'graphics_listen_addrs': {'spice': '127.0.0.1',
                                                    'vnc': '127.0.0.1'},
                          'serial_listen_addr': '127.0.0.1',
                          'volume': {}}, res_data)

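    # NOTE: unlike the mocked variants above, the following test points
    # CONF.instances_path at a real temporary directory so that the
    # instance directory created for the volume-backed case can be
    # checked on disk afterwards.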
    def test_pre_live_migration_vol_backed_works_correctly_mocked(self):
        # Creating testdata, using temp dir.
        with utils.tempdir() as tmpdir:
            self.flags(instances_path=tmpdir)
            vol = {'block_device_mapping': [
                {'connection_info': {'serial': '12345', u'data':
                    {'device_path':
                     u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.abc.12345.opst-lun-X'}},
                 'mount_device': '/dev/sda'},
                {'connection_info': {'serial': '67890', u'data':
                    {'device_path':
                     u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z'}},
                 'mount_device': '/dev/sdb'}]}
            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

            def fake_none(*args, **kwargs):
                return

            self.stubs.Set(drvr, '_create_images_and_backing', fake_none)

            class FakeNetworkInfo(object):
                def fixed_ips(self):
                    return ["test_ip_addr"]
            inst_ref = objects.Instance(**self.test_instance)
            c = context.get_admin_context()
            nw_info = FakeNetworkInfo()

            # Creating mocks
            self.mox.StubOutWithMock(drvr, "_connect_volume")
            for v in vol['block_device_mapping']:
                disk_info = {
                    'bus': "scsi",
                    'dev': v['mount_device'].rpartition("/")[2],
                    'type': "disk"
                    }
                drvr._connect_volume(v['connection_info'], disk_info)
            self.mox.StubOutWithMock(drvr, 'plug_vifs')
            drvr.plug_vifs(mox.IsA(inst_ref), nw_info)
            self.mox.ReplayAll()
            migrate_data = {'is_shared_instance_path': False,
                            'is_volume_backed': True,
                            'block_migration': False,
                            'instance_relative_path': inst_ref['name']
                            }
            ret = drvr.pre_live_migration(c, inst_ref, vol, nw_info, None,
                                          migrate_data)
            target_ret = {
                'graphics_listen_addrs': {'spice': '127.0.0.1',
                                          'vnc': '127.0.0.1'},
                'serial_listen_addr': '127.0.0.1',
                'volume': {
                    '12345': {'connection_info': {u'data': {'device_path':
                              u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.abc.12345.opst-lun-X'},
                              'serial': '12345'},
                              'disk_info': {'bus': 'scsi',
                                            'dev': 'sda',
                                            'type': 'disk'}},
                    '67890': {'connection_info': {u'data': {'device_path':
                              u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z'},
                              'serial': '67890'},
                              'disk_info': {'bus': 'scsi',
                                            'dev': 'sdb',
                                            'type': 'disk'}}}}
            self.assertEqual(ret, target_ret)
            self.assertTrue(os.path.exists('%s/%s/' % (tmpdir,
                                                       inst_ref['name'])))

    def test_pre_live_migration_plug_vifs_retry_fails(self):
        self.flags(live_migration_retry_count=3)
        instance = objects.Instance(**self.test_instance)

        def fake_plug_vifs(instance, network_info):
            raise processutils.ProcessExecutionError()

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.stubs.Set(drvr, 'plug_vifs', fake_plug_vifs)
        self.stubs.Set(eventlet.greenthread, 'sleep',
                       lambda x: eventlet.sleep(0))
        disk_info_json = jsonutils.dumps({})
        self.assertRaises(processutils.ProcessExecutionError,
                          drvr.pre_live_migration,
                          self.context, instance, block_device_info=None,
                          network_info=[], disk_info=disk_info_json)

    def test_pre_live_migration_plug_vifs_retry_works(self):
        self.flags(live_migration_retry_count=3)
        called = {'count': 0}
        instance = objects.Instance(**self.test_instance)

        def fake_plug_vifs(instance, network_info):
            called['count'] += 1
            if called['count'] < CONF.live_migration_retry_count:
                raise processutils.ProcessExecutionError()
            else:
                return

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.stubs.Set(drvr, 'plug_vifs', fake_plug_vifs)
        self.stubs.Set(eventlet.greenthread, 'sleep',
                       lambda x: eventlet.sleep(0))
        disk_info_json = jsonutils.dumps({})
        drvr.pre_live_migration(self.context, instance,
                                block_device_info=None,
                                network_info=[], disk_info=disk_info_json)

    def test_pre_live_migration_image_not_created_with_shared_storage(self):
        migrate_data_set = [{'is_shared_block_storage': False,
                             'block_migration': False},
                            {'is_shared_block_storage': True,
                             'block_migration': False},
                            {'is_shared_block_storage': False,
                             'block_migration': True}]

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)

        # creating mocks
        with contextlib.nested(
            mock.patch.object(drvr, '_create_images_and_backing'),
            mock.patch.object(drvr, 'ensure_filtering_rules_for_instance'),
            mock.patch.object(drvr, 'plug_vifs'),
        ) as (
            create_image_mock,
            rules_mock,
            plug_mock,
        ):
            disk_info_json = jsonutils.dumps({})
            for migrate_data in migrate_data_set:
                res = drvr.pre_live_migration(self.context, instance,
                                              block_device_info=None,
                                              network_info=[],
                                              disk_info=disk_info_json,
                                              migrate_data=migrate_data)
                self.assertFalse(create_image_mock.called)
                self.assertIsInstance(res, dict)

    def test_pre_live_migration_with_not_shared_instance_path(self):
        migrate_data = {'is_shared_block_storage': False,
                        'is_shared_instance_path': False}

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)

        def check_instance_dir(context, instance,
                               instance_dir, disk_info,
                               fallback_from_host=False):
            self.assertTrue(instance_dir)

        # creating mocks
        with contextlib.nested(
            mock.patch.object(drvr, '_create_images_and_backing',
                              side_effect=check_instance_dir),
            mock.patch.object(drvr, 'ensure_filtering_rules_for_instance'),
            mock.patch.object(drvr, 'plug_vifs'),
        ) as (
            create_image_mock,
            rules_mock,
            plug_mock,
        ):
            disk_info_json = jsonutils.dumps({})
            res = drvr.pre_live_migration(self.context, instance,
                                          block_device_info=None,
                                          network_info=[],
                                          disk_info=disk_info_json,
                                          migrate_data=migrate_data)
            create_image_mock.assert_has_calls(
                [mock.call(self.context, instance, mock.ANY, {},
                           fallback_from_host=instance.host)])
            self.assertIsInstance(res, dict)

    def test_pre_live_migration_block_migrate_fails(self):
        bdms = [{
            'connection_info': {
                'serial': '12345',
                u'data': {
                    'device_path':
                    u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.abc.12345.t-lun-X'
                }
            },
            'mount_device': '/dev/sda'}]

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        with contextlib.nested(
            mock.patch.object(drvr, '_create_images_and_backing'),
            mock.patch.object(drvr, 'ensure_filtering_rules_for_instance'),
            mock.patch.object(drvr, 'plug_vifs'),
            mock.patch.object(drvr, '_connect_volume'),
            mock.patch.object(driver, 'block_device_info_get_mapping',
                              return_value=bdms)):
            disk_info_json = jsonutils.dumps({})
            self.assertRaises(exception.MigrationError,
                              drvr.pre_live_migration,
                              self.context, instance, block_device_info=None,
                              network_info=[], disk_info=disk_info_json,
                              migrate_data={})

    def test_get_instance_disk_info_works_correctly(self):
        # Test data
        instance = objects.Instance(**self.test_instance)
        dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                    "<devices>"
                    "<disk type='file'><driver name='qemu' type='raw'/>"
                    "<source file='/test/disk'/>"
                    "<target dev='vda' bus='virtio'/></disk>"
                    "<disk type='file'><driver name='qemu' type='qcow2'/>"
                    "<source file='/test/disk.local'/>"
                    "<target dev='vdb' bus='virtio'/></disk>"
                    "</devices></domain>")

        # Preparing mocks
        vdmock = self.mox.CreateMock(fakelibvirt.virDomain)
        self.mox.StubOutWithMock(vdmock, "XMLDesc")
        vdmock.XMLDesc(flags=0).AndReturn(dummyxml)

        def fake_lookup(instance_name):
            if instance_name == instance.name:
                return vdmock
        self.create_fake_libvirt_mock(lookupByName=fake_lookup)

        fake_libvirt_utils.disk_sizes['/test/disk'] = 10 * units.Gi
        fake_libvirt_utils.disk_sizes['/test/disk.local'] = 20 * units.Gi
        fake_libvirt_utils.disk_backing_files['/test/disk.local'] = 'file'

        self.mox.StubOutWithMock(os.path, "getsize")
        os.path.getsize('/test/disk').AndReturn((10737418240))
        os.path.getsize('/test/disk.local').AndReturn((3328599655))

        ret = ("image: /test/disk\n"
               "file format: raw\n"
               "virtual size: 20G (21474836480 bytes)\n"
               "disk size: 3.1G\n"
               "cluster_size: 2097152\n"
               "backing file: /test/dummy (actual path: /backing/file)\n")

        self.mox.StubOutWithMock(os.path, "exists")
        os.path.exists('/test/disk.local').AndReturn(True)

        self.mox.StubOutWithMock(utils, "execute")
        utils.execute('env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info',
                      '/test/disk.local').AndReturn((ret, ''))

        self.mox.ReplayAll()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        info = drvr.get_instance_disk_info(instance)
        info = jsonutils.loads(info)
        self.assertEqual(info[0]['type'], 'raw')
        self.assertEqual(info[0]['path'], '/test/disk')
        self.assertEqual(info[0]['disk_size'], 10737418240)
        self.assertEqual(info[0]['backing_file'], "")
        self.assertEqual(info[0]['over_committed_disk_size'], 0)
        self.assertEqual(info[1]['type'], 'qcow2')
        self.assertEqual(info[1]['path'], '/test/disk.local')
        self.assertEqual(info[1]['virt_disk_size'], 21474836480)
        self.assertEqual(info[1]['backing_file'], "file")
        self.assertEqual(info[1]['over_committed_disk_size'], 18146236825)

    def test_post_live_migration(self):
        vol = {'block_device_mapping': [
            {'connection_info': {
                 'data': {'multipath_id': 'dummy1'},
                 'serial': 'fake_serial1'},
             'mount_device': '/dev/sda',
             },
            {'connection_info': {
                 'data': {},
                 'serial': 'fake_serial2'},
             'mount_device': '/dev/sdb', }]}

        def fake_initialize_connection(context, volume_id, connector):
            return {'data': {}}

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        fake_connector = {'host': 'fake'}
        inst_ref = {'id': 'foo'}
        cntx = context.get_admin_context()

        # Set up the mock expectations
        with contextlib.nested(
            mock.patch.object(driver, 'block_device_info_get_mapping',
                              return_value=vol['block_device_mapping']),
            mock.patch.object(drvr, "get_volume_connector",
                              return_value=fake_connector),
            mock.patch.object(drvr._volume_api, "initialize_connection",
                              side_effect=fake_initialize_connection),
            mock.patch.object(drvr, '_disconnect_volume')
        ) as (block_device_info_get_mapping, get_volume_connector,
              initialize_connection, _disconnect_volume):
            drvr.post_live_migration(cntx, inst_ref, vol)

            block_device_info_get_mapping.assert_has_calls([
                mock.call(vol)])
            get_volume_connector.assert_has_calls([
                mock.call(inst_ref)])
            _disconnect_volume.assert_has_calls([
                mock.call({'data': {'multipath_id': 'dummy1'}}, 'sda'),
                mock.call({'data': {}}, 'sdb')])

    def test_get_instance_disk_info_excludes_volumes(self):
        # Test data
        instance = objects.Instance(**self.test_instance)
        dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                    "<devices>"
                    "<disk type='file'><driver name='qemu' type='raw'/>"
                    "<source file='/test/disk'/>"
                    "<target dev='vda' bus='virtio'/></disk>"
                    "<disk type='file'><driver name='qemu' type='qcow2'/>"
                    "<source file='/test/disk.local'/>"
                    "<target dev='vdb' bus='virtio'/></disk>"
                    "<disk type='file'><driver name='qemu' type='qcow2'/>"
                    "<source file='/fake/path/to/volume1'/>"
                    "<target dev='vdc' bus='virtio'/></disk>"
                    "<disk type='file'><driver name='qemu' type='qcow2'/>"
                    "<source file='/fake/path/to/volume2'/>"
                    "<target dev='vdd' bus='virtio'/></disk>"
                    "</devices></domain>")

        # Preparing mocks
        vdmock = self.mox.CreateMock(fakelibvirt.virDomain)
        self.mox.StubOutWithMock(vdmock, "XMLDesc")
        vdmock.XMLDesc(flags=0).AndReturn(dummyxml)

        def fake_lookup(instance_name):
            if instance_name == instance.name:
                return vdmock
        self.create_fake_libvirt_mock(lookupByName=fake_lookup)

        fake_libvirt_utils.disk_sizes['/test/disk'] = 10 * units.Gi
        fake_libvirt_utils.disk_sizes['/test/disk.local'] = 20 * units.Gi
        fake_libvirt_utils.disk_backing_files['/test/disk.local'] = 'file'

        self.mox.StubOutWithMock(os.path, "getsize")
        os.path.getsize('/test/disk').AndReturn((10737418240))
        os.path.getsize('/test/disk.local').AndReturn((3328599655))

        ret = ("image: /test/disk\n"
               "file format: raw\n"
               "virtual size: 20G (21474836480 bytes)\n"
               "disk size: 3.1G\n"
               "cluster_size: 2097152\n"
               "backing file: /test/dummy (actual path: /backing/file)\n")

        self.mox.StubOutWithMock(os.path, "exists")
        os.path.exists('/test/disk.local').AndReturn(True)

        self.mox.StubOutWithMock(utils, "execute")
        utils.execute('env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info',
                      '/test/disk.local').AndReturn((ret, ''))

        self.mox.ReplayAll()
        conn_info = {'driver_volume_type': 'fake'}
        info = {'block_device_mapping': [
            {'connection_info': conn_info, 'mount_device': '/dev/vdc'},
            {'connection_info': conn_info, 'mount_device': '/dev/vdd'}]}
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        info = drvr.get_instance_disk_info(instance,
                                           block_device_info=info)
        info = jsonutils.loads(info)
        self.assertEqual(info[0]['type'], 'raw')
        self.assertEqual(info[0]['path'], '/test/disk')
        self.assertEqual(info[0]['disk_size'], 10737418240)
        self.assertEqual(info[0]['backing_file'], "")
        self.assertEqual(info[0]['over_committed_disk_size'], 0)
        self.assertEqual(info[1]['type'], 'qcow2')
        self.assertEqual(info[1]['path'], '/test/disk.local')
        self.assertEqual(info[1]['virt_disk_size'], 21474836480)
        self.assertEqual(info[1]['backing_file'], "file")
        self.assertEqual(info[1]['over_committed_disk_size'], 18146236825)

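    # NOTE: the test below covers the same disk inspection when no block
    # device info is available at all, so the driver has to guess which
    # disks are volumes from the domain XML alone.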
    def test_get_instance_disk_info_no_bdinfo_passed(self):
        # NOTE(ndipanov): _get_disk_overcomitted_size_total calls this
        # method without access to Nova's block device information. We want
        # to make sure that we guess volumes mostly correctly in that case
        # as well
        instance = objects.Instance(**self.test_instance)
        dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                    "<devices>"
                    "<disk type='file'><driver name='qemu' type='raw'/>"
                    "<source file='/test/disk'/>"
                    "<target dev='vda' bus='virtio'/></disk>"
                    "<disk type='block'><driver name='qemu' type='raw'/>"
                    "<source file='/fake/path/to/volume1'/>"
                    "<target dev='vdb' bus='virtio'/></disk>"
                    "</devices></domain>")

        # Preparing mocks
        vdmock = self.mox.CreateMock(fakelibvirt.virDomain)
        self.mox.StubOutWithMock(vdmock, "XMLDesc")
        vdmock.XMLDesc(flags=0).AndReturn(dummyxml)

        def fake_lookup(instance_name):
            if instance_name == instance.name:
                return vdmock
        self.create_fake_libvirt_mock(lookupByName=fake_lookup)

        fake_libvirt_utils.disk_sizes['/test/disk'] = 10 * units.Gi

        self.mox.StubOutWithMock(os.path, "getsize")
        os.path.getsize('/test/disk').AndReturn((10737418240))

        self.mox.ReplayAll()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        info = drvr.get_instance_disk_info(instance)

        info = jsonutils.loads(info)
        self.assertEqual(1, len(info))
        self.assertEqual(info[0]['type'], 'raw')
        self.assertEqual(info[0]['path'], '/test/disk')
        self.assertEqual(info[0]['disk_size'], 10737418240)
        self.assertEqual(info[0]['backing_file'], "")
        self.assertEqual(info[0]['over_committed_disk_size'], 0)

    def test_spawn_with_network_info(self):
        # Preparing mocks
        def fake_none(*args, **kwargs):
            return

        def fake_getLibVersion():
            return 9011

        def fake_getCapabilities():
            return """
            <capabilities>
                <host>
                    <uuid>cef19ce0-0ca2-11df-855d-b19fbce37686</uuid>
                    <cpu>
                      <arch>x86_64</arch>
                      <model>Penryn</model>
                      <vendor>Intel</vendor>
                      <topology sockets='1' cores='2' threads='1'/>
                      <feature name='xtpr'/>
                    </cpu>
                </host>
            </capabilities>
            """

        def fake_baselineCPU(cpu, flag):
            return """<cpu mode='custom' match='exact'>
                        <model fallback='allow'>Penryn</model>
                        <vendor>Intel</vendor>
                        <feature policy='require' name='xtpr'/>
                      </cpu>
                   """

        # _fake_network_info must be called before create_fake_libvirt_mock(),
        # as _fake_network_info calls importutils.import_class() and
        # create_fake_libvirt_mock() mocks importutils.import_class().
        network_info = _fake_network_info(self.stubs, 1)
        self.create_fake_libvirt_mock(getLibVersion=fake_getLibVersion,
                                      getCapabilities=fake_getCapabilities,
                                      getVersion=lambda: 1005001,
                                      baselineCPU=fake_baselineCPU)

        instance_ref = self.test_instance
        instance_ref['image_ref'] = 123456  # we send an int to test sha1 call
        instance = objects.Instance(**instance_ref)
        image_meta = self.test_image_meta

        # Mock out the get_info method of the LibvirtDriver so that the
        # polling in the spawn method of the LibvirtDriver returns immediately
        self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, 'get_info')
        libvirt_driver.LibvirtDriver.get_info(instance
            ).AndReturn(hardware.InstanceInfo(state=power_state.RUNNING))

        # Start test
        self.mox.ReplayAll()

        with mock.patch('nova.virt.libvirt.driver.libvirt') as old_virt:
            del old_virt.VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES

            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
            self.stubs.Set(drvr.firewall_driver,
                           'setup_basic_filtering',
                           fake_none)
            self.stubs.Set(drvr.firewall_driver,
                           'prepare_instance_filter',
                           fake_none)
            self.stubs.Set(imagebackend.Image,
                           'cache',
                           fake_none)

            drvr.spawn(self.context, instance, image_meta, [], 'herp',
                       network_info=network_info)

        path = os.path.join(CONF.instances_path, instance['name'])
        if os.path.isdir(path):
            shutil.rmtree(path)

        path = os.path.join(CONF.instances_path,
                            CONF.image_cache_subdirectory_name)
        if os.path.isdir(path):
            shutil.rmtree(os.path.join(CONF.instances_path,
                                       CONF.image_cache_subdirectory_name))

    def test_spawn_without_image_meta(self):
        self.create_image_called = False

        def fake_none(*args, **kwargs):
            return

        def fake_create_image(*args, **kwargs):
            self.create_image_called = True

        def fake_get_info(instance):
            return hardware.InstanceInfo(state=power_state.RUNNING)

        instance_ref = self.test_instance
        instance_ref['image_ref'] = 1
        instance = objects.Instance(**instance_ref)

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.stubs.Set(drvr, '_get_guest_xml', fake_none)
        self.stubs.Set(drvr, '_create_image', fake_create_image)
        self.stubs.Set(drvr, '_create_domain_and_network', fake_none)
        self.stubs.Set(drvr, 'get_info', fake_get_info)

        drvr.spawn(self.context, instance, self.test_image_meta, [], None)
        self.assertTrue(self.create_image_called)

        drvr.spawn(self.context, instance, self.test_image_meta, [], None)
        self.assertTrue(self.create_image_called)

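    # NOTE: the spawn tests below distinguish volume-backed boots, where the
    # image cache must not be populated, from image-backed boots, where it
    # must be.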
    def test_spawn_from_volume_calls_cache(self):
        self.cache_called_for_disk = False

        def fake_none(*args, **kwargs):
            return

        def fake_cache(*args, **kwargs):
            if kwargs.get('image_id') == 'my_fake_image':
                self.cache_called_for_disk = True

        def fake_get_info(instance):
            return hardware.InstanceInfo(state=power_state.RUNNING)

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.stubs.Set(drvr, '_get_guest_xml', fake_none)

        self.stubs.Set(imagebackend.Image, 'cache', fake_cache)
        self.stubs.Set(drvr, '_create_domain_and_network', fake_none)
        self.stubs.Set(drvr, 'get_info', fake_get_info)

        block_device_info = {'root_device_name': '/dev/vda',
                             'block_device_mapping': [
                                 {'mount_device': 'vda',
                                  'boot_index': 0}
                                 ]
                             }

        # Volume-backed instance created without image
        instance_ref = self.test_instance
        instance_ref['image_ref'] = ''
        instance_ref['root_device_name'] = '/dev/vda'
        instance_ref['uuid'] = uuidutils.generate_uuid()
        instance = objects.Instance(**instance_ref)

        drvr.spawn(self.context, instance, self.test_image_meta, [], None,
                   block_device_info=block_device_info)
        self.assertFalse(self.cache_called_for_disk)

        # Booted from volume but with placeholder image
        instance_ref = self.test_instance
        instance_ref['image_ref'] = 'my_fake_image'
        instance_ref['root_device_name'] = '/dev/vda'
        instance_ref['uuid'] = uuidutils.generate_uuid()
        instance = objects.Instance(**instance_ref)

        drvr.spawn(self.context, instance, self.test_image_meta, [], None,
                   block_device_info=block_device_info)
        self.assertFalse(self.cache_called_for_disk)

        # Booted from an image
        instance_ref['image_ref'] = 'my_fake_image'
        instance_ref['uuid'] = uuidutils.generate_uuid()
        instance = objects.Instance(**instance_ref)
        drvr.spawn(self.context, instance, self.test_image_meta, [], None)
        self.assertTrue(self.cache_called_for_disk)

    def test_start_lxc_from_volume(self):
        self.flags(virt_type="lxc",
                   group='libvirt')

        def check_setup_container(image, container_dir=None):
            self.assertIsInstance(image, imgmodel.LocalBlockImage)
            self.assertEqual(image.path, '/dev/path/to/dev')
            return '/dev/nbd1'

        bdm = {
            'guest_format': None,
            'boot_index': 0,
            'mount_device': '/dev/sda',
            'connection_info': {
                'driver_volume_type': 'iscsi',
                'serial': 'afc1',
                'data': {
                    'access_mode': 'rw',
                    'target_discovered': False,
                    'encrypted': False,
                    'qos_specs': None,
                    'target_iqn': 'iqn: volume-afc1',
                    'target_portal': 'ip: 3260',
                    'volume_id': 'afc1',
                    'target_lun': 1,
                    'auth_password': 'uj',
                    'auth_username': '47',
                    'auth_method': 'CHAP'
                }
            },
            'disk_bus': 'scsi',
            'device_type': 'disk',
            'delete_on_termination': False
        }

        def _connect_volume_side_effect(connection_info, disk_info):
            bdm['connection_info']['data']['device_path'] = '/dev/path/to/dev'

        def _get(key, opt=None):
            return bdm.get(key, opt)

        def getitem(key):
            return bdm[key]

        def setitem(key, val):
            bdm[key] = val

        bdm_mock = mock.MagicMock()
        bdm_mock.__getitem__.side_effect = getitem
        bdm_mock.__setitem__.side_effect = setitem
        bdm_mock.get = _get

        disk_mock = mock.MagicMock()
        disk_mock.source_path = '/dev/path/to/dev'

        block_device_info = {'block_device_mapping': [bdm_mock],
                             'root_device_name': '/dev/sda'}

        # Volume-backed instance created without image
        instance_ref = self.test_instance
        instance_ref['image_ref'] = ''
        instance_ref['root_device_name'] = '/dev/sda'
        instance_ref['ephemeral_gb'] = 0
        instance_ref['uuid'] = uuidutils.generate_uuid()
        inst_obj = objects.Instance(**instance_ref)
        image_meta = {}

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        with contextlib.nested(
            mock.patch.object(drvr, '_create_images_and_backing'),
            mock.patch.object(drvr, 'plug_vifs'),
            mock.patch.object(drvr.firewall_driver, 'setup_basic_filtering'),
            mock.patch.object(drvr.firewall_driver,
                              'prepare_instance_filter'),
            mock.patch.object(drvr.firewall_driver, 'apply_instance_filter'),
            mock.patch.object(drvr, '_create_domain'),
            mock.patch.object(drvr, '_connect_volume',
                              side_effect=_connect_volume_side_effect),
            mock.patch.object(drvr, '_get_volume_config',
                              return_value=disk_mock),
            mock.patch.object(drvr, 'get_info',
                              return_value=hardware.InstanceInfo(
                                  state=power_state.RUNNING)),
            mock.patch('nova.virt.disk.api.setup_container',
                       side_effect=check_setup_container),
            mock.patch('nova.virt.disk.api.teardown_container'),
            mock.patch.object(objects.Instance, 'save')):

            drvr.spawn(self.context, inst_obj, image_meta, [], None,
                       network_info=[],
                       block_device_info=block_device_info)
            self.assertEqual('/dev/nbd1',
                             inst_obj.system_metadata.get(
                                 'rootfs_device_name'))

    def test_spawn_with_pci_devices(self):
        def fake_none(*args, **kwargs):
            return None

        def fake_get_info(instance):
            return hardware.InstanceInfo(state=power_state.RUNNING)

        class FakeLibvirtPciDevice(object):
            def dettach(self):
                return None

            def reset(self):
                return None

        def fake_node_device_lookup_by_name(address):
            pattern = ("pci_%(hex)s{4}_%(hex)s{2}_%(hex)s{2}_%(oct)s{1}"
                       % dict(hex='[\da-f]', oct='[0-8]'))
            pattern = re.compile(pattern)
            if pattern.match(address) is None:
                raise fakelibvirt.libvirtError()
            return FakeLibvirtPciDevice()

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.stubs.Set(drvr, '_get_guest_xml', fake_none)
        self.stubs.Set(drvr, '_create_image', fake_none)
        self.stubs.Set(drvr, '_create_domain_and_network', fake_none)
        self.stubs.Set(drvr, 'get_info', fake_get_info)

        drvr._conn.nodeDeviceLookupByName = \
            fake_node_device_lookup_by_name

        instance_ref = self.test_instance
        instance_ref['image_ref'] = 'my_fake_image'
        instance = objects.Instance(**instance_ref)
        instance['pci_devices'] = objects.PciDeviceList(
            objects=[objects.PciDevice(address='0000:00:00.0')])

        drvr.spawn(self.context, instance, self.test_image_meta, [], None)

    def test_chown_disk_config_for_instance(self):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        self.mox.StubOutWithMock(fake_libvirt_utils, 'get_instance_path')
        self.mox.StubOutWithMock(os.path, 'exists')
        self.mox.StubOutWithMock(fake_libvirt_utils, 'chown')
        fake_libvirt_utils.get_instance_path(instance).AndReturn('/tmp/uuid')
        os.path.exists('/tmp/uuid/disk.config').AndReturn(True)
        fake_libvirt_utils.chown('/tmp/uuid/disk.config', os.getuid())

        self.mox.ReplayAll()
        drvr._chown_disk_config_for_instance(instance)

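    # NOTE: _test_create_image_plain and _create_image_helper below are
    # shared harnesses; the short test_create_image_* methods only vary
    # os_type, the expected ephemeral filename and mkfs behaviour.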
    def _test_create_image_plain(self, os_type='', filename='', mkfs=False):
        gotFiles = []

        def fake_image(self, instance, name, image_type=''):
            class FakeImage(imagebackend.Image):
                def __init__(self, instance, name, is_block_dev=False):
                    self.path = os.path.join(instance['name'], name)
                    self.is_block_dev = is_block_dev

                def create_image(self, prepare_template, base,
                                 size, *args, **kwargs):
                    pass

                def cache(self, fetch_func, filename, size=None,
                          *args, **kwargs):
                    gotFiles.append({'filename': filename,
                                     'size': size})

                def snapshot(self, name):
                    pass

            return FakeImage(instance, name)

        def fake_none(*args, **kwargs):
            return

        def fake_get_info(instance):
            return hardware.InstanceInfo(state=power_state.RUNNING)

        # Stop 'libvirt_driver._create_image' touching filesystem
        self.stubs.Set(nova.virt.libvirt.imagebackend.Backend, "image",
                       fake_image)

        instance_ref = self.test_instance
        instance_ref['image_ref'] = 1
        instance = objects.Instance(**instance_ref)
        instance['os_type'] = os_type

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.stubs.Set(drvr, '_get_guest_xml', fake_none)
        self.stubs.Set(drvr, '_create_domain_and_network', fake_none)
        self.stubs.Set(drvr, 'get_info', fake_get_info)
        if mkfs:
            self.stubs.Set(nova.virt.disk.api, '_MKFS_COMMAND',
                           {os_type: 'mkfs.ext4 --label %(fs_label)s '
                                     '%(target)s'})

        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance,
                                            image_meta)
        drvr._create_image(context, instance, disk_info['mapping'])
        drvr._get_guest_xml(self.context, instance, None,
                            disk_info, image_meta)

        wantFiles = [
            {'filename': '356a192b7913b04c54574d18c28d46e6395428ab',
             'size': 10 * units.Gi},
            {'filename': filename,
             'size': 20 * units.Gi},
            ]
        self.assertEqual(gotFiles, wantFiles)

    def test_create_image_plain_os_type_blank(self):
        self._test_create_image_plain(os_type='',
                                      filename=self._EPHEMERAL_20_DEFAULT,
                                      mkfs=False)

    def test_create_image_plain_os_type_none(self):
        self._test_create_image_plain(os_type=None,
                                      filename=self._EPHEMERAL_20_DEFAULT,
                                      mkfs=False)

    def test_create_image_plain_os_type_set_no_fs(self):
        self._test_create_image_plain(os_type='test',
                                      filename=self._EPHEMERAL_20_DEFAULT,
                                      mkfs=False)

    def test_create_image_plain_os_type_set_with_fs(self):
        ephemeral_file_name = ('ephemeral_20_%s' % utils.get_hash_str(
            'mkfs.ext4 --label %(fs_label)s %(target)s')[:7])

        self._test_create_image_plain(os_type='test',
                                      filename=ephemeral_file_name,
                                      mkfs=True)

    def _create_image_helper(self, callback, suffix=''):
        gotFiles = []
        imported_files = []

        def fake_image(self, instance, name, image_type=''):
            class FakeImage(imagebackend.Image):
                def __init__(self, instance, name, is_block_dev=False):
                    self.path = os.path.join(instance['name'], name)
                    self.is_block_dev = is_block_dev

                def create_image(self, prepare_template, base,
                                 size, *args, **kwargs):
                    pass

                def cache(self, fetch_func, filename, size=None,
                          *args, **kwargs):
                    gotFiles.append({'filename': filename,
                                     'size': size})

                def import_file(self, instance, local_filename,
                                remote_filename):
                    imported_files.append((local_filename,
                                           remote_filename))

                def snapshot(self, name):
                    pass

            return FakeImage(instance, name)

        def fake_none(*args, **kwargs):
            return

        def fake_get_info(instance):
            return hardware.InstanceInfo(state=power_state.RUNNING)

        # Stop 'libvirt_driver._create_image' touching filesystem
        self.stubs.Set(nova.virt.libvirt.imagebackend.Backend, "image",
                       fake_image)

        instance_ref = self.test_instance
        instance_ref['image_ref'] = 1
        # NOTE(mikal): use this callback to tweak the instance to match
        # what you're trying to test
        callback(instance_ref)
        instance = objects.Instance(**instance_ref)
        # Turn on some swap to exercise that codepath in _create_image
        instance.flavor.swap = 500

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.stubs.Set(drvr, '_get_guest_xml', fake_none)
        self.stubs.Set(drvr, '_create_domain_and_network', fake_none)
        self.stubs.Set(drvr, 'get_info', fake_get_info)
        self.stubs.Set(instance_metadata, 'InstanceMetadata', fake_none)
        self.stubs.Set(nova.virt.configdrive.ConfigDriveBuilder,
                       'make_drive', fake_none)

        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance,
                                            image_meta)
        drvr._create_image(context, instance, disk_info['mapping'],
                           suffix=suffix)
        drvr._get_guest_xml(self.context, instance, None,
                            disk_info, image_meta)

        return gotFiles, imported_files

    def test_create_image_with_swap(self):
        def enable_swap(instance_ref):
            # Turn on some swap to exercise that codepath in _create_image
            instance_ref['system_metadata']['instance_type_swap'] = 500

        gotFiles, _ = self._create_image_helper(enable_swap)
        wantFiles = [
            {'filename': '356a192b7913b04c54574d18c28d46e6395428ab',
             'size': 10 * units.Gi},
            {'filename': self._EPHEMERAL_20_DEFAULT,
             'size': 20 * units.Gi},
            {'filename': 'swap_500',
             'size': 500 * units.Mi},
            ]
        self.assertEqual(gotFiles, wantFiles)

    def test_create_image_with_configdrive(self):
        def enable_configdrive(instance_ref):
            instance_ref['config_drive'] = 'true'

        # Ensure that we create a config drive and then import it into the
        # image backend store
        _, imported_files = self._create_image_helper(enable_configdrive)
        self.assertTrue(imported_files[0][0].endswith('/disk.config'))
        self.assertEqual('disk.config', imported_files[0][1])

    def test_create_image_with_configdrive_rescue(self):
        def enable_configdrive(instance_ref):
            instance_ref['config_drive'] = 'true'

        # Ensure that we create a config drive and then import it into the
        # image backend store
        _, imported_files = self._create_image_helper(enable_configdrive,
                                                      suffix='.rescue')
        self.assertTrue(imported_files[0][0].endswith('/disk.config.rescue'))
        self.assertEqual('disk.config.rescue', imported_files[0][1])

    @mock.patch.object(nova.virt.libvirt.imagebackend.Image, 'cache',
                       side_effect=exception.ImageNotFound(
                           image_id='fake-id'))
    def test_create_image_not_exist_no_fallback(self, mock_cache):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance,
                                            image_meta)
        self.assertRaises(exception.ImageNotFound,
                          drvr._create_image,
                          self.context, instance, disk_info['mapping'])

    @mock.patch.object(nova.virt.libvirt.imagebackend.Image, 'cache')
    def test_create_image_not_exist_fallback(self, mock_cache):

        def side_effect(fetch_func, filename, size=None, *args, **kwargs):
            def second_call(fetch_func, filename, size=None,
                            *args, **kwargs):
                # call copy_from_host ourselves because we mocked
                # image.cache()
                fetch_func('fake-target', 'fake-max-size')
                # further calls have no side effect
                mock_cache.side_effect = None
            mock_cache.side_effect = second_call
            # raise an error only the first call
            raise exception.ImageNotFound(image_id='fake-id')

        mock_cache.side_effect = side_effect

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance,
                                            image_meta)

        with mock.patch.object(libvirt_driver.libvirt_utils,
                               'copy_image') as mock_copy:
            drvr._create_image(self.context, instance, disk_info['mapping'],
                               fallback_from_host='fake-source-host')
            mock_copy.assert_called_once_with(src='fake-target',
                                              dest='fake-target',
                                              host='fake-source-host',
                                              receive=True)

    @mock.patch.object(utils, 'execute')
    def test_create_ephemeral_specified_fs(self, mock_exec):
        self.flags(default_ephemeral_format='ext3')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        drvr._create_ephemeral('/dev/something', 20, 'myVol', 'linux',
                               is_block_dev=True, max_size=20,
                               specified_fs='ext4')
        mock_exec.assert_called_once_with('mkfs', '-t', 'ext4', '-F', '-L',
                                          'myVol', '/dev/something',
                                          run_as_root=True)

    def test_create_ephemeral_specified_fs_not_valid(self):
        CONF.set_override('default_ephemeral_format', 'ext4')
        ephemerals = [{'device_type': 'disk',
                       'disk_bus': 'virtio',
                       'device_name': '/dev/vdb',
                       'guest_format': 'dummy',
                       'size': 1}]
        block_device_info = {
            'ephemerals': ephemerals}
        instance_ref = self.test_instance
        instance_ref['image_ref'] = 1
        instance = objects.Instance(**instance_ref)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        image_meta = objects.ImageMeta.from_dict({'disk_format': 'raw'})
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance,
                                            image_meta)
        disk_info['mapping'].pop('disk.local')

        with contextlib.nested(
            mock.patch.object(utils, 'execute'),
            mock.patch.object(drvr, 'get_info'),
            mock.patch.object(drvr, '_create_domain_and_network'),
            mock.patch.object(imagebackend.Image, 'verify_base_size')):
            self.assertRaises(exception.InvalidBDMFormat, drvr._create_image,
                              context, instance, disk_info['mapping'],
                              block_device_info=block_device_info)

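    # NOTE: the _create_ephemeral tests below assert the exact mkfs command
    # line used for the default, config-overridden and per-os-type cases.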
    def test_create_ephemeral_default(self):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.mox.StubOutWithMock(utils, 'execute')
        utils.execute('mkfs', '-t', 'ext4', '-F', '-L', 'myVol',
                      '/dev/something', run_as_root=True)
        self.mox.ReplayAll()
        drvr._create_ephemeral('/dev/something', 20, 'myVol', 'linux',
                               is_block_dev=True, max_size=20)

    def test_create_ephemeral_with_conf(self):
        CONF.set_override('default_ephemeral_format', 'ext4')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.mox.StubOutWithMock(utils, 'execute')
        utils.execute('mkfs', '-t', 'ext4', '-F', '-L', 'myVol',
                      '/dev/something', run_as_root=True)
        self.mox.ReplayAll()
        drvr._create_ephemeral('/dev/something', 20, 'myVol', 'linux',
                               is_block_dev=True)

    def test_create_ephemeral_with_arbitrary(self):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.stubs.Set(nova.virt.disk.api, '_MKFS_COMMAND',
                       {'linux': 'mkfs.ext4 --label %(fs_label)s %(target)s'})
        self.mox.StubOutWithMock(utils, 'execute')
        utils.execute('mkfs.ext4', '--label', 'myVol', '/dev/something',
                      run_as_root=True)
        self.mox.ReplayAll()
        drvr._create_ephemeral('/dev/something', 20, 'myVol', 'linux',
                               is_block_dev=True)

    def test_create_ephemeral_with_ext3(self):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.stubs.Set(nova.virt.disk.api, '_MKFS_COMMAND',
                       {'linux': 'mkfs.ext3 --label %(fs_label)s %(target)s'})
        self.mox.StubOutWithMock(utils, 'execute')
        utils.execute('mkfs.ext3', '--label', 'myVol', '/dev/something',
                      run_as_root=True)
        self.mox.ReplayAll()
        drvr._create_ephemeral('/dev/something', 20, 'myVol', 'linux',
                               is_block_dev=True)

    def test_create_swap_default(self):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.mox.StubOutWithMock(utils, 'execute')
        utils.execute('mkswap', '/dev/something', run_as_root=False)
        self.mox.ReplayAll()

        drvr._create_swap('/dev/something', 1, max_size=20)

    def test_get_console_output_file(self):
        fake_libvirt_utils.files['console.log'] = '01234567890'

        with utils.tempdir() as tmpdir:
            self.flags(instances_path=tmpdir)

            instance_ref = self.test_instance
            instance_ref['image_ref'] = 123456
            instance = objects.Instance(**instance_ref)

            console_dir = (os.path.join(tmpdir, instance['name']))
            console_log = '%s/console.log' % (console_dir)
            fake_dom_xml = """
                <domain type='kvm'>
                    <devices>
                        <disk type='file'>
                            <source file='filename'/>
                        </disk>
                        <console type='file'>
                            <source path='%s'/>
                            <target port='0'/>
                        </console>
                    </devices>
                </domain>
            """ % console_log

            def fake_lookup(id):
                return FakeVirtDomain(fake_dom_xml)

            self.create_fake_libvirt_mock()
            libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup

            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

            try:
                prev_max = libvirt_driver.MAX_CONSOLE_BYTES
                libvirt_driver.MAX_CONSOLE_BYTES = 5
                with mock.patch('os.path.exists', return_value=True):
                    output = drvr.get_console_output(self.context, instance)
            finally:
                libvirt_driver.MAX_CONSOLE_BYTES = prev_max

            self.assertEqual('67890', output)

    def test_get_console_output_file_missing(self):
        with utils.tempdir() as tmpdir:
            self.flags(instances_path=tmpdir)

            instance_ref = self.test_instance
            instance_ref['image_ref'] = 123456
            instance = objects.Instance(**instance_ref)

            console_log = os.path.join(tmpdir, instance['name'],
                                       'non-existent.log')
            fake_dom_xml = """
                <domain type='kvm'>
                    <devices>
                        <disk type='file'>
                            <source file='filename'/>
                        </disk>
                        <console type='file'>
                            <source path='%s'/>
                            <target port='0'/>
                        </console>
                    </devices>
                </domain>
            """ % console_log

            def fake_lookup(id):
                return FakeVirtDomain(fake_dom_xml)

            self.create_fake_libvirt_mock()
            libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup

            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

            with mock.patch('os.path.exists', return_value=False):
                output = drvr.get_console_output(self.context, instance)

            self.assertEqual('', output)

    def test_get_console_output_pty(self):
        fake_libvirt_utils.files['pty'] = '01234567890'

        with utils.tempdir() as tmpdir:
            self.flags(instances_path=tmpdir)

            instance_ref = self.test_instance
            instance_ref['image_ref'] = 123456
            instance = objects.Instance(**instance_ref)

            console_dir = (os.path.join(tmpdir, instance['name']))
            pty_file = '%s/fake_pty' % (console_dir)
            fake_dom_xml = """
                <domain type='kvm'>
                    <devices>
                        <disk type='file'>
                            <source file='filename'/>
                        </disk>
                        <console type='pty'>
                            <source path='%s'/>
                            <target port='0'/>
                        </console>
                    </devices>
                </domain>
            """ % pty_file

            def fake_lookup(id):
                return FakeVirtDomain(fake_dom_xml)

            def _fake_flush(self, fake_pty):
                return 'foo'

            def _fake_append_to_file(self, data, fpath):
                return 'pty'

            self.create_fake_libvirt_mock()
            libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup
            libvirt_driver.LibvirtDriver._flush_libvirt_console = _fake_flush
            libvirt_driver.LibvirtDriver._append_to_file = \
                _fake_append_to_file

            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

            try:
                prev_max = libvirt_driver.MAX_CONSOLE_BYTES
                libvirt_driver.MAX_CONSOLE_BYTES = 5
                output = drvr.get_console_output(self.context, instance)
            finally:
                libvirt_driver.MAX_CONSOLE_BYTES = prev_max

            self.assertEqual('67890', output)

    def test_get_host_ip_addr(self):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        ip = drvr.get_host_ip_addr()
        self.assertEqual(ip, CONF.my_ip)

    @mock.patch.object(libvirt_driver.LOG, 'warn')
    @mock.patch('nova.compute.utils.get_machine_ips')
    def test_get_host_ip_addr_failure(self, mock_ips, mock_log):
        mock_ips.return_value = ['8.8.8.8', '75.75.75.75']
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        drvr.get_host_ip_addr()
        mock_log.assert_called_once_with(u'my_ip address (%(my_ip)s) was '
                                         u'not found on any of the '
                                         u'interfaces: %(ifaces)s',
                                         {'ifaces': '8.8.8.8, 75.75.75.75',
                                          'my_ip': mock.ANY})

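    # NOTE: the connection-event tests below simulate a failed libvirt
    # connection and verify that the compute service record is disabled,
    # then re-enabled once the connection works again.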
    def test_conn_event_handler(self):
        self.mox.UnsetStubs()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        service_mock = mock.MagicMock()
        service_mock.disabled.return_value = False
        with contextlib.nested(
            mock.patch.object(drvr._host, "_connect",
                              side_effect=fakelibvirt.make_libvirtError(
                                  fakelibvirt.libvirtError,
                                  "Failed to connect to host",
                                  error_code=
                                  fakelibvirt.VIR_ERR_INTERNAL_ERROR)),
            mock.patch.object(drvr._host, "_init_events",
                              return_value=None),
            mock.patch.object(objects.Service, "get_by_compute_host",
                              return_value=service_mock)):

            # verify that the driver registers for the close callback
            # and re-connects after receiving the callback
            self.assertRaises(exception.HypervisorUnavailable,
                              drvr.init_host,
                              "wibble")
            self.assertTrue(service_mock.disabled)

    def test_command_with_broken_connection(self):
        self.mox.UnsetStubs()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        service_mock = mock.MagicMock()
        service_mock.disabled.return_value = False
        with contextlib.nested(
            mock.patch.object(drvr._host, "_connect",
                              side_effect=fakelibvirt.make_libvirtError(
                                  fakelibvirt.libvirtError,
                                  "Failed to connect to host",
                                  error_code=
                                  fakelibvirt.VIR_ERR_INTERNAL_ERROR)),
            mock.patch.object(drvr._host, "_init_events",
                              return_value=None),
            mock.patch.object(host.Host, "has_min_version",
                              return_value=True),
            mock.patch.object(drvr, "_do_quality_warnings",
                              return_value=None),
            mock.patch.object(objects.Service, "get_by_compute_host",
                              return_value=service_mock)):

            drvr.init_host("wibble")
            self.assertRaises(exception.HypervisorUnavailable,
                              drvr.get_num_instances)
            self.assertTrue(service_mock.disabled)

    def test_service_resume_after_broken_connection(self):
        self.mox.UnsetStubs()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        service_mock = mock.MagicMock()
        service_mock.disabled.return_value = True
        with contextlib.nested(
            mock.patch.object(drvr._host, "_connect",
                              return_value=mock.MagicMock()),
            mock.patch.object(drvr._host, "_init_events",
                              return_value=None),
            mock.patch.object(host.Host, "has_min_version",
                              return_value=True),
            mock.patch.object(drvr, "_do_quality_warnings",
                              return_value=None),
            mock.patch.object(objects.Service, "get_by_compute_host",
                              return_value=service_mock)):

            drvr.init_host("wibble")
            drvr.get_num_instances()
            self.assertTrue(not service_mock.disabled and
                            service_mock.disabled_reason is None)

    @mock.patch.object(objects.Instance, 'save')
    def test_immediate_delete(self, mock_save):
        def fake_get_domain(instance):
            raise exception.InstanceNotFound(instance_id=instance.uuid)

        def fake_delete_instance_files(instance):
            pass

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.stubs.Set(drvr._host, 'get_domain', fake_get_domain)
        self.stubs.Set(drvr, 'delete_instance_files',
                       fake_delete_instance_files)

        instance = objects.Instance(self.context, **self.test_instance)
        drvr.destroy(self.context, instance, {})
        mock_save.assert_called_once_with()

    @mock.patch.object(objects.Instance, 'get_by_uuid')
    @mock.patch.object(objects.Instance, 'obj_load_attr', autospec=True)
    @mock.patch.object(objects.Instance, 'save', autospec=True)
    @mock.patch.object(libvirt_driver.LibvirtDriver, '_destroy')
    @mock.patch.object(libvirt_driver.LibvirtDriver, 'delete_instance_files')
    @mock.patch.object(libvirt_driver.LibvirtDriver, '_disconnect_volume')
    @mock.patch.object(driver, 'block_device_info_get_mapping')
    @mock.patch.object(libvirt_driver.LibvirtDriver, '_undefine_domain')
    def _test_destroy_removes_disk(self, mock_undefine_domain, mock_mapping,
                                   mock_disconnect_volume,
                                   mock_delete_instance_files, mock_destroy,
                                   mock_inst_save, mock_inst_obj_load_attr,
                                   mock_get_by_uuid, volume_fail=False):
        instance = objects.Instance(self.context, **self.test_instance)
        vol = {'block_device_mapping': [
            {'connection_info': 'dummy', 'mount_device': '/dev/sdb'}]}

        mock_mapping.return_value = vol['block_device_mapping']
        mock_delete_instance_files.return_value = True
        mock_get_by_uuid.return_value = instance
        if volume_fail:
            mock_disconnect_volume.return_value = (
                exception.VolumeNotFound('vol'))

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        drvr.destroy(self.context, instance, [], vol)

    def test_destroy_removes_disk(self):
        self._test_destroy_removes_disk(volume_fail=False)

    def test_destroy_removes_disk_volume_fails(self):
        self._test_destroy_removes_disk(volume_fail=True)

    @mock.patch.object(libvirt_driver.LibvirtDriver, 'unplug_vifs')
    @mock.patch.object(libvirt_driver.LibvirtDriver, '_destroy')
    @mock.patch.object(libvirt_driver.LibvirtDriver, '_undefine_domain')
    def test_destroy_not_removes_disk(self, mock_undefine_domain,
                                      mock_destroy, mock_unplug_vifs):
        instance = fake_instance.fake_instance_obj(
            None, name='instancename', id=1,
            uuid='875a8070-d0b9-4949-8b31-104d125c9a64')

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        drvr.destroy(self.context, instance, [], None, False)

    @mock.patch.object(libvirt_driver.LibvirtDriver, 'cleanup')
    @mock.patch.object(libvirt_driver.LibvirtDriver, '_teardown_container')
    @mock.patch.object(host.Host, 'get_domain')
    def test_destroy_lxc_calls_teardown_container(self, mock_get_domain,
                                                  mock_teardown_container,
                                                  mock_cleanup):
        self.flags(virt_type='lxc', group='libvirt')
        fake_domain = FakeVirtDomain()

        def destroy_side_effect(*args, **kwargs):
            fake_domain._info[0] = power_state.SHUTDOWN

        with mock.patch.object(fake_domain, 'destroy',
                               side_effect=destroy_side_effect) \
                as mock_domain_destroy:
            mock_get_domain.return_value = fake_domain
            instance = objects.Instance(**self.test_instance)

            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
            network_info = []
            drvr.destroy(self.context, instance, network_info, None, False)

            mock_get_domain.assert_has_calls([mock.call(instance),
                                              mock.call(instance)])
            mock_domain_destroy.assert_called_once_with()
            mock_teardown_container.assert_called_once_with(instance)
            mock_cleanup.assert_called_once_with(self.context, instance,
                                                 network_info, None, False,
                                                 None)

    @mock.patch.object(libvirt_driver.LibvirtDriver, 'cleanup')
    @mock.patch.object(libvirt_driver.LibvirtDriver, '_teardown_container')
    @mock.patch.object(host.Host, 'get_domain')
    def test_destroy_lxc_calls_teardown_container_when_no_domain(self,
            mock_get_domain, mock_teardown_container, mock_cleanup):
        self.flags(virt_type='lxc', group='libvirt')
        instance = objects.Instance(**self.test_instance)
        inf_exception = exception.InstanceNotFound(instance_id=instance.uuid)
        mock_get_domain.side_effect = inf_exception

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        network_info = []
        drvr.destroy(self.context, instance, network_info, None, False)

        mock_get_domain.assert_has_calls([mock.call(instance),
                                          mock.call(instance)])
        mock_teardown_container.assert_called_once_with(instance)
        mock_cleanup.assert_called_once_with(self.context, instance,
                                             network_info, None, False,
                                             None)

    def test_reboot_different_ids(self):
        class FakeLoopingCall(object):
            def start(self, *a, **k):
                return self

            def wait(self):
                return None

        self.flags(wait_soft_reboot_seconds=1, group='libvirt')
        info_tuple = ('fake', 'fake', 'fake', 'also_fake')
        self.reboot_create_called = False

        # Mock domain
        mock_domain = self.mox.CreateMock(fakelibvirt.virDomain)
        mock_domain.info().AndReturn(
            (libvirt_guest.VIR_DOMAIN_RUNNING,) + info_tuple)
        mock_domain.ID().AndReturn('some_fake_id')
        mock_domain.ID().AndReturn('some_fake_id')
        mock_domain.shutdown()
        mock_domain.info().AndReturn(
            (libvirt_guest.VIR_DOMAIN_CRASHED,) + info_tuple)
        mock_domain.ID().AndReturn('some_other_fake_id')
        mock_domain.ID().AndReturn('some_other_fake_id')

        self.mox.ReplayAll()

        def fake_get_domain(instance):
            return mock_domain

        def fake_create_domain(**kwargs):
            self.reboot_create_called = True

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        self.stubs.Set(drvr._host, 'get_domain', fake_get_domain)
        self.stubs.Set(drvr, '_create_domain', fake_create_domain)
        self.stubs.Set(loopingcall, 'FixedIntervalLoopingCall',
                       lambda *a, **k: FakeLoopingCall())
        self.stubs.Set(pci_manager, 'get_instance_pci_devs', lambda *a: [])
        drvr.reboot(None, instance, [], 'SOFT')
        self.assertTrue(self.reboot_create_called)

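    # NOTE: a soft reboot is only treated as successful when the domain ID
    # changes; the next test keeps the ID constant and expects a fallback
    # to hard reboot.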
    @mock.patch.object(pci_manager, 'get_instance_pci_devs')
    @mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
    @mock.patch.object(greenthread, 'sleep')
    @mock.patch.object(libvirt_driver.LibvirtDriver, '_hard_reboot')
    @mock.patch.object(host.Host, 'get_domain')
    def test_reboot_same_ids(self, mock_get_domain, mock_hard_reboot,
                             mock_sleep, mock_loopingcall,
                             mock_get_instance_pci_devs):
        class FakeLoopingCall(object):
            def start(self, *a, **k):
                return self

            def wait(self):
                return None

        self.flags(wait_soft_reboot_seconds=1, group='libvirt')
        info_tuple = ('fake', 'fake', 'fake', 'also_fake')
        self.reboot_hard_reboot_called = False

        # Mock domain
        mock_domain = mock.Mock(fakelibvirt.virDomain)
        return_values = [(libvirt_guest.VIR_DOMAIN_RUNNING,) + info_tuple,
                         (libvirt_guest.VIR_DOMAIN_CRASHED,) + info_tuple]
        mock_domain.info.side_effect = return_values
        mock_domain.ID.return_value = 'some_fake_id'
        mock_domain.shutdown.side_effect = mock.Mock()

        def fake_hard_reboot(*args, **kwargs):
            self.reboot_hard_reboot_called = True

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        mock_get_domain.return_value = mock_domain
        mock_hard_reboot.side_effect = fake_hard_reboot
        mock_loopingcall.return_value = FakeLoopingCall()
        mock_get_instance_pci_devs.return_value = []
        drvr.reboot(None, instance, [], 'SOFT')
        self.assertTrue(self.reboot_hard_reboot_called)

    @mock.patch.object(libvirt_driver.LibvirtDriver, '_hard_reboot')
    @mock.patch.object(host.Host, 'get_domain')
    def test_soft_reboot_libvirt_exception(self, mock_get_domain,
                                           mock_hard_reboot):
        # Tests that a hard reboot is performed when a soft reboot results
        # in raising a libvirtError.
        info_tuple = ('fake', 'fake', 'fake', 'also_fake')

        # setup mocks
        mock_virDomain = mock.Mock(fakelibvirt.virDomain)
        mock_virDomain.info.return_value = (
            (libvirt_guest.VIR_DOMAIN_RUNNING,) + info_tuple)
        mock_virDomain.ID.return_value = 'some_fake_id'
        mock_virDomain.shutdown.side_effect = fakelibvirt.libvirtError('Err')

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        context = None
        instance = objects.Instance(**self.test_instance)
        network_info = []
        mock_get_domain.return_value = mock_virDomain

        drvr.reboot(context, instance, network_info, 'SOFT')

    @mock.patch.object(libvirt_driver.LibvirtDriver, '_hard_reboot')
    @mock.patch.object(host.Host, 'get_domain')
    def _test_resume_state_on_host_boot_with_state(self, state,
                                                   mock_get_domain,
                                                   mock_hard_reboot):
        mock_virDomain = mock.Mock(fakelibvirt.virDomain)
        mock_virDomain.info.return_value = ([state, None, None, None, None])

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        mock_get_domain.return_value = mock_virDomain
        instance = objects.Instance(**self.test_instance)
        network_info = _fake_network_info(self.stubs, 1)

        drvr.resume_state_on_host_boot(self.context, instance, network_info,
                                       block_device_info=None)

        ignored_states = (power_state.RUNNING,
                          power_state.SUSPENDED,
                          power_state.NOSTATE,
                          power_state.PAUSED)
        self.assertEqual(mock_hard_reboot.called,
                         state not in ignored_states)

    def test_resume_state_on_host_boot_with_running_state(self):
        self._test_resume_state_on_host_boot_with_state(power_state.RUNNING)

    def test_resume_state_on_host_boot_with_suspended_state(self):
        self._test_resume_state_on_host_boot_with_state(power_state.SUSPENDED)

    def test_resume_state_on_host_boot_with_paused_state(self):
        self._test_resume_state_on_host_boot_with_state(power_state.PAUSED)

    def test_resume_state_on_host_boot_with_nostate(self):
        self._test_resume_state_on_host_boot_with_state(power_state.NOSTATE)

    def test_resume_state_on_host_boot_with_shutdown_state(self):
        self._test_resume_state_on_host_boot_with_state(power_state.RUNNING)

    def test_resume_state_on_host_boot_with_crashed_state(self):
        self._test_resume_state_on_host_boot_with_state(power_state.CRASHED)

    @mock.patch.object(libvirt_driver.LibvirtDriver, '_hard_reboot')
    @mock.patch.object(host.Host, 'get_domain')
    def test_resume_state_on_host_boot_with_instance_not_found_on_driver(
            self, mock_get_domain, mock_hard_reboot):
        instance = objects.Instance(**self.test_instance)

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        mock_get_domain.side_effect = exception.InstanceNotFound(
            instance_id='fake')
        drvr.resume_state_on_host_boot(self.context, instance,
                                       network_info=[],
                                       block_device_info=None)

        mock_hard_reboot.assert_called_once_with(self.context,
                                                 instance, [], None)

    @mock.patch('nova.virt.libvirt.LibvirtDriver.get_info')
    @mock.patch('nova.virt.libvirt.LibvirtDriver._create_domain_and_network')
    @mock.patch('nova.virt.libvirt.LibvirtDriver._create_images_and_backing')
    @mock.patch('nova.virt.libvirt.LibvirtDriver._get_guest_xml')
    @mock.patch('nova.virt.libvirt.LibvirtDriver._get_instance_disk_info')
    @mock.patch('nova.virt.libvirt.blockinfo.get_disk_info')
    @mock.patch('nova.virt.libvirt.LibvirtDriver._destroy')
    def test_hard_reboot(self, mock_destroy, mock_get_disk_info,
                         mock_get_instance_disk_info, mock_get_guest_xml,
                         mock_create_images_and_backing,
                         mock_create_domain_and_network, mock_get_info):
        self.context.auth_token = True  # any non-None value will suffice
        instance = objects.Instance(**self.test_instance)
        instance_path = libvirt_utils.get_instance_path(instance)
        network_info = _fake_network_info(self.stubs, 1)
        block_device_info = None

        dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                    "<devices>"
                    "<disk type='file'><driver name='qemu' type='raw'/>"
                    "<source file='/test/disk'/>"
                    "<target dev='vda' bus='virtio'/></disk>"
                    "<disk type='file'><driver name='qemu' type='qcow2'/>"
                    "<source file='/test/disk.local'/>"
                    "<target dev='vdb' bus='virtio'/></disk>"
                    "</devices></domain>")

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        return_values = [hardware.InstanceInfo(state=power_state.SHUTDOWN),
                         hardware.InstanceInfo(state=power_state.RUNNING)]
        mock_get_info.side_effect = return_values

        backing_disk_info = [{"virt_disk_size": 2}]

        mock_get_disk_info.return_value = mock.sentinel.disk_info
        mock_get_guest_xml.return_value = dummyxml
        mock_get_instance_disk_info.return_value = backing_disk_info

        drvr._hard_reboot(self.context, instance, network_info,
                          block_device_info)

        # make sure that _create_images_and_backing is passed the disk_info
        # returned from _get_instance_disk_info and not the one that is in
        # scope from blockinfo.get_disk_info
        mock_create_images_and_backing.assert_called_once_with(self.context,
            instance, instance_path, backing_disk_info)

        # make sure that _create_domain_and_network is passed the disk_info
        # returned from blockinfo.get_disk_info and not the one that's
        # returned from _get_instance_disk_info
        mock_create_domain_and_network.assert_called_once_with(self.context,
            dummyxml, instance, network_info, mock.sentinel.disk_info,
            block_device_info=block_device_info,
            reboot=True, vifs_already_plugged=True)

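    # NOTE: the following regression test pins down that a hard reboot
    # relies on cached image metadata rather than calling back to Glance.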
test_hard_reboot_does_not_call_glance_show(self, mock_destroy, mock_get_disk_info, mock_get_guest_config, mock_get_instance_path, mock_write_to_file, mock_get_instance_disk_info, mock_create_images_and_backing, mock_create_domand_and_network, mock_prepare_pci_devices_for_use, mock_get_instance_pci_devs, mock_looping_call, mock_ensure_tree): """For a hard reboot, we shouldn't need an additional call to glance to get the image metadata. This is important for automatically spinning up instances on a host-reboot, since we won't have a user request context that'll allow the Glance request to go through. We have to rely on the cached image metadata, instead. https://bugs.launchpad.net/nova/+bug/1339386 """ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) instance = objects.Instance(**self.test_instance) network_info = mock.MagicMock() block_device_info = mock.MagicMock() mock_get_disk_info.return_value = {} mock_get_guest_config.return_value = mock.MagicMock() mock_get_instance_path.return_value = '/foo' mock_looping_call.return_value = mock.MagicMock() drvr._image_api = mock.MagicMock() drvr._hard_reboot(self.context, instance, network_info, block_device_info) self.assertFalse(drvr._image_api.get.called) mock_ensure_tree.assert_called_once_with('/foo') @mock.patch.object(time, 'sleep') @mock.patch.object(libvirt_driver.LibvirtDriver, '_create_domain') @mock.patch.object(host.Host, 'get_domain') def _test_clean_shutdown(self, mock_get_domain, mock_create_domain, mock_sleep, seconds_to_shutdown, timeout, retry_interval, shutdown_attempts, succeeds): info_tuple = ('fake', 'fake', 'fake', 'also_fake') shutdown_count = [] # Mock domain mock_domain = mock.Mock(fakelibvirt.virDomain) return_infos = [(libvirt_guest.VIR_DOMAIN_RUNNING,) + info_tuple] return_shutdowns = [shutdown_count.append("shutdown")] retry_countdown = retry_interval for x in range(min(seconds_to_shutdown, timeout)): return_infos.append( (libvirt_guest.VIR_DOMAIN_RUNNING,) + info_tuple) if retry_countdown == 0: return_shutdowns.append(shutdown_count.append("shutdown")) retry_countdown = retry_interval else: retry_countdown -= 1 if seconds_to_shutdown < timeout: return_infos.append( (libvirt_guest.VIR_DOMAIN_SHUTDOWN,) + info_tuple) mock_domain.info.side_effect = return_infos mock_domain.shutdown.side_effect = return_shutdowns def fake_create_domain(**kwargs): self.reboot_create_called = True drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) instance = objects.Instance(**self.test_instance) mock_get_domain.return_value = mock_domain mock_create_domain.side_effect = fake_create_domain result = drvr._clean_shutdown(instance, timeout, retry_interval) self.assertEqual(succeeds, result) self.assertEqual(shutdown_attempts, len(shutdown_count)) def test_clean_shutdown_first_time(self): self._test_clean_shutdown(seconds_to_shutdown=2, timeout=5, retry_interval=3, shutdown_attempts=1, succeeds=True) def test_clean_shutdown_with_retry(self): self._test_clean_shutdown(seconds_to_shutdown=4, timeout=5, retry_interval=3, shutdown_attempts=2, succeeds=True) def test_clean_shutdown_failure(self): self._test_clean_shutdown(seconds_to_shutdown=6, timeout=5, retry_interval=3, shutdown_attempts=2, succeeds=False) def test_clean_shutdown_no_wait(self): self._test_clean_shutdown(seconds_to_shutdown=6, timeout=0, retry_interval=3, shutdown_attempts=1, succeeds=False) @mock.patch.object(FakeVirtDomain, 'attachDeviceFlags') @mock.patch.object(FakeVirtDomain, 'ID', return_value=1) @mock.patch.object(utils, 
'get_image_from_system_metadata', return_value=None) def test_attach_sriov_ports(self, mock_get_image_metadata, mock_ID, mock_attachDevice): instance = objects.Instance(**self.test_instance) network_info = _fake_network_info(self.stubs, 1) network_info[0]['vnic_type'] = network_model.VNIC_TYPE_DIRECT guest = libvirt_guest.Guest(FakeVirtDomain()) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) drvr._attach_sriov_ports(self.context, instance, guest, network_info) mock_get_image_metadata.assert_called_once_with( instance.system_metadata) self.assertTrue(mock_attachDevice.called) @mock.patch.object(FakeVirtDomain, 'attachDeviceFlags') @mock.patch.object(FakeVirtDomain, 'ID', return_value=1) @mock.patch.object(utils, 'get_image_from_system_metadata', return_value=None) def test_attach_sriov_ports_with_info_cache(self, mock_get_image_metadata, mock_ID, mock_attachDevice): instance = objects.Instance(**self.test_instance) network_info = _fake_network_info(self.stubs, 1) network_info[0]['vnic_type'] = network_model.VNIC_TYPE_DIRECT instance.info_cache = objects.InstanceInfoCache( network_info=network_info) guest = libvirt_guest.Guest(FakeVirtDomain()) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) drvr._attach_sriov_ports(self.context, instance, guest, None) mock_get_image_metadata.assert_called_once_with( instance.system_metadata) self.assertTrue(mock_attachDevice.called) @mock.patch.object(host.Host, 'has_min_version', return_value=True) @mock.patch.object(FakeVirtDomain, 'detachDeviceFlags') @mock.patch.object(utils, 'get_image_from_system_metadata', return_value=None) def test_detach_sriov_ports(self, mock_get_image_metadata, mock_detachDeviceFlags, mock_has_min_version): instance = objects.Instance(**self.test_instance) network_info = _fake_network_info(self.stubs, 1) network_info[0]['vnic_type'] = network_model.VNIC_TYPE_DIRECT instance.info_cache = objects.InstanceInfoCache( network_info=network_info) domain = FakeVirtDomain() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) guest = libvirt_guest.Guest(domain) drvr._detach_sriov_ports(self.context, instance, guest) mock_get_image_metadata.assert_called_once_with( instance.system_metadata) self.assertTrue(mock_detachDeviceFlags.called) def test_resume(self): dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>" "<devices>" "<disk type='file'><driver name='qemu' type='raw'/>" "<source file='/test/disk'/>" "<target dev='vda' bus='virtio'/></disk>" "<disk type='file'><driver name='qemu' type='qcow2'/>" "<source file='/test/disk.local'/>" "<target dev='vdb' bus='virtio'/></disk>" "</devices></domain>") instance = objects.Instance(**self.test_instance) network_info = _fake_network_info(self.stubs, 1) block_device_info = None drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) guest = libvirt_guest.Guest('fake_dom') with contextlib.nested( mock.patch.object(drvr, '_get_existing_domain_xml', return_value=dummyxml), mock.patch.object(drvr, '_create_domain_and_network', return_value=guest), mock.patch.object(drvr, '_attach_pci_devices'), mock.patch.object(pci_manager, 'get_instance_pci_devs', return_value='fake_pci_devs'), mock.patch.object(utils, 'get_image_from_system_metadata'), mock.patch.object(blockinfo, 'get_disk_info'), ) as (_get_existing_domain_xml, _create_domain_and_network, _attach_pci_devices, get_instance_pci_devs, get_image_metadata, get_disk_info): get_image_metadata.return_value = {'bar': 234} disk_info = {'foo': 123} get_disk_info.return_value = disk_info 
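            # The mocks above freeze disk_info and the image metadata so the
            # assertions below can verify that resume() rebuilds the domain
            # from the existing XML and re-attaches the instance's PCI devices.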
drvr.resume(self.context, instance, network_info, block_device_info) _get_existing_domain_xml.assert_has_calls([mock.call(instance, network_info, block_device_info)]) _create_domain_and_network.assert_has_calls([mock.call( self.context, dummyxml, instance, network_info, disk_info, block_device_info=block_device_info, vifs_already_plugged=True)]) _attach_pci_devices.assert_has_calls([mock.call(guest, 'fake_pci_devs')]) @mock.patch.object(host.Host, 'get_domain') @mock.patch.object(libvirt_driver.LibvirtDriver, 'get_info') @mock.patch.object(libvirt_driver.LibvirtDriver, 'delete_instance_files') @mock.patch.object(objects.Instance, 'save') def test_destroy_undefines(self, mock_save, mock_delete_instance_files, mock_get_info, mock_get_domain): dom_mock = mock.MagicMock() dom_mock.undefineFlags.return_value = 1 drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) mock_get_domain.return_value = dom_mock mock_get_info.return_value = hardware.InstanceInfo( state=power_state.SHUTDOWN, id=-1) mock_delete_instance_files.return_value = None instance = objects.Instance(self.context, **self.test_instance) drvr.destroy(self.context, instance, []) mock_save.assert_called_once_with() @mock.patch.object(rbd_utils, 'RBDDriver') def test_cleanup_rbd(self, mock_driver): driver = mock_driver.return_value driver.cleanup_volumes = mock.Mock() fake_instance = {'uuid': '875a8070-d0b9-4949-8b31-104d125c9a64'} drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) drvr._cleanup_rbd(fake_instance) driver.cleanup_volumes.assert_called_once_with(fake_instance) @mock.patch.object(objects.Instance, 'save') def test_destroy_undefines_no_undefine_flags(self, mock_save): mock = self.mox.CreateMock(fakelibvirt.virDomain) mock.ID() mock.destroy() mock.undefineFlags(1).AndRaise(fakelibvirt.libvirtError('Err')) mock.ID().AndReturn(123) mock.undefine() self.mox.ReplayAll() def fake_get_domain(instance): return mock def fake_get_info(instance_name): return hardware.InstanceInfo(state=power_state.SHUTDOWN, id=-1) def fake_delete_instance_files(instance): return None drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.stubs.Set(drvr._host, 'get_domain', fake_get_domain) self.stubs.Set(drvr, 'get_info', fake_get_info) self.stubs.Set(drvr, 'delete_instance_files', fake_delete_instance_files) instance = objects.Instance(self.context, **self.test_instance) drvr.destroy(self.context, instance, []) mock_save.assert_called_once_with() @mock.patch.object(objects.Instance, 'save') def test_destroy_undefines_no_attribute_with_managed_save(self, mock_save): mock = self.mox.CreateMock(fakelibvirt.virDomain) mock.ID() mock.destroy() mock.undefineFlags(1).AndRaise(AttributeError()) mock.hasManagedSaveImage(0).AndReturn(True) mock.managedSaveRemove(0) mock.undefine() self.mox.ReplayAll() def fake_get_domain(instance): return mock def fake_get_info(instance_name): return hardware.InstanceInfo(state=power_state.SHUTDOWN, id=-1) def fake_delete_instance_files(instance): return None drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.stubs.Set(drvr._host, 'get_domain', fake_get_domain) self.stubs.Set(drvr, 'get_info', fake_get_info) self.stubs.Set(drvr, 'delete_instance_files', fake_delete_instance_files) instance = objects.Instance(self.context, **self.test_instance) drvr.destroy(self.context, instance, []) mock_save.assert_called_once_with() @mock.patch.object(objects.Instance, 'save') def test_destroy_undefines_no_attribute_no_managed_save(self, mock_save): mock = 
self.mox.CreateMock(fakelibvirt.virDomain) mock.ID() mock.destroy() mock.undefineFlags(1).AndRaise(AttributeError()) mock.hasManagedSaveImage(0).AndRaise(AttributeError()) mock.undefine() self.mox.ReplayAll() def fake_get_domain(self, instance): return mock def fake_get_info(instance_name): return hardware.InstanceInfo(state=power_state.SHUTDOWN) def fake_delete_instance_files(instance): return None drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.stubs.Set(host.Host, 'get_domain', fake_get_domain) self.stubs.Set(drvr, 'get_info', fake_get_info) self.stubs.Set(drvr, 'delete_instance_files', fake_delete_instance_files) instance = objects.Instance(self.context, **self.test_instance) drvr.destroy(self.context, instance, []) mock_save.assert_called_once_with() def test_destroy_timed_out(self): mock = self.mox.CreateMock(fakelibvirt.virDomain) mock.ID() mock.destroy().AndRaise(fakelibvirt.libvirtError("timed out")) self.mox.ReplayAll() def fake_get_domain(self, instance): return mock def fake_get_error_code(self): return fakelibvirt.VIR_ERR_OPERATION_TIMEOUT drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.stubs.Set(host.Host, 'get_domain', fake_get_domain) self.stubs.Set(fakelibvirt.libvirtError, 'get_error_code', fake_get_error_code) instance = objects.Instance(**self.test_instance) self.assertRaises(exception.InstancePowerOffFailure, drvr.destroy, self.context, instance, []) def test_private_destroy_not_found(self): ex = fakelibvirt.make_libvirtError( fakelibvirt.libvirtError, "No such domain", error_code=fakelibvirt.VIR_ERR_NO_DOMAIN) mock = self.mox.CreateMock(fakelibvirt.virDomain) mock.ID() mock.destroy().AndRaise(ex) mock.info().AndRaise(ex) mock.UUIDString() self.mox.ReplayAll() def fake_get_domain(instance): return mock drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.stubs.Set(drvr._host, 'get_domain', fake_get_domain) instance = objects.Instance(**self.test_instance) # NOTE(vish): verifies destroy doesn't raise if the instance disappears drvr._destroy(instance) def test_private_destroy_lxc_processes_refused_to_die(self): self.flags(virt_type='lxc', group='libvirt') ex = fakelibvirt.make_libvirtError( fakelibvirt.libvirtError, "", error_message="internal error: Some processes refused to die", error_code=fakelibvirt.VIR_ERR_INTERNAL_ERROR) conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) with mock.patch.object(conn._host, 'get_domain') as mock_get_domain, \ mock.patch.object(conn, 'get_info') as mock_get_info: mock_domain = mock.MagicMock() mock_domain.ID.return_value = 1 mock_get_domain.return_value = mock_domain mock_domain.destroy.side_effect = ex mock_info = mock.MagicMock() mock_info.id = 1 mock_info.state = power_state.SHUTDOWN mock_get_info.return_value = mock_info instance = objects.Instance(**self.test_instance) conn._destroy(instance) def test_private_destroy_processes_refused_to_die_still_raises(self): ex = fakelibvirt.make_libvirtError( fakelibvirt.libvirtError, "", error_message="internal error: Some processes refused to die", error_code=fakelibvirt.VIR_ERR_INTERNAL_ERROR) conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) with mock.patch.object(conn._host, 'get_domain') as mock_get_domain: mock_domain = mock.MagicMock() mock_domain.ID.return_value = 1 mock_get_domain.return_value = mock_domain mock_domain.destroy.side_effect = ex instance = objects.Instance(**self.test_instance) self.assertRaises(fakelibvirt.libvirtError, conn._destroy, instance) def 
test_private_destroy_ebusy_timeout(self): # Tests that _destroy will retry 3 times to destroy the guest when an # EBUSY is raised, but eventually times out and raises the libvirtError ex = fakelibvirt.make_libvirtError( fakelibvirt.libvirtError, ("Failed to terminate process 26425 with SIGKILL: " "Device or resource busy"), error_code=fakelibvirt.VIR_ERR_SYSTEM_ERROR, int1=errno.EBUSY) mock_guest = mock.Mock(libvirt_guest.Guest, id=1) mock_guest.poweroff = mock.Mock(side_effect=ex) instance = objects.Instance(**self.test_instance) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) with mock.patch.object(drvr._host, 'get_guest', return_value=mock_guest): self.assertRaises(fakelibvirt.libvirtError, drvr._destroy, instance) self.assertEqual(3, mock_guest.poweroff.call_count) def test_private_destroy_ebusy_multiple_attempt_ok(self): # Tests that the _destroy attempt loop is broken when EBUSY is no # longer raised. ex = fakelibvirt.make_libvirtError( fakelibvirt.libvirtError, ("Failed to terminate process 26425 with SIGKILL: " "Device or resource busy"), error_code=fakelibvirt.VIR_ERR_SYSTEM_ERROR, int1=errno.EBUSY) mock_guest = mock.Mock(libvirt_guest.Guest, id=1) mock_guest.poweroff = mock.Mock(side_effect=[ex, None]) inst_info = hardware.InstanceInfo(power_state.SHUTDOWN, id=1) instance = objects.Instance(**self.test_instance) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) with mock.patch.object(drvr._host, 'get_guest', return_value=mock_guest): with mock.patch.object(drvr, 'get_info', return_value=inst_info): drvr._destroy(instance) self.assertEqual(2, mock_guest.poweroff.call_count) def test_undefine_domain_with_not_found_instance(self): def fake_get_domain(self, instance): raise exception.InstanceNotFound(instance_id=instance.uuid) self.stubs.Set(host.Host, 'get_domain', fake_get_domain) self.mox.StubOutWithMock(fakelibvirt.libvirtError, "get_error_code") self.mox.ReplayAll() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) instance = objects.Instance(**self.test_instance) # NOTE(wenjianhn): verifies undefine doesn't raise if the # instance disappears drvr._undefine_domain(instance) @mock.patch.object(host.Host, "list_instance_domains") def test_disk_over_committed_size_total(self, mock_list): # Ensure destroy calls managedSaveRemove for saved instance. 
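        # NOTE: the comment above is a stale copy/paste; what this test really
        # checks is that _get_disk_over_committed_size_total() sums the
        # over_committed_disk_size of every disk reported for the listed
        # domains (10653532160 + 0 in the fake data below).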
class DiagFakeDomain(object): def __init__(self, name): self._name = name def ID(self): return 1 def name(self): return self._name def UUIDString(self): return "19479fee-07a5-49bb-9138-d3738280d63c" def XMLDesc(self, flags): return "<domain/>" mock_list.return_value = [ DiagFakeDomain("instance0000001"), DiagFakeDomain("instance0000002")] drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) fake_disks = {'instance0000001': [{'type': 'qcow2', 'path': '/somepath/disk1', 'virt_disk_size': '10737418240', 'backing_file': '/somepath/disk1', 'disk_size': '83886080', 'over_committed_disk_size': '10653532160'}], 'instance0000002': [{'type': 'raw', 'path': '/somepath/disk2', 'virt_disk_size': '0', 'backing_file': '/somepath/disk2', 'disk_size': '10737418240', 'over_committed_disk_size': '0'}]} def get_info(instance_name, xml, **kwargs): return fake_disks.get(instance_name) with mock.patch.object(drvr, "_get_instance_disk_info") as mock_info: mock_info.side_effect = get_info result = drvr._get_disk_over_committed_size_total() self.assertEqual(result, 10653532160) mock_list.assert_called_with() self.assertTrue(mock_info.called) @mock.patch.object(host.Host, "list_instance_domains") def test_disk_over_committed_size_total_eperm(self, mock_list): # Ensure destroy calls managedSaveRemove for saved instance. class DiagFakeDomain(object): def __init__(self, name): self._name = name def ID(self): return 1 def name(self): return self._name def UUIDString(self): return "19479fee-07a5-49bb-9138-d3738280d63c" def XMLDesc(self, flags): return "<domain/>" mock_list.return_value = [ DiagFakeDomain("instance0000001"), DiagFakeDomain("instance0000002")] drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) fake_disks = {'instance0000001': [{'type': 'qcow2', 'path': '/somepath/disk1', 'virt_disk_size': '10737418240', 'backing_file': '/somepath/disk1', 'disk_size': '83886080', 'over_committed_disk_size': '10653532160'}], 'instance0000002': [{'type': 'raw', 'path': '/somepath/disk2', 'virt_disk_size': '0', 'backing_file': '/somepath/disk2', 'disk_size': '10737418240', 'over_committed_disk_size': '21474836480'}]} def side_effect(name, dom): if name == 'instance0000001': raise OSError(errno.EACCES, 'Permission denied') if name == 'instance0000002': return fake_disks.get(name) get_disk_info = mock.Mock() get_disk_info.side_effect = side_effect drvr._get_instance_disk_info = get_disk_info result = drvr._get_disk_over_committed_size_total() self.assertEqual(21474836480, result) mock_list.assert_called_with() @mock.patch.object(host.Host, "list_instance_domains", return_value=[mock.MagicMock(name='foo')]) @mock.patch.object(libvirt_driver.LibvirtDriver, "_get_instance_disk_info", side_effect=exception.VolumeBDMPathNotFound(path='bar')) def test_disk_over_committed_size_total_bdm_not_found(self, mock_get_disk_info, mock_list_domains): # Tests that we handle VolumeBDMPathNotFound gracefully. 
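        # Every call to _get_instance_disk_info blows up with
        # VolumeBDMPathNotFound, so the driver should skip the domain rather
        # than propagate the error, leaving the over-committed total at 0.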
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.assertEqual(0, drvr._get_disk_over_committed_size_total()) def test_cpu_info(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) def get_host_capabilities_stub(self): cpu = vconfig.LibvirtConfigCPU() cpu.model = "Opteron_G4" cpu.vendor = "AMD" cpu.arch = arch.X86_64 cpu.cores = 2 cpu.threads = 1 cpu.sockets = 4 cpu.add_feature(vconfig.LibvirtConfigCPUFeature("extapic")) cpu.add_feature(vconfig.LibvirtConfigCPUFeature("3dnow")) caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = cpu guest = vconfig.LibvirtConfigGuest() guest.ostype = vm_mode.HVM guest.arch = arch.X86_64 guest.domtype = ["kvm"] caps.guests.append(guest) guest = vconfig.LibvirtConfigGuest() guest.ostype = vm_mode.HVM guest.arch = arch.I686 guest.domtype = ["kvm"] caps.guests.append(guest) return caps self.stubs.Set(host.Host, "get_capabilities", get_host_capabilities_stub) want = {"vendor": "AMD", "features": set(["extapic", "3dnow"]), "model": "Opteron_G4", "arch": arch.X86_64, "topology": {"cores": 2, "threads": 1, "sockets": 4}} got = drvr._get_cpu_info() self.assertEqual(want, got) def test_get_pcidev_info(self): def fake_nodeDeviceLookupByName(self, name): return FakeNodeDevice(_fake_NodeDevXml[name]) self.mox.StubOutWithMock(host.Host, 'device_lookup_by_name') host.Host.device_lookup_by_name = fake_nodeDeviceLookupByName drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) actualvf = drvr._get_pcidev_info("pci_0000_04_00_3") expect_vf = { "dev_id": "pci_0000_04_00_3", "address": "0000:04:00.3", "product_id": '1521', "numa_node": None, "vendor_id": '8086', "label": 'label_8086_1521', "dev_type": fields.PciDeviceType.SRIOV_PF, } self.assertEqual(expect_vf, actualvf) actualvf = drvr._get_pcidev_info("pci_0000_04_10_7") expect_vf = { "dev_id": "pci_0000_04_10_7", "address": "0000:04:10.7", "product_id": '1520', "numa_node": None, "vendor_id": '8086', "label": 'label_8086_1520', "dev_type": fields.PciDeviceType.SRIOV_VF, "phys_function": '0000:04:00.3', } self.assertEqual(expect_vf, actualvf) actualvf = drvr._get_pcidev_info("pci_0000_04_11_7") expect_vf = { "dev_id": "pci_0000_04_11_7", "address": "0000:04:11.7", "product_id": '1520', "vendor_id": '8086', "numa_node": 0, "label": 'label_8086_1520', "dev_type": fields.PciDeviceType.SRIOV_VF, "phys_function": '0000:04:00.3', } self.assertEqual(expect_vf, actualvf) def test_list_devices_not_supported(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) # Handle just the NO_SUPPORT error not_supported_exc = fakelibvirt.make_libvirtError( fakelibvirt.libvirtError, 'this function is not supported by the connection driver:' ' virNodeNumOfDevices', error_code=fakelibvirt.VIR_ERR_NO_SUPPORT) with mock.patch.object(drvr._conn, 'listDevices', side_effect=not_supported_exc): self.assertEqual('[]', drvr._get_pci_passthrough_devices()) # We cache not supported status to avoid emitting too many logging # messages. Clear this value to test the other exception case. 
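        # _list_devices_supported caches the "not supported" probe result;
        # deleting it forces the next call to hit libvirt again, so errors
        # other than VIR_ERR_NO_SUPPORT are re-raised instead of masked.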
del drvr._list_devices_supported # Other errors should not be caught other_exc = fakelibvirt.make_libvirtError( fakelibvirt.libvirtError, 'other exc', error_code=fakelibvirt.VIR_ERR_NO_DOMAIN) with mock.patch.object(drvr._conn, 'listDevices', side_effect=other_exc): self.assertRaises(fakelibvirt.libvirtError, drvr._get_pci_passthrough_devices) def test_get_pci_passthrough_devices(self): def fakelistDevices(caps, fakeargs=0): return ['pci_0000_04_00_3', 'pci_0000_04_10_7', 'pci_0000_04_11_7'] self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn') libvirt_driver.LibvirtDriver._conn.listDevices = fakelistDevices def fake_nodeDeviceLookupByName(self, name): return FakeNodeDevice(_fake_NodeDevXml[name]) self.mox.StubOutWithMock(host.Host, 'device_lookup_by_name') host.Host.device_lookup_by_name = fake_nodeDeviceLookupByName drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) actjson = drvr._get_pci_passthrough_devices() expectvfs = [ { "dev_id": "pci_0000_04_00_3", "address": "0000:04:00.3", "product_id": '1521', "vendor_id": '8086', "dev_type": fields.PciDeviceType.SRIOV_PF, "phys_function": None, "numa_node": None}, { "dev_id": "pci_0000_04_10_7", "domain": 0, "address": "0000:04:10.7", "product_id": '1520', "vendor_id": '8086', "numa_node": None, "dev_type": fields.PciDeviceType.SRIOV_VF, "phys_function": [('0x0000', '0x04', '0x00', '0x3')]}, { "dev_id": "pci_0000_04_11_7", "domain": 0, "address": "0000:04:11.7", "product_id": '1520', "vendor_id": '8086', "numa_node": 0, "dev_type": fields.PciDeviceType.SRIOV_VF, "phys_function": [('0x0000', '0x04', '0x00', '0x3')], } ] actualvfs = jsonutils.loads(actjson) for dev in range(len(actualvfs)): for key in actualvfs[dev].keys(): if key not in ['phys_function', 'virt_functions', 'label']: self.assertEqual(expectvfs[dev][key], actualvfs[dev][key]) def _fake_caps_numa_topology(self, cells_per_host=4, sockets_per_cell=1, cores_per_socket=1, threads_per_core=2, kb_mem=1048576): # Generate mempages list per cell cell_mempages = list() for cellid in range(cells_per_host): mempages_0 = vconfig.LibvirtConfigCapsNUMAPages() mempages_0.size = 4 mempages_0.total = 1024 * cellid mempages_1 = vconfig.LibvirtConfigCapsNUMAPages() mempages_1.size = 2048 mempages_1.total = 0 + cellid cell_mempages.append([mempages_0, mempages_1]) topology = fakelibvirt.HostInfo._gen_numa_topology(cells_per_host, sockets_per_cell, cores_per_socket, threads_per_core, kb_mem=kb_mem, numa_mempages_list=cell_mempages) return topology def _test_get_host_numa_topology(self, mempages): caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = vconfig.LibvirtConfigCPU() caps.host.cpu.arch = arch.X86_64 caps.host.topology = self._fake_caps_numa_topology() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) expected_topo_dict = {'cells': [ {'cpus': '0,1', 'cpu_usage': 0, 'mem': {'total': 256, 'used': 0}, 'id': 0}, {'cpus': '3', 'cpu_usage': 0, 'mem': {'total': 256, 'used': 0}, 'id': 1}, {'cpus': '', 'cpu_usage': 0, 'mem': {'total': 256, 'used': 0}, 'id': 2}, {'cpus': '', 'cpu_usage': 0, 'mem': {'total': 256, 'used': 0}, 'id': 3}]} with contextlib.nested( mock.patch.object(host.Host, "get_capabilities", return_value=caps), mock.patch.object( hardware, 'get_vcpu_pin_set', return_value=set([0, 1, 3, 4, 5])), mock.patch.object(host.Host, 'get_online_cpus', return_value=set([0, 1, 2, 3, 6])), ): got_topo = drvr._get_host_numa_topology() got_topo_dict = got_topo._to_dict() self.assertThat( expected_topo_dict, 
matchers.DictMatches(got_topo_dict)) if mempages: # cells 0 self.assertEqual(4, got_topo.cells[0].mempages[0].size_kb) self.assertEqual(0, got_topo.cells[0].mempages[0].total) self.assertEqual(2048, got_topo.cells[0].mempages[1].size_kb) self.assertEqual(0, got_topo.cells[0].mempages[1].total) # cells 1 self.assertEqual(4, got_topo.cells[1].mempages[0].size_kb) self.assertEqual(1024, got_topo.cells[1].mempages[0].total) self.assertEqual(2048, got_topo.cells[1].mempages[1].size_kb) self.assertEqual(1, got_topo.cells[1].mempages[1].total) else: self.assertEqual([], got_topo.cells[0].mempages) self.assertEqual([], got_topo.cells[1].mempages) self.assertEqual(expected_topo_dict, got_topo_dict) self.assertEqual(set([]), got_topo.cells[0].pinned_cpus) self.assertEqual(set([]), got_topo.cells[1].pinned_cpus) self.assertEqual(set([]), got_topo.cells[2].pinned_cpus) self.assertEqual(set([]), got_topo.cells[3].pinned_cpus) self.assertEqual([set([0, 1])], got_topo.cells[0].siblings) self.assertEqual([], got_topo.cells[1].siblings) @mock.patch.object(host.Host, 'has_min_version', return_value=True) def test_get_host_numa_topology(self, mock_version): self._test_get_host_numa_topology(mempages=True) @mock.patch.object(fakelibvirt.Connection, 'getType') @mock.patch.object(fakelibvirt.Connection, 'getVersion') @mock.patch.object(fakelibvirt.Connection, 'getLibVersion') def test_get_host_numa_topology_no_mempages(self, mock_lib_version, mock_version, mock_type): self.flags(virt_type='kvm', group='libvirt') mock_lib_version.return_value = utils.convert_version_to_int( libvirt_driver.MIN_LIBVIRT_HUGEPAGE_VERSION) - 1 mock_version.return_value = utils.convert_version_to_int( libvirt_driver.MIN_QEMU_NUMA_HUGEPAGE_VERSION) mock_type.return_value = host.HV_DRIVER_QEMU self._test_get_host_numa_topology(mempages=False) def test_get_host_numa_topology_empty(self): caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = vconfig.LibvirtConfigCPU() caps.host.cpu.arch = arch.X86_64 caps.host.topology = None drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) with contextlib.nested( mock.patch.object(host.Host, 'has_min_version', return_value=True), mock.patch.object(host.Host, "get_capabilities", return_value=caps) ) as (has_min_version, get_caps): self.assertIsNone(drvr._get_host_numa_topology()) self.assertEqual(2, get_caps.call_count) @mock.patch.object(fakelibvirt.Connection, 'getType') @mock.patch.object(fakelibvirt.Connection, 'getVersion') @mock.patch.object(fakelibvirt.Connection, 'getLibVersion') def test_get_host_numa_topology_old_version(self, mock_lib_version, mock_version, mock_type): self.flags(virt_type='kvm', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) mock_lib_version.return_value = utils.convert_version_to_int( libvirt_driver.MIN_LIBVIRT_NUMA_VERSION) - 1 mock_version.return_value = utils.convert_version_to_int( libvirt_driver.MIN_QEMU_NUMA_HUGEPAGE_VERSION) mock_type.return_value = host.HV_DRIVER_QEMU self.assertIsNone(drvr._get_host_numa_topology()) @mock.patch.object(fakelibvirt.Connection, 'getType') @mock.patch.object(fakelibvirt.Connection, 'getVersion') @mock.patch.object(fakelibvirt.Connection, 'getLibVersion') def test_get_host_numa_topology_xen(self, mock_lib_version, mock_version, mock_type): self.flags(virt_type='xen', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) mock_lib_version.return_value = utils.convert_version_to_int( libvirt_driver.MIN_LIBVIRT_NUMA_VERSION) 
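        # libvirt itself is new enough for NUMA here; it is the Xen driver
        # type set below that should make _get_host_numa_topology() bail out
        # with None.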
mock_version.return_value = utils.convert_version_to_int( libvirt_driver.MIN_QEMU_NUMA_HUGEPAGE_VERSION) mock_type.return_value = host.HV_DRIVER_XEN self.assertIsNone(drvr._get_host_numa_topology()) def test_diagnostic_vcpus_exception(self): xml = """ <domain type='kvm'> <devices> <disk type='file'> <source file='filename'/> <target dev='vda' bus='virtio'/> </disk> <disk type='block'> <source dev='/path/to/dev/1'/> <target dev='vdb' bus='virtio'/> </disk> <interface type='network'> <mac address='52:54:00:a4:38:38'/> <source network='default'/> <target dev='vnet0'/> </interface> </devices> </domain> """ class DiagFakeDomain(FakeVirtDomain): def __init__(self): super(DiagFakeDomain, self).__init__(fake_xml=xml) def vcpus(self): raise fakelibvirt.libvirtError('vcpus missing') def blockStats(self, path): return (169, 688640, 0, 0, -1) def interfaceStats(self, path): return (4408, 82, 0, 0, 0, 0, 0, 0) def memoryStats(self): return {'actual': 220160, 'rss': 200164} def maxMemory(self): return 280160 def fake_get_domain(self, instance): return DiagFakeDomain() self.stubs.Set(host.Host, "get_domain", fake_get_domain) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) instance = objects.Instance(**self.test_instance) actual = drvr.get_diagnostics(instance) expect = {'vda_read': 688640, 'vda_read_req': 169, 'vda_write': 0, 'vda_write_req': 0, 'vda_errors': -1, 'vdb_read': 688640, 'vdb_read_req': 169, 'vdb_write': 0, 'vdb_write_req': 0, 'vdb_errors': -1, 'memory': 280160, 'memory-actual': 220160, 'memory-rss': 200164, 'vnet0_rx': 4408, 'vnet0_rx_drop': 0, 'vnet0_rx_errors': 0, 'vnet0_rx_packets': 82, 'vnet0_tx': 0, 'vnet0_tx_drop': 0, 'vnet0_tx_errors': 0, 'vnet0_tx_packets': 0, } self.assertEqual(actual, expect) lt = datetime.datetime(2012, 11, 22, 12, 00, 00) diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10) timeutils.set_time_override(diags_time) instance.launched_at = lt actual = drvr.get_instance_diagnostics(instance) expected = {'config_drive': False, 'cpu_details': [], 'disk_details': [{'errors_count': 0, 'id': '', 'read_bytes': 688640, 'read_requests': 169, 'write_bytes': 0, 'write_requests': 0}, {'errors_count': 0, 'id': '', 'read_bytes': 688640, 'read_requests': 169, 'write_bytes': 0, 'write_requests': 0}], 'driver': 'libvirt', 'hypervisor_os': 'linux', 'memory_details': {'maximum': 2048, 'used': 1234}, 'nic_details': [{'mac_address': '52:54:00:a4:38:38', 'rx_drop': 0, 'rx_errors': 0, 'rx_octets': 4408, 'rx_packets': 82, 'tx_drop': 0, 'tx_errors': 0, 'tx_octets': 0, 'tx_packets': 0}], 'state': 'running', 'uptime': 10, 'version': '1.0'} self.assertEqual(expected, actual.serialize()) def test_diagnostic_blockstats_exception(self): xml = """ <domain type='kvm'> <devices> <disk type='file'> <source file='filename'/> <target dev='vda' bus='virtio'/> </disk> <disk type='block'> <source dev='/path/to/dev/1'/> <target dev='vdb' bus='virtio'/> </disk> <interface type='network'> <mac address='52:54:00:a4:38:38'/> <source network='default'/> <target dev='vnet0'/> </interface> </devices> </domain> """ class DiagFakeDomain(FakeVirtDomain): def __init__(self): super(DiagFakeDomain, self).__init__(fake_xml=xml) def vcpus(self): return ([(0, 1, 15340000000, 0), (1, 1, 1640000000, 0), (2, 1, 3040000000, 0), (3, 1, 1420000000, 0)], [(True, False), (True, False), (True, False), (True, False)]) def blockStats(self, path): raise fakelibvirt.libvirtError('blockStats missing') def interfaceStats(self, path): return (4408, 82, 0, 0, 0, 0, 0, 0) def memoryStats(self): return {'actual': 220160, 
'rss': 200164} def maxMemory(self): return 280160 def fake_get_domain(self, instance): return DiagFakeDomain() self.stubs.Set(host.Host, "get_domain", fake_get_domain) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) instance = objects.Instance(**self.test_instance) actual = drvr.get_diagnostics(instance) expect = {'cpu0_time': 15340000000, 'cpu1_time': 1640000000, 'cpu2_time': 3040000000, 'cpu3_time': 1420000000, 'memory': 280160, 'memory-actual': 220160, 'memory-rss': 200164, 'vnet0_rx': 4408, 'vnet0_rx_drop': 0, 'vnet0_rx_errors': 0, 'vnet0_rx_packets': 82, 'vnet0_tx': 0, 'vnet0_tx_drop': 0, 'vnet0_tx_errors': 0, 'vnet0_tx_packets': 0, } self.assertEqual(actual, expect) lt = datetime.datetime(2012, 11, 22, 12, 00, 00) diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10) timeutils.set_time_override(diags_time) instance.launched_at = lt actual = drvr.get_instance_diagnostics(instance) expected = {'config_drive': False, 'cpu_details': [{'time': 15340000000}, {'time': 1640000000}, {'time': 3040000000}, {'time': 1420000000}], 'disk_details': [], 'driver': 'libvirt', 'hypervisor_os': 'linux', 'memory_details': {'maximum': 2048, 'used': 1234}, 'nic_details': [{'mac_address': '52:54:00:a4:38:38', 'rx_drop': 0, 'rx_errors': 0, 'rx_octets': 4408, 'rx_packets': 82, 'tx_drop': 0, 'tx_errors': 0, 'tx_octets': 0, 'tx_packets': 0}], 'state': 'running', 'uptime': 10, 'version': '1.0'} self.assertEqual(expected, actual.serialize()) def test_diagnostic_interfacestats_exception(self): xml = """ <domain type='kvm'> <devices> <disk type='file'> <source file='filename'/> <target dev='vda' bus='virtio'/> </disk> <disk type='block'> <source dev='/path/to/dev/1'/> <target dev='vdb' bus='virtio'/> </disk> <interface type='network'> <mac address='52:54:00:a4:38:38'/> <source network='default'/> <target dev='vnet0'/> </interface> </devices> </domain> """ class DiagFakeDomain(FakeVirtDomain): def __init__(self): super(DiagFakeDomain, self).__init__(fake_xml=xml) def vcpus(self): return ([(0, 1, 15340000000, 0), (1, 1, 1640000000, 0), (2, 1, 3040000000, 0), (3, 1, 1420000000, 0)], [(True, False), (True, False), (True, False), (True, False)]) def blockStats(self, path): return (169, 688640, 0, 0, -1) def interfaceStats(self, path): raise fakelibvirt.libvirtError('interfaceStat missing') def memoryStats(self): return {'actual': 220160, 'rss': 200164} def maxMemory(self): return 280160 def fake_get_domain(self, instance): return DiagFakeDomain() self.stubs.Set(host.Host, "get_domain", fake_get_domain) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) instance = objects.Instance(**self.test_instance) actual = drvr.get_diagnostics(instance) expect = {'cpu0_time': 15340000000, 'cpu1_time': 1640000000, 'cpu2_time': 3040000000, 'cpu3_time': 1420000000, 'vda_read': 688640, 'vda_read_req': 169, 'vda_write': 0, 'vda_write_req': 0, 'vda_errors': -1, 'vdb_read': 688640, 'vdb_read_req': 169, 'vdb_write': 0, 'vdb_write_req': 0, 'vdb_errors': -1, 'memory': 280160, 'memory-actual': 220160, 'memory-rss': 200164, } self.assertEqual(actual, expect) lt = datetime.datetime(2012, 11, 22, 12, 00, 00) diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10) timeutils.set_time_override(diags_time) instance.launched_at = lt actual = drvr.get_instance_diagnostics(instance) expected = {'config_drive': False, 'cpu_details': [{'time': 15340000000}, {'time': 1640000000}, {'time': 3040000000}, {'time': 1420000000}], 'disk_details': [{'errors_count': 0, 'id': '', 'read_bytes': 688640, 'read_requests': 169, 
'write_bytes': 0, 'write_requests': 0}, {'errors_count': 0, 'id': '', 'read_bytes': 688640, 'read_requests': 169, 'write_bytes': 0, 'write_requests': 0}], 'driver': 'libvirt', 'hypervisor_os': 'linux', 'memory_details': {'maximum': 2048, 'used': 1234}, 'nic_details': [], 'state': 'running', 'uptime': 10, 'version': '1.0'} self.assertEqual(expected, actual.serialize()) def test_diagnostic_memorystats_exception(self): xml = """ <domain type='kvm'> <devices> <disk type='file'> <source file='filename'/> <target dev='vda' bus='virtio'/> </disk> <disk type='block'> <source dev='/path/to/dev/1'/> <target dev='vdb' bus='virtio'/> </disk> <interface type='network'> <mac address='52:54:00:a4:38:38'/> <source network='default'/> <target dev='vnet0'/> </interface> </devices> </domain> """ class DiagFakeDomain(FakeVirtDomain): def __init__(self): super(DiagFakeDomain, self).__init__(fake_xml=xml) def vcpus(self): return ([(0, 1, 15340000000, 0), (1, 1, 1640000000, 0), (2, 1, 3040000000, 0), (3, 1, 1420000000, 0)], [(True, False), (True, False), (True, False), (True, False)]) def blockStats(self, path): return (169, 688640, 0, 0, -1) def interfaceStats(self, path): return (4408, 82, 0, 0, 0, 0, 0, 0) def memoryStats(self): raise fakelibvirt.libvirtError('memoryStats missing') def maxMemory(self): return 280160 def fake_get_domain(self, instance): return DiagFakeDomain() self.stubs.Set(host.Host, "get_domain", fake_get_domain) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) instance = objects.Instance(**self.test_instance) actual = drvr.get_diagnostics(instance) expect = {'cpu0_time': 15340000000, 'cpu1_time': 1640000000, 'cpu2_time': 3040000000, 'cpu3_time': 1420000000, 'vda_read': 688640, 'vda_read_req': 169, 'vda_write': 0, 'vda_write_req': 0, 'vda_errors': -1, 'vdb_read': 688640, 'vdb_read_req': 169, 'vdb_write': 0, 'vdb_write_req': 0, 'vdb_errors': -1, 'memory': 280160, 'vnet0_rx': 4408, 'vnet0_rx_drop': 0, 'vnet0_rx_errors': 0, 'vnet0_rx_packets': 82, 'vnet0_tx': 0, 'vnet0_tx_drop': 0, 'vnet0_tx_errors': 0, 'vnet0_tx_packets': 0, } self.assertEqual(actual, expect) lt = datetime.datetime(2012, 11, 22, 12, 00, 00) diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10) timeutils.set_time_override(diags_time) instance.launched_at = lt actual = drvr.get_instance_diagnostics(instance) expected = {'config_drive': False, 'cpu_details': [{'time': 15340000000}, {'time': 1640000000}, {'time': 3040000000}, {'time': 1420000000}], 'disk_details': [{'errors_count': 0, 'id': '', 'read_bytes': 688640, 'read_requests': 169, 'write_bytes': 0, 'write_requests': 0}, {'errors_count': 0, 'id': '', 'read_bytes': 688640, 'read_requests': 169, 'write_bytes': 0, 'write_requests': 0}], 'driver': 'libvirt', 'hypervisor_os': 'linux', 'memory_details': {'maximum': 2048, 'used': 1234}, 'nic_details': [{'mac_address': '52:54:00:a4:38:38', 'rx_drop': 0, 'rx_errors': 0, 'rx_octets': 4408, 'rx_packets': 82, 'tx_drop': 0, 'tx_errors': 0, 'tx_octets': 0, 'tx_packets': 0}], 'state': 'running', 'uptime': 10, 'version': '1.0'} self.assertEqual(expected, actual.serialize()) def test_diagnostic_full(self): xml = """ <domain type='kvm'> <devices> <disk type='file'> <source file='filename'/> <target dev='vda' bus='virtio'/> </disk> <disk type='block'> <source dev='/path/to/dev/1'/> <target dev='vdb' bus='virtio'/> </disk> <interface type='network'> <mac address='52:54:00:a4:38:38'/> <source network='default'/> <target dev='vnet0'/> </interface> </devices> </domain> """ class DiagFakeDomain(FakeVirtDomain): def __init__(self): 
super(DiagFakeDomain, self).__init__(fake_xml=xml) def vcpus(self): return ([(0, 1, 15340000000, 0), (1, 1, 1640000000, 0), (2, 1, 3040000000, 0), (3, 1, 1420000000, 0)], [(True, False), (True, False), (True, False), (True, False)]) def blockStats(self, path): return (169, 688640, 0, 0, -1) def interfaceStats(self, path): return (4408, 82, 0, 0, 0, 0, 0, 0) def memoryStats(self): return {'actual': 220160, 'rss': 200164} def maxMemory(self): return 280160 def fake_get_domain(self, instance): return DiagFakeDomain() self.stubs.Set(host.Host, "get_domain", fake_get_domain) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) instance = objects.Instance(**self.test_instance) actual = drvr.get_diagnostics(instance) expect = {'cpu0_time': 15340000000, 'cpu1_time': 1640000000, 'cpu2_time': 3040000000, 'cpu3_time': 1420000000, 'vda_read': 688640, 'vda_read_req': 169, 'vda_write': 0, 'vda_write_req': 0, 'vda_errors': -1, 'vdb_read': 688640, 'vdb_read_req': 169, 'vdb_write': 0, 'vdb_write_req': 0, 'vdb_errors': -1, 'memory': 280160, 'memory-actual': 220160, 'memory-rss': 200164, 'vnet0_rx': 4408, 'vnet0_rx_drop': 0, 'vnet0_rx_errors': 0, 'vnet0_rx_packets': 82, 'vnet0_tx': 0, 'vnet0_tx_drop': 0, 'vnet0_tx_errors': 0, 'vnet0_tx_packets': 0, } self.assertEqual(actual, expect) lt = datetime.datetime(2012, 11, 22, 12, 00, 00) diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10) timeutils.set_time_override(diags_time) instance.launched_at = lt actual = drvr.get_instance_diagnostics(instance) expected = {'config_drive': False, 'cpu_details': [{'time': 15340000000}, {'time': 1640000000}, {'time': 3040000000}, {'time': 1420000000}], 'disk_details': [{'errors_count': 0, 'id': '', 'read_bytes': 688640, 'read_requests': 169, 'write_bytes': 0, 'write_requests': 0}, {'errors_count': 0, 'id': '', 'read_bytes': 688640, 'read_requests': 169, 'write_bytes': 0, 'write_requests': 0}], 'driver': 'libvirt', 'hypervisor_os': 'linux', 'memory_details': {'maximum': 2048, 'used': 1234}, 'nic_details': [{'mac_address': '52:54:00:a4:38:38', 'rx_drop': 0, 'rx_errors': 0, 'rx_octets': 4408, 'rx_packets': 82, 'tx_drop': 0, 'tx_errors': 0, 'tx_octets': 0, 'tx_packets': 0}], 'state': 'running', 'uptime': 10, 'version': '1.0'} self.assertEqual(expected, actual.serialize()) @mock.patch.object(timeutils, 'utcnow') @mock.patch.object(host.Host, 'get_domain') def test_diagnostic_full_with_multiple_interfaces(self, mock_get_domain, mock_utcnow): xml = """ <domain type='kvm'> <devices> <disk type='file'> <source file='filename'/> <target dev='vda' bus='virtio'/> </disk> <disk type='block'> <source dev='/path/to/dev/1'/> <target dev='vdb' bus='virtio'/> </disk> <interface type='network'> <mac address='52:54:00:a4:38:38'/> <source network='default'/> <target dev='vnet0'/> </interface> <interface type="bridge"> <mac address="53:55:00:a5:39:39"/> <model type="virtio"/> <target dev="br0"/> </interface> </devices> </domain> """ class DiagFakeDomain(FakeVirtDomain): def __init__(self): super(DiagFakeDomain, self).__init__(fake_xml=xml) def vcpus(self): return ([(0, 1, 15340000000, 0), (1, 1, 1640000000, 0), (2, 1, 3040000000, 0), (3, 1, 1420000000, 0)], [(True, False), (True, False), (True, False), (True, False)]) def blockStats(self, path): return (169, 688640, 0, 0, -1) def interfaceStats(self, path): return (4408, 82, 0, 0, 0, 0, 0, 0) def memoryStats(self): return {'actual': 220160, 'rss': 200164} def maxMemory(self): return 280160 def fake_get_domain(self): return DiagFakeDomain() mock_get_domain.side_effect = fake_get_domain 
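        # Two <interface> devices (vnet0 and br0) are defined in the XML, so
        # both the legacy get_diagnostics() dict and the versioned
        # get_instance_diagnostics() payload should carry one stats group per
        # target dev, each fed by the same interfaceStats() stub.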
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) instance = objects.Instance(**self.test_instance) actual = drvr.get_diagnostics(instance) expect = {'cpu0_time': 15340000000, 'cpu1_time': 1640000000, 'cpu2_time': 3040000000, 'cpu3_time': 1420000000, 'vda_read': 688640, 'vda_read_req': 169, 'vda_write': 0, 'vda_write_req': 0, 'vda_errors': -1, 'vdb_read': 688640, 'vdb_read_req': 169, 'vdb_write': 0, 'vdb_write_req': 0, 'vdb_errors': -1, 'memory': 280160, 'memory-actual': 220160, 'memory-rss': 200164, 'vnet0_rx': 4408, 'vnet0_rx_drop': 0, 'vnet0_rx_errors': 0, 'vnet0_rx_packets': 82, 'vnet0_tx': 0, 'vnet0_tx_drop': 0, 'vnet0_tx_errors': 0, 'vnet0_tx_packets': 0, 'br0_rx': 4408, 'br0_rx_drop': 0, 'br0_rx_errors': 0, 'br0_rx_packets': 82, 'br0_tx': 0, 'br0_tx_drop': 0, 'br0_tx_errors': 0, 'br0_tx_packets': 0, } self.assertEqual(actual, expect) lt = datetime.datetime(2012, 11, 22, 12, 00, 00) diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10) mock_utcnow.return_value = diags_time instance.launched_at = lt actual = drvr.get_instance_diagnostics(instance) expected = {'config_drive': False, 'cpu_details': [{'time': 15340000000}, {'time': 1640000000}, {'time': 3040000000}, {'time': 1420000000}], 'disk_details': [{'errors_count': 0, 'id': '', 'read_bytes': 688640, 'read_requests': 169, 'write_bytes': 0, 'write_requests': 0}, {'errors_count': 0, 'id': '', 'read_bytes': 688640, 'read_requests': 169, 'write_bytes': 0, 'write_requests': 0}], 'driver': 'libvirt', 'hypervisor_os': 'linux', 'memory_details': {'maximum': 2048, 'used': 1234}, 'nic_details': [{'mac_address': '52:54:00:a4:38:38', 'rx_drop': 0, 'rx_errors': 0, 'rx_octets': 4408, 'rx_packets': 82, 'tx_drop': 0, 'tx_errors': 0, 'tx_octets': 0, 'tx_packets': 0}, {'mac_address': '53:55:00:a5:39:39', 'rx_drop': 0, 'rx_errors': 0, 'rx_octets': 4408, 'rx_packets': 82, 'tx_drop': 0, 'tx_errors': 0, 'tx_octets': 0, 'tx_packets': 0}], 'state': 'running', 'uptime': 10., 'version': '1.0'} self.assertEqual(expected, actual.serialize()) @mock.patch.object(host.Host, "list_instance_domains") def test_failing_vcpu_count(self, mock_list): """Domain can fail to return the vcpu description in case it's just starting up or shutting down. Make sure None is handled gracefully. """ class DiagFakeDomain(object): def __init__(self, vcpus): self._vcpus = vcpus def vcpus(self): if self._vcpus is None: raise fakelibvirt.libvirtError("fake-error") else: return ([[1, 2, 3, 4]] * self._vcpus, [True] * self._vcpus) def ID(self): return 1 def name(self): return "instance000001" def UUIDString(self): return "19479fee-07a5-49bb-9138-d3738280d63c" mock_list.return_value = [ DiagFakeDomain(None), DiagFakeDomain(5)] drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.assertEqual(5, drvr._get_vcpu_used()) mock_list.assert_called_with() @mock.patch.object(host.Host, "list_instance_domains") def test_failing_vcpu_count_none(self, mock_list): """Domain will return zero if the current number of vcpus used is None. This is in case of VM state starting up or shutting down. None type returned is counted as zero. 
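        In other words, a domain whose vcpus() call yields None simply
        contributes nothing to the total reported by _get_vcpu_used().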
""" class DiagFakeDomain(object): def __init__(self): pass def vcpus(self): return None def ID(self): return 1 def name(self): return "instance000001" mock_list.return_value = [DiagFakeDomain()] drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.assertEqual(0, drvr._get_vcpu_used()) mock_list.assert_called_with() def test_get_instance_capabilities(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) def get_host_capabilities_stub(self): caps = vconfig.LibvirtConfigCaps() guest = vconfig.LibvirtConfigGuest() guest.ostype = 'hvm' guest.arch = arch.X86_64 guest.domtype = ['kvm', 'qemu'] caps.guests.append(guest) guest = vconfig.LibvirtConfigGuest() guest.ostype = 'hvm' guest.arch = arch.I686 guest.domtype = ['kvm'] caps.guests.append(guest) return caps self.stubs.Set(host.Host, "get_capabilities", get_host_capabilities_stub) want = [(arch.X86_64, 'kvm', 'hvm'), (arch.X86_64, 'qemu', 'hvm'), (arch.I686, 'kvm', 'hvm')] got = drvr._get_instance_capabilities() self.assertEqual(want, got) def test_set_cache_mode(self): self.flags(disk_cachemodes=['file=directsync'], group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) fake_conf = FakeConfigGuestDisk() fake_conf.source_type = 'file' drvr._set_cache_mode(fake_conf) self.assertEqual(fake_conf.driver_cache, 'directsync') def test_set_cache_mode_invalid_mode(self): self.flags(disk_cachemodes=['file=FAKE'], group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) fake_conf = FakeConfigGuestDisk() fake_conf.source_type = 'file' drvr._set_cache_mode(fake_conf) self.assertIsNone(fake_conf.driver_cache) def test_set_cache_mode_invalid_object(self): self.flags(disk_cachemodes=['file=directsync'], group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) fake_conf = FakeConfigGuest() fake_conf.driver_cache = 'fake' drvr._set_cache_mode(fake_conf) self.assertEqual(fake_conf.driver_cache, 'fake') @mock.patch('os.unlink') @mock.patch.object(os.path, 'exists') def _test_shared_storage_detection(self, is_same, mock_exists, mock_unlink): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) drvr.get_host_ip_addr = mock.MagicMock(return_value='bar') mock_exists.return_value = is_same with contextlib.nested( mock.patch.object(drvr._remotefs, 'create_file'), mock.patch.object(drvr._remotefs, 'remove_file') ) as (mock_rem_fs_create, mock_rem_fs_remove): result = drvr._is_storage_shared_with('host', '/path') mock_rem_fs_create.assert_any_call('host', mock.ANY) create_args, create_kwargs = mock_rem_fs_create.call_args self.assertTrue(create_args[1].startswith('/path')) if is_same: mock_unlink.assert_called_once_with(mock.ANY) else: mock_rem_fs_remove.assert_called_with('host', mock.ANY) remove_args, remove_kwargs = mock_rem_fs_remove.call_args self.assertTrue(remove_args[1].startswith('/path')) return result def test_shared_storage_detection_same_host(self): self.assertTrue(self._test_shared_storage_detection(True)) def test_shared_storage_detection_different_host(self): self.assertFalse(self._test_shared_storage_detection(False)) def test_shared_storage_detection_easy(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) self.mox.StubOutWithMock(drvr, 'get_host_ip_addr') self.mox.StubOutWithMock(utils, 'execute') self.mox.StubOutWithMock(os.path, 'exists') self.mox.StubOutWithMock(os, 'unlink') drvr.get_host_ip_addr().AndReturn('foo') self.mox.ReplayAll() self.assertTrue(drvr._is_storage_shared_with('foo', '/path')) def 
test_store_pid_remove_pid(self): instance = objects.Instance(**self.test_instance) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) popen = mock.Mock(pid=3) drvr.job_tracker.add_job(instance, popen.pid) self.assertIn(3, drvr.job_tracker.jobs[instance.uuid]) drvr.job_tracker.remove_job(instance, popen.pid) self.assertNotIn(instance.uuid, drvr.job_tracker.jobs) @mock.patch('nova.virt.libvirt.host.Host.get_domain') def test_get_domain_info_with_more_return(self, mock_get_domain): instance = objects.Instance(**self.test_instance) dom_mock = mock.MagicMock() dom_mock.info.return_value = [ 1, 2048, 737, 8, 12345, 888888 ] dom_mock.ID.return_value = mock.sentinel.instance_id mock_get_domain.return_value = dom_mock drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) info = drvr.get_info(instance) self.assertEqual(1, info.state) self.assertEqual(2048, info.max_mem_kb) self.assertEqual(737, info.mem_kb) self.assertEqual(8, info.num_cpu) self.assertEqual(12345, info.cpu_time_ns) self.assertEqual(mock.sentinel.instance_id, info.id) dom_mock.info.assert_called_once_with() dom_mock.ID.assert_called_once_with() mock_get_domain.assert_called_once_with(instance) def test_create_domain(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) mock_domain = mock.MagicMock() guest = drvr._create_domain(domain=mock_domain) self.assertEqual(mock_domain, guest._domain) mock_domain.createWithFlags.assert_has_calls([mock.call(0)]) @mock.patch('nova.virt.disk.api.clean_lxc_namespace') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.get_info') @mock.patch('nova.virt.disk.api.setup_container') @mock.patch('oslo_utils.fileutils.ensure_tree') @mock.patch.object(fake_libvirt_utils, 'get_instance_path') def test_create_domain_lxc(self, mock_get_inst_path, mock_ensure_tree, mock_setup_container, mock_get_info, mock_clean): self.flags(virt_type='lxc', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) mock_instance = mock.MagicMock() inst_sys_meta = dict() mock_instance.system_metadata = inst_sys_meta mock_get_inst_path.return_value = '/tmp/' mock_image_backend = mock.MagicMock() drvr.image_backend = mock_image_backend mock_image = mock.MagicMock() mock_image.path = '/tmp/test.img' drvr.image_backend.image.return_value = mock_image mock_setup_container.return_value = '/dev/nbd0' mock_get_info.return_value = hardware.InstanceInfo( state=power_state.RUNNING) with contextlib.nested( mock.patch.object(drvr, '_create_images_and_backing'), mock.patch.object(drvr, '_is_booted_from_volume', return_value=False), mock.patch.object(drvr, '_create_domain'), mock.patch.object(drvr, 'plug_vifs'), mock.patch.object(drvr.firewall_driver, 'setup_basic_filtering'), mock.patch.object(drvr.firewall_driver, 'prepare_instance_filter'), mock.patch.object(drvr.firewall_driver, 'apply_instance_filter')): drvr._create_domain_and_network(self.context, 'xml', mock_instance, [], None) self.assertEqual('/dev/nbd0', inst_sys_meta['rootfs_device_name']) self.assertFalse(mock_instance.called) mock_get_inst_path.assert_has_calls([mock.call(mock_instance)]) mock_ensure_tree.assert_has_calls([mock.call('/tmp/rootfs')]) drvr.image_backend.image.assert_has_calls([mock.call(mock_instance, 'disk')]) setup_container_call = mock.call( mock_image.get_model(), container_dir='/tmp/rootfs') mock_setup_container.assert_has_calls([setup_container_call]) mock_get_info.assert_has_calls([mock.call(mock_instance)]) mock_clean.assert_has_calls([mock.call(container_dir='/tmp/rootfs')]) 
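    # A minimal sketch (hypothetical helper, not referenced by any test): the
    # uid_maps/gid_maps flags exercised by the next test follow libvirt's
    # "start:target:count" convention, e.g. "0:1000:100" maps container id 0
    # onto host id 1000 for 100 consecutive ids.
    def _sketch_parse_id_map_flag(self, flag):
        # _sketch_parse_id_map_flag("0:1000:100") -> (0, 1000, 100)
        start, target, count = (int(part) for part in flag.split(':'))
        return start, target, count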
@mock.patch('nova.virt.disk.api.clean_lxc_namespace') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.get_info') @mock.patch.object(fake_libvirt_utils, 'chown_for_id_maps') @mock.patch('nova.virt.disk.api.setup_container') @mock.patch('oslo_utils.fileutils.ensure_tree') @mock.patch.object(fake_libvirt_utils, 'get_instance_path') def test_create_domain_lxc_id_maps(self, mock_get_inst_path, mock_ensure_tree, mock_setup_container, mock_chown, mock_get_info, mock_clean): self.flags(virt_type='lxc', uid_maps=["0:1000:100"], gid_maps=["0:1000:100"], group='libvirt') def chown_side_effect(path, id_maps): self.assertEqual('/tmp/rootfs', path) self.assertIsInstance(id_maps[0], vconfig.LibvirtConfigGuestUIDMap) self.assertEqual(0, id_maps[0].start) self.assertEqual(1000, id_maps[0].target) self.assertEqual(100, id_maps[0].count) self.assertIsInstance(id_maps[1], vconfig.LibvirtConfigGuestGIDMap) self.assertEqual(0, id_maps[1].start) self.assertEqual(1000, id_maps[1].target) self.assertEqual(100, id_maps[1].count) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) mock_instance = mock.MagicMock() inst_sys_meta = dict() mock_instance.system_metadata = inst_sys_meta mock_get_inst_path.return_value = '/tmp/' mock_image_backend = mock.MagicMock() drvr.image_backend = mock_image_backend mock_image = mock.MagicMock() mock_image.path = '/tmp/test.img' drvr.image_backend.image.return_value = mock_image mock_setup_container.return_value = '/dev/nbd0' mock_chown.side_effect = chown_side_effect mock_get_info.return_value = hardware.InstanceInfo( state=power_state.RUNNING) with contextlib.nested( mock.patch.object(drvr, '_create_images_and_backing'), mock.patch.object(drvr, '_is_booted_from_volume', return_value=False), mock.patch.object(drvr, '_create_domain'), mock.patch.object(drvr, 'plug_vifs'), mock.patch.object(drvr.firewall_driver, 'setup_basic_filtering'), mock.patch.object(drvr.firewall_driver, 'prepare_instance_filter'), mock.patch.object(drvr.firewall_driver, 'apply_instance_filter') ) as ( mock_create_images_and_backing, mock_is_booted_from_volume, mock_create_domain, mock_plug_vifs, mock_setup_basic_filtering, mock_prepare_instance_filter, mock_apply_instance_filter ): drvr._create_domain_and_network(self.context, 'xml', mock_instance, [], None) self.assertEqual('/dev/nbd0', inst_sys_meta['rootfs_device_name']) self.assertFalse(mock_instance.called) mock_get_inst_path.assert_has_calls([mock.call(mock_instance)]) mock_is_booted_from_volume.assert_called_once_with(mock_instance, {}) mock_ensure_tree.assert_has_calls([mock.call('/tmp/rootfs')]) drvr.image_backend.image.assert_has_calls([mock.call(mock_instance, 'disk')]) setup_container_call = mock.call( mock_image.get_model(), container_dir='/tmp/rootfs') mock_setup_container.assert_has_calls([setup_container_call]) mock_get_info.assert_has_calls([mock.call(mock_instance)]) mock_clean.assert_has_calls([mock.call(container_dir='/tmp/rootfs')]) @mock.patch('nova.virt.disk.api.teardown_container') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.get_info') @mock.patch('nova.virt.disk.api.setup_container') @mock.patch('oslo_utils.fileutils.ensure_tree') @mock.patch.object(fake_libvirt_utils, 'get_instance_path') def test_create_domain_lxc_not_running(self, mock_get_inst_path, mock_ensure_tree, mock_setup_container, mock_get_info, mock_teardown): self.flags(virt_type='lxc', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) mock_instance = mock.MagicMock() inst_sys_meta = dict() mock_instance.system_metadata = 
inst_sys_meta mock_get_inst_path.return_value = '/tmp/' mock_image_backend = mock.MagicMock() drvr.image_backend = mock_image_backend mock_image = mock.MagicMock() mock_image.path = '/tmp/test.img' drvr.image_backend.image.return_value = mock_image mock_setup_container.return_value = '/dev/nbd0' mock_get_info.return_value = hardware.InstanceInfo( state=power_state.SHUTDOWN) with contextlib.nested( mock.patch.object(drvr, '_create_images_and_backing'), mock.patch.object(drvr, '_is_booted_from_volume', return_value=False), mock.patch.object(drvr, '_create_domain'), mock.patch.object(drvr, 'plug_vifs'), mock.patch.object(drvr.firewall_driver, 'setup_basic_filtering'), mock.patch.object(drvr.firewall_driver, 'prepare_instance_filter'), mock.patch.object(drvr.firewall_driver, 'apply_instance_filter')): drvr._create_domain_and_network(self.context, 'xml', mock_instance, [], None) self.assertEqual('/dev/nbd0', inst_sys_meta['rootfs_device_name']) self.assertFalse(mock_instance.called) mock_get_inst_path.assert_has_calls([mock.call(mock_instance)]) mock_ensure_tree.assert_has_calls([mock.call('/tmp/rootfs')]) drvr.image_backend.image.assert_has_calls([mock.call(mock_instance, 'disk')]) setup_container_call = mock.call( mock_image.get_model(), container_dir='/tmp/rootfs') mock_setup_container.assert_has_calls([setup_container_call]) mock_get_info.assert_has_calls([mock.call(mock_instance)]) teardown_call = mock.call(container_dir='/tmp/rootfs') mock_teardown.assert_has_calls([teardown_call]) def test_create_domain_define_xml_fails(self): """Tests that the xml is logged when defining the domain fails.""" fake_xml = "<test>this is a test</test>" def fake_defineXML(xml): self.assertEqual(fake_xml, xml) raise fakelibvirt.libvirtError('virDomainDefineXML() failed') def fake_safe_decode(text, *args, **kwargs): return text + 'safe decoded' self.log_error_called = False def fake_error(msg, *args, **kwargs): self.log_error_called = True self.assertIn(fake_xml, msg % args) self.assertIn('safe decoded', msg % args) self.stubs.Set(encodeutils, 'safe_decode', fake_safe_decode) self.stubs.Set(nova.virt.libvirt.guest.LOG, 'error', fake_error) self.create_fake_libvirt_mock(defineXML=fake_defineXML) self.mox.ReplayAll() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) self.assertRaises(fakelibvirt.libvirtError, drvr._create_domain, fake_xml) self.assertTrue(self.log_error_called) def test_create_domain_with_flags_fails(self): """Tests that the xml is logged when creating the domain with flags fails """ fake_xml = "<test>this is a test</test>" fake_domain = FakeVirtDomain(fake_xml) def fake_createWithFlags(launch_flags): raise fakelibvirt.libvirtError('virDomainCreateWithFlags() failed') self.log_error_called = False def fake_error(msg, *args, **kwargs): self.log_error_called = True self.assertIn(fake_xml, msg % args) self.stubs.Set(fake_domain, 'createWithFlags', fake_createWithFlags) self.stubs.Set(nova.virt.libvirt.guest.LOG, 'error', fake_error) self.create_fake_libvirt_mock() self.mox.ReplayAll() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) self.assertRaises(fakelibvirt.libvirtError, drvr._create_domain, domain=fake_domain) self.assertTrue(self.log_error_called) def test_create_domain_enable_hairpin_fails(self): """Tests that the xml is logged when enabling hairpin mode for the domain fails. 
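        The ProcessExecutionError raised while enabling hairpin mode should
        surface to the caller rather than being swallowed.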
""" fake_xml = "<test>this is a test</test>" fake_domain = FakeVirtDomain(fake_xml) def fake_execute(*args, **kwargs): raise processutils.ProcessExecutionError('error') def fake_get_interfaces(*args): return ["dev"] self.log_error_called = False def fake_error(msg, *args, **kwargs): self.log_error_called = True self.assertIn(fake_xml, msg % args) self.stubs.Set(nova.virt.libvirt.guest.LOG, 'error', fake_error) self.create_fake_libvirt_mock() self.mox.ReplayAll() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) self.stubs.Set(nova.utils, 'execute', fake_execute) self.stubs.Set( nova.virt.libvirt.guest.Guest, 'get_interfaces', fake_get_interfaces) self.assertRaises(processutils.ProcessExecutionError, drvr._create_domain, domain=fake_domain, power_on=False) self.assertTrue(self.log_error_called) def test_get_vnc_console(self): instance = objects.Instance(**self.test_instance) dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>" "<devices>" "<graphics type='vnc' port='5900'/>" "</devices></domain>") vdmock = self.mox.CreateMock(fakelibvirt.virDomain) self.mox.StubOutWithMock(vdmock, "XMLDesc") vdmock.XMLDesc(flags=0).AndReturn(dummyxml) def fake_lookup(instance_name): if instance_name == instance['name']: return vdmock self.create_fake_libvirt_mock(lookupByName=fake_lookup) self.mox.ReplayAll() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) vnc_dict = drvr.get_vnc_console(self.context, instance) self.assertEqual(vnc_dict.port, '5900') def test_get_vnc_console_unavailable(self): instance = objects.Instance(**self.test_instance) dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>" "<devices></devices></domain>") vdmock = self.mox.CreateMock(fakelibvirt.virDomain) self.mox.StubOutWithMock(vdmock, "XMLDesc") vdmock.XMLDesc(flags=0).AndReturn(dummyxml) def fake_lookup(instance_name): if instance_name == instance['name']: return vdmock self.create_fake_libvirt_mock(lookupByName=fake_lookup) self.mox.ReplayAll() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.assertRaises(exception.ConsoleTypeUnavailable, drvr.get_vnc_console, self.context, instance) def test_get_spice_console(self): instance = objects.Instance(**self.test_instance) dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>" "<devices>" "<graphics type='spice' port='5950'/>" "</devices></domain>") vdmock = self.mox.CreateMock(fakelibvirt.virDomain) self.mox.StubOutWithMock(vdmock, "XMLDesc") vdmock.XMLDesc(flags=0).AndReturn(dummyxml) def fake_lookup(instance_name): if instance_name == instance['name']: return vdmock self.create_fake_libvirt_mock(lookupByName=fake_lookup) self.mox.ReplayAll() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) spice_dict = drvr.get_spice_console(self.context, instance) self.assertEqual(spice_dict.port, '5950') def test_get_spice_console_unavailable(self): instance = objects.Instance(**self.test_instance) dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>" "<devices></devices></domain>") vdmock = self.mox.CreateMock(fakelibvirt.virDomain) self.mox.StubOutWithMock(vdmock, "XMLDesc") vdmock.XMLDesc(flags=0).AndReturn(dummyxml) def fake_lookup(instance_name): if instance_name == instance['name']: return vdmock self.create_fake_libvirt_mock(lookupByName=fake_lookup) self.mox.ReplayAll() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.assertRaises(exception.ConsoleTypeUnavailable, drvr.get_spice_console, self.context, instance) def test_detach_volume_with_instance_not_found(self): # Test 
        instance = objects.Instance(**self.test_instance)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        with contextlib.nested(
            mock.patch.object(host.Host, 'get_domain',
                              side_effect=exception.InstanceNotFound(
                                  instance_id=instance.uuid)),
            mock.patch.object(drvr, '_disconnect_volume')
        ) as (_get_domain, _disconnect_volume):
            connection_info = {'driver_volume_type': 'fake'}
            drvr.detach_volume(connection_info, instance, '/dev/sda')
            _get_domain.assert_called_once_with(instance)
            _disconnect_volume.assert_called_once_with(connection_info,
                                                       'sda')

    def _test_attach_detach_interface_get_config(self, method_name):
        """Tests that the get_config() method is properly called in
        attach_interface() and detach_interface().

        method_name: either \"attach_interface\" or \"detach_interface\"
                     depending on the method to test.
        """
        self.stubs.Set(host.Host, "get_domain", lambda a, b: FakeVirtDomain())

        instance = objects.Instance(**self.test_instance)
        network_info = _fake_network_info(self.stubs, 1)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        fake_image_meta = {'id': instance['image_ref']}
        fake_image_meta_obj = objects.ImageMeta.from_dict(
            fake_image_meta)

        if method_name == "attach_interface":
            self.mox.StubOutWithMock(drvr.firewall_driver,
                                     'setup_basic_filtering')
            drvr.firewall_driver.setup_basic_filtering(instance, network_info)

        expected = drvr.vif_driver.get_config(instance, network_info[0],
                                              fake_image_meta_obj,
                                              instance.get_flavor(),
                                              CONF.libvirt.virt_type,
                                              drvr._host)
        self.mox.StubOutWithMock(drvr.vif_driver, 'get_config')
        drvr.vif_driver.get_config(instance, network_info[0],
                                   mox.IsA(objects.ImageMeta),
                                   mox.IsA(objects.Flavor),
                                   CONF.libvirt.virt_type,
                                   drvr._host).\
            AndReturn(expected)

        self.mox.ReplayAll()

        if method_name == "attach_interface":
            drvr.attach_interface(instance, fake_image_meta,
                                  network_info[0])
        elif method_name == "detach_interface":
            drvr.detach_interface(instance, network_info[0])
        else:
            raise ValueError("Unhandled method %s" % method_name)

    @mock.patch.object(lockutils, "external_lock")
    def test_attach_interface_get_config(self, mock_lock):
        """Tests that the get_config() method is properly called in
        attach_interface().
        """
        mock_lock.return_value = threading.Semaphore()

        self._test_attach_detach_interface_get_config("attach_interface")

    def test_detach_interface_get_config(self):
        """Tests that the get_config() method is properly called in
        detach_interface().
        """
""" self._test_attach_detach_interface_get_config("detach_interface") def test_default_root_device_name(self): instance = {'uuid': 'fake_instance'} image_meta = {'id': 'fake'} root_bdm = {'source_type': 'image', 'detination_type': 'volume', 'image_id': 'fake_id'} self.flags(virt_type='fake_libvirt_type', group='libvirt') self.mox.StubOutWithMock(blockinfo, 'get_disk_bus_for_device_type') self.mox.StubOutWithMock(blockinfo, 'get_root_info') blockinfo.get_disk_bus_for_device_type(instance, 'fake_libvirt_type', mox.IsA(objects.ImageMeta), 'disk').InAnyOrder().\ AndReturn('virtio') blockinfo.get_disk_bus_for_device_type(instance, 'fake_libvirt_type', mox.IsA(objects.ImageMeta), 'cdrom').InAnyOrder().\ AndReturn('ide') blockinfo.get_root_info(instance, 'fake_libvirt_type', mox.IsA(objects.ImageMeta), root_bdm, 'virtio', 'ide').AndReturn({'dev': 'vda'}) self.mox.ReplayAll() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.assertEqual(drvr.default_root_device_name(instance, image_meta, root_bdm), '/dev/vda') @mock.patch.object(objects.BlockDeviceMapping, "save") def test_default_device_names_for_instance(self, save_mock): instance = objects.Instance(**self.test_instance) instance.root_device_name = '/dev/vda' ephemerals = [objects.BlockDeviceMapping( **fake_block_device.AnonFakeDbBlockDeviceDict( {'device_name': 'vdb', 'source_type': 'blank', 'volume_size': 2, 'destination_type': 'local'}))] swap = [objects.BlockDeviceMapping( **fake_block_device.AnonFakeDbBlockDeviceDict( {'device_name': 'vdg', 'source_type': 'blank', 'volume_size': 512, 'guest_format': 'swap', 'destination_type': 'local'}))] block_device_mapping = [ objects.BlockDeviceMapping( **fake_block_device.AnonFakeDbBlockDeviceDict( {'source_type': 'volume', 'destination_type': 'volume', 'volume_id': 'fake-image-id', 'device_name': '/dev/vdxx', 'disk_bus': 'scsi'}))] drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) drvr.default_device_names_for_instance(instance, instance.root_device_name, ephemerals, swap, block_device_mapping) # Ephemeral device name was correct so no changes self.assertEqual('/dev/vdb', ephemerals[0].device_name) # Swap device name was incorrect so it was changed self.assertEqual('/dev/vdc', swap[0].device_name) # Volume device name was changed too, taking the bus into account self.assertEqual('/dev/sda', block_device_mapping[0].device_name) self.assertEqual(3, save_mock.call_count) def _test_get_device_name_for_instance(self, new_bdm, expected_dev): instance = objects.Instance(**self.test_instance) instance.root_device_name = '/dev/vda' instance.ephemeral_gb = 0 drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) got_dev = drvr.get_device_name_for_instance( instance, [], new_bdm) self.assertEqual(expected_dev, got_dev) def test_get_device_name_for_instance_simple(self): new_bdm = objects.BlockDeviceMapping( context=context, source_type='volume', destination_type='volume', boot_index=-1, volume_id='fake-id', device_name=None, guest_format=None, disk_bus=None, device_type=None) self._test_get_device_name_for_instance(new_bdm, '/dev/vdb') def test_get_device_name_for_instance_suggested(self): new_bdm = objects.BlockDeviceMapping( context=context, source_type='volume', destination_type='volume', boot_index=-1, volume_id='fake-id', device_name='/dev/vdg', guest_format=None, disk_bus=None, device_type=None) self._test_get_device_name_for_instance(new_bdm, '/dev/vdb') def test_get_device_name_for_instance_bus(self): new_bdm = objects.BlockDeviceMapping( context=context, 
            source_type='volume', destination_type='volume',
            boot_index=-1, volume_id='fake-id',
            device_name=None, guest_format=None,
            disk_bus='scsi', device_type=None)
        self._test_get_device_name_for_instance(new_bdm, '/dev/sda')

    def test_get_device_name_for_instance_device_type(self):
        new_bdm = objects.BlockDeviceMapping(
            context=context, source_type='volume', destination_type='volume',
            boot_index=-1, volume_id='fake-id',
            device_name=None, guest_format=None,
            disk_bus=None, device_type='floppy')
        self._test_get_device_name_for_instance(new_bdm, '/dev/fda')

    def test_is_supported_fs_format(self):
        supported_fs = [disk.FS_FORMAT_EXT2, disk.FS_FORMAT_EXT3,
                        disk.FS_FORMAT_EXT4, disk.FS_FORMAT_XFS]
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        for fs in supported_fs:
            self.assertTrue(drvr.is_supported_fs_format(fs))

        supported_fs = ['', 'dummy']
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        for fs in supported_fs:
            self.assertFalse(drvr.is_supported_fs_format(fs))

    def test_post_live_migration_at_destination_with_block_device_info(self):
        # Preparing mocks
        mock_domain = self.mox.CreateMock(fakelibvirt.virDomain)
        self.resultXML = None

        def fake_getLibVersion():
            return 9011

        def fake_getCapabilities():
            return """
            <capabilities>
                <host>
                    <uuid>cef19ce0-0ca2-11df-855d-b19fbce37686</uuid>
                    <cpu>
                      <arch>x86_64</arch>
                      <model>Penryn</model>
                      <vendor>Intel</vendor>
                      <topology sockets='1' cores='2' threads='1'/>
                      <feature name='xtpr'/>
                    </cpu>
                </host>
            </capabilities>
            """

        def fake_to_xml(context, instance, network_info, disk_info,
                        image_meta=None, rescue=None,
                        block_device_info=None, write_to_disk=False):
            if image_meta is None:
                image_meta = {}
            conf = drvr._get_guest_config(instance, network_info, image_meta,
                                          disk_info, rescue,
                                          block_device_info)
            self.resultXML = conf.to_xml()
            return self.resultXML

        def fake_get_domain(instance):
            return mock_domain

        def fake_baselineCPU(cpu, flag):
            return """<cpu mode='custom' match='exact'>
                        <model fallback='allow'>Westmere</model>
                        <vendor>Intel</vendor>
                        <feature policy='require' name='aes'/>
                      </cpu>
                   """

        network_info = _fake_network_info(self.stubs, 1)
        self.create_fake_libvirt_mock(getLibVersion=fake_getLibVersion,
                                      getCapabilities=fake_getCapabilities,
                                      getVersion=lambda: 1005001,
                                      listDefinedDomains=lambda: [],
                                      numOfDomains=lambda: 0,
                                      baselineCPU=fake_baselineCPU)
        instance_ref = self.test_instance
        instance_ref['image_ref'] = 123456  # we send an int to test sha1 call
        instance = objects.Instance(**instance_ref)

        self.mox.ReplayAll()

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.stubs.Set(drvr,
                       '_get_guest_xml',
                       fake_to_xml)
        self.stubs.Set(host.Host,
                       'get_domain',
                       fake_get_domain)
        block_device_info = {'block_device_mapping':
                driver_block_device.convert_volumes([
                    fake_block_device.FakeDbBlockDeviceDict(
                        {'id': 1, 'guest_format': None,
                         'boot_index': 0,
                         'source_type': 'volume',
                         'destination_type': 'volume',
                         'device_name': '/dev/vda',
                         'disk_bus': 'virtio',
                         'device_type': 'disk',
                         'delete_on_termination': False}),
                ])}
        block_device_info['block_device_mapping'][0]['connection_info'] = (
            {'driver_volume_type': 'iscsi'})
        with contextlib.nested(
                mock.patch.object(
                    driver_block_device.DriverVolumeBlockDevice, 'save'),
                mock.patch.object(objects.Instance, 'save')
        ) as (mock_volume_save, mock_instance_save):
            drvr.post_live_migration_at_destination(
                    self.context, instance, network_info, True,
                    block_device_info=block_device_info)
            self.assertIn('fake', self.resultXML)
            mock_volume_save.assert_called_once_with()
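
    # Exceptions raised while the domain is being created should
    # propagate unchanged out of _create_domain_and_network.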
    def test_create_propagates_exceptions(self):
        self.flags(virt_type='lxc', group='libvirt')

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(id=1, uuid='fake-uuid',
                                    image_ref='my_fake_image')

        with contextlib.nested(
              mock.patch.object(drvr, '_create_domain_setup_lxc'),
              mock.patch.object(drvr, '_create_domain_cleanup_lxc'),
              mock.patch.object(drvr, '_is_booted_from_volume',
                                return_value=False),
              mock.patch.object(drvr, 'plug_vifs'),
              mock.patch.object(drvr, 'firewall_driver'),
              mock.patch.object(drvr, '_create_domain',
                                side_effect=exception.NovaException),
              mock.patch.object(drvr, 'cleanup')):
            self.assertRaises(exception.NovaException,
                              drvr._create_domain_and_network,
                              self.context,
                              'xml',
                              instance, None, None)

    def test_create_without_pause(self):
        self.flags(virt_type='lxc', group='libvirt')

        @contextlib.contextmanager
        def fake_lxc_disk_handler(*args, **kwargs):
            yield

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)

        with contextlib.nested(
              mock.patch.object(drvr, '_lxc_disk_handler',
                                side_effect=fake_lxc_disk_handler),
              mock.patch.object(drvr, 'plug_vifs'),
              mock.patch.object(drvr, 'firewall_driver'),
              mock.patch.object(drvr, '_create_domain'),
              mock.patch.object(drvr, 'cleanup')) as (
              _handler, plug_vifs, firewall_driver, create, cleanup):
            domain = drvr._create_domain_and_network(self.context, 'xml',
                                                     instance, None, None)
            self.assertEqual(0, create.call_args_list[0][1]['pause'])
            self.assertEqual(0, domain.resume.call_count)

    def _test_create_with_network_events(self, neutron_failure=None,
                                         power_on=True):
        generated_events = []

        def wait_timeout():
            event = mock.MagicMock()
            if neutron_failure == 'timeout':
                raise eventlet.timeout.Timeout()
            elif neutron_failure == 'error':
                event.status = 'failed'
            else:
                event.status = 'completed'
            return event

        def fake_prepare(instance, event_name):
            m = mock.MagicMock()
            m.instance = instance
            m.event_name = event_name
            m.wait.side_effect = wait_timeout
            generated_events.append(m)
            return m

        virtapi = manager.ComputeVirtAPI(mock.MagicMock())
        prepare = virtapi._compute.instance_events.prepare_for_instance_event
        prepare.side_effect = fake_prepare
        drvr = libvirt_driver.LibvirtDriver(virtapi, False)

        instance = objects.Instance(**self.test_instance)
        vifs = [{'id': 'vif1', 'active': False},
                {'id': 'vif2', 'active': False}]

        @mock.patch.object(drvr, 'plug_vifs')
        @mock.patch.object(drvr, 'firewall_driver')
        @mock.patch.object(drvr, '_create_domain')
        @mock.patch.object(drvr, 'cleanup')
        def test_create(cleanup, create, fw_driver, plug_vifs):
            domain = drvr._create_domain_and_network(self.context, 'xml',
                                                     instance, vifs, None,
                                                     power_on=power_on)
            plug_vifs.assert_called_with(instance, vifs)

            pause = self._get_pause_flag(drvr, vifs, power_on=power_on)
            self.assertEqual(pause,
                             create.call_args_list[0][1]['pause'])
            if pause:
                domain.resume.assert_called_once_with()
            if neutron_failure and CONF.vif_plugging_is_fatal:
                cleanup.assert_called_once_with(self.context,
                                                instance, network_info=vifs,
                                                block_device_info=None)

        test_create()

        if utils.is_neutron() and CONF.vif_plugging_timeout and power_on:
            prepare.assert_has_calls([
                mock.call(instance, 'network-vif-plugged-vif1'),
                mock.call(instance, 'network-vif-plugged-vif2')])
            for event in generated_events:
                if neutron_failure and generated_events.index(event) != 0:
                    self.assertEqual(0, event.call_count)
                elif (neutron_failure == 'error' and
                          not CONF.vif_plugging_is_fatal):
                    event.wait.assert_called_once_with()
        else:
            self.assertEqual(0, prepare.call_count)

    @mock.patch('nova.utils.is_neutron', return_value=True)
    def test_create_with_network_events_neutron(self, is_neutron):
        self._test_create_with_network_events()

    @mock.patch('nova.utils.is_neutron', return_value=True)
    def test_create_with_network_events_neutron_power_off(self, is_neutron):
        # Tests that we don't wait for events if we don't start the instance.
        self._test_create_with_network_events(power_on=False)

    @mock.patch('nova.utils.is_neutron', return_value=True)
    def test_create_with_network_events_neutron_nowait(self, is_neutron):
        self.flags(vif_plugging_timeout=0)
        self._test_create_with_network_events()

    @mock.patch('nova.utils.is_neutron', return_value=True)
    def test_create_with_network_events_neutron_failed_nonfatal_timeout(
            self, is_neutron):
        self.flags(vif_plugging_is_fatal=False)
        self._test_create_with_network_events(neutron_failure='timeout')

    @mock.patch('nova.utils.is_neutron', return_value=True)
    def test_create_with_network_events_neutron_failed_fatal_timeout(
            self, is_neutron):
        self.assertRaises(exception.VirtualInterfaceCreateException,
                          self._test_create_with_network_events,
                          neutron_failure='timeout')

    @mock.patch('nova.utils.is_neutron', return_value=True)
    def test_create_with_network_events_neutron_failed_nonfatal_error(
            self, is_neutron):
        self.flags(vif_plugging_is_fatal=False)
        self._test_create_with_network_events(neutron_failure='error')

    @mock.patch('nova.utils.is_neutron', return_value=True)
    def test_create_with_network_events_neutron_failed_fatal_error(
            self, is_neutron):
        self.assertRaises(exception.VirtualInterfaceCreateException,
                          self._test_create_with_network_events,
                          neutron_failure='error')

    @mock.patch('nova.utils.is_neutron', return_value=False)
    def test_create_with_network_events_non_neutron(self, is_neutron):
        self._test_create_with_network_events()
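
    # Boot with an encrypted volume attached: the encryptor is looked up
    # from the volume's encryption metadata before the domain is created.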
    @mock.patch('nova.volume.encryptors.get_encryption_metadata')
    @mock.patch('nova.virt.libvirt.blockinfo.get_info_from_bdm')
    def test_create_with_bdm(self, get_info_from_bdm,
                             get_encryption_metadata):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        mock_dom = mock.MagicMock()
        mock_encryption_meta = mock.MagicMock()
        get_encryption_metadata.return_value = mock_encryption_meta

        fake_xml = """
            <domain>
                <name>instance-00000001</name>
                <memory>1048576</memory>
                <vcpu>1</vcpu>
                <devices>
                    <disk type='file' device='disk'>
                        <driver name='qemu' type='raw' cache='none'/>
                        <source file='/path/fake-volume1'/>
                        <target dev='vda' bus='virtio'/>
                    </disk>
                </devices>
            </domain>
        """
        fake_volume_id = "fake-volume-id"
        connection_info = {"driver_volume_type": "fake",
                           "data": {"access_mode": "rw",
                                    "volume_id": fake_volume_id}}

        def fake_getitem(*args, **kwargs):
            fake_bdm = {'connection_info': connection_info,
                        'mount_device': '/dev/vda'}
            return fake_bdm.get(args[0])

        mock_volume = mock.MagicMock()
        mock_volume.__getitem__.side_effect = fake_getitem
        block_device_info = {'block_device_mapping': [mock_volume]}
        network_info = [network_model.VIF(id='1'),
                        network_model.VIF(id='2', active=True)]

        with contextlib.nested(
            mock.patch.object(drvr, '_get_volume_encryptor'),
            mock.patch.object(drvr, 'plug_vifs'),
            mock.patch.object(drvr.firewall_driver, 'setup_basic_filtering'),
            mock.patch.object(drvr.firewall_driver,
                              'prepare_instance_filter'),
            mock.patch.object(drvr, '_create_domain'),
            mock.patch.object(drvr.firewall_driver, 'apply_instance_filter'),
        ) as (get_volume_encryptor, plug_vifs, setup_basic_filtering,
              prepare_instance_filter, create_domain, apply_instance_filter):
            create_domain.return_value = libvirt_guest.Guest(mock_dom)

            guest = drvr._create_domain_and_network(
                    self.context, fake_xml, instance, network_info, None,
                    block_device_info=block_device_info)

            get_encryption_metadata.assert_called_once_with(self.context,
                drvr._volume_api, fake_volume_id, connection_info)
            get_volume_encryptor.assert_called_once_with(connection_info,
                                                         mock_encryption_meta)
            plug_vifs.assert_called_once_with(instance, network_info)
            setup_basic_filtering.assert_called_once_with(instance,
                                                          network_info)
            prepare_instance_filter.assert_called_once_with(instance,
                                                            network_info)
            pause = self._get_pause_flag(drvr, network_info)
            create_domain.assert_called_once_with(
                fake_xml, pause=pause, power_on=True)
            self.assertEqual(mock_dom, guest._domain)

    def test_get_guest_storage_config(self):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        test_instance = copy.deepcopy(self.test_instance)
        test_instance["default_swap_device"] = None
        instance = objects.Instance(**test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        flavor = instance.get_flavor()
        conn_info = {'driver_volume_type': 'fake', 'data': {}}
        bdi = {'block_device_mapping':
               driver_block_device.convert_volumes([
                   fake_block_device.FakeDbBlockDeviceDict({
                       'id': 1,
                       'source_type': 'volume',
                       'destination_type': 'volume',
                       'device_name': '/dev/vdc'})
               ])}
        bdm = bdi['block_device_mapping'][0]
        bdm['connection_info'] = conn_info
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance, image_meta, bdi)
        mock_conf = mock.MagicMock(source_path='fake')

        with contextlib.nested(
                mock.patch.object(driver_block_device.DriverVolumeBlockDevice,
                                  'save'),
                mock.patch.object(drvr, '_connect_volume'),
                mock.patch.object(drvr, '_get_volume_config',
                                  return_value=mock_conf),
                mock.patch.object(drvr, '_set_cache_mode')
        ) as (volume_save, connect_volume, get_volume_config,
              set_cache_mode):
            devices = drvr._get_guest_storage_config(instance, image_meta,
                disk_info, False, bdi, flavor, "hvm")

            self.assertEqual(3, len(devices))
            self.assertEqual('/dev/vdb', instance.default_ephemeral_device)
            self.assertIsNone(instance.default_swap_device)
            connect_volume.assert_called_with(bdm['connection_info'],
                {'bus': 'virtio', 'type': 'disk', 'dev': 'vdc'})
            get_volume_config.assert_called_with(bdm['connection_info'],
                {'bus': 'virtio', 'type': 'disk', 'dev': 'vdc'})
            volume_save.assert_called_once_with()
            self.assertEqual(3, set_cache_mode.call_count)

    def test_get_neutron_events(self):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        network_info = [network_model.VIF(id='1'),
                        network_model.VIF(id='2', active=True)]
        events = drvr._get_neutron_events(network_info)
        self.assertEqual([('network-vif-plugged', '1')], events)

    def test_unplug_vifs_ignores_errors(self):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
        with mock.patch.object(drvr, 'vif_driver') as vif_driver:
            vif_driver.unplug.side_effect = exception.AgentError(
                method='unplug')
            drvr._unplug_vifs('inst', [1], ignore_errors=True)
            vif_driver.unplug.assert_called_once_with('inst', 1)

    def test_unplug_vifs_reports_errors(self):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
        with mock.patch.object(drvr, 'vif_driver') as vif_driver:
            vif_driver.unplug.side_effect = exception.AgentError(
                method='unplug')
            self.assertRaises(exception.AgentError,
                              drvr.unplug_vifs,
                              'inst', [1])
            vif_driver.unplug.assert_called_once_with('inst', 1)

    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._unplug_vifs')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._undefine_domain')
    def test_cleanup_pass_with_no_mount_device(self, undefine, unplug):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
        drvr.firewall_driver = mock.Mock()
        drvr._disconnect_volume = mock.Mock()
        fake_inst = {'name': 'foo'}
        fake_bdms = [{'connection_info': 'foo',
                      'mount_device': None}]
        with mock.patch('nova.virt.driver'
                        '.block_device_info_get_mapping',
                        return_value=fake_bdms):
            drvr.cleanup('ctxt', fake_inst, 'netinfo', destroy_disks=False)
        self.assertTrue(drvr._disconnect_volume.called)

    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._unplug_vifs')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._undefine_domain')
    def test_cleanup_wants_vif_errors_ignored(self, undefine, unplug):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
        fake_inst = {'name': 'foo'}
        with mock.patch.object(drvr._conn, 'lookupByName') as lookup:
            lookup.return_value = fake_inst
            # NOTE(danms): Make unplug cause us to bail early, since
            # we only care about how it was called
            unplug.side_effect = test.TestingException
            self.assertRaises(test.TestingException,
                              drvr.cleanup, 'ctxt', fake_inst, 'netinfo')
            unplug.assert_called_once_with(fake_inst, 'netinfo', True)

    @mock.patch.object(driver, 'block_device_info_get_mapping')
    @mock.patch.object(host.Host, "get_guest")
    @mock.patch.object(libvirt_driver.LibvirtDriver,
                       '_get_serial_ports_from_guest')
    @mock.patch.object(libvirt_driver.LibvirtDriver, '_undefine_domain')
    def test_cleanup_serial_console_enabled(
            self, undefine, get_ports, get_guest,
            block_device_info_get_mapping):
        self.flags(enabled="True", group='serial_console')
        instance = 'i1'
        network_info = {}
        bdm_info = {}
        firewall_driver = mock.MagicMock()

        guest = mock.Mock(spec=libvirt_guest.Guest)
        get_guest.return_value = guest
        get_ports.return_value = iter([('127.0.0.1', 10000)])
        block_device_info_get_mapping.return_value = ()

        # We want to ensure undefine_domain is called after
        # lookup_domain.
        def undefine_domain(instance):
            get_ports.side_effect = Exception("domain undefined")
        undefine.side_effect = undefine_domain

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
        drvr.firewall_driver = firewall_driver
        drvr.cleanup(
            'ctx', instance, network_info,
            block_device_info=bdm_info,
            destroy_disks=False, destroy_vifs=False)

        get_ports.assert_called_once_with(guest)
        undefine.assert_called_once_with(instance)
        firewall_driver.unfilter_instance.assert_called_once_with(
            instance, network_info=network_info)
        block_device_info_get_mapping.assert_called_once_with(bdm_info)
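
    # cleanup() must also cope with the domain already being gone when
    # the serial console ports are released.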
    @mock.patch.object(driver, 'block_device_info_get_mapping')
    @mock.patch.object(host.Host, "get_guest")
    @mock.patch.object(libvirt_driver.LibvirtDriver, '_undefine_domain')
    def test_cleanup_serial_console_domain_gone(
            self, undefine, get_guest, block_device_info_get_mapping):
        self.flags(enabled="True", group='serial_console')
        instance = {'name': 'i1'}
        network_info = {}
        bdm_info = {}
        firewall_driver = mock.MagicMock()

        block_device_info_get_mapping.return_value = ()

        # Ensure get_guest raises same exception that would have occurred
        # if domain was gone.
        get_guest.side_effect = exception.InstanceNotFound("domain undefined")

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
        drvr.firewall_driver = firewall_driver
        drvr.cleanup(
            'ctx', instance, network_info,
            block_device_info=bdm_info,
            destroy_disks=False, destroy_vifs=False)

        get_guest.assert_called_once_with(instance)
        undefine.assert_called_once_with(instance)
        firewall_driver.unfilter_instance.assert_called_once_with(
            instance, network_info=network_info)
        block_device_info_get_mapping.assert_called_once_with(bdm_info)

    def test_swap_volume(self):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())

        mock_dom = mock.MagicMock()
        guest = libvirt_guest.Guest(mock_dom)

        with mock.patch.object(drvr._conn, 'defineXML',
                               create=True) as mock_define:
            xmldoc = "<domain/>"
            srcfile = "/first/path"
            dstfile = "/second/path"

            mock_dom.XMLDesc.return_value = xmldoc
            mock_dom.isPersistent.return_value = True
            mock_dom.blockJobInfo.return_value = {}

            drvr._swap_volume(guest, srcfile, dstfile, 1)

            mock_dom.XMLDesc.assert_called_once_with(
                flags=(fakelibvirt.VIR_DOMAIN_XML_INACTIVE |
                       fakelibvirt.VIR_DOMAIN_XML_SECURE))
            mock_dom.blockRebase.assert_called_once_with(
                srcfile, dstfile, 0, flags=(
                    fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_COPY |
                    fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT))
            mock_dom.blockResize.assert_called_once_with(
                srcfile, 1 * units.Gi / units.Ki)
            mock_define.assert_called_once_with(xmldoc)

    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._disconnect_volume')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._swap_volume')
    @mock.patch('nova.virt.block_device.DriverVolumeBlockDevice.save')
    @mock.patch('nova.objects.block_device.BlockDeviceMapping.'
                'get_by_volume_id')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_volume_config')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._connect_volume')
    @mock.patch('nova.virt.libvirt.host.Host.get_guest')
    def test_swap_volume_driver_bdm_save(self, get_guest,
                                         connect_volume, get_volume_config,
                                         get_by_volume_id, volume_save,
                                         swap_volume, disconnect_volume):
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
        instance = objects.Instance(**self.test_instance)
        old_connection_info = {'driver_volume_type': 'fake',
                               'serial': 'old-volume-id',
                               'data': {'device_path': '/fake-old-volume',
                                        'access_mode': 'rw'}}
        new_connection_info = {'driver_volume_type': 'fake',
                               'serial': 'new-volume-id',
                               'data': {'device_path': '/fake-new-volume',
                                        'access_mode': 'rw'}}
        mock_dom = mock.MagicMock()
        guest = libvirt_guest.Guest(mock_dom)
        mock_dom.XMLDesc.return_value = """<domain>
          <devices>
            <disk type='file'>
                <source file='/fake-old-volume'/>
                <target dev='vdb' bus='virtio'/>
            </disk>
          </devices>
        </domain>
        """
        mock_dom.name.return_value = 'inst'
        mock_dom.UUIDString.return_value = 'uuid'
        get_guest.return_value = guest
        disk_info = {'bus': 'virtio', 'type': 'disk', 'dev': 'vdb'}
        get_volume_config.return_value = mock.MagicMock(
            source_path='/fake-new-volume')

        bdm = objects.BlockDeviceMapping(self.context,
            **fake_block_device.FakeDbBlockDeviceDict(
                {'id': 2, 'instance_uuid': 'fake-instance',
                 'device_name': '/dev/vdb',
                 'source_type': 'volume',
                 'destination_type': 'volume',
                 'volume_id': 'fake-volume-id-2',
                 'boot_index': 0}))
        get_by_volume_id.return_value = bdm

        conn.swap_volume(old_connection_info, new_connection_info, instance,
                         '/dev/vdb', 1)

        get_guest.assert_called_once_with(instance)
        connect_volume.assert_called_once_with(new_connection_info, disk_info)

        swap_volume.assert_called_once_with(guest, 'vdb',
                                            '/fake-new-volume', 1)
        disconnect_volume.assert_called_once_with(old_connection_info, 'vdb')
        volume_save.assert_called_once_with()

    def test_live_snapshot(self):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
        mock_dom = mock.MagicMock()

        with contextlib.nested(
                mock.patch.object(drvr._conn, 'defineXML', create=True),
                mock.patch.object(fake_libvirt_utils, 'get_disk_size'),
                mock.patch.object(fake_libvirt_utils,
                                  'get_disk_backing_file'),
                mock.patch.object(fake_libvirt_utils, 'create_cow_image'),
                mock.patch.object(fake_libvirt_utils, 'chown'),
                mock.patch.object(fake_libvirt_utils, 'extract_snapshot'),
        ) as (mock_define, mock_size, mock_backing, mock_create_cow,
              mock_chown, mock_snapshot):

            xmldoc = "<domain/>"
            srcfile = "/first/path"
            dstfile = "/second/path"
            bckfile = "/other/path"
            dltfile = dstfile + ".delta"

            mock_dom.XMLDesc.return_value = xmldoc
            mock_dom.isPersistent.return_value = True
            mock_size.return_value = 1004009
            mock_backing.return_value = bckfile
            guest = libvirt_guest.Guest(mock_dom)

            image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
            drvr._live_snapshot(self.context, self.test_instance, guest,
                                srcfile, dstfile, "qcow2", image_meta)

            mock_dom.XMLDesc.assert_called_once_with(flags=(
                fakelibvirt.VIR_DOMAIN_XML_INACTIVE |
                fakelibvirt.VIR_DOMAIN_XML_SECURE))
            mock_dom.blockRebase.assert_called_once_with(
                srcfile, dltfile, 0, flags=(
                    fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_COPY |
                    fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT |
                    fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_SHALLOW))

            mock_size.assert_called_once_with(srcfile)
            mock_backing.assert_called_once_with(srcfile, basename=False)
            mock_create_cow.assert_called_once_with(bckfile, dltfile, 1004009)
            mock_chown.assert_called_once_with(dltfile, os.getuid())
            mock_snapshot.assert_called_once_with(dltfile, "qcow2",
                                                  dstfile, "qcow2")
            mock_define.assert_called_once_with(xmldoc)

    @mock.patch.object(libvirt_driver.LibvirtDriver, "_live_migration")
    def test_live_migration_hostname_valid(self, mock_lm):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        drvr.live_migration(self.context, self.test_instance,
                            "host1.example.com",
                            lambda x: x,
                            lambda x: x)
        self.assertEqual(1, mock_lm.call_count)

    @mock.patch.object(libvirt_driver.LibvirtDriver, "_live_migration")
    @mock.patch.object(fake_libvirt_utils, "is_valid_hostname")
    def test_live_migration_hostname_invalid(self, mock_hostname, mock_lm):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        mock_hostname.return_value = False
        self.assertRaises(exception.InvalidHostname,
                          drvr.live_migration,
                          self.context, self.test_instance,
                          "foo/?com=/bin/sh",
                          lambda x: x,
                          lambda x: x)

    @mock.patch('os.path.exists', return_value=True)
    @mock.patch('tempfile.mkstemp')
    @mock.patch('os.close', return_value=None)
    def test_check_instance_shared_storage_local_raw(self,
                                                     mock_close,
                                                     mock_mkstemp,
                                                     mock_exists):
        instance_uuid = str(uuid.uuid4())
        self.flags(images_type='raw', group='libvirt')
        self.flags(instances_path='/tmp')
        mock_mkstemp.return_value = (-1,
                                     '/tmp/{0}/file'.format(instance_uuid))
        driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        temp_file = driver.check_instance_shared_storage_local(self.context,
                                                               instance)
        self.assertEqual('/tmp/{0}/file'.format(instance_uuid),
                         temp_file['filename'])
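
    # RBD-backed instances have no local instance directory to probe,
    # so the local shared-storage check returns None.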
    def test_check_instance_shared_storage_local_rbd(self):
        self.flags(images_type='rbd', group='libvirt')
        driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        self.assertIsNone(driver.
            check_instance_shared_storage_local(self.context, instance))

    def test_version_to_string(self):
        driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        string_ver = driver._version_to_string((4, 33, 173))
        self.assertEqual("4.33.173", string_ver)

    def test_parallels_min_version_fail(self):
        self.flags(virt_type='parallels', group='libvirt')
        driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        with mock.patch.object(driver._conn, 'getLibVersion',
                               return_value=1002011):
            self.assertRaises(exception.NovaException,
                              driver.init_host, 'wibble')

    def test_parallels_min_version_ok(self):
        self.flags(virt_type='parallels', group='libvirt')
        driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        with mock.patch.object(driver._conn, 'getLibVersion',
                               return_value=1002012):
            driver.init_host('wibble')

    def test_get_guest_config_parallels_vm(self):
        self.flags(virt_type='parallels', group='libvirt')
        self.flags(images_type='ploop', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)

        cfg = drvr._get_guest_config(instance_ref,
                                     _fake_network_info(self.stubs, 1),
                                     image_meta, disk_info)
        self.assertEqual("parallels", cfg.virt_type)
        self.assertEqual(instance_ref["uuid"], cfg.uuid)
        self.assertEqual(2 * units.Mi, cfg.memory)
        self.assertEqual(1, cfg.vcpus)
        self.assertEqual(vm_mode.HVM, cfg.os_type)
        self.assertIsNone(cfg.os_root)
        self.assertEqual(6, len(cfg.devices))
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertEqual(cfg.devices[0].driver_format, "ploop")
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestInterface)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestInput)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestVideo)

    def test_get_guest_config_parallels_ct(self):
        self.flags(virt_type='parallels', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        ct_instance = self.test_instance.copy()
        ct_instance["vm_mode"] = vm_mode.EXE
        instance_ref = objects.Instance(**ct_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        cfg = drvr._get_guest_config(instance_ref,
                                     _fake_network_info(self.stubs, 1),
                                     image_meta, {'mapping': {'disk': {}}})
        self.assertEqual("parallels", cfg.virt_type)
        self.assertEqual(instance_ref["uuid"], cfg.uuid)
        self.assertEqual(2 * units.Mi, cfg.memory)
        self.assertEqual(1, cfg.vcpus)
        self.assertEqual(vm_mode.EXE, cfg.os_type)
        self.assertEqual("/sbin/init", cfg.os_init_path)
        self.assertIsNone(cfg.os_root)
        self.assertEqual(4, len(cfg.devices))
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestFilesys)
        fs = cfg.devices[0]
        self.assertEqual(fs.source_type, "file")
        self.assertEqual(fs.driver_type, "ploop")
        self.assertEqual(fs.target_dir, "/")
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestInterface)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestVideo)
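
    # Helper for the parallels volume tests below: boot from a volume
    # and check it is exposed as a disk rather than a filesystem.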
ct_instance["vm_mode"] = vmmode instance_ref = objects.Instance(**ct_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) conn_info = {'driver_volume_type': 'fake'} info = {'block_device_mapping': driver_block_device.convert_volumes([ fake_block_device.FakeDbBlockDeviceDict( {'id': 0, 'source_type': 'volume', 'destination_type': 'volume', 'device_name': '/dev/sda'}), ])} info['block_device_mapping'][0]['connection_info'] = conn_info disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta, info) with mock.patch.object( driver_block_device.DriverVolumeBlockDevice, 'save' ) as mock_save: cfg = drvr._get_guest_config(instance_ref, _fake_network_info(self.stubs, 1), image_meta, disk_info, None, info) mock_save.assert_called_once_with() self.assertEqual("parallels", cfg.virt_type) self.assertEqual(instance_ref["uuid"], cfg.uuid) self.assertEqual(2 * units.Mi, cfg.memory) self.assertEqual(1, cfg.vcpus) self.assertEqual(vmmode, cfg.os_type) self.assertIsNone(cfg.os_root) self.assertEqual(devices, len(cfg.devices)) disk_found = False for dev in cfg.devices: result = isinstance(dev, vconfig.LibvirtConfigGuestFilesys) self.assertFalse(result) if (isinstance(dev, vconfig.LibvirtConfigGuestDisk) and (dev.source_path is None or 'disk.local' not in dev.source_path)): self.assertEqual("disk", dev.source_device) self.assertEqual("sda", dev.target_dev) disk_found = True self.assertTrue(disk_found) def test_get_guest_config_parallels_volume(self): self._test_get_guest_config_parallels_volume(vm_mode.EXE, 4) self._test_get_guest_config_parallels_volume(vm_mode.HVM, 6) class HostStateTestCase(test.NoDBTestCase): cpu_info = {"vendor": "Intel", "model": "pentium", "arch": "i686", "features": ["ssse3", "monitor", "pni", "sse2", "sse", "fxsr", "clflush", "pse36", "pat", "cmov", "mca", "pge", "mtrr", "sep", "apic"], "topology": {"cores": "1", "threads": "1", "sockets": "1"}} instance_caps = [(arch.X86_64, "kvm", "hvm"), (arch.I686, "kvm", "hvm")] pci_devices = [{ "dev_id": "pci_0000_04_00_3", "address": "0000:04:10.3", "product_id": '1521', "vendor_id": '8086', "dev_type": fields.PciDeviceType.SRIOV_PF, "phys_function": None}] numa_topology = objects.NUMATopology( cells=[objects.NUMACell( id=1, cpuset=set([1, 2]), memory=1024, cpu_usage=0, memory_usage=0, mempages=[], siblings=[], pinned_cpus=set([])), objects.NUMACell( id=2, cpuset=set([3, 4]), memory=1024, cpu_usage=0, memory_usage=0, mempages=[], siblings=[], pinned_cpus=set([]))]) class FakeConnection(libvirt_driver.LibvirtDriver): """Fake connection object.""" def __init__(self): super(HostStateTestCase.FakeConnection, self).__init__(fake.FakeVirtAPI(), True) self._host = host.Host("qemu:///system") def _get_memory_mb_total(): return 497 def _get_memory_mb_used(): return 88 self._host.get_memory_mb_total = _get_memory_mb_total self._host.get_memory_mb_used = _get_memory_mb_used def _get_vcpu_total(self): return 1 def _get_vcpu_used(self): return 0 def _get_cpu_info(self): return HostStateTestCase.cpu_info def _get_disk_over_committed_size_total(self): return 0 def _get_local_gb_info(self): return {'total': 100, 'used': 20, 'free': 80} def get_host_uptime(self): return ('10:01:16 up 1:36, 6 users, ' 'load average: 0.21, 0.16, 0.19') def _get_disk_available_least(self): return 13091 def _get_instance_capabilities(self): return HostStateTestCase.instance_caps def _get_pci_passthrough_devices(self): return jsonutils.dumps(HostStateTestCase.pci_devices) def _get_host_numa_topology(self): return 
    @mock.patch.object(fakelibvirt, "openAuth")
    def test_update_status(self, mock_open):
        mock_open.return_value = fakelibvirt.Connection("qemu:///system")

        drvr = HostStateTestCase.FakeConnection()

        stats = drvr.get_available_resource("compute1")
        self.assertEqual(stats["vcpus"], 1)
        self.assertEqual(stats["memory_mb"], 497)
        self.assertEqual(stats["local_gb"], 100)
        self.assertEqual(stats["vcpus_used"], 0)
        self.assertEqual(stats["memory_mb_used"], 88)
        self.assertEqual(stats["local_gb_used"], 20)
        self.assertEqual(stats["hypervisor_type"], 'QEMU')
        self.assertEqual(stats["hypervisor_version"], 1001000)
        self.assertEqual(stats["hypervisor_hostname"], 'compute1')
        cpu_info = jsonutils.loads(stats["cpu_info"])
        self.assertEqual(cpu_info,
                {"vendor": "Intel",
                 "model": "pentium",
                 "arch": arch.I686,
                 "features": ["ssse3", "monitor", "pni", "sse2", "sse",
                              "fxsr", "clflush", "pse36", "pat", "cmov",
                              "mca", "pge", "mtrr", "sep", "apic"],
                 "topology": {"cores": "1", "threads": "1",
                              "sockets": "1"}
                 })
        self.assertEqual(stats["disk_available_least"], 80)
        self.assertEqual(jsonutils.loads(stats["pci_passthrough_devices"]),
                         HostStateTestCase.pci_devices)
        self.assertThat(objects.NUMATopology.obj_from_db_obj(
                            stats['numa_topology'])._to_dict(),
                        matchers.DictMatches(
                            HostStateTestCase.numa_topology._to_dict()))


class LibvirtDriverTestCase(test.NoDBTestCase):
    """Test for nova.virt.libvirt.libvirt_driver.LibvirtDriver."""
    def setUp(self):
        super(LibvirtDriverTestCase, self).setUp()
        self.drvr = libvirt_driver.LibvirtDriver(
            fake.FakeVirtAPI(), read_only=True)
        self.context = context.get_admin_context()
        self.test_image_meta = {
            "disk_format": "raw",
        }

    def _create_instance(self, params=None):
        """Create a test instance."""
        if not params:
            params = {}

        flavor = objects.Flavor(memory_mb=512,
                                swap=0,
                                vcpu_weight=None,
                                root_gb=10,
                                id=2,
                                name=u'm1.tiny',
                                ephemeral_gb=20,
                                rxtx_factor=1.0,
                                flavorid=u'1',
                                vcpus=1)

        inst = {}
        inst['id'] = 1
        inst['uuid'] = '52d3b512-1152-431f-a8f7-28f0288a622b'
        inst['os_type'] = 'linux'
        inst['image_ref'] = '1'
        inst['reservation_id'] = 'r-fakeres'
        inst['user_id'] = 'fake'
        inst['project_id'] = 'fake'
        inst['instance_type_id'] = 2
        inst['ami_launch_index'] = 0
        inst['host'] = 'host1'
        inst['root_gb'] = flavor.root_gb
        inst['ephemeral_gb'] = flavor.ephemeral_gb
        inst['config_drive'] = True
        inst['kernel_id'] = 2
        inst['ramdisk_id'] = 3
        inst['key_data'] = 'ABCDEFG'
        inst['system_metadata'] = {}
        inst['metadata'] = {}

        inst.update(params)

        return objects.Instance(flavor=flavor,
                                old_flavor=None, new_flavor=None,
                                **inst)

    @staticmethod
    def _disk_info():
        # 10G root and 512M swap disk
        disk_info = [{'disk_size': 1, 'type': 'qcow2',
                      'virt_disk_size': 10737418240, 'path': '/test/disk',
                      'backing_file': '/base/disk'},
                     {'disk_size': 1, 'type': 'qcow2',
                      'virt_disk_size': 536870912, 'path': '/test/disk.swap',
                      'backing_file': '/base/swap_512'}]
        return jsonutils.dumps(disk_info)
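
    # A failure partway through the disk copy should abort the
    # migration and surface the original error to the caller.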
""" self.counter = 0 self.checked_shared_storage = False def fake_get_instance_disk_info(instance, block_device_info=None): return '[]' def fake_destroy(instance): pass def fake_get_host_ip_addr(): return '10.0.0.1' def fake_execute(*args, **kwargs): self.counter += 1 if self.counter == 1: assert False, "intentional failure" def fake_os_path_exists(path): return True def fake_is_storage_shared(dest, inst_base): self.checked_shared_storage = True return False self.stubs.Set(self.drvr, 'get_instance_disk_info', fake_get_instance_disk_info) self.stubs.Set(self.drvr, '_destroy', fake_destroy) self.stubs.Set(self.drvr, 'get_host_ip_addr', fake_get_host_ip_addr) self.stubs.Set(self.drvr, '_is_storage_shared_with', fake_is_storage_shared) self.stubs.Set(utils, 'execute', fake_execute) self.stubs.Set(os.path, 'exists', fake_os_path_exists) ins_ref = self._create_instance() flavor = {'root_gb': 10, 'ephemeral_gb': 20} flavor_obj = objects.Flavor(**flavor) self.assertRaises(AssertionError, self.drvr.migrate_disk_and_power_off, context.get_admin_context(), ins_ref, '10.0.0.2', flavor_obj, None) def _test_migrate_disk_and_power_off(self, flavor_obj, block_device_info=None, params_for_instance=None): """Test for nova.virt.libvirt.libvirt_driver.LivirtConnection .migrate_disk_and_power_off. """ disk_info = self._disk_info() def fake_get_instance_disk_info(instance, block_device_info=None): return disk_info def fake_destroy(instance): pass def fake_get_host_ip_addr(): return '10.0.0.1' def fake_execute(*args, **kwargs): pass def fake_copy_image(src, dest, host=None, receive=False, on_execute=None, on_completion=None, compression=True): self.assertIsNotNone(on_execute) self.assertIsNotNone(on_completion) self.stubs.Set(self.drvr, 'get_instance_disk_info', fake_get_instance_disk_info) self.stubs.Set(self.drvr, '_destroy', fake_destroy) self.stubs.Set(self.drvr, 'get_host_ip_addr', fake_get_host_ip_addr) self.stubs.Set(utils, 'execute', fake_execute) self.stubs.Set(libvirt_utils, 'copy_image', fake_copy_image) ins_ref = self._create_instance(params=params_for_instance) # dest is different host case out = self.drvr.migrate_disk_and_power_off( context.get_admin_context(), ins_ref, '10.0.0.2', flavor_obj, None, block_device_info=block_device_info) self.assertEqual(out, disk_info) # dest is same host case out = self.drvr.migrate_disk_and_power_off( context.get_admin_context(), ins_ref, '10.0.0.1', flavor_obj, None, block_device_info=block_device_info) self.assertEqual(out, disk_info) def test_migrate_disk_and_power_off(self): flavor = {'root_gb': 10, 'ephemeral_gb': 20} flavor_obj = objects.Flavor(**flavor) self._test_migrate_disk_and_power_off(flavor_obj) @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._disconnect_volume') def test_migrate_disk_and_power_off_boot_from_volume(self, disconnect_volume): info = {'block_device_mapping': [{'boot_index': None, 'mount_device': '/dev/vdd', 'connection_info': None}, {'boot_index': 0, 'mount_device': '/dev/vda', 'connection_info': None}]} flavor = {'root_gb': 1, 'ephemeral_gb': 0} flavor_obj = objects.Flavor(**flavor) # Note(Mike_D): The size of instance's ephemeral_gb is 0 gb. 
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._disconnect_volume')
    def test_migrate_disk_and_power_off_boot_from_volume(self,
                                                         disconnect_volume):
        info = {'block_device_mapping': [{'boot_index': None,
                                          'mount_device': '/dev/vdd',
                                          'connection_info': None},
                                         {'boot_index': 0,
                                          'mount_device': '/dev/vda',
                                          'connection_info': None}]}
        flavor = {'root_gb': 1, 'ephemeral_gb': 0}
        flavor_obj = objects.Flavor(**flavor)
        # Note(Mike_D): The size of instance's ephemeral_gb is 0 gb.
        self._test_migrate_disk_and_power_off(
            flavor_obj, block_device_info=info,
            params_for_instance={'image_ref': None, 'ephemeral_gb': 0})
        disconnect_volume.assert_called_with(
            info['block_device_mapping'][1]['connection_info'], 'vda')

    @mock.patch('nova.utils.execute')
    @mock.patch('nova.virt.libvirt.utils.copy_image')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._destroy')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.get_host_ip_addr')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver'
                '.get_instance_disk_info')
    def test_migrate_disk_and_power_off_swap(self, mock_get_disk_info,
                                             get_host_ip_addr,
                                             mock_destroy,
                                             mock_copy_image,
                                             mock_execute):
        """Test for nova.virt.libvirt.libvirt_driver.LibvirtConnection
        .migrate_disk_and_power_off.
        """
        self.copy_or_move_swap_called = False

        disk_info = self._disk_info()
        mock_get_disk_info.return_value = disk_info
        get_host_ip_addr.return_value = '10.0.0.1'

        def fake_copy_image(*args, **kwargs):
            # disk.swap should not be touched since it is skipped over
            if '/test/disk.swap' in list(args):
                self.copy_or_move_swap_called = True

        def fake_execute(*args, **kwargs):
            # disk.swap should not be touched since it is skipped over
            if set(['mv', '/test/disk.swap']).issubset(list(args)):
                self.copy_or_move_swap_called = True

        mock_copy_image.side_effect = fake_copy_image
        mock_execute.side_effect = fake_execute

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        # Original instance config
        instance = self._create_instance({'root_gb': 10,
                                          'ephemeral_gb': 0})

        # Re-size fake instance to 20G root and 1024M swap disk
        flavor = {'root_gb': 20, 'ephemeral_gb': 0, 'swap': 1024}
        flavor_obj = objects.Flavor(**flavor)

        # Destination is same host
        out = drvr.migrate_disk_and_power_off(context.get_admin_context(),
                                              instance, '10.0.0.1',
                                              flavor_obj, None)

        mock_get_disk_info.assert_called_once_with(instance,
                                                   block_device_info=None)
        self.assertTrue(get_host_ip_addr.called)
        mock_destroy.assert_called_once_with(instance)
        self.assertFalse(self.copy_or_move_swap_called)
        self.assertEqual(disk_info, out)
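
    # Helper for the resize-rejection tests below: expects
    # migrate_disk_and_power_off to raise expected_exc.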
""" def fake_get_instance_disk_info(instance, xml=None, block_device_info=None): return self._disk_info() def fake_destroy(instance): pass def fake_get_host_ip_addr(): return '10.0.0.1' self.stubs.Set(self.drvr, 'get_instance_disk_info', fake_get_instance_disk_info) self.stubs.Set(self.drvr, '_destroy', fake_destroy) self.stubs.Set(self.drvr, 'get_host_ip_addr', fake_get_host_ip_addr) ins_ref = self._create_instance() flavor = {'root_gb': 10, 'ephemeral_gb': 20} flavor_obj = objects.Flavor(**flavor) # Migration is not implemented for LVM backed instances self.assertRaises(expected_exc, self.drvr.migrate_disk_and_power_off, None, ins_ref, '10.0.0.1', flavor_obj, None) @mock.patch('nova.utils.execute') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._destroy') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver' '.get_instance_disk_info') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver' '._is_storage_shared_with') def _test_migrate_disk_and_power_off_backing_file(self, shared_storage, mock_is_shared_storage, mock_get_disk_info, mock_destroy, mock_execute): self.convert_file_called = False flavor = {'root_gb': 20, 'ephemeral_gb': 30, 'swap': 0} flavor_obj = objects.Flavor(**flavor) disk_info = [{'type': 'qcow2', 'path': '/test/disk', 'virt_disk_size': '10737418240', 'backing_file': '/base/disk', 'disk_size': '83886080'}] disk_info_text = jsonutils.dumps(disk_info) mock_get_disk_info.return_value = disk_info_text mock_is_shared_storage.return_value = shared_storage def fake_execute(*args, **kwargs): self.assertNotEqual(args[0:2], ['qemu-img', 'convert']) mock_execute.side_effect = fake_execute instance = self._create_instance() out = self.drvr.migrate_disk_and_power_off( context.get_admin_context(), instance, '10.0.0.2', flavor_obj, None) self.assertTrue(mock_is_shared_storage.called) mock_destroy.assert_called_once_with(instance) self.assertEqual(out, disk_info_text) def test_migrate_disk_and_power_off_shared_storage(self): self._test_migrate_disk_and_power_off_backing_file(True) def test_migrate_disk_and_power_off_non_shared_storage(self): self._test_migrate_disk_and_power_off_backing_file(False) def test_migrate_disk_and_power_off_lvm(self): self.flags(images_type='lvm', group='libvirt') def fake_execute(*args, **kwargs): pass self.stubs.Set(utils, 'execute', fake_execute) expected_exc = exception.InstanceFaultRollback self._test_migrate_disk_and_power_off_resize_check(expected_exc) def test_migrate_disk_and_power_off_resize_cannot_ssh(self): def fake_execute(*args, **kwargs): raise processutils.ProcessExecutionError() def fake_is_storage_shared(dest, inst_base): self.checked_shared_storage = True return False self.stubs.Set(self.drvr, '_is_storage_shared_with', fake_is_storage_shared) self.stubs.Set(utils, 'execute', fake_execute) expected_exc = exception.InstanceFaultRollback self._test_migrate_disk_and_power_off_resize_check(expected_exc) @mock.patch('nova.virt.libvirt.driver.LibvirtDriver' '.get_instance_disk_info') def test_migrate_disk_and_power_off_resize_error(self, mock_get_disk_info): instance = self._create_instance() flavor = {'root_gb': 5, 'ephemeral_gb': 10} flavor_obj = objects.Flavor(**flavor) mock_get_disk_info.return_value = self._disk_info() self.assertRaises( exception.InstanceFaultRollback, self.drvr.migrate_disk_and_power_off, 'ctx', instance, '10.0.0.1', flavor_obj, None) @mock.patch('nova.virt.libvirt.driver.LibvirtDriver' '.get_instance_disk_info') def test_migrate_disk_and_power_off_resize_error_default_ephemeral( self, mock_get_disk_info): # Note(Mike_D): 
        instance = self._create_instance()
        flavor = {'root_gb': 10, 'ephemeral_gb': 0}
        flavor_obj = objects.Flavor(**flavor)
        mock_get_disk_info.return_value = self._disk_info()

        self.assertRaises(exception.InstanceFaultRollback,
                          self.drvr.migrate_disk_and_power_off,
                          'ctx', instance, '10.0.0.1', flavor_obj, None)

    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver'
                '.get_instance_disk_info')
    @mock.patch('nova.virt.driver.block_device_info_get_ephemerals')
    def test_migrate_disk_and_power_off_resize_error_eph(self, mock_get,
                                                         mock_get_disk_info):
        mappings = [
            {
                'device_name': '/dev/sdb4',
                'source_type': 'blank',
                'destination_type': 'local',
                'device_type': 'disk',
                'guest_format': 'swap',
                'boot_index': -1,
                'volume_size': 1
            },
            {
                'device_name': '/dev/sda1',
                'source_type': 'volume',
                'destination_type': 'volume',
                'device_type': 'disk',
                'volume_id': 1,
                'guest_format': None,
                'boot_index': 1,
                'volume_size': 6
            },
            {
                'device_name': '/dev/sda2',
                'source_type': 'snapshot',
                'destination_type': 'volume',
                'snapshot_id': 1,
                'device_type': 'disk',
                'guest_format': None,
                'boot_index': 0,
                'volume_size': 4
            },
            {
                'device_name': '/dev/sda3',
                'source_type': 'blank',
                'destination_type': 'local',
                'device_type': 'disk',
                'guest_format': None,
                'boot_index': -1,
                'volume_size': 3
            }
        ]
        mock_get.return_value = mappings
        instance = self._create_instance()

        # Old flavor, eph is 20, real disk is 3, target is 2, fail
        flavor = {'root_gb': 10, 'ephemeral_gb': 2}
        flavor_obj = objects.Flavor(**flavor)
        mock_get_disk_info.return_value = self._disk_info()

        self.assertRaises(
            exception.InstanceFaultRollback,
            self.drvr.migrate_disk_and_power_off,
            'ctx', instance, '10.0.0.1', flavor_obj, None)

        # Old flavor, eph is 20, real disk is 3, target is 4
        flavor = {'root_gb': 10, 'ephemeral_gb': 4}
        flavor_obj = objects.Flavor(**flavor)
        self._test_migrate_disk_and_power_off(flavor_obj)

    def test_wait_for_running(self):
        def fake_get_info(instance):
            if instance['name'] == "not_found":
                raise exception.InstanceNotFound(instance_id=instance['uuid'])
            elif instance['name'] == "running":
                return hardware.InstanceInfo(state=power_state.RUNNING)
            else:
                return hardware.InstanceInfo(state=power_state.SHUTDOWN)

        self.stubs.Set(self.drvr, 'get_info',
                       fake_get_info)

        # instance not found case
        self.assertRaises(exception.InstanceNotFound,
                          self.drvr._wait_for_running,
                          {'name': 'not_found',
                           'uuid': 'not_found_uuid'})

        # instance is running case
        self.assertRaises(loopingcall.LoopingCallDone,
                          self.drvr._wait_for_running,
                          {'name': 'running',
                           'uuid': 'running_uuid'})

        # else case
        self.drvr._wait_for_running({'name': 'else',
                                     'uuid': 'other_uuid'})

    def test_disk_size_from_instance_disk_info(self):
        instance_data = {'root_gb': 10, 'ephemeral_gb': 20, 'swap_gb': 30}
        inst = objects.Instance(**instance_data)
        info = {'path': '/path/disk'}
        self.assertEqual(10 * units.Gi,
                         self.drvr._disk_size_from_instance(inst, info))

        info = {'path': '/path/disk.local'}
        self.assertEqual(20 * units.Gi,
                         self.drvr._disk_size_from_instance(inst, info))

        info = {'path': '/path/disk.swap'}
        self.assertEqual(0,
                         self.drvr._disk_size_from_instance(inst, info))

    @mock.patch('nova.utils.execute')
    def test_disk_raw_to_qcow2(self, mock_execute):
        path = '/test/disk'
        _path_qcow = path + '_qcow'

        self.drvr._disk_raw_to_qcow2(path)
        mock_execute.assert_has_calls([
            mock.call('qemu-img', 'convert', '-f', 'raw',
                      '-O', 'qcow2', path, _path_qcow),
            mock.call('mv', _path_qcow, path)])
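
    # The qcow2 -> raw path mirrors the conversion above: convert into
    # a temporary '_raw' file, then move it over the original path.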
    @mock.patch('nova.utils.execute')
    def test_disk_qcow2_to_raw(self, mock_execute):
        path = '/test/disk'
        _path_raw = path + '_raw'

        self.drvr._disk_qcow2_to_raw(path)
        mock_execute.assert_has_calls([
            mock.call('qemu-img', 'convert', '-f', 'qcow2',
                      '-O', 'raw', path, _path_raw),
            mock.call('mv', _path_raw, path)])

    @mock.patch('nova.virt.disk.api.extend')
    def test_disk_resize_raw(self, mock_extend):
        image = imgmodel.LocalFileImage("/test/disk",
                                        imgmodel.FORMAT_RAW)

        self.drvr._disk_resize(image, 50)
        mock_extend.assert_called_once_with(image, 50)

    @mock.patch('nova.virt.disk.api.can_resize_image')
    @mock.patch('nova.virt.disk.api.is_image_extendable')
    @mock.patch('nova.virt.disk.api.extend')
    def test_disk_resize_qcow2(
            self, mock_extend, mock_can_resize, mock_is_image_extendable):

        with contextlib.nested(
                mock.patch.object(self.drvr, '_disk_qcow2_to_raw'),
                mock.patch.object(self.drvr, '_disk_raw_to_qcow2')) as (
                mock_disk_qcow2_to_raw, mock_disk_raw_to_qcow2):

            mock_can_resize.return_value = True
            mock_is_image_extendable.return_value = True

            imageqcow2 = imgmodel.LocalFileImage("/test/disk",
                                                 imgmodel.FORMAT_QCOW2)
            imageraw = imgmodel.LocalFileImage("/test/disk",
                                               imgmodel.FORMAT_RAW)

            self.drvr._disk_resize(imageqcow2, 50)

            mock_disk_qcow2_to_raw.assert_called_once_with(imageqcow2.path)
            mock_extend.assert_called_once_with(imageraw, 50)
            mock_disk_raw_to_qcow2.assert_called_once_with(imageqcow2.path)

    def _test_finish_migration(self, power_on, resize_instance=False):
        """Test for nova.virt.libvirt.libvirt_driver.LibvirtConnection
        .finish_migration.
        """
        powered_on = power_on

        self.fake_create_domain_called = False
        self.fake_disk_resize_called = False

        def fake_to_xml(context, instance, network_info, disk_info,
                        image_meta=None, rescue=None,
                        block_device_info=None, write_to_disk=False):
            return ""

        def fake_plug_vifs(instance, network_info):
            pass

        def fake_create_image(context, inst,
                              disk_mapping, suffix='',
                              disk_images=None, network_info=None,
                              block_device_info=None, inject_files=True,
                              fallback_from_host=None):
            self.assertFalse(inject_files)

        def fake_create_domain_and_network(
                context, xml, instance, network_info, disk_info,
                block_device_info=None, power_on=True, reboot=False,
                vifs_already_plugged=False):
            self.fake_create_domain_called = True
            self.assertEqual(powered_on, power_on)
            self.assertTrue(vifs_already_plugged)

        def fake_enable_hairpin():
            pass

        def fake_execute(*args, **kwargs):
            pass

        def fake_get_info(instance):
            if powered_on:
                return hardware.InstanceInfo(state=power_state.RUNNING)
            else:
                return hardware.InstanceInfo(state=power_state.SHUTDOWN)

        def fake_disk_resize(image, size):
            self.fake_disk_resize_called = True

        self.flags(use_cow_images=True)
        self.stubs.Set(self.drvr, '_disk_resize',
                       fake_disk_resize)
        self.stubs.Set(self.drvr, '_get_guest_xml', fake_to_xml)
        self.stubs.Set(self.drvr, 'plug_vifs', fake_plug_vifs)
        self.stubs.Set(self.drvr, '_create_image',
                       fake_create_image)
        self.stubs.Set(self.drvr, '_create_domain_and_network',
                       fake_create_domain_and_network)
        self.stubs.Set(nova.virt.libvirt.guest.Guest, 'enable_hairpin',
                       fake_enable_hairpin)
        self.stubs.Set(utils, 'execute', fake_execute)
        fw = base_firewall.NoopFirewallDriver()
        self.stubs.Set(self.drvr, 'firewall_driver', fw)
        self.stubs.Set(self.drvr, 'get_info',
                       fake_get_info)

        ins_ref = self._create_instance()

        migration = objects.Migration()
        migration.source_compute = 'fake-source-compute'
        migration.dest_compute = 'fake-dest-compute'
        migration.source_node = 'fake-source-node'
        migration.dest_node = 'fake-dest-node'
        self.drvr.finish_migration(
            context.get_admin_context(), migration, ins_ref,
            self._disk_info(), [], self.test_image_meta,
            resize_instance, None, power_on)
        self.assertTrue(self.fake_create_domain_called)
        self.assertEqual(
            resize_instance, self.fake_disk_resize_called)
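
    # finish_migration should only resize the disk when resize_instance
    # is set; the variants below cover resize and power-on/off states.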
self.assertTrue(self.fake_create_domain_called) self.assertEqual( resize_instance, self.fake_disk_resize_called) def test_finish_migration_resize(self): self._test_finish_migration(True, resize_instance=True) def test_finish_migration_power_on(self): self._test_finish_migration(True) def test_finish_migration_power_off(self): self._test_finish_migration(False) def _test_finish_revert_migration(self, power_on): """Test for nova.virt.libvirt.libvirt_driver.LivirtConnection .finish_revert_migration. """ powered_on = power_on self.fake_create_domain_called = False def fake_execute(*args, **kwargs): pass def fake_plug_vifs(instance, network_info): pass def fake_create_domain(context, xml, instance, network_info, disk_info, block_device_info=None, power_on=None, vifs_already_plugged=None): self.fake_create_domain_called = True self.assertEqual(powered_on, power_on) self.assertTrue(vifs_already_plugged) return mock.MagicMock() def fake_enable_hairpin(): pass def fake_get_info(instance): if powered_on: return hardware.InstanceInfo(state=power_state.RUNNING) else: return hardware.InstanceInfo(state=power_state.SHUTDOWN) def fake_to_xml(context, instance, network_info, disk_info, image_meta=None, rescue=None, block_device_info=None): return "" self.stubs.Set(self.drvr, '_get_guest_xml', fake_to_xml) self.stubs.Set(self.drvr, 'plug_vifs', fake_plug_vifs) self.stubs.Set(utils, 'execute', fake_execute) fw = base_firewall.NoopFirewallDriver() self.stubs.Set(self.drvr, 'firewall_driver', fw) self.stubs.Set(self.drvr, '_create_domain_and_network', fake_create_domain) self.stubs.Set(nova.virt.libvirt.guest.Guest, 'enable_hairpin', fake_enable_hairpin) self.stubs.Set(self.drvr, 'get_info', fake_get_info) self.stubs.Set(utils, 'get_image_from_system_metadata', lambda *a: self.test_image_meta) with utils.tempdir() as tmpdir: self.flags(instances_path=tmpdir) ins_ref = self._create_instance() os.mkdir(os.path.join(tmpdir, ins_ref['name'])) libvirt_xml_path = os.path.join(tmpdir, ins_ref['name'], 'libvirt.xml') f = open(libvirt_xml_path, 'w') f.close() self.drvr.finish_revert_migration( context.get_admin_context(), ins_ref, [], None, power_on) self.assertTrue(self.fake_create_domain_called) def test_finish_revert_migration_power_on(self): self._test_finish_revert_migration(True) def test_finish_revert_migration_power_off(self): self._test_finish_revert_migration(False) def _test_finish_revert_migration_after_crash(self, backup_made=True, del_inst_failed=False): class FakeLoopingCall(object): def start(self, *a, **k): return self def wait(self): return None context = 'fake_context' instance = self._create_instance() self.mox.StubOutWithMock(libvirt_utils, 'get_instance_path') self.mox.StubOutWithMock(os.path, 'exists') self.mox.StubOutWithMock(shutil, 'rmtree') self.mox.StubOutWithMock(utils, 'execute') self.stubs.Set(blockinfo, 'get_disk_info', lambda *a: None) self.stubs.Set(self.drvr, '_get_guest_xml', lambda *a, **k: None) self.stubs.Set(self.drvr, '_create_domain_and_network', lambda *a, **kw: None) self.stubs.Set(loopingcall, 'FixedIntervalLoopingCall', lambda *a, **k: FakeLoopingCall()) libvirt_utils.get_instance_path(instance).AndReturn('/fake/foo') os.path.exists('/fake/foo_resize').AndReturn(backup_made) if backup_made: if del_inst_failed: os_error = OSError(errno.ENOENT, 'No such file or directory') shutil.rmtree('/fake/foo').AndRaise(os_error) else: shutil.rmtree('/fake/foo') utils.execute('mv', '/fake/foo_resize', '/fake/foo') self.mox.ReplayAll() self.drvr.finish_revert_migration(context, instance, 
[]) def test_finish_revert_migration_after_crash(self): self._test_finish_revert_migration_after_crash(backup_made=True) def test_finish_revert_migration_after_crash_before_new(self): self._test_finish_revert_migration_after_crash(backup_made=True) def test_finish_revert_migration_after_crash_before_backup(self): self._test_finish_revert_migration_after_crash(backup_made=False) def test_finish_revert_migration_after_crash_delete_failed(self): self._test_finish_revert_migration_after_crash(backup_made=True, del_inst_failed=True) def test_finish_revert_migration_preserves_disk_bus(self): def fake_get_guest_xml(context, instance, network_info, disk_info, image_meta, block_device_info=None): self.assertEqual('ide', disk_info['disk_bus']) image_meta = {"disk_format": "raw", "properties": {"hw_disk_bus": "ide"}} instance = self._create_instance() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) with contextlib.nested( mock.patch.object(drvr, '_create_domain_and_network'), mock.patch.object(utils, 'get_image_from_system_metadata', return_value=image_meta), mock.patch.object(drvr, '_get_guest_xml', side_effect=fake_get_guest_xml)): drvr.finish_revert_migration('', instance, None, power_on=False) def test_cleanup_failed_migration(self): self.mox.StubOutWithMock(shutil, 'rmtree') shutil.rmtree('/fake/inst') self.mox.ReplayAll() self.drvr._cleanup_failed_migration('/fake/inst') def test_confirm_migration(self): ins_ref = self._create_instance() self.mox.StubOutWithMock(self.drvr, "_cleanup_resize") self.drvr._cleanup_resize(ins_ref, _fake_network_info(self.stubs, 1)) self.mox.ReplayAll() self.drvr.confirm_migration("migration_ref", ins_ref, _fake_network_info(self.stubs, 1)) def test_cleanup_resize_same_host(self): CONF.set_override('policy_dirs', []) ins_ref = self._create_instance({'host': CONF.host}) def fake_os_path_exists(path): return True self.stubs.Set(os.path, 'exists', fake_os_path_exists) self.mox.StubOutWithMock(libvirt_utils, 'get_instance_path') self.mox.StubOutWithMock(utils, 'execute') libvirt_utils.get_instance_path(ins_ref, forceold=True).AndReturn('/fake/inst') utils.execute('rm', '-rf', '/fake/inst_resize', delay_on_retry=True, attempts=5) self.mox.ReplayAll() self.drvr._cleanup_resize(ins_ref, _fake_network_info(self.stubs, 1)) def test_cleanup_resize_not_same_host(self): CONF.set_override('policy_dirs', []) host = 'not' + CONF.host ins_ref = self._create_instance({'host': host}) def fake_os_path_exists(path): return True def fake_undefine_domain(instance): pass def fake_unplug_vifs(instance, network_info, ignore_errors=False): pass def fake_unfilter_instance(instance, network_info): pass self.stubs.Set(os.path, 'exists', fake_os_path_exists) self.stubs.Set(self.drvr, '_undefine_domain', fake_undefine_domain) self.stubs.Set(self.drvr, 'unplug_vifs', fake_unplug_vifs) self.stubs.Set(self.drvr.firewall_driver, 'unfilter_instance', fake_unfilter_instance) self.mox.StubOutWithMock(libvirt_utils, 'get_instance_path') self.mox.StubOutWithMock(utils, 'execute') libvirt_utils.get_instance_path(ins_ref, forceold=True).AndReturn('/fake/inst') utils.execute('rm', '-rf', '/fake/inst_resize', delay_on_retry=True, attempts=5) self.mox.ReplayAll() self.drvr._cleanup_resize(ins_ref, _fake_network_info(self.stubs, 1)) def test_get_instance_disk_info_exception(self): instance = self._create_instance() class FakeExceptionDomain(FakeVirtDomain): def __init__(self): super(FakeExceptionDomain, self).__init__() def XMLDesc(self, flags): raise fakelibvirt.libvirtError("Libvirt error") def 
fake_get_domain(self, instance):
            return FakeExceptionDomain()

        self.stubs.Set(host.Host, 'get_domain',
                       fake_get_domain)
        self.assertRaises(exception.InstanceNotFound,
                          self.drvr.get_instance_disk_info,
                          instance)

    @mock.patch('os.path.exists')
    @mock.patch.object(lvm, 'list_volumes')
    def test_lvm_disks(self, listlvs, exists):
        instance = objects.Instance(uuid='fake-uuid', id=1)
        self.flags(images_volume_group='vols', group='libvirt')
        exists.return_value = True
        listlvs.return_value = ['fake-uuid_foo',
                                'other-uuid_foo']
        disks = self.drvr._lvm_disks(instance)
        self.assertEqual(['/dev/vols/fake-uuid_foo'], disks)

    def test_is_booted_from_volume(self):
        func = libvirt_driver.LibvirtDriver._is_booted_from_volume
        instance, disk_mapping = {}, {}

        self.assertTrue(func(instance, disk_mapping))
        disk_mapping['disk'] = 'map'
        self.assertTrue(func(instance, disk_mapping))

        instance['image_ref'] = 'uuid'
        self.assertFalse(func(instance, disk_mapping))

    @mock.patch('nova.virt.netutils.get_injected_network_template')
    @mock.patch('nova.virt.disk.api.inject_data')
    @mock.patch.object(libvirt_driver.LibvirtDriver, "_conn")
    def _test_inject_data(self, driver_params, path, disk_params,
                          mock_conn, disk_inject_data, inj_network,
                          called=True):
        class ImageBackend(object):
            path = '/path'

            def check_image_exists(self):
                if self.path == '/fail/path':
                    return False
                return True

            def get_model(self, connection):
                return imgmodel.LocalFileImage(self.path,
                                               imgmodel.FORMAT_RAW)

        def fake_inj_network(*args, **kwds):
            return args[0] or None
        inj_network.side_effect = fake_inj_network

        image_backend = ImageBackend()
        image_backend.path = path

        with mock.patch.object(
                self.drvr.image_backend,
                'image',
                return_value=image_backend):
            self.flags(inject_partition=0, group='libvirt')

            self.drvr._inject_data(**driver_params)

            if called:
                disk_inject_data.assert_called_once_with(
                    mock.ANY,
                    *disk_params,
                    partition=None, mandatory=('files',))

            self.assertEqual(disk_inject_data.called, called)

    def _test_inject_data_default_driver_params(self, **params):
        return {
            'instance': self._create_instance(params=params),
            'network_info': None,
            'admin_pass': None,
            'files': None,
            'suffix': ''
        }

    def test_inject_data_adminpass(self):
        self.flags(inject_password=True, group='libvirt')
        driver_params = self._test_inject_data_default_driver_params()
        driver_params['admin_pass'] = 'foobar'
        disk_params = [
            None,  # key
            None,  # net
            {},  # metadata
            'foobar',  # admin_pass
            None,  # files
        ]
        self._test_inject_data(driver_params, "/path", disk_params)

        # Test with the configuration set to false.
        self.flags(inject_password=False, group='libvirt')
        self._test_inject_data(driver_params, "/path", disk_params,
                               called=False)

    def test_inject_data_key(self):
        driver_params = self._test_inject_data_default_driver_params()
        driver_params['instance']['key_data'] = 'key-content'

        self.flags(inject_key=True, group='libvirt')
        disk_params = [
            'key-content',  # key
            None,  # net
            {},  # metadata
            None,  # admin_pass
            None,  # files
        ]
        self._test_inject_data(driver_params, "/path", disk_params)

        # Test with the configuration set to false.
self.flags(inject_key=False, group='libvirt') self._test_inject_data(driver_params, "/path", disk_params, called=False) def test_inject_data_metadata(self): instance_metadata = {'metadata': {'data': 'foo'}} driver_params = self._test_inject_data_default_driver_params( **instance_metadata ) disk_params = [ None, # key None, # net {'data': 'foo'}, # metadata None, # admin_pass None, # files ] self._test_inject_data(driver_params, "/path", disk_params) def test_inject_data_files(self): driver_params = self._test_inject_data_default_driver_params() driver_params['files'] = ['file1', 'file2'] disk_params = [ None, # key None, # net {}, # metadata None, # admin_pass ['file1', 'file2'], # files ] self._test_inject_data(driver_params, "/path", disk_params) def test_inject_data_net(self): driver_params = self._test_inject_data_default_driver_params() driver_params['network_info'] = {'net': 'eno1'} disk_params = [ None, # key {'net': 'eno1'}, # net {}, # metadata None, # admin_pass None, # files ] self._test_inject_data(driver_params, "/path", disk_params) def test_inject_not_exist_image(self): driver_params = self._test_inject_data_default_driver_params() disk_params = [ 'key-content', # key None, # net None, # metadata None, # admin_pass None, # files ] self._test_inject_data(driver_params, "/fail/path", disk_params, called=False) def _test_attach_detach_interface(self, method, power_state, expected_flags): instance = self._create_instance() network_info = _fake_network_info(self.stubs, 1) domain = FakeVirtDomain() self.mox.StubOutWithMock(host.Host, 'get_domain') self.mox.StubOutWithMock(self.drvr.firewall_driver, 'setup_basic_filtering') self.mox.StubOutWithMock(domain, 'attachDeviceFlags') self.mox.StubOutWithMock(domain, 'info') host.Host.get_domain(instance).AndReturn(domain) if method == 'attach_interface': self.drvr.firewall_driver.setup_basic_filtering( instance, [network_info[0]]) fake_image_meta = {'id': instance.image_ref} fake_image_meta_obj = objects.ImageMeta.from_dict( fake_image_meta) expected = self.drvr.vif_driver.get_config( instance, network_info[0], fake_image_meta_obj, instance.flavor, CONF.libvirt.virt_type, self.drvr._host) self.mox.StubOutWithMock(self.drvr.vif_driver, 'get_config') self.drvr.vif_driver.get_config( instance, network_info[0], mox.IsA(objects.ImageMeta), mox.IsA(objects.Flavor), CONF.libvirt.virt_type, self.drvr._host).AndReturn(expected) domain.info().AndReturn([power_state, 1, 2, 3, 4]) if method == 'attach_interface': domain.attachDeviceFlags(expected.to_xml(), flags=expected_flags) elif method == 'detach_interface': domain.detachDeviceFlags(expected.to_xml(), expected_flags) self.mox.ReplayAll() if method == 'attach_interface': self.drvr.attach_interface( instance, fake_image_meta, network_info[0]) elif method == 'detach_interface': self.drvr.detach_interface( instance, network_info[0]) self.mox.VerifyAll() def test_attach_interface_with_running_instance(self): self._test_attach_detach_interface( 'attach_interface', power_state.RUNNING, expected_flags=(fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG | fakelibvirt.VIR_DOMAIN_AFFECT_LIVE)) def test_attach_interface_with_pause_instance(self): self._test_attach_detach_interface( 'attach_interface', power_state.PAUSED, expected_flags=(fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG | fakelibvirt.VIR_DOMAIN_AFFECT_LIVE)) def test_attach_interface_with_shutdown_instance(self): self._test_attach_detach_interface( 'attach_interface', power_state.SHUTDOWN, expected_flags=(fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG)) def 
test_detach_interface_with_running_instance(self): self._test_attach_detach_interface( 'detach_interface', power_state.RUNNING, expected_flags=(fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG | fakelibvirt.VIR_DOMAIN_AFFECT_LIVE)) def test_detach_interface_with_pause_instance(self): self._test_attach_detach_interface( 'detach_interface', power_state.PAUSED, expected_flags=(fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG | fakelibvirt.VIR_DOMAIN_AFFECT_LIVE)) def test_detach_interface_with_shutdown_instance(self): self._test_attach_detach_interface( 'detach_interface', power_state.SHUTDOWN, expected_flags=(fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG)) def test_rescue(self): instance = self._create_instance({'config_drive': None}) dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>" "<devices>" "<disk type='file'><driver name='qemu' type='raw'/>" "<source file='/test/disk'/>" "<target dev='vda' bus='virtio'/></disk>" "<disk type='file'><driver name='qemu' type='qcow2'/>" "<source file='/test/disk.local'/>" "<target dev='vdb' bus='virtio'/></disk>" "</devices></domain>") network_info = _fake_network_info(self.stubs, 1) self.mox.StubOutWithMock(self.drvr, '_get_existing_domain_xml') self.mox.StubOutWithMock(libvirt_utils, 'write_to_file') self.mox.StubOutWithMock(imagebackend.Backend, 'image') self.mox.StubOutWithMock(imagebackend.Image, 'cache') self.mox.StubOutWithMock(self.drvr, '_get_guest_xml') self.mox.StubOutWithMock(self.drvr, '_destroy') self.mox.StubOutWithMock(self.drvr, '_create_domain') self.drvr._get_existing_domain_xml(mox.IgnoreArg(), mox.IgnoreArg()).MultipleTimes().AndReturn(dummyxml) libvirt_utils.write_to_file(mox.IgnoreArg(), mox.IgnoreArg()) libvirt_utils.write_to_file(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()) imagebackend.Backend.image(instance, 'kernel.rescue', 'raw' ).AndReturn(fake_imagebackend.Raw()) imagebackend.Backend.image(instance, 'ramdisk.rescue', 'raw' ).AndReturn(fake_imagebackend.Raw()) imagebackend.Backend.image(instance, 'disk.rescue', 'default' ).AndReturn(fake_imagebackend.Raw()) imagebackend.Image.cache(context=mox.IgnoreArg(), fetch_func=mox.IgnoreArg(), filename=mox.IgnoreArg(), image_id=mox.IgnoreArg(), project_id=mox.IgnoreArg(), user_id=mox.IgnoreArg()).MultipleTimes() imagebackend.Image.cache(context=mox.IgnoreArg(), fetch_func=mox.IgnoreArg(), filename=mox.IgnoreArg(), image_id=mox.IgnoreArg(), project_id=mox.IgnoreArg(), size=None, user_id=mox.IgnoreArg()) image_meta = {'id': 'fake', 'name': 'fake'} self.drvr._get_guest_xml(mox.IgnoreArg(), instance, network_info, mox.IgnoreArg(), mox.IsA(objects.ImageMeta), rescue=mox.IgnoreArg(), write_to_disk=mox.IgnoreArg() ).AndReturn(dummyxml) self.drvr._destroy(instance) self.drvr._create_domain(mox.IgnoreArg()) self.mox.ReplayAll() rescue_password = 'fake_password' self.drvr.rescue(self.context, instance, network_info, image_meta, rescue_password) self.mox.VerifyAll() @mock.patch.object(libvirt_utils, 'get_instance_path') @mock.patch.object(libvirt_utils, 'load_file') @mock.patch.object(host.Host, "get_domain") def test_unrescue(self, mock_get_domain, mock_load_file, mock_get_instance_path): dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>" "<devices>" "<disk type='block' device='disk'>" "<source dev='/dev/some-vg/some-lv'/>" "<target dev='vda' bus='virtio'/></disk>" "</devices></domain>") mock_get_instance_path.return_value = '/path' instance = objects.Instance(uuid='fake=uuid', id=1) fake_dom = FakeVirtDomain(fake_xml=dummyxml) mock_get_domain.return_value = fake_dom mock_load_file.return_value = 
"fake_unrescue_xml" unrescue_xml_path = os.path.join('/path', 'unrescue.xml') rescue_file = os.path.join('/path', 'rescue.file') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) with contextlib.nested( mock.patch.object(drvr, '_destroy'), mock.patch.object(drvr, '_create_domain'), mock.patch.object(libvirt_utils, 'file_delete'), mock.patch.object(drvr, '_lvm_disks', return_value=['lvm.rescue']), mock.patch.object(lvm, 'remove_volumes'), mock.patch.object(glob, 'iglob', return_value=[rescue_file]) ) as (mock_destroy, mock_create, mock_del, mock_lvm_disks, mock_remove_volumes, mock_glob): drvr.unrescue(instance, None) mock_destroy.assert_called_once_with(instance) mock_create.assert_called_once_with("fake_unrescue_xml", fake_dom) self.assertEqual(2, mock_del.call_count) self.assertEqual(unrescue_xml_path, mock_del.call_args_list[0][0][0]) self.assertEqual(rescue_file, mock_del.call_args_list[1][0][0]) mock_remove_volumes.assert_called_once_with(['lvm.rescue']) @mock.patch( 'nova.virt.configdrive.ConfigDriveBuilder.add_instance_metadata') @mock.patch('nova.virt.configdrive.ConfigDriveBuilder.make_drive') def test_rescue_config_drive(self, mock_make, mock_add): instance = self._create_instance() uuid = instance.uuid configdrive_path = uuid + '/disk.config.rescue' dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>" "<devices>" "<disk type='file'><driver name='qemu' type='raw'/>" "<source file='/test/disk'/>" "<target dev='vda' bus='virtio'/></disk>" "<disk type='file'><driver name='qemu' type='qcow2'/>" "<source file='/test/disk.local'/>" "<target dev='vdb' bus='virtio'/></disk>" "</devices></domain>") network_info = _fake_network_info(self.stubs, 1) self.mox.StubOutWithMock(self.drvr, '_get_existing_domain_xml') self.mox.StubOutWithMock(libvirt_utils, 'write_to_file') self.mox.StubOutWithMock(imagebackend.Backend, 'image') self.mox.StubOutWithMock(imagebackend.Image, 'cache') self.mox.StubOutWithMock(instance_metadata.InstanceMetadata, '__init__') self.mox.StubOutWithMock(self.drvr, '_get_guest_xml') self.mox.StubOutWithMock(self.drvr, '_destroy') self.mox.StubOutWithMock(self.drvr, '_create_domain') self.drvr._get_existing_domain_xml(mox.IgnoreArg(), mox.IgnoreArg()).MultipleTimes().AndReturn(dummyxml) libvirt_utils.write_to_file(mox.IgnoreArg(), mox.IgnoreArg()) libvirt_utils.write_to_file(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()) imagebackend.Backend.image(instance, 'kernel.rescue', 'raw' ).AndReturn(fake_imagebackend.Raw()) imagebackend.Backend.image(instance, 'ramdisk.rescue', 'raw' ).AndReturn(fake_imagebackend.Raw()) imagebackend.Backend.image(instance, 'disk.rescue', 'default' ).AndReturn(fake_imagebackend.Raw()) imagebackend.Backend.image(instance, 'disk.config.rescue', 'raw' ).AndReturn(fake_imagebackend.Raw()) imagebackend.Image.cache(context=mox.IgnoreArg(), fetch_func=mox.IgnoreArg(), filename=mox.IgnoreArg(), image_id=mox.IgnoreArg(), project_id=mox.IgnoreArg(), user_id=mox.IgnoreArg()).MultipleTimes() imagebackend.Image.cache(context=mox.IgnoreArg(), fetch_func=mox.IgnoreArg(), filename=mox.IgnoreArg(), image_id=mox.IgnoreArg(), project_id=mox.IgnoreArg(), size=None, user_id=mox.IgnoreArg()) instance_metadata.InstanceMetadata.__init__(mox.IgnoreArg(), content=mox.IgnoreArg(), extra_md=mox.IgnoreArg(), network_info=mox.IgnoreArg()) image_meta = {'id': 'fake', 'name': 'fake'} self.drvr._get_guest_xml(mox.IgnoreArg(), instance, network_info, mox.IgnoreArg(), mox.IsA(objects.ImageMeta), rescue=mox.IgnoreArg(), write_to_disk=mox.IgnoreArg() 
).AndReturn(dummyxml) self.drvr._destroy(instance) self.drvr._create_domain(mox.IgnoreArg()) self.mox.ReplayAll() rescue_password = 'fake_password' self.drvr.rescue(self.context, instance, network_info, image_meta, rescue_password) self.mox.VerifyAll() mock_add.assert_any_call(mock.ANY) expected_call = [mock.call(os.path.join(CONF.instances_path, configdrive_path))] mock_make.assert_has_calls(expected_call) @mock.patch('shutil.rmtree') @mock.patch('nova.utils.execute') @mock.patch('os.path.exists') @mock.patch('nova.virt.libvirt.utils.get_instance_path') def test_delete_instance_files(self, get_instance_path, exists, exe, shutil): get_instance_path.return_value = '/path' instance = objects.Instance(uuid='fake-uuid', id=1) exists.side_effect = [False, False, True, False] result = self.drvr.delete_instance_files(instance) get_instance_path.assert_called_with(instance) exe.assert_called_with('mv', '/path', '/path_del') shutil.assert_called_with('/path_del') self.assertTrue(result) @mock.patch('shutil.rmtree') @mock.patch('nova.utils.execute') @mock.patch('os.path.exists') @mock.patch('os.kill') @mock.patch('nova.virt.libvirt.utils.get_instance_path') def test_delete_instance_files_kill_running( self, get_instance_path, kill, exists, exe, shutil): get_instance_path.return_value = '/path' instance = objects.Instance(uuid='fake-uuid', id=1) self.drvr.job_tracker.jobs[instance.uuid] = [3, 4] exists.side_effect = [False, False, True, False] result = self.drvr.delete_instance_files(instance) get_instance_path.assert_called_with(instance) exe.assert_called_with('mv', '/path', '/path_del') kill.assert_has_calls([mock.call(3, signal.SIGKILL), mock.call(3, 0), mock.call(4, signal.SIGKILL), mock.call(4, 0)]) shutil.assert_called_with('/path_del') self.assertTrue(result) self.assertNotIn(instance.uuid, self.drvr.job_tracker.jobs) @mock.patch('shutil.rmtree') @mock.patch('nova.utils.execute') @mock.patch('os.path.exists') @mock.patch('nova.virt.libvirt.utils.get_instance_path') def test_delete_instance_files_resize(self, get_instance_path, exists, exe, shutil): get_instance_path.return_value = '/path' instance = objects.Instance(uuid='fake-uuid', id=1) nova.utils.execute.side_effect = [Exception(), None] exists.side_effect = [False, False, True, False] result = self.drvr.delete_instance_files(instance) get_instance_path.assert_called_with(instance) expected = [mock.call('mv', '/path', '/path_del'), mock.call('mv', '/path_resize', '/path_del')] self.assertEqual(expected, exe.mock_calls) shutil.assert_called_with('/path_del') self.assertTrue(result) @mock.patch('shutil.rmtree') @mock.patch('nova.utils.execute') @mock.patch('os.path.exists') @mock.patch('nova.virt.libvirt.utils.get_instance_path') def test_delete_instance_files_failed(self, get_instance_path, exists, exe, shutil): get_instance_path.return_value = '/path' instance = objects.Instance(uuid='fake-uuid', id=1) exists.side_effect = [False, False, True, True] result = self.drvr.delete_instance_files(instance) get_instance_path.assert_called_with(instance) exe.assert_called_with('mv', '/path', '/path_del') shutil.assert_called_with('/path_del') self.assertFalse(result) @mock.patch('shutil.rmtree') @mock.patch('nova.utils.execute') @mock.patch('os.path.exists') @mock.patch('nova.virt.libvirt.utils.get_instance_path') def test_delete_instance_files_mv_failed(self, get_instance_path, exists, exe, shutil): get_instance_path.return_value = '/path' instance = objects.Instance(uuid='fake-uuid', id=1) nova.utils.execute.side_effect = Exception() 
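        # Both 'mv' attempts below (for '/path' and the '/path_resize'
        # leftover) fail on every try, and os.path.exists keeps reporting
        # the directory afterwards, so delete_instance_files() must report
        # failure by returning False.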
exists.side_effect = [True, True] result = self.drvr.delete_instance_files(instance) get_instance_path.assert_called_with(instance) expected = [mock.call('mv', '/path', '/path_del'), mock.call('mv', '/path_resize', '/path_del')] * 2 self.assertEqual(expected, exe.mock_calls) self.assertFalse(result) @mock.patch('shutil.rmtree') @mock.patch('nova.utils.execute') @mock.patch('os.path.exists') @mock.patch('nova.virt.libvirt.utils.get_instance_path') def test_delete_instance_files_resume(self, get_instance_path, exists, exe, shutil): get_instance_path.return_value = '/path' instance = objects.Instance(uuid='fake-uuid', id=1) nova.utils.execute.side_effect = Exception() exists.side_effect = [False, False, True, False] result = self.drvr.delete_instance_files(instance) get_instance_path.assert_called_with(instance) expected = [mock.call('mv', '/path', '/path_del'), mock.call('mv', '/path_resize', '/path_del')] * 2 self.assertEqual(expected, exe.mock_calls) self.assertTrue(result) @mock.patch('shutil.rmtree') @mock.patch('nova.utils.execute') @mock.patch('os.path.exists') @mock.patch('nova.virt.libvirt.utils.get_instance_path') def test_delete_instance_files_none(self, get_instance_path, exists, exe, shutil): get_instance_path.return_value = '/path' instance = objects.Instance(uuid='fake-uuid', id=1) nova.utils.execute.side_effect = Exception() exists.side_effect = [False, False, False, False] result = self.drvr.delete_instance_files(instance) get_instance_path.assert_called_with(instance) expected = [mock.call('mv', '/path', '/path_del'), mock.call('mv', '/path_resize', '/path_del')] * 2 self.assertEqual(expected, exe.mock_calls) self.assertEqual(0, len(shutil.mock_calls)) self.assertTrue(result) @mock.patch('shutil.rmtree') @mock.patch('nova.utils.execute') @mock.patch('os.path.exists') @mock.patch('nova.virt.libvirt.utils.get_instance_path') def test_delete_instance_files_concurrent(self, get_instance_path, exists, exe, shutil): get_instance_path.return_value = '/path' instance = objects.Instance(uuid='fake-uuid', id=1) nova.utils.execute.side_effect = [Exception(), Exception(), None] exists.side_effect = [False, False, True, False] result = self.drvr.delete_instance_files(instance) get_instance_path.assert_called_with(instance) expected = [mock.call('mv', '/path', '/path_del'), mock.call('mv', '/path_resize', '/path_del')] expected.append(expected[0]) self.assertEqual(expected, exe.mock_calls) shutil.assert_called_with('/path_del') self.assertTrue(result) def _assert_on_id_map(self, idmap, klass, start, target, count): self.assertIsInstance(idmap, klass) self.assertEqual(start, idmap.start) self.assertEqual(target, idmap.target) self.assertEqual(count, idmap.count) def test_get_id_maps(self): self.flags(virt_type="lxc", group="libvirt") CONF.libvirt.virt_type = "lxc" CONF.libvirt.uid_maps = ["0:10000:1", "1:20000:10"] CONF.libvirt.gid_maps = ["0:10000:1", "1:20000:10"] idmaps = self.drvr._get_guest_idmaps() self.assertEqual(len(idmaps), 4) self._assert_on_id_map(idmaps[0], vconfig.LibvirtConfigGuestUIDMap, 0, 10000, 1) self._assert_on_id_map(idmaps[1], vconfig.LibvirtConfigGuestUIDMap, 1, 20000, 10) self._assert_on_id_map(idmaps[2], vconfig.LibvirtConfigGuestGIDMap, 0, 10000, 1) self._assert_on_id_map(idmaps[3], vconfig.LibvirtConfigGuestGIDMap, 1, 20000, 10) def test_get_id_maps_not_lxc(self): CONF.libvirt.uid_maps = ["0:10000:1", "1:20000:10"] CONF.libvirt.gid_maps = ["0:10000:1", "1:20000:10"] idmaps = self.drvr._get_guest_idmaps() self.assertEqual(0, len(idmaps)) def 
test_get_id_maps_only_uid(self): self.flags(virt_type="lxc", group="libvirt") CONF.libvirt.uid_maps = ["0:10000:1", "1:20000:10"] CONF.libvirt.gid_maps = [] idmaps = self.drvr._get_guest_idmaps() self.assertEqual(2, len(idmaps)) self._assert_on_id_map(idmaps[0], vconfig.LibvirtConfigGuestUIDMap, 0, 10000, 1) self._assert_on_id_map(idmaps[1], vconfig.LibvirtConfigGuestUIDMap, 1, 20000, 10) def test_get_id_maps_only_gid(self): self.flags(virt_type="lxc", group="libvirt") CONF.libvirt.uid_maps = [] CONF.libvirt.gid_maps = ["0:10000:1", "1:20000:10"] idmaps = self.drvr._get_guest_idmaps() self.assertEqual(2, len(idmaps)) self._assert_on_id_map(idmaps[0], vconfig.LibvirtConfigGuestGIDMap, 0, 10000, 1) self._assert_on_id_map(idmaps[1], vconfig.LibvirtConfigGuestGIDMap, 1, 20000, 10) def test_instance_on_disk(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) instance = objects.Instance(uuid='fake-uuid', id=1) self.assertFalse(drvr.instance_on_disk(instance)) def test_instance_on_disk_rbd(self): self.flags(images_type='rbd', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) instance = objects.Instance(uuid='fake-uuid', id=1) self.assertTrue(drvr.instance_on_disk(instance)) def test_get_interfaces(self): dom_xml = """ <domain type="qemu"> <devices> <interface type="ethernet"> <mac address="fe:eb:da:ed:ef:ac"/> <model type="virtio"/> <target dev="eth0"/> </interface> <interface type="bridge"> <mac address="ca:fe:de:ad:be:ef"/> <model type="virtio"/> <target dev="br0"/> </interface> </devices> </domain>""" list_interfaces = ['eth0', 'br0'] drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) self.assertEqual(list_interfaces, drv._get_interfaces(dom_xml)) def test_get_disk_xml(self): dom_xml = """ <domain type="kvm"> <devices> <disk type="file"> <source file="disk1_file"/> <target dev="vda" bus="virtio"/> <serial>0e38683e-f0af-418f-a3f1-6b67ea0f919d</serial> </disk> <disk type="block"> <source dev="/path/to/dev/1"/> <target dev="vdb" bus="virtio" serial="1234"/> </disk> </devices> </domain> """ diska_xml = """<disk type="file" device="disk"> <source file="disk1_file"/> <target bus="virtio" dev="vda"/> <serial>0e38683e-f0af-418f-a3f1-6b67ea0f919d</serial> </disk>""" diskb_xml = """<disk type="block" device="disk"> <source dev="/path/to/dev/1"/> <target bus="virtio" dev="vdb"/> </disk>""" dom = mock.MagicMock() dom.XMLDesc.return_value = dom_xml guest = libvirt_guest.Guest(dom) # NOTE(gcb): etree.tostring(node) returns an extra line with # some white spaces, need to strip it. 
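        # get_disk() returns the parsed config object for the <disk> whose
        # target dev matches, so to_xml() round-trips the XML above; 'vdc'
        # matches no <disk> element, hence the None assertion below.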
actual_diska_xml = guest.get_disk('vda').to_xml() self.assertEqual(diska_xml.strip(), actual_diska_xml.strip()) actual_diskb_xml = guest.get_disk('vdb').to_xml() self.assertEqual(diskb_xml.strip(), actual_diskb_xml.strip()) self.assertIsNone(guest.get_disk('vdc')) def test_vcpu_model_from_config(self): drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) vcpu_model = drv._cpu_config_to_vcpu_model(None, None) self.assertIsNone(vcpu_model) cpu = vconfig.LibvirtConfigGuestCPU() feature1 = vconfig.LibvirtConfigGuestCPUFeature() feature2 = vconfig.LibvirtConfigGuestCPUFeature() feature1.name = 'sse' feature1.policy = cpumodel.POLICY_REQUIRE feature2.name = 'aes' feature2.policy = cpumodel.POLICY_REQUIRE cpu.features = set([feature1, feature2]) cpu.mode = cpumodel.MODE_CUSTOM cpu.sockets = 1 cpu.cores = 2 cpu.threads = 4 vcpu_model = drv._cpu_config_to_vcpu_model(cpu, None) self.assertEqual(cpumodel.MATCH_EXACT, vcpu_model.match) self.assertEqual(cpumodel.MODE_CUSTOM, vcpu_model.mode) self.assertEqual(4, vcpu_model.topology.threads) self.assertEqual(set(['sse', 'aes']), set([f.name for f in vcpu_model.features])) cpu.mode = cpumodel.MODE_HOST_MODEL vcpu_model_1 = drv._cpu_config_to_vcpu_model(cpu, vcpu_model) self.assertEqual(cpumodel.MODE_HOST_MODEL, vcpu_model.mode) self.assertEqual(vcpu_model, vcpu_model_1) @mock.patch.object(lvm, 'get_volume_size', return_value=10) @mock.patch.object(host.Host, "get_guest") @mock.patch.object(dmcrypt, 'delete_volume') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.unfilter_instance') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._undefine_domain') @mock.patch.object(objects.Instance, 'save') def test_cleanup_lvm_encrypted(self, mock_save, mock_undefine_domain, mock_unfilter, mock_delete_volume, mock_get_guest, mock_get_size): drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance = objects.Instance(uuid='fake-uuid', id=1, ephemeral_key_uuid='000-000-000') instance.system_metadata = {} block_device_info = {'root_device_name': '/dev/vda', 'ephemerals': [], 'block_device_mapping': []} self.flags(images_type="lvm", group='libvirt') dom_xml = """ <domain type="kvm"> <devices> <disk type="block"> <driver name='qemu' type='raw' cache='none'/> <source dev="/dev/mapper/fake-dmcrypt"/> <target dev="vda" bus="virtio" serial="1234"/> </disk> </devices> </domain> """ dom = mock.MagicMock() dom.XMLDesc.return_value = dom_xml guest = libvirt_guest.Guest(dom) mock_get_guest.return_value = guest drv.cleanup(self.context, instance, 'fake_network', destroy_vifs=False, block_device_info=block_device_info) mock_delete_volume.assert_called_once_with('/dev/mapper/fake-dmcrypt') @mock.patch.object(lvm, 'get_volume_size', return_value=10) @mock.patch.object(host.Host, "get_guest") @mock.patch.object(dmcrypt, 'delete_volume') def _test_cleanup_lvm(self, mock_delete_volume, mock_get_guest, mock_size, encrypted=False): drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance = objects.Instance(uuid='fake-uuid', id=1, ephemeral_key_uuid='000-000-000') block_device_info = {'root_device_name': '/dev/vda', 'ephemerals': [], 'block_device_mapping': []} dev_name = 'fake-dmcrypt' if encrypted else 'fake' dom_xml = """ <domain type="kvm"> <devices> <disk type="block"> <driver name='qemu' type='raw' cache='none'/> <source dev="/dev/mapper/%s"/> <target dev="vda" bus="virtio" serial="1234"/> </disk> </devices> </domain> """ % dev_name dom = mock.MagicMock() dom.XMLDesc.return_value = dom_xml guest = libvirt_guest.Guest(dom) 
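        # Only the '-dmcrypt' suffix on the mapper device marks the volume
        # as encrypted: _cleanup_lvm is expected to tear down the dm-crypt
        # mapping in that case and to leave plain LVM volumes alone, which
        # is what the two assertions below check.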
mock_get_guest.return_value = guest drv._cleanup_lvm(instance, block_device_info) if encrypted: mock_delete_volume.assert_called_once_with( '/dev/mapper/fake-dmcrypt') else: self.assertFalse(mock_delete_volume.called) def test_cleanup_lvm(self): self._test_cleanup_lvm() def test_cleanup_encrypted_lvm(self): self._test_cleanup_lvm(encrypted=True) def test_vcpu_model_to_config(self): drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) feature = objects.VirtCPUFeature(policy=cpumodel.POLICY_REQUIRE, name='sse') feature_1 = objects.VirtCPUFeature(policy=cpumodel.POLICY_FORBID, name='aes') topo = objects.VirtCPUTopology(sockets=1, cores=2, threads=4) vcpu_model = objects.VirtCPUModel(mode=cpumodel.MODE_HOST_MODEL, features=[feature, feature_1], topology=topo) cpu = drv._vcpu_model_to_cpu_config(vcpu_model) self.assertEqual(cpumodel.MODE_HOST_MODEL, cpu.mode) self.assertEqual(1, cpu.sockets) self.assertEqual(4, cpu.threads) self.assertEqual(2, len(cpu.features)) self.assertEqual(set(['sse', 'aes']), set([f.name for f in cpu.features])) self.assertEqual(set([cpumodel.POLICY_REQUIRE, cpumodel.POLICY_FORBID]), set([f.policy for f in cpu.features])) class LibvirtVolumeUsageTestCase(test.NoDBTestCase): """Test for LibvirtDriver.get_all_volume_usage.""" def setUp(self): super(LibvirtVolumeUsageTestCase, self).setUp() self.drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.c = context.get_admin_context() self.ins_ref = objects.Instance( id=1729, uuid='875a8070-d0b9-4949-8b31-104d125c9a64' ) # verify bootable volume device path also self.bdms = [{'volume_id': 1, 'device_name': '/dev/vde'}, {'volume_id': 2, 'device_name': 'vda'}] def test_get_all_volume_usage(self): def fake_block_stats(instance_name, disk): return (169, 688640, 0, 0, -1) self.stubs.Set(self.drvr, 'block_stats', fake_block_stats) vol_usage = self.drvr.get_all_volume_usage(self.c, [dict(instance=self.ins_ref, instance_bdms=self.bdms)]) expected_usage = [{'volume': 1, 'instance': self.ins_ref, 'rd_bytes': 688640, 'wr_req': 0, 'rd_req': 169, 'wr_bytes': 0}, {'volume': 2, 'instance': self.ins_ref, 'rd_bytes': 688640, 'wr_req': 0, 'rd_req': 169, 'wr_bytes': 0}] self.assertEqual(vol_usage, expected_usage) def test_get_all_volume_usage_device_not_found(self): def fake_get_domain(self, instance): raise exception.InstanceNotFound(instance_id="fakedom") self.stubs.Set(host.Host, 'get_domain', fake_get_domain) vol_usage = self.drvr.get_all_volume_usage(self.c, [dict(instance=self.ins_ref, instance_bdms=self.bdms)]) self.assertEqual(vol_usage, []) class LibvirtNonblockingTestCase(test.NoDBTestCase): """Test libvirtd calls are nonblocking.""" def setUp(self): super(LibvirtNonblockingTestCase, self).setUp() self.flags(connection_uri="test:///default", group='libvirt') def test_connection_to_primitive(self): # Test bug 962840. 
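        # (The bug concerned serializing objects that hold a live libvirt
        # connection: jsonutils.to_primitive() on drvr._conn must complete
        # without raising.)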
import nova.virt.libvirt.driver as libvirt_driver drvr = libvirt_driver.LibvirtDriver('') drvr.set_host_enabled = mock.Mock() jsonutils.to_primitive(drvr._conn, convert_instances=True) def test_tpool_execute_calls_libvirt(self): conn = fakelibvirt.virConnect() conn.is_expected = True self.mox.StubOutWithMock(eventlet.tpool, 'execute') eventlet.tpool.execute( fakelibvirt.openAuth, 'test:///default', mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(conn) eventlet.tpool.execute( conn.domainEventRegisterAny, None, fakelibvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE, mox.IgnoreArg(), mox.IgnoreArg()) if hasattr(fakelibvirt.virConnect, 'registerCloseCallback'): eventlet.tpool.execute( conn.registerCloseCallback, mox.IgnoreArg(), mox.IgnoreArg()) self.mox.ReplayAll() driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) c = driver._get_connection() self.assertEqual(True, c.is_expected) class LibvirtVolumeSnapshotTestCase(test.NoDBTestCase): """Tests for libvirtDriver.volume_snapshot_create/delete.""" def setUp(self): super(LibvirtVolumeSnapshotTestCase, self).setUp() self.drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.c = context.get_admin_context() self.flags(instance_name_template='instance-%s') self.flags(qemu_allowed_storage_drivers=[], group='libvirt') # creating instance self.inst = {} self.inst['uuid'] = uuidutils.generate_uuid() self.inst['id'] = '1' # create domain info self.dom_xml = """ <domain type='kvm'> <devices> <disk type='file'> <source file='disk1_file'/> <target dev='vda' bus='virtio'/> <serial>0e38683e-f0af-418f-a3f1-6b67ea0f919d</serial> </disk> <disk type='block'> <source dev='/path/to/dev/1'/> <target dev='vdb' bus='virtio' serial='1234'/> </disk> </devices> </domain>""" # alternate domain info with network-backed snapshot chain self.dom_netdisk_xml = """ <domain type='kvm'> <devices> <disk type='file'> <source file='disk1_file'/> <target dev='vda' bus='virtio'/> <serial>0e38683e-f0af-418f-a3f1-6b67eaffffff</serial> </disk> <disk type='network' device='disk'> <driver name='qemu' type='qcow2'/> <source protocol='gluster' name='vol1/root.img'> <host name='server1' port='24007'/> </source> <backingStore type='network' index='1'> <driver name='qemu' type='qcow2'/> <source protocol='gluster' name='vol1/snap.img'> <host name='server1' port='24007'/> </source> <backingStore type='network' index='2'> <driver name='qemu' type='qcow2'/> <source protocol='gluster' name='vol1/snap-b.img'> <host name='server1' port='24007'/> </source> <backingStore/> </backingStore> </backingStore> <target dev='vdb' bus='virtio'/> <serial>0e38683e-f0af-418f-a3f1-6b67ea0f919d</serial> </disk> </devices> </domain> """ # XML with netdisk attached, and 1 snapshot taken self.dom_netdisk_xml_2 = """ <domain type='kvm'> <devices> <disk type='file'> <source file='disk1_file'/> <target dev='vda' bus='virtio'/> <serial>0e38683e-f0af-418f-a3f1-6b67eaffffff</serial> </disk> <disk type='network' device='disk'> <driver name='qemu' type='qcow2'/> <source protocol='gluster' name='vol1/snap.img'> <host name='server1' port='24007'/> </source> <backingStore type='network' index='1'> <driver name='qemu' type='qcow2'/> <source protocol='gluster' name='vol1/root.img'> <host name='server1' port='24007'/> </source> <backingStore/> </backingStore> <target dev='vdb' bus='virtio'/> <serial>0e38683e-f0af-418f-a3f1-6b67ea0f919d</serial> </disk> </devices> </domain> """ self.create_info = {'type': 'qcow2', 'snapshot_id': '1234-5678', 'new_file': 'new-file'} self.volume_uuid = '0e38683e-f0af-418f-a3f1-6b67ea0f919d' 
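        # This UUID deliberately matches the <serial> of the first disk in
        # self.dom_xml above: the driver locates the disk to snapshot by
        # matching the volume id against the device serial.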
self.snapshot_id = '9c3ca9f4-9f4e-4dba-bedd-5c5e4b52b162' self.delete_info_1 = {'type': 'qcow2', 'file_to_merge': 'snap.img', 'merge_target_file': None} self.delete_info_2 = {'type': 'qcow2', 'file_to_merge': 'snap.img', 'merge_target_file': 'other-snap.img'} self.delete_info_3 = {'type': 'qcow2', 'file_to_merge': None, 'merge_target_file': None} self.delete_info_netdisk = {'type': 'qcow2', 'file_to_merge': 'snap.img', 'merge_target_file': 'root.img'} self.delete_info_invalid_type = {'type': 'made_up_type', 'file_to_merge': 'some_file', 'merge_target_file': 'some_other_file'} def tearDown(self): super(LibvirtVolumeSnapshotTestCase, self).tearDown() @mock.patch('nova.virt.block_device.DriverVolumeBlockDevice.' 'refresh_connection_info') @mock.patch('nova.objects.block_device.BlockDeviceMapping.' 'get_by_volume_id') def test_volume_refresh_connection_info(self, mock_get_by_volume_id, mock_refresh_connection_info): fake_bdm = fake_block_device.FakeDbBlockDeviceDict({ 'id': 123, 'instance_uuid': 'fake-instance', 'device_name': '/dev/sdb', 'source_type': 'volume', 'destination_type': 'volume', 'volume_id': 'fake-volume-id-1', 'connection_info': '{"fake": "connection_info"}'}) mock_get_by_volume_id.return_value = fake_bdm self.drvr._volume_refresh_connection_info(self.c, self.inst, self.volume_uuid) mock_get_by_volume_id.assert_called_once_with(self.c, self.volume_uuid) mock_refresh_connection_info.assert_called_once_with(self.c, self.inst, self.drvr._volume_api, self.drvr) def test_volume_snapshot_create(self, quiesce=True): """Test snapshot creation with file-based disk.""" self.flags(instance_name_template='instance-%s') self.mox.StubOutWithMock(self.drvr._host, 'get_domain') self.mox.StubOutWithMock(self.drvr, '_volume_api') instance = objects.Instance(**self.inst) new_file = 'new-file' domain = FakeVirtDomain(fake_xml=self.dom_xml) self.mox.StubOutWithMock(domain, 'XMLDesc') self.mox.StubOutWithMock(domain, 'snapshotCreateXML') domain.XMLDesc(flags=0).AndReturn(self.dom_xml) snap_xml_src = ( '<domainsnapshot>\n' ' <disks>\n' ' <disk name="disk1_file" snapshot="external" type="file">\n' ' <source file="new-file"/>\n' ' </disk>\n' ' <disk name="vdb" snapshot="no"/>\n' ' </disks>\n' '</domainsnapshot>\n') # Older versions of libvirt may be missing these. 
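        # The values injected below mirror libvirt's real
        # virDomainSnapshotCreateFlags bits, so the flag arithmetic behaves
        # as it would against a real libvirt module; with DISK_ONLY=16 and
        # NO_METADATA=4 that gives:
        #   snap_flags   = 16 | 4 | 32 = 52
        #   snap_flags_q = snap_flags | 64 = 116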
fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT = 32 fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE = 64 snap_flags = (fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY | fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA | fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT) snap_flags_q = (snap_flags | fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE) if quiesce: domain.snapshotCreateXML(snap_xml_src, snap_flags_q) else: domain.snapshotCreateXML(snap_xml_src, snap_flags_q).\ AndRaise(fakelibvirt.libvirtError( 'quiescing failed, no qemu-ga')) domain.snapshotCreateXML(snap_xml_src, snap_flags).AndReturn(0) self.mox.ReplayAll() self.drvr._volume_snapshot_create(self.c, instance, domain, self.volume_uuid, new_file) self.mox.VerifyAll() def test_volume_snapshot_create_libgfapi(self, quiesce=True): """Test snapshot creation with libgfapi network disk.""" self.flags(instance_name_template = 'instance-%s') self.flags(qemu_allowed_storage_drivers = ['gluster'], group='libvirt') self.mox.StubOutWithMock(self.drvr._host, 'get_domain') self.mox.StubOutWithMock(self.drvr, '_volume_api') self.dom_xml = """ <domain type='kvm'> <devices> <disk type='file'> <source file='disk1_file'/> <target dev='vda' bus='virtio'/> <serial>0e38683e-f0af-418f-a3f1-6b67ea0f919d</serial> </disk> <disk type='block'> <source protocol='gluster' name='gluster1/volume-1234'> <host name='127.3.4.5' port='24007'/> </source> <target dev='vdb' bus='virtio' serial='1234'/> </disk> </devices> </domain>""" instance = objects.Instance(**self.inst) new_file = 'new-file' domain = FakeVirtDomain(fake_xml=self.dom_xml) self.mox.StubOutWithMock(domain, 'XMLDesc') self.mox.StubOutWithMock(domain, 'snapshotCreateXML') domain.XMLDesc(flags=0).AndReturn(self.dom_xml) snap_xml_src = ( '<domainsnapshot>\n' ' <disks>\n' ' <disk name="disk1_file" snapshot="external" type="file">\n' ' <source file="new-file"/>\n' ' </disk>\n' ' <disk name="vdb" snapshot="no"/>\n' ' </disks>\n' '</domainsnapshot>\n') # Older versions of libvirt may be missing these. 
fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT = 32 fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE = 64 snap_flags = (fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY | fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA | fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT) snap_flags_q = (snap_flags | fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE) if quiesce: domain.snapshotCreateXML(snap_xml_src, snap_flags_q) else: domain.snapshotCreateXML(snap_xml_src, snap_flags_q).\ AndRaise(fakelibvirt.libvirtError( 'quiescing failed, no qemu-ga')) domain.snapshotCreateXML(snap_xml_src, snap_flags).AndReturn(0) self.mox.ReplayAll() self.drvr._volume_snapshot_create(self.c, instance, domain, self.volume_uuid, new_file) self.mox.VerifyAll() def test_volume_snapshot_create_noquiesce(self): self.test_volume_snapshot_create(quiesce=False) @mock.patch.object(host.Host, 'has_min_version', return_value=True) def test_can_quiesce(self, ver): self.flags(virt_type='kvm', group='libvirt') instance = objects.Instance(**self.inst) image_meta = objects.ImageMeta.from_dict( {"properties": { "hw_qemu_guest_agent": "yes"}}) self.assertIsNone(self.drvr._can_quiesce(instance, image_meta)) @mock.patch.object(host.Host, 'has_min_version', return_value=True) def test_can_quiesce_bad_hyp(self, ver): self.flags(virt_type='xxx', group='libvirt') instance = objects.Instance(**self.inst) image_meta = objects.ImageMeta.from_dict( {"properties": { "hw_qemu_guest_agent": "yes"}}) self.assertRaises(exception.InstanceQuiesceNotSupported, self.drvr._can_quiesce, instance, image_meta) @mock.patch.object(host.Host, 'has_min_version', return_value=False) def test_can_quiesce_bad_ver(self, ver): self.flags(virt_type='kvm', group='libvirt') instance = objects.Instance(**self.inst) image_meta = {"properties": { "hw_qemu_guest_agent": "yes"}} self.assertRaises(exception.InstanceQuiesceNotSupported, self.drvr._can_quiesce, instance, image_meta) @mock.patch.object(host.Host, 'has_min_version', return_value=True) def test_can_quiesce_agent_not_enable(self, ver): self.flags(virt_type='kvm', group='libvirt') instance = objects.Instance(**self.inst) image_meta = objects.ImageMeta.from_dict({}) self.assertRaises(exception.QemuGuestAgentNotEnabled, self.drvr._can_quiesce, instance, image_meta) def test_volume_snapshot_create_outer_success(self): instance = objects.Instance(**self.inst) domain = FakeVirtDomain(fake_xml=self.dom_xml) self.mox.StubOutWithMock(self.drvr._host, 'get_domain') self.mox.StubOutWithMock(self.drvr, '_volume_api') self.mox.StubOutWithMock(self.drvr, '_volume_snapshot_create') self.drvr._host.get_domain(instance).AndReturn(domain) self.drvr._volume_snapshot_create(self.c, instance, domain, self.volume_uuid, self.create_info['new_file']) self.drvr._volume_api.update_snapshot_status( self.c, self.create_info['snapshot_id'], 'creating') self.mox.StubOutWithMock(self.drvr._volume_api, 'get_snapshot') self.drvr._volume_api.get_snapshot(self.c, self.create_info['snapshot_id']).AndReturn({'status': 'available'}) self.mox.StubOutWithMock(self.drvr, '_volume_refresh_connection_info') self.drvr._volume_refresh_connection_info(self.c, instance, self.volume_uuid) self.mox.ReplayAll() self.drvr.volume_snapshot_create(self.c, instance, self.volume_uuid, self.create_info) def test_volume_snapshot_create_outer_failure(self): instance = objects.Instance(**self.inst) domain = FakeVirtDomain(fake_xml=self.dom_xml) self.mox.StubOutWithMock(self.drvr._host, 'get_domain') self.mox.StubOutWithMock(self.drvr, '_volume_api') 
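        # The failure path mirrors the success test above, except that the
        # inner _volume_snapshot_create raises; the driver is then expected
        # to mark the snapshot 'error' via the volume API and re-raise.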
self.mox.StubOutWithMock(self.drvr, '_volume_snapshot_create') self.drvr._host.get_domain(instance).AndReturn(domain) self.drvr._volume_snapshot_create(self.c, instance, domain, self.volume_uuid, self.create_info['new_file']).\ AndRaise(exception.NovaException('oops')) self.drvr._volume_api.update_snapshot_status( self.c, self.create_info['snapshot_id'], 'error') self.mox.ReplayAll() self.assertRaises(exception.NovaException, self.drvr.volume_snapshot_create, self.c, instance, self.volume_uuid, self.create_info) def test_volume_snapshot_delete_1(self): """Deleting newest snapshot -- blockRebase.""" # libvirt lib doesn't have VIR_DOMAIN_BLOCK_REBASE_RELATIVE flag fakelibvirt.__dict__.pop('VIR_DOMAIN_BLOCK_REBASE_RELATIVE') self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt) instance = objects.Instance(**self.inst) snapshot_id = 'snapshot-1234' domain = FakeVirtDomain(fake_xml=self.dom_xml) self.mox.StubOutWithMock(domain, 'XMLDesc') domain.XMLDesc(flags=0).AndReturn(self.dom_xml) self.mox.StubOutWithMock(self.drvr._host, 'get_domain') self.mox.StubOutWithMock(self.drvr._host, 'has_min_version') self.mox.StubOutWithMock(domain, 'blockRebase') self.mox.StubOutWithMock(domain, 'blockCommit') self.mox.StubOutWithMock(domain, 'blockJobInfo') self.drvr._host.get_domain(instance).AndReturn(domain) self.drvr._host.has_min_version(mox.IgnoreArg()).AndReturn(True) domain.blockRebase('vda', 'snap.img', 0, flags=0) domain.blockJobInfo('vda', flags=0).AndReturn({'cur': 1, 'end': 1000}) domain.blockJobInfo('vda', flags=0).AndReturn( {'cur': 1000, 'end': 1000}) self.mox.ReplayAll() self.drvr._volume_snapshot_delete(self.c, instance, self.volume_uuid, snapshot_id, self.delete_info_1) self.mox.VerifyAll() fakelibvirt.__dict__.update({'VIR_DOMAIN_BLOCK_REBASE_RELATIVE': 8}) def test_volume_snapshot_delete_relative_1(self): """Deleting newest snapshot -- blockRebase using relative flag""" self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt) instance = objects.Instance(**self.inst) snapshot_id = 'snapshot-1234' domain = FakeVirtDomain(fake_xml=self.dom_xml) guest = libvirt_guest.Guest(domain) self.mox.StubOutWithMock(domain, 'XMLDesc') domain.XMLDesc(flags=0).AndReturn(self.dom_xml) self.mox.StubOutWithMock(self.drvr._host, 'get_guest') self.mox.StubOutWithMock(self.drvr._host, 'has_min_version') self.mox.StubOutWithMock(domain, 'blockRebase') self.mox.StubOutWithMock(domain, 'blockCommit') self.mox.StubOutWithMock(domain, 'blockJobInfo') self.drvr._host.get_guest(instance).AndReturn(guest) self.drvr._host.has_min_version(mox.IgnoreArg()).AndReturn(True) domain.blockRebase('vda', 'snap.img', 0, flags=fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_RELATIVE) domain.blockJobInfo('vda', flags=0).AndReturn({'cur': 1, 'end': 1000}) domain.blockJobInfo('vda', flags=0).AndReturn( {'cur': 1000, 'end': 1000}) self.mox.ReplayAll() self.drvr._volume_snapshot_delete(self.c, instance, self.volume_uuid, snapshot_id, self.delete_info_1) self.mox.VerifyAll() def test_volume_snapshot_delete_2(self): """Deleting older snapshot -- blockCommit.""" # libvirt lib doesn't have VIR_DOMAIN_BLOCK_COMMIT_RELATIVE fakelibvirt.__dict__.pop('VIR_DOMAIN_BLOCK_COMMIT_RELATIVE') self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt) instance = objects.Instance(**self.inst) snapshot_id = 'snapshot-1234' domain = FakeVirtDomain(fake_xml=self.dom_xml) self.mox.StubOutWithMock(domain, 'XMLDesc') domain.XMLDesc(flags=0).AndReturn(self.dom_xml) self.mox.StubOutWithMock(self.drvr._host, 'get_domain') self.mox.StubOutWithMock(self.drvr._host, 
'has_min_version') self.mox.StubOutWithMock(domain, 'blockRebase') self.mox.StubOutWithMock(domain, 'blockCommit') self.mox.StubOutWithMock(domain, 'blockJobInfo') self.drvr._host.get_domain(instance).AndReturn(domain) self.drvr._host.has_min_version(mox.IgnoreArg()).AndReturn(True) self.mox.ReplayAll() self.assertRaises(exception.Invalid, self.drvr._volume_snapshot_delete, self.c, instance, self.volume_uuid, snapshot_id, self.delete_info_2) fakelibvirt.__dict__.update({'VIR_DOMAIN_BLOCK_COMMIT_RELATIVE': 4}) def test_volume_snapshot_delete_relative_2(self): """Deleting older snapshot -- blockCommit using relative flag""" self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt) instance = objects.Instance(**self.inst) snapshot_id = 'snapshot-1234' domain = FakeVirtDomain(fake_xml=self.dom_xml) self.mox.StubOutWithMock(domain, 'XMLDesc') domain.XMLDesc(flags=0).AndReturn(self.dom_xml) self.mox.StubOutWithMock(self.drvr._host, 'get_domain') self.mox.StubOutWithMock(self.drvr._host, 'has_min_version') self.mox.StubOutWithMock(domain, 'blockRebase') self.mox.StubOutWithMock(domain, 'blockCommit') self.mox.StubOutWithMock(domain, 'blockJobInfo') self.drvr._host.get_domain(instance).AndReturn(domain) self.drvr._host.has_min_version(mox.IgnoreArg()).AndReturn(True) domain.blockCommit('vda', 'other-snap.img', 'snap.img', 0, flags=fakelibvirt.VIR_DOMAIN_BLOCK_COMMIT_RELATIVE) domain.blockJobInfo('vda', flags=0).AndReturn({'cur': 1, 'end': 1000}) domain.blockJobInfo('vda', flags=0).AndReturn({}) self.mox.ReplayAll() self.drvr._volume_snapshot_delete(self.c, instance, self.volume_uuid, snapshot_id, self.delete_info_2) self.mox.VerifyAll() def test_volume_snapshot_delete_nonrelative_null_base(self): # Deleting newest and last snapshot of a volume # with blockRebase. So base of the new image will be null. instance = objects.Instance(**self.inst) snapshot_id = 'snapshot-1234' domain = FakeVirtDomain(fake_xml=self.dom_xml) guest = libvirt_guest.Guest(domain) with contextlib.nested( mock.patch.object(domain, 'XMLDesc', return_value=self.dom_xml), mock.patch.object(self.drvr._host, 'get_guest', return_value=guest), mock.patch.object(self.drvr._host, 'has_min_version', return_value=True), mock.patch.object(domain, 'blockRebase'), mock.patch.object(domain, 'blockJobInfo', return_value={'cur': 1000, 'end': 1000}) ) as (mock_xmldesc, mock_get_guest, mock_has_min_version, mock_rebase, mock_job_info): self.drvr._volume_snapshot_delete(self.c, instance, self.volume_uuid, snapshot_id, self.delete_info_3) mock_xmldesc.assert_called_once_with(flags=0) mock_get_guest.assert_called_once_with(instance) mock_has_min_version.assert_called_once_with((1, 1, 1,)) mock_rebase.assert_called_once_with('vda', None, 0, flags=0) mock_job_info.assert_called_once_with('vda', flags=0) def test_volume_snapshot_delete_netdisk_nonrelative_null_base(self): # Deleting newest and last snapshot of a network attached volume # with blockRebase. So base of the new image will be null. 
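        # With a null base, blockRebase(dev, None, ...) flattens the active
        # image into a standalone file with no backing chain, which is why
        # the assertion below expects base=None.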
instance = objects.Instance(**self.inst) snapshot_id = 'snapshot-1234' domain = FakeVirtDomain(fake_xml=self.dom_netdisk_xml_2) guest = libvirt_guest.Guest(domain) with contextlib.nested( mock.patch.object(domain, 'XMLDesc', return_value=self.dom_netdisk_xml_2), mock.patch.object(self.drvr._host, 'get_guest', return_value=guest), mock.patch.object(self.drvr._host, 'has_min_version', return_value=True), mock.patch.object(domain, 'blockRebase'), mock.patch.object(domain, 'blockJobInfo', return_value={'cur': 1000, 'end': 1000}) ) as (mock_xmldesc, mock_get_guest, mock_has_min_version, mock_rebase, mock_job_info): self.drvr._volume_snapshot_delete(self.c, instance, self.volume_uuid, snapshot_id, self.delete_info_3) mock_xmldesc.assert_called_once_with(flags=0) mock_get_guest.assert_called_once_with(instance) mock_has_min_version.assert_called_once_with((1, 1, 1,)) mock_rebase.assert_called_once_with('vdb', None, 0, flags=0) mock_job_info.assert_called_once_with('vdb', flags=0) def test_volume_snapshot_delete_outer_success(self): instance = objects.Instance(**self.inst) snapshot_id = 'snapshot-1234' FakeVirtDomain(fake_xml=self.dom_xml) self.mox.StubOutWithMock(self.drvr._host, 'get_domain') self.mox.StubOutWithMock(self.drvr, '_volume_api') self.mox.StubOutWithMock(self.drvr, '_volume_snapshot_delete') self.drvr._volume_snapshot_delete(self.c, instance, self.volume_uuid, snapshot_id, delete_info=self.delete_info_1) self.drvr._volume_api.update_snapshot_status( self.c, snapshot_id, 'deleting') self.mox.StubOutWithMock(self.drvr, '_volume_refresh_connection_info') self.drvr._volume_refresh_connection_info(self.c, instance, self.volume_uuid) self.mox.ReplayAll() self.drvr.volume_snapshot_delete(self.c, instance, self.volume_uuid, snapshot_id, self.delete_info_1) self.mox.VerifyAll() def test_volume_snapshot_delete_outer_failure(self): instance = objects.Instance(**self.inst) snapshot_id = '1234-9876' FakeVirtDomain(fake_xml=self.dom_xml) self.mox.StubOutWithMock(self.drvr._host, 'get_domain') self.mox.StubOutWithMock(self.drvr, '_volume_api') self.mox.StubOutWithMock(self.drvr, '_volume_snapshot_delete') self.drvr._volume_snapshot_delete(self.c, instance, self.volume_uuid, snapshot_id, delete_info=self.delete_info_1).\ AndRaise(exception.NovaException('oops')) self.drvr._volume_api.update_snapshot_status( self.c, snapshot_id, 'error_deleting') self.mox.ReplayAll() self.assertRaises(exception.NovaException, self.drvr.volume_snapshot_delete, self.c, instance, self.volume_uuid, snapshot_id, self.delete_info_1) self.mox.VerifyAll() def test_volume_snapshot_delete_invalid_type(self): instance = objects.Instance(**self.inst) FakeVirtDomain(fake_xml=self.dom_xml) self.mox.StubOutWithMock(self.drvr._host, 'get_domain') self.mox.StubOutWithMock(self.drvr, '_volume_api') self.mox.StubOutWithMock(self.drvr._host, 'has_min_version') self.drvr._host.has_min_version(mox.IgnoreArg()).AndReturn(True) self.drvr._volume_api.update_snapshot_status( self.c, self.snapshot_id, 'error_deleting') self.mox.ReplayAll() self.assertRaises(exception.NovaException, self.drvr.volume_snapshot_delete, self.c, instance, self.volume_uuid, self.snapshot_id, self.delete_info_invalid_type) def test_volume_snapshot_delete_netdisk_1(self): """Delete newest snapshot -- blockRebase for libgfapi/network disk.""" class FakeNetdiskDomain(FakeVirtDomain): def __init__(self, *args, **kwargs): super(FakeNetdiskDomain, self).__init__(*args, **kwargs) def XMLDesc(self, flags): return self.dom_netdisk_xml # libvirt lib doesn't have 
VIR_DOMAIN_BLOCK_REBASE_RELATIVE fakelibvirt.__dict__.pop('VIR_DOMAIN_BLOCK_REBASE_RELATIVE') self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt) instance = objects.Instance(**self.inst) snapshot_id = 'snapshot-1234' domain = FakeNetdiskDomain(fake_xml=self.dom_netdisk_xml) self.mox.StubOutWithMock(domain, 'XMLDesc') domain.XMLDesc(flags=0).AndReturn(self.dom_netdisk_xml) self.mox.StubOutWithMock(self.drvr._host, 'get_domain') self.mox.StubOutWithMock(self.drvr._host, 'has_min_version') self.mox.StubOutWithMock(domain, 'blockRebase') self.mox.StubOutWithMock(domain, 'blockCommit') self.mox.StubOutWithMock(domain, 'blockJobInfo') self.drvr._host.get_domain(instance).AndReturn(domain) self.drvr._host.has_min_version(mox.IgnoreArg()).AndReturn(True) domain.blockRebase('vdb', 'vdb[1]', 0, flags=0) domain.blockJobInfo('vdb', flags=0).AndReturn({'cur': 1, 'end': 1000}) domain.blockJobInfo('vdb', flags=0).AndReturn( {'cur': 1000, 'end': 1000}) self.mox.ReplayAll() self.drvr._volume_snapshot_delete(self.c, instance, self.volume_uuid, snapshot_id, self.delete_info_1) self.mox.VerifyAll() fakelibvirt.__dict__.update({'VIR_DOMAIN_BLOCK_REBASE_RELATIVE': 8}) def test_volume_snapshot_delete_netdisk_relative_1(self): """Delete newest snapshot -- blockRebase for libgfapi/network disk.""" class FakeNetdiskDomain(FakeVirtDomain): def __init__(self, *args, **kwargs): super(FakeNetdiskDomain, self).__init__(*args, **kwargs) def XMLDesc(self, flags): return self.dom_netdisk_xml self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt) instance = objects.Instance(**self.inst) snapshot_id = 'snapshot-1234' domain = FakeNetdiskDomain(fake_xml=self.dom_netdisk_xml) self.mox.StubOutWithMock(domain, 'XMLDesc') domain.XMLDesc(flags=0).AndReturn(self.dom_netdisk_xml) self.mox.StubOutWithMock(self.drvr._host, 'get_domain') self.mox.StubOutWithMock(self.drvr._host, 'has_min_version') self.mox.StubOutWithMock(domain, 'blockRebase') self.mox.StubOutWithMock(domain, 'blockCommit') self.mox.StubOutWithMock(domain, 'blockJobInfo') self.drvr._host.get_domain(instance).AndReturn(domain) self.drvr._host.has_min_version(mox.IgnoreArg()).AndReturn(True) domain.blockRebase('vdb', 'vdb[1]', 0, flags=fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_RELATIVE) domain.blockJobInfo('vdb', flags=0).AndReturn({'cur': 1, 'end': 1000}) domain.blockJobInfo('vdb', flags=0).AndReturn( {'cur': 1000, 'end': 1000}) self.mox.ReplayAll() self.drvr._volume_snapshot_delete(self.c, instance, self.volume_uuid, snapshot_id, self.delete_info_1) self.mox.VerifyAll() def test_volume_snapshot_delete_netdisk_2(self): """Delete older snapshot -- blockCommit for libgfapi/network disk.""" class FakeNetdiskDomain(FakeVirtDomain): def __init__(self, *args, **kwargs): super(FakeNetdiskDomain, self).__init__(*args, **kwargs) def XMLDesc(self, flags): return self.dom_netdisk_xml # libvirt lib doesn't have VIR_DOMAIN_BLOCK_COMMIT_RELATIVE fakelibvirt.__dict__.pop('VIR_DOMAIN_BLOCK_COMMIT_RELATIVE') self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt) instance = objects.Instance(**self.inst) snapshot_id = 'snapshot-1234' domain = FakeNetdiskDomain(fake_xml=self.dom_netdisk_xml) self.mox.StubOutWithMock(domain, 'XMLDesc') domain.XMLDesc(flags=0).AndReturn(self.dom_netdisk_xml) self.mox.StubOutWithMock(self.drvr._host, 'get_domain') self.mox.StubOutWithMock(self.drvr._host, 'has_min_version') self.mox.StubOutWithMock(domain, 'blockRebase') self.mox.StubOutWithMock(domain, 'blockCommit') self.mox.StubOutWithMock(domain, 'blockJobInfo') 
self.drvr._host.get_domain(instance).AndReturn(domain) self.drvr._host.has_min_version(mox.IgnoreArg()).AndReturn(True) self.mox.ReplayAll() self.assertRaises(exception.Invalid, self.drvr._volume_snapshot_delete, self.c, instance, self.volume_uuid, snapshot_id, self.delete_info_netdisk) fakelibvirt.__dict__.update({'VIR_DOMAIN_BLOCK_COMMIT_RELATIVE': 4}) def test_volume_snapshot_delete_netdisk_relative_2(self): """Delete older snapshot -- blockCommit for libgfapi/network disk.""" class FakeNetdiskDomain(FakeVirtDomain): def __init__(self, *args, **kwargs): super(FakeNetdiskDomain, self).__init__(*args, **kwargs) def XMLDesc(self, flags): return self.dom_netdisk_xml self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt) instance = objects.Instance(**self.inst) snapshot_id = 'snapshot-1234' domain = FakeNetdiskDomain(fake_xml=self.dom_netdisk_xml) self.mox.StubOutWithMock(domain, 'XMLDesc') domain.XMLDesc(flags=0).AndReturn(self.dom_netdisk_xml) self.mox.StubOutWithMock(self.drvr._host, 'get_domain') self.mox.StubOutWithMock(self.drvr._host, 'has_min_version') self.mox.StubOutWithMock(domain, 'blockRebase') self.mox.StubOutWithMock(domain, 'blockCommit') self.mox.StubOutWithMock(domain, 'blockJobInfo') self.drvr._host.get_domain(instance).AndReturn(domain) self.drvr._host.has_min_version(mox.IgnoreArg()).AndReturn(True) domain.blockCommit('vdb', 'vdb[0]', 'vdb[1]', 0, flags=fakelibvirt.VIR_DOMAIN_BLOCK_COMMIT_RELATIVE) domain.blockJobInfo('vdb', flags=0).AndReturn({'cur': 1, 'end': 1000}) domain.blockJobInfo('vdb', flags=0).AndReturn( {'cur': 1000, 'end': 1000}) self.mox.ReplayAll() self.drvr._volume_snapshot_delete(self.c, instance, self.volume_uuid, snapshot_id, self.delete_info_netdisk) self.mox.VerifyAll() def _fake_convert_image(source, dest, out_format, run_as_root=True): libvirt_driver.libvirt_utils.files[dest] = '' class _BaseSnapshotTests(test.NoDBTestCase): def setUp(self): super(_BaseSnapshotTests, self).setUp() self.flags(snapshots_directory='./', group='libvirt') self.context = context.get_admin_context() self.useFixture(fixtures.MonkeyPatch( 'nova.virt.libvirt.driver.libvirt_utils', fake_libvirt_utils)) self.useFixture(fixtures.MonkeyPatch( 'nova.virt.libvirt.imagebackend.libvirt_utils', fake_libvirt_utils)) self.image_service = nova.tests.unit.image.fake.stub_out_image_service( self.stubs) self.mock_update_task_state = mock.Mock() test_instance = _create_test_instance() self.instance_ref = objects.Instance(**test_instance) self.instance_ref.info_cache = objects.InstanceInfoCache( network_info=None) def _assert_snapshot(self, snapshot, disk_format, expected_properties=None): self.mock_update_task_state.assert_has_calls([ mock.call(task_state=task_states.IMAGE_PENDING_UPLOAD), mock.call(task_state=task_states.IMAGE_UPLOADING, expected_state=task_states.IMAGE_PENDING_UPLOAD)]) props = snapshot['properties'] self.assertEqual(props['image_state'], 'available') self.assertEqual(snapshot['status'], 'active') self.assertEqual(snapshot['disk_format'], disk_format) self.assertEqual(snapshot['name'], 'test-snap') if expected_properties: for expected_key, expected_value in \ six.iteritems(expected_properties): self.assertEqual(expected_value, props[expected_key]) def _create_image(self, extra_properties=None): properties = {'instance_id': self.instance_ref['id'], 'user_id': str(self.context.user_id)} if extra_properties: properties.update(extra_properties) sent_meta = {'name': 'test-snap', 'is_public': False, 'status': 'creating', 'properties': properties} # Create new image. 
It will be updated in snapshot method # To work with it from snapshot, the single image_service is needed recv_meta = self.image_service.create(self.context, sent_meta) return recv_meta @mock.patch.object(imagebackend.Image, 'resolve_driver_format') @mock.patch.object(host.Host, 'get_domain') def _snapshot(self, image_id, mock_get_domain, mock_resolve): mock_get_domain.return_value = FakeVirtDomain() driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) driver.snapshot(self.context, self.instance_ref, image_id, self.mock_update_task_state) snapshot = self.image_service.show(self.context, image_id) return snapshot def _test_snapshot(self, disk_format, extra_properties=None): recv_meta = self._create_image(extra_properties=extra_properties) snapshot = self._snapshot(recv_meta['id']) self._assert_snapshot(snapshot, disk_format=disk_format, expected_properties=extra_properties) class LibvirtSnapshotTests(_BaseSnapshotTests): def test_ami(self): # Assign different image_ref from nova/images/fakes for testing ami self.instance_ref.image_ref = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77' self.instance_ref.system_metadata = \ utils.get_system_metadata_from_image( {'disk_format': 'ami'}) self._test_snapshot(disk_format='ami') @mock.patch.object(fake_libvirt_utils, 'disk_type', new='raw') @mock.patch.object(libvirt_driver.imagebackend.images, 'convert_image', side_effect=_fake_convert_image) def test_raw(self, mock_convert_image): self._test_snapshot(disk_format='raw') def test_qcow2(self): self._test_snapshot(disk_format='qcow2') def test_no_image_architecture(self): self.instance_ref.image_ref = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6' self._test_snapshot(disk_format='qcow2') def test_no_original_image(self): self.instance_ref.image_ref = '661122aa-1234-dede-fefe-babababababa' self._test_snapshot(disk_format='qcow2') def test_snapshot_metadata_image(self): # Assign an image with an architecture defined (x86_64) self.instance_ref.image_ref = 'a440c04b-79fa-479c-bed1-0b816eaec379' extra_properties = {'architecture': 'fake_arch', 'key_a': 'value_a', 'key_b': 'value_b', 'os_type': 'linux'} self._test_snapshot(disk_format='qcow2', extra_properties=extra_properties) class LXCSnapshotTests(LibvirtSnapshotTests): """Repeat all of the Libvirt snapshot tests, but with LXC enabled""" def setUp(self): super(LXCSnapshotTests, self).setUp() self.flags(virt_type='lxc', group='libvirt') class LVMSnapshotTests(_BaseSnapshotTests): @mock.patch.object(fake_libvirt_utils, 'disk_type', new='lvm') @mock.patch.object(libvirt_driver.imagebackend.images, 'convert_image', side_effect=_fake_convert_image) @mock.patch.object(libvirt_driver.imagebackend.lvm, 'volume_info') def _test_lvm_snapshot(self, disk_format, mock_volume_info, mock_convert_image): self.flags(images_type='lvm', images_volume_group='nova-vg', group='libvirt') self._test_snapshot(disk_format=disk_format) mock_volume_info.assert_has_calls([mock.call('/dev/nova-vg/lv')]) mock_convert_image.assert_called_once_with( '/dev/nova-vg/lv', mock.ANY, disk_format, run_as_root=True) def test_raw(self): self._test_lvm_snapshot('raw') def test_qcow2(self): self.flags(snapshot_image_format='qcow2', group='libvirt') self._test_lvm_snapshot('qcow2')
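
# --- Editor's note -------------------------------------------------------
# The *_relative_* tests above pop and restore the
# VIR_DOMAIN_BLOCK_COMMIT_RELATIVE / VIR_DOMAIN_BLOCK_REBASE_RELATIVE
# constants on fakelibvirt to simulate old python-libvirt bindings. A
# minimal sketch of the capability check they exercise is below; the
# helper name is hypothetical and this is not Nova's actual implementation.
def _require_relative_blockjob_flag(libvirt_mod, flag_name):
    """Return the relative block-job flag, raising Invalid when the
    installed libvirt bindings predate it (the non-relative *_2 tests
    above expect exactly this failure mode)."""
    flag = getattr(libvirt_mod, flag_name, None)
    if flag is None:
        raise exception.Invalid('libvirt is missing %s' % flag_name)
    return flag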
felixma/nova
nova/tests/unit/virt/libvirt/test_driver.py
Python
apache-2.0
670,986
#coding=utf-8
import sys
import optparse

if len(sys.argv) != 3:
    sys.stderr.write("usage: python %s inputfile outputfile\n" % sys.argv[0])
    # raise SystemExit(1)

p = optparse.OptionParser()
p.add_option("-o", action="store", dest="outfile")
p.add_option("--output", action="store", dest="outfile")
p.set_defaults(debug=False)

# parse the command line
opts, args = p.parse_args()
outfile = opts.outfile
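
# Example invocations (file names here are placeholders):
#
#   $ python std_in_out.py -o result.txt input.txt
#   $ python std_in_out.py --output result.txt input.txt
#
# After parse_args(), opts.outfile holds "result.txt" and args holds the
# remaining positional arguments, e.g. ["input.txt"]. Note that with the
# "-o" form len(sys.argv) is 4, so the usage warning above is printed to
# stderr but execution continues, since the SystemExit is commented out.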
wufengwhu/my_blog
exercise/io/std_in_out.py
Python
apache-2.0
399
import io import json import zlib from unittest import mock import pytest import aiohttp from aiohttp import payload from aiohttp.hdrs import (CONTENT_DISPOSITION, CONTENT_ENCODING, CONTENT_TRANSFER_ENCODING, CONTENT_TYPE) from aiohttp.helpers import parse_mimetype from aiohttp.multipart import MultipartResponseWrapper from aiohttp.streams import DEFAULT_LIMIT as stream_reader_default_limit from aiohttp.streams import StreamReader from aiohttp.test_utils import make_mocked_coro BOUNDARY = b'--:' @pytest.fixture def buf(): return bytearray() @pytest.fixture def stream(buf): writer = mock.Mock() async def write(chunk): buf.extend(chunk) writer.write.side_effect = write return writer @pytest.fixture def writer(): return aiohttp.MultipartWriter(boundary=':') class Response: def __init__(self, headers, content): self.headers = headers self.content = content class Stream: def __init__(self, content): self.content = io.BytesIO(content) async def read(self, size=None): return self.content.read(size) def at_eof(self): return self.content.tell() == len(self.content.getbuffer()) async def readline(self): return self.content.readline() def unread_data(self, data): self.content = io.BytesIO(data + self.content.read()) class StreamWithShortenRead(Stream): def __init__(self, content): self._first = True super().__init__(content) async def read(self, size=None): if size is not None and self._first: self._first = False size = size // 2 return await super().read(size) class TestMultipartResponseWrapper: def test_at_eof(self): wrapper = MultipartResponseWrapper(mock.Mock(), mock.Mock()) wrapper.at_eof() assert wrapper.resp.content.at_eof.called async def test_next(self): wrapper = MultipartResponseWrapper(mock.Mock(), mock.Mock()) wrapper.stream.next = make_mocked_coro(b'') wrapper.stream.at_eof.return_value = False await wrapper.next() assert wrapper.stream.next.called async def test_release(self): wrapper = MultipartResponseWrapper(mock.Mock(), mock.Mock()) wrapper.resp.release = make_mocked_coro(None) await wrapper.release() assert wrapper.resp.release.called async def test_release_when_stream_at_eof(self): wrapper = MultipartResponseWrapper(mock.Mock(), mock.Mock()) wrapper.resp.release = make_mocked_coro(None) wrapper.stream.next = make_mocked_coro(b'') wrapper.stream.at_eof.return_value = True await wrapper.next() assert wrapper.stream.next.called assert wrapper.resp.release.called class TestPartReader: async def test_next(self): obj = aiohttp.BodyPartReader( BOUNDARY, {}, Stream(b'Hello, world!\r\n--:')) result = await obj.next() assert b'Hello, world!' == result assert obj.at_eof() async def test_next_next(self): obj = aiohttp.BodyPartReader( BOUNDARY, {}, Stream(b'Hello, world!\r\n--:')) result = await obj.next() assert b'Hello, world!' == result assert obj.at_eof() result = await obj.next() assert result is None async def test_read(self): obj = aiohttp.BodyPartReader( BOUNDARY, {}, Stream(b'Hello, world!\r\n--:')) result = await obj.read() assert b'Hello, world!' == result assert obj.at_eof() async def test_read_chunk_at_eof(self): obj = aiohttp.BodyPartReader( BOUNDARY, {}, Stream(b'--:')) obj._at_eof = True result = await obj.read_chunk() assert b'' == result async def test_read_chunk_without_content_length(self): obj = aiohttp.BodyPartReader( BOUNDARY, {}, Stream(b'Hello, world!\r\n--:')) c1 = await obj.read_chunk(8) c2 = await obj.read_chunk(8) c3 = await obj.read_chunk(8) assert c1 + c2 == b'Hello, world!' 
assert c3 == b'' async def test_read_incomplete_chunk(self, loop): stream = Stream(b'') def prepare(data): f = loop.create_future() f.set_result(data) return f with mock.patch.object(stream, 'read', side_effect=[ prepare(b'Hello, '), prepare(b'World'), prepare(b'!\r\n--:'), prepare(b'') ]): obj = aiohttp.BodyPartReader( BOUNDARY, {}, stream) c1 = await obj.read_chunk(8) assert c1 == b'Hello, ' c2 = await obj.read_chunk(8) assert c2 == b'World' c3 = await obj.read_chunk(8) assert c3 == b'!' async def test_read_all_at_once(self): stream = Stream(b'Hello, World!\r\n--:--\r\n') obj = aiohttp.BodyPartReader(BOUNDARY, {}, stream) result = await obj.read_chunk() assert b'Hello, World!' == result result = await obj.read_chunk() assert b'' == result assert obj.at_eof() async def test_read_incomplete_body_chunked(self): stream = Stream(b'Hello, World!\r\n-') obj = aiohttp.BodyPartReader(BOUNDARY, {}, stream) result = b'' with pytest.raises(AssertionError): for _ in range(4): result += await obj.read_chunk(7) assert b'Hello, World!\r\n-' == result async def test_read_boundary_with_incomplete_chunk(self, loop): stream = Stream(b'') def prepare(data): f = loop.create_future() f.set_result(data) return f with mock.patch.object(stream, 'read', side_effect=[ prepare(b'Hello, World'), prepare(b'!\r\n'), prepare(b'--:'), prepare(b'') ]): obj = aiohttp.BodyPartReader( BOUNDARY, {}, stream) c1 = await obj.read_chunk(12) assert c1 == b'Hello, World' c2 = await obj.read_chunk(8) assert c2 == b'!' c3 = await obj.read_chunk(8) assert c3 == b'' async def test_multi_read_chunk(self): stream = Stream(b'Hello,\r\n--:\r\n\r\nworld!\r\n--:--') obj = aiohttp.BodyPartReader(BOUNDARY, {}, stream) result = await obj.read_chunk(8) assert b'Hello,' == result result = await obj.read_chunk(8) assert b'' == result assert obj.at_eof() async def test_read_chunk_properly_counts_read_bytes(self): expected = b'.' * 10 size = len(expected) obj = aiohttp.BodyPartReader( BOUNDARY, {'CONTENT-LENGTH': size}, StreamWithShortenRead(expected + b'\r\n--:--')) result = bytearray() while True: chunk = await obj.read_chunk() if not chunk: break result.extend(chunk) assert size == len(result) assert b'.' * size == result assert obj.at_eof() async def test_read_does_not_read_boundary(self): stream = Stream(b'Hello, world!\r\n--:') obj = aiohttp.BodyPartReader( BOUNDARY, {}, stream) result = await obj.read() assert b'Hello, world!' == result assert b'--:' == (await stream.read()) async def test_multiread(self): obj = aiohttp.BodyPartReader( BOUNDARY, {}, Stream(b'Hello,\r\n--:\r\n\r\nworld!\r\n--:--')) result = await obj.read() assert b'Hello,' == result result = await obj.read() assert b'' == result assert obj.at_eof() async def test_read_multiline(self): obj = aiohttp.BodyPartReader( BOUNDARY, {}, Stream(b'Hello\n,\r\nworld!\r\n--:--')) result = await obj.read() assert b'Hello\n,\r\nworld!' == result result = await obj.read() assert b'' == result assert obj.at_eof() async def test_read_respects_content_length(self): obj = aiohttp.BodyPartReader( BOUNDARY, {'CONTENT-LENGTH': 100500}, Stream(b'.' * 100500 + b'\r\n--:--')) result = await obj.read() assert b'.' * 100500 == result assert obj.at_eof() async def test_read_with_content_encoding_gzip(self): obj = aiohttp.BodyPartReader( BOUNDARY, {CONTENT_ENCODING: 'gzip'}, Stream(b'\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\x03\x0b\xc9\xccMU' b'(\xc9W\x08J\xcdI\xacP\x04\x00$\xfb\x9eV\x0e\x00\x00\x00' b'\r\n--:--')) result = await obj.read(decode=True) assert b'Time to Relax!' 
== result async def test_read_with_content_encoding_deflate(self): obj = aiohttp.BodyPartReader( BOUNDARY, {CONTENT_ENCODING: 'deflate'}, Stream(b'\x0b\xc9\xccMU(\xc9W\x08J\xcdI\xacP\x04\x00\r\n--:--')) result = await obj.read(decode=True) assert b'Time to Relax!' == result async def test_read_with_content_encoding_identity(self): thing = (b'\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\x03\x0b\xc9\xccMU' b'(\xc9W\x08J\xcdI\xacP\x04\x00$\xfb\x9eV\x0e\x00\x00\x00' b'\r\n') obj = aiohttp.BodyPartReader( BOUNDARY, {CONTENT_ENCODING: 'identity'}, Stream(thing + b'--:--')) result = await obj.read(decode=True) assert thing[:-2] == result async def test_read_with_content_encoding_unknown(self): obj = aiohttp.BodyPartReader( BOUNDARY, {CONTENT_ENCODING: 'snappy'}, Stream(b'\x0e4Time to Relax!\r\n--:--')) with pytest.raises(RuntimeError): await obj.read(decode=True) async def test_read_with_content_transfer_encoding_base64(self): obj = aiohttp.BodyPartReader( BOUNDARY, {CONTENT_TRANSFER_ENCODING: 'base64'}, Stream(b'VGltZSB0byBSZWxheCE=\r\n--:--')) result = await obj.read(decode=True) assert b'Time to Relax!' == result async def test_read_with_content_transfer_encoding_quoted_printable(self): obj = aiohttp.BodyPartReader( BOUNDARY, {CONTENT_TRANSFER_ENCODING: 'quoted-printable'}, Stream(b'=D0=9F=D1=80=D0=B8=D0=B2=D0=B5=D1=82,' b' =D0=BC=D0=B8=D1=80!\r\n--:--')) result = await obj.read(decode=True) expected = (b'\xd0\x9f\xd1\x80\xd0\xb8\xd0\xb2\xd0\xb5\xd1\x82,' b' \xd0\xbc\xd0\xb8\xd1\x80!') assert result == expected @pytest.mark.parametrize('encoding', ('binary', '8bit', '7bit')) async def test_read_with_content_transfer_encoding_binary(self, encoding): data = b'\xd0\x9f\xd1\x80\xd0\xb8\xd0\xb2\xd0\xb5\xd1\x82,' \ b' \xd0\xbc\xd0\xb8\xd1\x80!' obj = aiohttp.BodyPartReader( BOUNDARY, {CONTENT_TRANSFER_ENCODING: encoding}, Stream(data + b'\r\n--:--')) result = await obj.read(decode=True) assert data == result async def test_read_with_content_transfer_encoding_unknown(self): obj = aiohttp.BodyPartReader( BOUNDARY, {CONTENT_TRANSFER_ENCODING: 'unknown'}, Stream(b'\x0e4Time to Relax!\r\n--:--')) with pytest.raises(RuntimeError): await obj.read(decode=True) async def test_read_text(self): obj = aiohttp.BodyPartReader( BOUNDARY, {}, Stream(b'Hello, world!\r\n--:--')) result = await obj.text() assert 'Hello, world!' == result async def test_read_text_default_encoding(self): obj = aiohttp.BodyPartReader( BOUNDARY, {}, Stream('Привет, Мир!\r\n--:--'.encode('utf-8'))) result = await obj.text() assert 'Привет, Мир!' == result async def test_read_text_encoding(self): obj = aiohttp.BodyPartReader( BOUNDARY, {}, Stream('Привет, Мир!\r\n--:--'.encode('cp1251'))) result = await obj.text(encoding='cp1251') assert 'Привет, Мир!' == result async def test_read_text_guess_encoding(self): obj = aiohttp.BodyPartReader( BOUNDARY, {CONTENT_TYPE: 'text/plain;charset=cp1251'}, Stream('Привет, Мир!\r\n--:--'.encode('cp1251'))) result = await obj.text() assert 'Привет, Мир!' == result async def test_read_text_compressed(self): obj = aiohttp.BodyPartReader( BOUNDARY, {CONTENT_ENCODING: 'deflate', CONTENT_TYPE: 'text/plain'}, Stream(b'\x0b\xc9\xccMU(\xc9W\x08J\xcdI\xacP\x04\x00\r\n--:--')) result = await obj.text() assert 'Time to Relax!' 
== result async def test_read_text_while_closed(self): obj = aiohttp.BodyPartReader( BOUNDARY, {CONTENT_TYPE: 'text/plain'}, Stream(b'')) obj._at_eof = True result = await obj.text() assert '' == result async def test_read_json(self): obj = aiohttp.BodyPartReader( BOUNDARY, {CONTENT_TYPE: 'application/json'}, Stream(b'{"test": "passed"}\r\n--:--')) result = await obj.json() assert {'test': 'passed'} == result async def test_read_json_encoding(self): obj = aiohttp.BodyPartReader( BOUNDARY, {CONTENT_TYPE: 'application/json'}, Stream('{"тест": "пассед"}\r\n--:--'.encode('cp1251'))) result = await obj.json(encoding='cp1251') assert {'тест': 'пассед'} == result async def test_read_json_guess_encoding(self): obj = aiohttp.BodyPartReader( BOUNDARY, {CONTENT_TYPE: 'application/json; charset=cp1251'}, Stream('{"тест": "пассед"}\r\n--:--'.encode('cp1251'))) result = await obj.json() assert {'тест': 'пассед'} == result async def test_read_json_compressed(self): obj = aiohttp.BodyPartReader( BOUNDARY, {CONTENT_ENCODING: 'deflate', CONTENT_TYPE: 'application/json'}, Stream(b'\xabV*I-.Q\xb2RP*H,.NMQ\xaa\x05\x00\r\n--:--')) result = await obj.json() assert {'test': 'passed'} == result async def test_read_json_while_closed(self): stream = Stream(b'') obj = aiohttp.BodyPartReader( BOUNDARY, {CONTENT_TYPE: 'application/json'}, stream) obj._at_eof = True result = await obj.json() assert result is None async def test_read_form(self): obj = aiohttp.BodyPartReader( BOUNDARY, {CONTENT_TYPE: 'application/x-www-form-urlencoded'}, Stream(b'foo=bar&foo=baz&boo=\r\n--:--')) result = await obj.form() assert [('foo', 'bar'), ('foo', 'baz'), ('boo', '')] == result async def test_read_form_encoding(self): obj = aiohttp.BodyPartReader( BOUNDARY, {CONTENT_TYPE: 'application/x-www-form-urlencoded'}, Stream('foo=bar&foo=baz&boo=\r\n--:--'.encode('cp1251'))) result = await obj.form(encoding='cp1251') assert [('foo', 'bar'), ('foo', 'baz'), ('boo', '')] == result async def test_read_form_guess_encoding(self): obj = aiohttp.BodyPartReader( BOUNDARY, {CONTENT_TYPE: 'application/x-www-form-urlencoded; charset=utf-8'}, Stream('foo=bar&foo=baz&boo=\r\n--:--'.encode('utf-8'))) result = await obj.form() assert [('foo', 'bar'), ('foo', 'baz'), ('boo', '')] == result async def test_read_form_while_closed(self): stream = Stream(b'') obj = aiohttp.BodyPartReader( BOUNDARY, {CONTENT_TYPE: 'application/x-www-form-urlencoded'}, stream) obj._at_eof = True result = await obj.form() assert result is None async def test_readline(self): obj = aiohttp.BodyPartReader( BOUNDARY, {}, Stream(b'Hello\n,\r\nworld!\r\n--:--')) result = await obj.readline() assert b'Hello\n' == result result = await obj.readline() assert b',\r\n' == result result = await obj.readline() assert b'world!' == result result = await obj.readline() assert b'' == result assert obj.at_eof() async def test_release(self): stream = Stream(b'Hello,\r\n--:\r\n\r\nworld!\r\n--:--') obj = aiohttp.BodyPartReader( BOUNDARY, {}, stream) await obj.release() assert obj.at_eof() assert b'--:\r\n\r\nworld!\r\n--:--' == stream.content.read() async def test_release_respects_content_length(self): obj = aiohttp.BodyPartReader( BOUNDARY, {'CONTENT-LENGTH': 100500}, Stream(b'.' 
* 100500 + b'\r\n--:--')) result = await obj.release() assert result is None assert obj.at_eof() async def test_release_release(self): stream = Stream(b'Hello,\r\n--:\r\n\r\nworld!\r\n--:--') obj = aiohttp.BodyPartReader( BOUNDARY, {}, stream) await obj.release() await obj.release() assert b'--:\r\n\r\nworld!\r\n--:--' == stream.content.read() async def test_filename(self): part = aiohttp.BodyPartReader( BOUNDARY, {CONTENT_DISPOSITION: 'attachment; filename=foo.html'}, None) assert 'foo.html' == part.filename async def test_reading_long_part(self): size = 2 * stream_reader_default_limit protocol = mock.Mock(_reading_paused=False) stream = StreamReader(protocol) stream.feed_data(b'0' * size + b'\r\n--:--') stream.feed_eof() obj = aiohttp.BodyPartReader( BOUNDARY, {}, stream) data = await obj.read() assert len(data) == size class TestMultipartReader: def test_from_response(self): resp = Response({CONTENT_TYPE: 'multipart/related;boundary=":"'}, Stream(b'--:\r\n\r\nhello\r\n--:--')) res = aiohttp.MultipartReader.from_response(resp) assert isinstance(res, MultipartResponseWrapper) assert isinstance(res.stream, aiohttp.MultipartReader) def test_bad_boundary(self): resp = Response( {CONTENT_TYPE: 'multipart/related;boundary=' + 'a' * 80}, Stream(b'')) with pytest.raises(ValueError): aiohttp.MultipartReader.from_response(resp) def test_dispatch(self): reader = aiohttp.MultipartReader( {CONTENT_TYPE: 'multipart/related;boundary=":"'}, Stream(b'--:\r\n\r\necho\r\n--:--')) res = reader._get_part_reader({CONTENT_TYPE: 'text/plain'}) assert isinstance(res, reader.part_reader_cls) def test_dispatch_bodypart(self): reader = aiohttp.MultipartReader( {CONTENT_TYPE: 'multipart/related;boundary=":"'}, Stream(b'--:\r\n\r\necho\r\n--:--')) res = reader._get_part_reader({CONTENT_TYPE: 'text/plain'}) assert isinstance(res, reader.part_reader_cls) def test_dispatch_multipart(self): reader = aiohttp.MultipartReader( {CONTENT_TYPE: 'multipart/related;boundary=":"'}, Stream(b'----:--\r\n' b'\r\n' b'test\r\n' b'----:--\r\n' b'\r\n' b'passed\r\n' b'----:----\r\n' b'--:--')) res = reader._get_part_reader( {CONTENT_TYPE: 'multipart/related;boundary=--:--'}) assert isinstance(res, reader.__class__) def test_dispatch_custom_multipart_reader(self): class CustomReader(aiohttp.MultipartReader): pass reader = aiohttp.MultipartReader( {CONTENT_TYPE: 'multipart/related;boundary=":"'}, Stream(b'----:--\r\n' b'\r\n' b'test\r\n' b'----:--\r\n' b'\r\n' b'passed\r\n' b'----:----\r\n' b'--:--')) reader.multipart_reader_cls = CustomReader res = reader._get_part_reader( {CONTENT_TYPE: 'multipart/related;boundary=--:--'}) assert isinstance(res, CustomReader) async def test_emit_next(self): reader = aiohttp.MultipartReader( {CONTENT_TYPE: 'multipart/related;boundary=":"'}, Stream(b'--:\r\n\r\necho\r\n--:--')) res = await reader.next() assert isinstance(res, reader.part_reader_cls) async def test_invalid_boundary(self): reader = aiohttp.MultipartReader( {CONTENT_TYPE: 'multipart/related;boundary=":"'}, Stream(b'---:\r\n\r\necho\r\n---:--')) with pytest.raises(ValueError): await reader.next() async def test_release(self): reader = aiohttp.MultipartReader( {CONTENT_TYPE: 'multipart/mixed;boundary=":"'}, Stream(b'--:\r\n' b'Content-Type: multipart/related;boundary=--:--\r\n' b'\r\n' b'----:--\r\n' b'\r\n' b'test\r\n' b'----:--\r\n' b'\r\n' b'passed\r\n' b'----:----\r\n' b'\r\n' b'--:--')) await reader.release() assert reader.at_eof() async def test_release_release(self): reader = aiohttp.MultipartReader( {CONTENT_TYPE: 
'multipart/related;boundary=":"'}, Stream(b'--:\r\n\r\necho\r\n--:--')) await reader.release() assert reader.at_eof() await reader.release() assert reader.at_eof() async def test_release_next(self): reader = aiohttp.MultipartReader( {CONTENT_TYPE: 'multipart/related;boundary=":"'}, Stream(b'--:\r\n\r\necho\r\n--:--')) await reader.release() assert reader.at_eof() res = await reader.next() assert res is None async def test_second_next_releases_previous_object(self): reader = aiohttp.MultipartReader( {CONTENT_TYPE: 'multipart/related;boundary=":"'}, Stream(b'--:\r\n' b'\r\n' b'test\r\n' b'--:\r\n' b'\r\n' b'passed\r\n' b'--:--')) first = await reader.next() assert isinstance(first, aiohttp.BodyPartReader) second = await reader.next() assert first.at_eof() assert not second.at_eof() async def test_release_without_read_the_last_object(self): reader = aiohttp.MultipartReader( {CONTENT_TYPE: 'multipart/related;boundary=":"'}, Stream(b'--:\r\n' b'\r\n' b'test\r\n' b'--:\r\n' b'\r\n' b'passed\r\n' b'--:--')) first = await reader.next() second = await reader.next() third = await reader.next() assert first.at_eof() assert second.at_eof() assert second.at_eof() assert third is None async def test_read_chunk_by_length_doesnt_breaks_reader(self): reader = aiohttp.MultipartReader( {CONTENT_TYPE: 'multipart/related;boundary=":"'}, Stream(b'--:\r\n' b'Content-Length: 4\r\n\r\n' b'test' b'\r\n--:\r\n' b'Content-Length: 6\r\n\r\n' b'passed' b'\r\n--:--')) body_parts = [] while True: read_part = b'' part = await reader.next() if part is None: break while not part.at_eof(): read_part += await part.read_chunk(3) body_parts.append(read_part) assert body_parts == [b'test', b'passed'] async def test_read_chunk_from_stream_doesnt_breaks_reader(self): reader = aiohttp.MultipartReader( {CONTENT_TYPE: 'multipart/related;boundary=":"'}, Stream(b'--:\r\n' b'\r\n' b'chunk' b'\r\n--:\r\n' b'\r\n' b'two_chunks' b'\r\n--:--')) body_parts = [] while True: read_part = b'' part = await reader.next() if part is None: break while not part.at_eof(): chunk = await part.read_chunk(5) assert chunk read_part += chunk body_parts.append(read_part) assert body_parts == [b'chunk', b'two_chunks'] async def test_reading_skips_prelude(self): reader = aiohttp.MultipartReader( {CONTENT_TYPE: 'multipart/related;boundary=":"'}, Stream(b'Multi-part data is not supported.\r\n' b'\r\n' b'--:\r\n' b'\r\n' b'test\r\n' b'--:\r\n' b'\r\n' b'passed\r\n' b'--:--')) first = await reader.next() assert isinstance(first, aiohttp.BodyPartReader) second = await reader.next() assert first.at_eof() assert not second.at_eof() async def test_writer(writer): assert writer.size == 0 assert writer.boundary == ':' async def test_writer_serialize_io_chunk(buf, stream, writer): flo = io.BytesIO(b'foobarbaz') writer.append(flo) await writer.write(stream) assert (buf == b'--:\r\nContent-Type: application/octet-stream' b'\r\nContent-Length: 9\r\n\r\nfoobarbaz\r\n--:--\r\n') async def test_writer_serialize_json(buf, stream, writer): writer.append_json({'привет': 'мир'}) await writer.write(stream) assert (b'{"\\u043f\\u0440\\u0438\\u0432\\u0435\\u0442":' b' "\\u043c\\u0438\\u0440"}' in buf) async def test_writer_serialize_form(buf, stream, writer): data = [('foo', 'bar'), ('foo', 'baz'), ('boo', 'zoo')] writer.append_form(data) await writer.write(stream) assert (b'foo=bar&foo=baz&boo=zoo' in buf) async def test_writer_serialize_form_dict(buf, stream, writer): data = {'hello': 'мир'} writer.append_form(data) await writer.write(stream) assert (b'hello=%D0%BC%D0%B8%D1%80' in 
buf) async def test_writer_write(buf, stream, writer): writer.append('foo-bar-baz') writer.append_json({'test': 'passed'}) writer.append_form({'test': 'passed'}) writer.append_form([('one', 1), ('two', 2)]) sub_multipart = aiohttp.MultipartWriter(boundary='::') sub_multipart.append('nested content') sub_multipart.headers['X-CUSTOM'] = 'test' writer.append(sub_multipart) await writer.write(stream) assert ( (b'--:\r\n' b'Content-Type: text/plain; charset=utf-8\r\n' b'Content-Length: 11\r\n\r\n' b'foo-bar-baz' b'\r\n' b'--:\r\n' b'Content-Type: application/json\r\n' b'Content-Length: 18\r\n\r\n' b'{"test": "passed"}' b'\r\n' b'--:\r\n' b'Content-Type: application/x-www-form-urlencoded\r\n' b'Content-Length: 11\r\n\r\n' b'test=passed' b'\r\n' b'--:\r\n' b'Content-Type: application/x-www-form-urlencoded\r\n' b'Content-Length: 11\r\n\r\n' b'one=1&two=2' b'\r\n' b'--:\r\n' b'Content-Type: multipart/mixed; boundary="::"\r\n' b'X-CUSTOM: test\r\nContent-Length: 93\r\n\r\n' b'--::\r\n' b'Content-Type: text/plain; charset=utf-8\r\n' b'Content-Length: 14\r\n\r\n' b'nested content\r\n' b'--::--\r\n' b'\r\n' b'--:--\r\n') == bytes(buf)) async def test_writer_write_no_close_boundary(buf, stream): writer = aiohttp.MultipartWriter(boundary=':') writer.append('foo-bar-baz') writer.append_json({'test': 'passed'}) writer.append_form({'test': 'passed'}) writer.append_form([('one', 1), ('two', 2)]) await writer.write(stream, close_boundary=False) assert ( (b'--:\r\n' b'Content-Type: text/plain; charset=utf-8\r\n' b'Content-Length: 11\r\n\r\n' b'foo-bar-baz' b'\r\n' b'--:\r\n' b'Content-Type: application/json\r\n' b'Content-Length: 18\r\n\r\n' b'{"test": "passed"}' b'\r\n' b'--:\r\n' b'Content-Type: application/x-www-form-urlencoded\r\n' b'Content-Length: 11\r\n\r\n' b'test=passed' b'\r\n' b'--:\r\n' b'Content-Type: application/x-www-form-urlencoded\r\n' b'Content-Length: 11\r\n\r\n' b'one=1&two=2' b'\r\n') == bytes(buf)) async def test_writer_serialize_with_content_encoding_gzip(buf, stream, writer): writer.append('Time to Relax!', {CONTENT_ENCODING: 'gzip'}) await writer.write(stream) headers, message = bytes(buf).split(b'\r\n\r\n', 1) assert (b'--:\r\nContent-Encoding: gzip\r\n' b'Content-Type: text/plain; charset=utf-8' == headers) decompressor = zlib.decompressobj(wbits=16+zlib.MAX_WBITS) data = decompressor.decompress(message.split(b'\r\n')[0]) data += decompressor.flush() assert b'Time to Relax!' 
== data async def test_writer_serialize_with_content_encoding_deflate(buf, stream, writer): writer.append('Time to Relax!', {CONTENT_ENCODING: 'deflate'}) await writer.write(stream) headers, message = bytes(buf).split(b'\r\n\r\n', 1) assert (b'--:\r\nContent-Encoding: deflate\r\n' b'Content-Type: text/plain; charset=utf-8' == headers) thing = b'\x0b\xc9\xccMU(\xc9W\x08J\xcdI\xacP\x04\x00\r\n--:--\r\n' assert thing == message async def test_writer_serialize_with_content_encoding_identity(buf, stream, writer): thing = b'\x0b\xc9\xccMU(\xc9W\x08J\xcdI\xacP\x04\x00' writer.append(thing, {CONTENT_ENCODING: 'identity'}) await writer.write(stream) headers, message = bytes(buf).split(b'\r\n\r\n', 1) assert (b'--:\r\nContent-Encoding: identity\r\n' b'Content-Type: application/octet-stream\r\n' b'Content-Length: 16' == headers) assert thing == message.split(b'\r\n')[0] def test_writer_serialize_with_content_encoding_unknown(buf, stream, writer): with pytest.raises(RuntimeError): writer.append('Time to Relax!', {CONTENT_ENCODING: 'snappy'}) async def test_writer_with_content_transfer_encoding_base64(buf, stream, writer): writer.append('Time to Relax!', {CONTENT_TRANSFER_ENCODING: 'base64'}) await writer.write(stream) headers, message = bytes(buf).split(b'\r\n\r\n', 1) assert (b'--:\r\nContent-Transfer-Encoding: base64\r\n' b'Content-Type: text/plain; charset=utf-8' == headers) assert b'VGltZSB0byBSZWxheCE=' == message.split(b'\r\n')[0] async def test_writer_content_transfer_encoding_quote_printable(buf, stream, writer): writer.append('Привет, мир!', {CONTENT_TRANSFER_ENCODING: 'quoted-printable'}) await writer.write(stream) headers, message = bytes(buf).split(b'\r\n\r\n', 1) assert (b'--:\r\nContent-Transfer-Encoding: quoted-printable\r\n' b'Content-Type: text/plain; charset=utf-8' == headers) assert (b'=D0=9F=D1=80=D0=B8=D0=B2=D0=B5=D1=82,' b' =D0=BC=D0=B8=D1=80!' 
== message.split(b'\r\n')[0]) def test_writer_content_transfer_encoding_unknown(buf, stream, writer): with pytest.raises(RuntimeError): writer.append('Time to Relax!', {CONTENT_TRANSFER_ENCODING: 'unknown'}) class TestMultipartWriter: def test_default_subtype(self, writer): mimetype = parse_mimetype(writer.headers.get(CONTENT_TYPE)) assert 'multipart' == mimetype.type assert 'mixed' == mimetype.subtype def test_unquoted_boundary(self): writer = aiohttp.MultipartWriter(boundary='abc123') expected = {CONTENT_TYPE: 'multipart/mixed; boundary=abc123'} assert expected == writer.headers def test_quoted_boundary(self): writer = aiohttp.MultipartWriter(boundary=R'\"') expected = {CONTENT_TYPE: R'multipart/mixed; boundary="\\\""'} assert expected == writer.headers def test_bad_boundary(self): with pytest.raises(ValueError): aiohttp.MultipartWriter(boundary='тест') with pytest.raises(ValueError): aiohttp.MultipartWriter(boundary='test\n') def test_default_headers(self, writer): expected = {CONTENT_TYPE: 'multipart/mixed; boundary=":"'} assert expected == writer.headers def test_iter_parts(self, writer): writer.append('foo') writer.append('bar') writer.append('baz') assert 3 == len(list(writer)) def test_append(self, writer): assert 0 == len(writer) writer.append('hello, world!') assert 1 == len(writer) assert isinstance(writer._parts[0][0], payload.Payload) def test_append_with_headers(self, writer): writer.append('hello, world!', {'x-foo': 'bar'}) assert 1 == len(writer) assert 'x-foo' in writer._parts[0][0].headers assert writer._parts[0][0].headers['x-foo'] == 'bar' def test_append_json(self, writer): writer.append_json({'foo': 'bar'}) assert 1 == len(writer) part = writer._parts[0][0] assert part.headers[CONTENT_TYPE] == 'application/json' def test_append_part(self, writer): part = payload.get_payload( 'test', headers={CONTENT_TYPE: 'text/plain'}) writer.append(part, {CONTENT_TYPE: 'test/passed'}) assert 1 == len(writer) part = writer._parts[0][0] assert part.headers[CONTENT_TYPE] == 'test/passed' def test_append_json_overrides_content_type(self, writer): writer.append_json({'foo': 'bar'}, {CONTENT_TYPE: 'test/passed'}) assert 1 == len(writer) part = writer._parts[0][0] assert part.headers[CONTENT_TYPE] == 'test/passed' def test_append_form(self, writer): writer.append_form({'foo': 'bar'}, {CONTENT_TYPE: 'test/passed'}) assert 1 == len(writer) part = writer._parts[0][0] assert part.headers[CONTENT_TYPE] == 'test/passed' def test_append_multipart(self, writer): subwriter = aiohttp.MultipartWriter(boundary=':') subwriter.append_json({'foo': 'bar'}) writer.append(subwriter, {CONTENT_TYPE: 'test/passed'}) assert 1 == len(writer) part = writer._parts[0][0] assert part.headers[CONTENT_TYPE] == 'test/passed' async def test_write(self, writer, stream): await writer.write(stream) def test_with(self): with aiohttp.MultipartWriter(boundary=':') as writer: writer.append('foo') writer.append(b'bar') writer.append_json({'baz': True}) assert 3 == len(writer) def test_append_int_not_allowed(self): with pytest.raises(TypeError): with aiohttp.MultipartWriter(boundary=':') as writer: writer.append(1) def test_append_float_not_allowed(self): with pytest.raises(TypeError): with aiohttp.MultipartWriter(boundary=':') as writer: writer.append(1.1) def test_append_none_not_allowed(self): with pytest.raises(TypeError): with aiohttp.MultipartWriter(boundary=':') as writer: writer.append(None) async def test_async_for_reader(loop): data = [ {"test": "passed"}, 42, b'plain text', b'aiohttp\n', b'no epilogue'] reader = 
aiohttp.MultipartReader( headers={CONTENT_TYPE: 'multipart/mixed; boundary=":"'}, content=Stream(b'\r\n'.join([ b'--:', b'Content-Type: application/json', b'', json.dumps(data[0]).encode(), b'--:', b'Content-Type: application/json', b'', json.dumps(data[1]).encode(), b'--:', b'Content-Type: multipart/related; boundary="::"', b'', b'--::', b'Content-Type: text/plain', b'', data[2], b'--::', b'Content-Disposition: attachment; filename="aiohttp"', b'Content-Type: text/plain', b'Content-Length: 28', b'Content-Encoding: gzip', b'', b'\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\x03K\xcc\xcc\xcf())' b'\xe0\x02\x00\xd6\x90\xe2O\x08\x00\x00\x00', b'--::', b'Content-Type: multipart/related; boundary=":::"', b'', b'--:::', b'Content-Type: text/plain', b'', data[4], b'--:::--', b'--::--', b'', b'--:--', b'']))) idata = iter(data) async def check(reader): async for part in reader: if isinstance(part, aiohttp.BodyPartReader): if part.headers[CONTENT_TYPE] == 'application/json': assert next(idata) == (await part.json()) else: assert next(idata) == await part.read(decode=True) else: await check(part) await check(reader) async def test_async_for_bodypart(loop): part = aiohttp.BodyPartReader( boundary=b'--:', headers={}, content=Stream(b'foobarbaz\r\n--:--')) async for data in part: assert data == b'foobarbaz'
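
# --- Editor's note -------------------------------------------------------
# A minimal writer -> bytes -> reader round trip distilled from the tests
# above. It reuses this module's Stream helper and only aiohttp APIs the
# suite already exercises; the function name is illustrative and the
# snippet is a sketch, not part of the original test suite.
async def _multipart_roundtrip_example():
    buf = bytearray()
    sink = mock.Mock()

    async def _write(chunk):
        buf.extend(chunk)

    sink.write.side_effect = _write

    # Serialize two parts, closing the multipart body with the final
    # boundary (close_boundary defaults to True).
    with aiohttp.MultipartWriter(boundary=':') as mp_writer:
        mp_writer.append('hello')
        mp_writer.append_json({'ok': True})
        await mp_writer.write(sink)

    # Parse the serialized bytes back with MultipartReader.
    reader = aiohttp.MultipartReader(
        {CONTENT_TYPE: 'multipart/mixed; boundary=":"'},
        Stream(bytes(buf)))
    part = await reader.next()
    assert (await part.text()) == 'hello'
    part = await reader.next()
    assert (await part.json()) == {'ok': True}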
rutsky/aiohttp
tests/test_multipart.py
Python
apache-2.0
38,605
#!/usr/bin/env python
import os

from MentalUs import create_app, db
from MentalUs.models import MTUser, MTScale, MTAnnouncement, \
    MTExtendFields, MTUserExtendInfo, MTScaleResult, MTUnfinishedScale
from flask.ext.script import Manager, Shell
from flask.ext.migrate import Migrate, MigrateCommand

app = create_app('dev')
manager = Manager(app)
migrate = Migrate(app, db)


def generate_debug():
    MTUser.generate_debug()
    MTScale.generate_debug()
    MTAnnouncement.generate_debug()
    MTExtendFields.generate_debug()
    MTScaleResult.generate_debug()
    MTUnfinishedScale.generate_debug()


def make_shell_context():
    config_dict = {
        'MTUser': MTUser,
        'MTScale': MTScale,
        'MTAnnouncement': MTAnnouncement,
        'MTExtendFields': MTExtendFields,
        'MTUserExtendInfo': MTUserExtendInfo,
        'MTScaleResult': MTScaleResult,
        'MTUnfinishedScale': MTUnfinishedScale,
        'db': db,
        'generate_debug': generate_debug
    }
    return config_dict

manager.add_command('shell', Shell(make_context=make_shell_context))
manager.add_command('db', MigrateCommand)

if __name__ == '__main__':
    manager.run()
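
# Typical invocations (illustrative):
#
#   $ python manage.py shell       # REPL preloaded with the models, db and
#                                  #   generate_debug() via make_shell_context
#   $ python manage.py db migrate  # Flask-Migrate commands exposed as "db"
#   $ python manage.py db upgrade
#   $ python manage.py runserver   # default command provided by Manager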
realityone/MentalUs
manage.py
Python
apache-2.0
1,169
# Copyright 2015 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import re import six import warnings from cassandra.cqlengine import CQLEngineException, ValidationError from cassandra.cqlengine import columns from cassandra.cqlengine import connection from cassandra.cqlengine import query from cassandra.cqlengine.query import DoesNotExist as _DoesNotExist from cassandra.cqlengine.query import MultipleObjectsReturned as _MultipleObjectsReturned from cassandra.metadata import protect_name from cassandra.util import OrderedDict log = logging.getLogger(__name__) class ModelException(CQLEngineException): pass class ModelDefinitionException(ModelException): pass class PolymorphicModelException(ModelException): pass class UndefinedKeyspaceWarning(Warning): pass DEFAULT_KEYSPACE = None class hybrid_classmethod(object): """ Allows a method to behave as both a class method and normal instance method depending on how it's called """ def __init__(self, clsmethod, instmethod): self.clsmethod = clsmethod self.instmethod = instmethod def __get__(self, instance, owner): if instance is None: return self.clsmethod.__get__(owner, owner) else: return self.instmethod.__get__(instance, owner) def __call__(self, *args, **kwargs): """ Just a hint to IDEs that it's ok to call this """ raise NotImplementedError class QuerySetDescriptor(object): """ returns a fresh queryset for the given model it's declared on everytime it's accessed """ def __get__(self, obj, model): """ :rtype: ModelQuerySet """ if model.__abstract__: raise CQLEngineException('cannot execute queries against abstract models') queryset = model.__queryset__(model) # if this is a concrete polymorphic model, and the discriminator # key is an indexed column, add a filter clause to only return # logical rows of the proper type if model._is_polymorphic and not model._is_polymorphic_base: name, column = model._discriminator_column_name, model._discriminator_column if column.partition_key or column.index: # look for existing poly types return queryset.filter(**{name: model.__discriminator_value__}) return queryset def __call__(self, *args, **kwargs): """ Just a hint to IDEs that it's ok to call this :rtype: ModelQuerySet """ raise NotImplementedError class TransactionDescriptor(object): """ returns a query set descriptor """ def __get__(self, instance, model): if instance: def transaction_setter(*prepared_transaction, **unprepared_transactions): if len(prepared_transaction) > 0: transactions = prepared_transaction[0] else: transactions = instance.objects.iff(**unprepared_transactions)._transaction instance._transaction = transactions return instance return transaction_setter qs = model.__queryset__(model) def transaction_setter(**unprepared_transactions): transactions = model.objects.iff(**unprepared_transactions)._transaction qs._transaction = transactions return qs return transaction_setter def __call__(self, *args, **kwargs): raise NotImplementedError class TTLDescriptor(object): """ returns a query set descriptor """ def __get__(self, 
instance, model): if instance: # instance = copy.deepcopy(instance) # instance method def ttl_setter(ts): instance._ttl = ts return instance return ttl_setter qs = model.__queryset__(model) def ttl_setter(ts): qs._ttl = ts return qs return ttl_setter def __call__(self, *args, **kwargs): raise NotImplementedError class TimestampDescriptor(object): """ returns a query set descriptor with a timestamp specified """ def __get__(self, instance, model): if instance: # instance method def timestamp_setter(ts): instance._timestamp = ts return instance return timestamp_setter return model.objects.timestamp def __call__(self, *args, **kwargs): raise NotImplementedError class IfNotExistsDescriptor(object): """ return a query set descriptor with a if_not_exists flag specified """ def __get__(self, instance, model): if instance: # instance method def ifnotexists_setter(ife): instance._if_not_exists = ife return instance return ifnotexists_setter return model.objects.if_not_exists def __call__(self, *args, **kwargs): raise NotImplementedError class ConsistencyDescriptor(object): """ returns a query set descriptor if called on Class, instance if it was an instance call """ def __get__(self, instance, model): if instance: # instance = copy.deepcopy(instance) def consistency_setter(consistency): instance.__consistency__ = consistency return instance return consistency_setter qs = model.__queryset__(model) def consistency_setter(consistency): qs._consistency = consistency return qs return consistency_setter def __call__(self, *args, **kwargs): raise NotImplementedError class ColumnQueryEvaluator(query.AbstractQueryableColumn): """ Wraps a column and allows it to be used in comparator expressions, returning query operators ie: Model.column == 5 """ def __init__(self, column): self.column = column def __unicode__(self): return self.column.db_field_name def _get_column(self): """ :rtype: ColumnQueryEvaluator """ return self.column class ColumnDescriptor(object): """ Handles the reading and writing of column values to and from a model instance's value manager, as well as creating comparator queries """ def __init__(self, column): """ :param column: :type column: columns.Column :return: """ self.column = column self.query_evaluator = ColumnQueryEvaluator(self.column) def __get__(self, instance, owner): """ Returns either the value or column, depending on if an instance is provided or not :param instance: the model instance :type instance: Model """ try: return instance._values[self.column.column_name].getval() except AttributeError: return self.query_evaluator def __set__(self, instance, value): """ Sets the value on an instance, raises an exception with classes TODO: use None instance to create update statements """ if instance: return instance._values[self.column.column_name].setval(value) else: raise AttributeError('cannot reassign column values') def __delete__(self, instance): """ Sets the column value to None, if possible """ if instance: if self.column.can_delete: instance._values[self.column.column_name].delval() else: raise AttributeError('cannot delete {0} columns'.format(self.column.column_name)) class BaseModel(object): """ The base model class, don't inherit from this, inherit from Model, defined below """ class DoesNotExist(_DoesNotExist): pass class MultipleObjectsReturned(_MultipleObjectsReturned): pass objects = QuerySetDescriptor() ttl = TTLDescriptor() consistency = ConsistencyDescriptor() iff = TransactionDescriptor() # custom timestamps, see USING TIMESTAMP X timestamp = TimestampDescriptor() 
if_not_exists = IfNotExistsDescriptor() # _len is lazily created by __len__ __table_name__ = None __keyspace__ = None __discriminator_value__ = None __options__ = None # the queryset class used for this class __queryset__ = query.ModelQuerySet __dmlquery__ = query.DMLQuery __consistency__ = None # can be set per query _timestamp = None # optional timestamp to include with the operation (USING TIMESTAMP) _if_not_exists = False # optional if_not_exists flag to check existence before insertion _table_name = None # used internally to cache a derived table name def __init__(self, **values): self._values = {} self._ttl = self.__default_ttl__ self._timestamp = None self._transaction = None for name, column in self._columns.items(): value = values.get(name, None) if value is not None or isinstance(column, columns.BaseContainerColumn): value = column.to_python(value) value_mngr = column.value_manager(self, column, value) if name in values: value_mngr.explicit = True self._values[name] = value_mngr # a flag set by the deserializer to indicate # that update should be used when persisting changes self._is_persisted = False self._batch = None self._timeout = connection.NOT_SET def __repr__(self): return '{0}({1})'.format(self.__class__.__name__, ', '.join('{0}={1!r}'.format(k, getattr(self, k)) for k in self._defined_columns.keys() if k != self._discriminator_column_name)) def __str__(self): """ Pretty printing of models by their primary key """ return '{0} <{1}>'.format(self.__class__.__name__, ', '.join('{0}={1}'.format(k, getattr(self, k)) for k in self._primary_keys.keys())) @classmethod def _discover_polymorphic_submodels(cls): if not cls._is_polymorphic_base: raise ModelException('_discover_polymorphic_submodels can only be called on polymorphic base classes') def _discover(klass): if not klass._is_polymorphic_base and klass.__discriminator_value__ is not None: cls._discriminator_map[klass.__discriminator_value__] = klass for subklass in klass.__subclasses__(): _discover(subklass) _discover(cls) @classmethod def _get_model_by_discriminator_value(cls, key): if not cls._is_polymorphic_base: raise ModelException('_get_model_by_discriminator_value can only be called on polymorphic base classes') return cls._discriminator_map.get(key) @classmethod def _construct_instance(cls, values): """ method used to construct instances from query results this is where polymorphic deserialization occurs """ # we're going to take the values, which is from the DB as a dict # and translate that into our local fields # the db_map is a db_field -> model field map items = values.items() field_dict = dict([(cls._db_map.get(k, k), v) for k, v in items]) if cls._is_polymorphic: disc_key = field_dict.get(cls._discriminator_column_name) if disc_key is None: raise PolymorphicModelException('discriminator value was not found in values') poly_base = cls if cls._is_polymorphic_base else cls._polymorphic_base klass = poly_base._get_model_by_discriminator_value(disc_key) if klass is None: poly_base._discover_polymorphic_submodels() klass = poly_base._get_model_by_discriminator_value(disc_key) if klass is None: raise PolymorphicModelException( 'unrecognized discriminator column {0} for class {1}'.format(disc_key, poly_base.__name__) ) if not issubclass(klass, cls): raise PolymorphicModelException( '{0} is not a subclass of {1}'.format(klass.__name__, cls.__name__) ) field_dict = dict((k, v) for k, v in field_dict.items() if k in klass._columns.keys()) else: klass = cls instance = klass(**field_dict) instance._is_persisted = True 
return instance def _can_update(self): """ Called by the save function to check if this should be persisted with update or insert :return: """ if not self._is_persisted: return False return all([not self._values[k].changed for k in self._primary_keys]) @classmethod def _get_keyspace(cls): """ Returns the manual keyspace, if set, otherwise the default keyspace """ return cls.__keyspace__ or DEFAULT_KEYSPACE @classmethod def _get_column(cls, name): """ Returns the column matching the given name, raising a key error if it doesn't exist :param name: the name of the column to return :rtype: Column """ return cls._columns[name] def __eq__(self, other): if self.__class__ != other.__class__: return False # check attribute keys keys = set(self._columns.keys()) other_keys = set(other._columns.keys()) if keys != other_keys: return False # check that all of the attributes match for key in other_keys: if getattr(self, key, None) != getattr(other, key, None): return False return True def __ne__(self, other): return not self.__eq__(other) @classmethod def column_family_name(cls, include_keyspace=True): """ Returns the column family name if it's been defined otherwise, it creates it from the module and class name """ cf_name = protect_name(cls._raw_column_family_name()) if include_keyspace: return '{0}.{1}'.format(protect_name(cls._get_keyspace()), cf_name) return cf_name @classmethod def _raw_column_family_name(cls): if not cls._table_name: if cls.__table_name__: cls._table_name = cls.__table_name__.lower() else: if cls._is_polymorphic and not cls._is_polymorphic_base: cls._table_name = cls._polymorphic_base._raw_column_family_name() else: camelcase = re.compile(r'([a-z])([A-Z])') ccase = lambda s: camelcase.sub(lambda v: '{0}_{1}'.format(v.group(1), v.group(2).lower()), s) cf_name = ccase(cls.__name__) # trim to less than 48 characters or cassandra will complain cf_name = cf_name[-48:] cf_name = cf_name.lower() cf_name = re.sub(r'^_+', '', cf_name) cls._table_name = cf_name return cls._table_name def validate(self): """ Cleans and validates the field values """ for name, col in self._columns.items(): v = getattr(self, name) if v is None and not self._values[name].explicit and col.has_default: v = col.get_default() val = col.validate(v) setattr(self, name, val) # Let an instance be used like a dict of its columns keys/values def __iter__(self): """ Iterate over column ids. """ for column_id in self._columns.keys(): yield column_id def __getitem__(self, key): """ Returns column's value. """ if not isinstance(key, six.string_types): raise TypeError if key not in self._columns.keys(): raise KeyError return getattr(self, key) def __setitem__(self, key, val): """ Sets a column's value. """ if not isinstance(key, six.string_types): raise TypeError if key not in self._columns.keys(): raise KeyError return setattr(self, key, val) def __len__(self): """ Returns the number of columns defined on that model. """ try: return self._len except: self._len = len(self._columns.keys()) return self._len def keys(self): """ Returns a list of column IDs. """ return [k for k in self] def values(self): """ Returns list of column values. """ return [self[k] for k in self] def items(self): """ Returns a list of column ID/value tuples. 
""" return [(k, self[k]) for k in self] def _as_dict(self): """ Returns a map of column names to cleaned values """ values = self._dynamic_columns or {} for name, col in self._columns.items(): values[name] = col.to_database(getattr(self, name, None)) return values @classmethod def create(cls, **kwargs): """ Create an instance of this model in the database. Takes the model column values as keyword arguments. Returns the instance. """ extra_columns = set(kwargs.keys()) - set(cls._columns.keys()) if extra_columns: raise ValidationError("Incorrect columns passed: {0}".format(extra_columns)) return cls.objects.create(**kwargs) @classmethod def all(cls): """ Returns a queryset representing all stored objects This is a pass-through to the model objects().all() """ return cls.objects.all() @classmethod def filter(cls, *args, **kwargs): """ Returns a queryset based on filter parameters. This is a pass-through to the model objects().:method:`~cqlengine.queries.filter`. """ return cls.objects.filter(*args, **kwargs) @classmethod def get(cls, *args, **kwargs): """ Returns a single object based on the passed filter constraints. This is a pass-through to the model objects().:method:`~cqlengine.queries.get`. """ return cls.objects.get(*args, **kwargs) def timeout(self, timeout): """ Sets a timeout for use in :meth:`~.save`, :meth:`~.update`, and :meth:`~.delete` operations """ assert self._batch is None, 'Setting both timeout and batch is not supported' self._timeout = timeout return self def save(self): """ Saves an object to the database. .. code-block:: python #create a person instance person = Person(first_name='Kimberly', last_name='Eggleston') #saves it to Cassandra person.save() """ # handle polymorphic models if self._is_polymorphic: if self._is_polymorphic_base: raise PolymorphicModelException('cannot save polymorphic base model') else: setattr(self, self._discriminator_column_name, self.__discriminator_value__) self.validate() self.__dmlquery__(self.__class__, self, batch=self._batch, ttl=self._ttl, timestamp=self._timestamp, consistency=self.__consistency__, if_not_exists=self._if_not_exists, transaction=self._transaction, timeout=self._timeout).save() # reset the value managers for v in self._values.values(): v.reset_previous_value() self._is_persisted = True self._ttl = self.__default_ttl__ self._timestamp = None return self def update(self, **values): """ Performs an update on the model instance. You can pass in values to set on the model for updating, or you can call without values to execute an update against any modified fields. If no fields on the model have been modified since loading, no query will be performed. Model validation is performed normally. It is possible to do a blind update, that is, to update a field without having first selected the object out of the database. 
        See :ref:`Blind Updates <blind_updates>`
        """
        for k, v in values.items():
            col = self._columns.get(k)

            # check for nonexistent columns
            if col is None:
                raise ValidationError("{0}.{1} has no column named: {2}".format(self.__module__, self.__class__.__name__, k))

            # check for primary key update attempts
            if col.is_primary_key:
                raise ValidationError("Cannot apply update to primary key '{0}' for {1}.{2}".format(k, self.__module__, self.__class__.__name__))

            setattr(self, k, v)

        # handle polymorphic models
        if self._is_polymorphic:
            if self._is_polymorphic_base:
                raise PolymorphicModelException('cannot update polymorphic base model')
            else:
                setattr(self, self._discriminator_column_name, self.__discriminator_value__)

        self.validate()
        self.__dmlquery__(self.__class__, self,
                          batch=self._batch,
                          ttl=self._ttl,
                          timestamp=self._timestamp,
                          consistency=self.__consistency__,
                          transaction=self._transaction,
                          timeout=self._timeout).update()

        # reset the value managers
        for v in self._values.values():
            v.reset_previous_value()
        self._is_persisted = True

        self._ttl = self.__default_ttl__
        self._timestamp = None

        return self

    def delete(self):
        """ Deletes the object from the database """
        self.__dmlquery__(self.__class__, self,
                          batch=self._batch,
                          timestamp=self._timestamp,
                          consistency=self.__consistency__,
                          timeout=self._timeout).delete()

    def get_changed_columns(self):
        """ Returns a list of the columns that have been updated since instantiation or save """
        return [k for k, v in self._values.items() if v.changed]

    @classmethod
    def _class_batch(cls, batch):
        return cls.objects.batch(batch)

    def _inst_batch(self, batch):
        assert self._timeout is connection.NOT_SET, 'Setting both timeout and batch is not supported'
        self._batch = batch
        return self

    batch = hybrid_classmethod(_class_batch, _inst_batch)


class ModelMetaClass(type):

    def __new__(cls, name, bases, attrs):
        # move column definitions into columns dict
        # and set default column names
        column_dict = OrderedDict()
        primary_keys = OrderedDict()
        pk_name = None

        # get inherited properties
        inherited_columns = OrderedDict()
        for base in bases:
            for k, v in getattr(base, '_defined_columns', {}).items():
                inherited_columns.setdefault(k, v)

        # short circuit __abstract__ inheritance
        is_abstract = attrs['__abstract__'] = attrs.get('__abstract__', False)

        # short circuit __discriminator_value__ inheritance
        attrs['__discriminator_value__'] = attrs.get('__discriminator_value__')

        options = attrs.get('__options__') or {}
        attrs['__default_ttl__'] = options.get('default_time_to_live')

        def _transform_column(col_name, col_obj):
            column_dict[col_name] = col_obj
            if col_obj.primary_key:
                primary_keys[col_name] = col_obj
            col_obj.set_column_name(col_name)
            # set properties
            attrs[col_name] = ColumnDescriptor(col_obj)

        column_definitions = [(k, v) for k, v in attrs.items() if isinstance(v, columns.Column)]
        column_definitions = sorted(column_definitions, key=lambda x: x[1].position)

        is_polymorphic_base = any([c[1].discriminator_column for c in column_definitions])

        column_definitions = [x for x in inherited_columns.items()] + column_definitions
        discriminator_columns = [c for c in column_definitions if c[1].discriminator_column]
        is_polymorphic = len(discriminator_columns) > 0
        if len(discriminator_columns) > 1:
            raise ModelDefinitionException('only one discriminator_column can be defined in a model, {0} found'.format(len(discriminator_columns)))

        if attrs['__discriminator_value__'] and not is_polymorphic:
            raise ModelDefinitionException('__discriminator_value__ specified, but no base columns defined with discriminator_column=True')
discriminator_column_name, discriminator_column = discriminator_columns[0] if discriminator_columns else (None, None) if isinstance(discriminator_column, (columns.BaseContainerColumn, columns.Counter)): raise ModelDefinitionException('counter and container columns cannot be used as discriminator columns') # find polymorphic base class polymorphic_base = None if is_polymorphic and not is_polymorphic_base: def _get_polymorphic_base(bases): for base in bases: if getattr(base, '_is_polymorphic_base', False): return base klass = _get_polymorphic_base(base.__bases__) if klass: return klass polymorphic_base = _get_polymorphic_base(bases) defined_columns = OrderedDict(column_definitions) # check for primary key if not is_abstract and not any([v.primary_key for k, v in column_definitions]): raise ModelDefinitionException("At least 1 primary key is required.") counter_columns = [c for c in defined_columns.values() if isinstance(c, columns.Counter)] data_columns = [c for c in defined_columns.values() if not c.primary_key and not isinstance(c, columns.Counter)] if counter_columns and data_columns: raise ModelDefinitionException('counter models may not have data columns') has_partition_keys = any(v.partition_key for (k, v) in column_definitions) # transform column definitions for k, v in column_definitions: # don't allow a column with the same name as a built-in attribute or method if k in BaseModel.__dict__: raise ModelDefinitionException("column '{0}' conflicts with built-in attribute/method".format(k)) # counter column primary keys are not allowed if (v.primary_key or v.partition_key) and isinstance(v, (columns.Counter, columns.BaseContainerColumn)): raise ModelDefinitionException('counter columns and container columns cannot be used as primary keys') # this will mark the first primary key column as a partition # key, if one hasn't been set already if not has_partition_keys and v.primary_key: v.partition_key = True has_partition_keys = True _transform_column(k, v) partition_keys = OrderedDict(k for k in primary_keys.items() if k[1].partition_key) clustering_keys = OrderedDict(k for k in primary_keys.items() if not k[1].partition_key) # setup partition key shortcut if len(partition_keys) == 0: if not is_abstract: raise ModelException("at least one partition key must be defined") if len(partition_keys) == 1: pk_name = [x for x in partition_keys.keys()][0] attrs['pk'] = attrs[pk_name] else: # composite partition key case, get/set a tuple of values _get = lambda self: tuple(self._values[c].getval() for c in partition_keys.keys()) _set = lambda self, val: tuple(self._values[c].setval(v) for (c, v) in zip(partition_keys.keys(), val)) attrs['pk'] = property(_get, _set) # some validation col_names = set() for v in column_dict.values(): # check for duplicate column names if v.db_field_name in col_names: raise ModelException("{0} defines the column {1} more than once".format(name, v.db_field_name)) if v.clustering_order and not (v.primary_key and not v.partition_key): raise ModelException("clustering_order may be specified only for clustering primary keys") if v.clustering_order and v.clustering_order.lower() not in ('asc', 'desc'): raise ModelException("invalid clustering order {0} for column {1}".format(repr(v.clustering_order), v.db_field_name)) col_names.add(v.db_field_name) # create db_name -> model name map for loading db_map = {} for field_name, col in column_dict.items(): db_map[col.db_field_name] = field_name # add management members to the class attrs['_columns'] = column_dict attrs['_primary_keys'] 
= primary_keys
        attrs['_defined_columns'] = defined_columns

        # maps the database field to the models key
        attrs['_db_map'] = db_map

        attrs['_pk_name'] = pk_name
        attrs['_dynamic_columns'] = {}

        attrs['_partition_keys'] = partition_keys
        attrs['_clustering_keys'] = clustering_keys
        attrs['_has_counter'] = len(counter_columns) > 0

        # add polymorphic management attributes
        attrs['_is_polymorphic_base'] = is_polymorphic_base
        attrs['_is_polymorphic'] = is_polymorphic
        attrs['_polymorphic_base'] = polymorphic_base
        attrs['_discriminator_column'] = discriminator_column
        attrs['_discriminator_column_name'] = discriminator_column_name
        attrs['_discriminator_map'] = {} if is_polymorphic_base else None

        # setup class exceptions
        DoesNotExistBase = None
        for base in bases:
            DoesNotExistBase = getattr(base, 'DoesNotExist', None)
            if DoesNotExistBase is not None:
                break

        DoesNotExistBase = DoesNotExistBase or attrs.pop('DoesNotExist', BaseModel.DoesNotExist)
        attrs['DoesNotExist'] = type('DoesNotExist', (DoesNotExistBase,), {})

        MultipleObjectsReturnedBase = None
        for base in bases:
            MultipleObjectsReturnedBase = getattr(base, 'MultipleObjectsReturned', None)
            if MultipleObjectsReturnedBase is not None:
                break

        MultipleObjectsReturnedBase = MultipleObjectsReturnedBase or attrs.pop('MultipleObjectsReturned', BaseModel.MultipleObjectsReturned)
        attrs['MultipleObjectsReturned'] = type('MultipleObjectsReturned', (MultipleObjectsReturnedBase,), {})

        # create the class and add a QuerySet to it
        klass = super(ModelMetaClass, cls).__new__(cls, name, bases, attrs)

        udts = []
        for col in column_dict.values():
            columns.resolve_udts(col, udts)

        for user_type in set(udts):
            user_type.register_for_keyspace(klass._get_keyspace())

        return klass


@six.add_metaclass(ModelMetaClass)
class Model(BaseModel):

    __abstract__ = True
    """
    *Optional.* Indicates that this model is only intended to be used as a base class for other models.
    You can't create tables for abstract models, but checks around schema validity are skipped during class construction.
    """

    __table_name__ = None
    """
    *Optional.* Sets the name of the CQL table for this model. If left blank, the table name will be
    the name of the model, with its module name as its prefix. Manually defined table names are not inherited.
    """

    __keyspace__ = None
    """
    Sets the name of the keyspace used by this model.
    """

    __options__ = None
    """
    *Optional* Table options applied with this model (e.g. compaction, default ttl, cache settings, etc.)
    """

    __discriminator_value__ = None
    """
    *Optional* Specifies a value for the discriminator column when using model inheritance.
    """
jfelectron/python-driver
cassandra/cqlengine/models.py
Python
apache-2.0
33,196
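A minimal usage sketch for the Model API above, assuming a reachable Cassandra node, an existing 'example' keyspace, and the cassandra.cqlengine package layout shown in the file path; the model, keyspace, and column names are illustrative, not taken from the source.

import uuid

from cassandra.cqlengine import columns, connection
from cassandra.cqlengine.management import sync_table
from cassandra.cqlengine.models import Model


class Person(Model):
    __keyspace__ = 'example'
    id = columns.UUID(primary_key=True, default=uuid.uuid4)
    first_name = columns.Text()
    last_name = columns.Text()


connection.setup(['127.0.0.1'], 'example')
sync_table(Person)  # create the column family from the model definition

person = Person.create(first_name='Kimberly', last_name='Eggleston')
person.update(last_name='Smith')  # writes only the modified column
Person(id=person.id).update(first_name='Kim')  # blind update, no prior read
person.delete()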
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements.  See the NOTICE file
distributed with this work for additional information
regarding copyright ownership.  The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License.  You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from unittest import TestCase
import os


class TestVersion(TestCase):
  """
  Class that tests the methods of the version.py file used to format and compare
  version numbers of both Ambari (which use 3 digits separated by dots)
  and stacks (which use 4 digits separated by dots).
  """

  def setUp(self):
    import imp
    self.test_directory = os.path.dirname(os.path.abspath(__file__))

    test_file_path = os.path.join(self.test_directory, '../../../../ambari-common/src/main/python/resource_management/libraries/functions/version.py')
    with open(test_file_path, 'rb') as fp:
      self.version_module = imp.load_module('version', fp, test_file_path, ('.py', 'rb', imp.PY_SOURCE))

  def test_format(self):
    l = [("2.2", "2.2.0.0"),
         ("2.2.1", "2.2.1.0"),
         ("2.2.1.3", "2.2.1.3")]

    for input, expected in l:
      actual = self.version_module.format_stack_version(input)
      self.assertEqual(expected, actual)

    gluster_fs_actual = self.version_module.format_stack_version("GlusterFS")
    self.assertEqual("", gluster_fs_actual)

  def test_format_with_hyphens(self):
    actual = self.version_module.format_stack_version("FOO-1.0")
    self.assertEqual("1.0.0.0", actual)

    actual = self.version_module.format_stack_version("1.0.0-1234")
    self.assertEqual("1.0.0.0", actual)

    actual = self.version_module.format_stack_version("FOO-1.0-9999")
    self.assertEqual("1.0.0.0", actual)

  def test_comparison(self):
    # Compare versions from 1.0.0.0 up to (but not including) 3.0.0.0,
    # stepping by 7 to keep the number of pairwise comparisons manageable.
    versions = range(1000, 3000, 7)
    versions = [".".join(list(str(elem))) for elem in versions]

    for idx, x in enumerate(versions):
      for idy, y in enumerate(versions):
        # Expected value will either be -1, 0, 1, and it relies on the fact
        # that an increasing index implies a greater version number.
        expected_value = cmp(idx, idy)
        actual_value = self.version_module.compare_versions(x, y)
        self.assertEqual(expected_value, actual_value)

    # Try something fancier
    self.assertEqual(0, self.version_module.compare_versions("2.10", "2.10.0"))
    self.assertEqual(0, self.version_module.compare_versions("2.10", "2.10.0.0"))
    self.assertEqual(0, self.version_module.compare_versions("2.10.0", "2.10.0.0"))

    try:
      self.version_module.compare_versions("", "GlusterFS")
    except ValueError:
      pass
    else:
      self.fail("Did not raise exception")
radicalbit/ambari
ambari-server/src/test/python/TestVersion.py
Python
apache-2.0
3,260
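The behavior the test pins down (pad a version out to four dot-separated digits, then compare numerically) can be summarized in a few lines. This is an illustrative sketch of the idea, not Ambari's actual resource_management implementation, which also handles stack prefixes like "FOO-1.0".

def format_version(version, digits=4):
    parts = version.split('.')
    parts += ['0'] * (digits - len(parts))  # pad "2.2" out to "2.2.0.0"
    return '.'.join(parts[:digits])


def compare_versions(a, b):
    ta = tuple(int(x) for x in format_version(a).split('.'))
    tb = tuple(int(x) for x in format_version(b).split('.'))
    return (ta > tb) - (ta < tb)  # -1, 0, or 1, like Python 2's cmp()


assert compare_versions('2.10', '2.10.0.0') == 0
assert compare_versions('2.2.1', '2.2.0.4') == 1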
import click import webbrowser import sys import floyd from floyd.client.auth import AuthClient from floyd.manager.auth_config import AuthConfigManager from floyd.model.access_token import AccessToken from floyd.model.credentials import Credentials from floyd.log import logger as floyd_logger @click.command() @click.option('--token', is_flag=True, default=False, help='Just enter token') @click.option('--username', '-u', help='FloydHub username') @click.option('--password', '-p', help='FloydHub password') def login(token, username, password): """ Log into Floyd via Auth0. """ if username: # Use username / password login if not password: password = click.prompt('Please enter your password', type=str, hide_input=True) password = password.strip() if not password: floyd_logger.info('You entered an empty string. Please make sure you enter your password correctly.') sys.exit(1) login_credentials = Credentials(username=username, password=password) access_code = AuthClient().login(login_credentials) if not access_code: floyd_logger.info("Failed to login") return else: # Fallback to the access token from the browser login if not token: cli_info_url = "{}/settings/security".format(floyd.floyd_web_host) click.confirm('Authentication token page will now open in your browser. Continue?', abort=True, default=True) webbrowser.open(cli_info_url) floyd_logger.info("Please copy and paste the authentication token.") access_code = click.prompt('This is an invisible field. Paste token and press ENTER', type=str, hide_input=True) access_code = access_code.strip() if not access_code: floyd_logger.info("Empty token received. Make sure your shell is handling the token appropriately.") floyd_logger.info("See docs for help: http://docs.floydhub.com/faqs/authentication/") return access_code = access_code.strip(" ") user = AuthClient().get_user(access_code) access_token = AccessToken(username=user.username, token=access_code) AuthConfigManager.set_access_token(access_token) floyd_logger.info("Login Successful as %s", user.username) @click.command() def logout(): """ Logout of Floyd. """ AuthConfigManager.purge_access_token()
mckayward/floyd-cli
floyd/cli/auth.py
Python
apache-2.0
2,580
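One way to exercise the login command without a terminal is click's CliRunner, which feeds options and prompt input programmatically. AuthClient and AuthConfigManager normally hit the FloydHub API and write a config file, so this sketch patches both; the patch targets follow the imports in floyd/cli/auth.py above, but the test itself is an assumption, not part of the project.

from unittest import mock

from click.testing import CliRunner

from floyd.cli.auth import login

with mock.patch('floyd.cli.auth.AuthClient') as auth_client, \
        mock.patch('floyd.cli.auth.AuthConfigManager') as config_manager:
    auth_client.return_value.login.return_value = 'fake-token'
    auth_client.return_value.get_user.return_value = mock.Mock(username='alice')
    # '--username alice' takes the credentials path; input answers the
    # hidden password prompt.
    result = CliRunner().invoke(login, ['--username', 'alice'], input='secret\n')

assert result.exit_code == 0
config_manager.set_access_token.assert_called_once()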
"""Unit tests for the Azure Devops Server issues collector.""" from .base import AzureDevopsTestCase class AzureDevopsIssuesTest(AzureDevopsTestCase): """Unit tests for the Azure Devops Server issues metric.""" METRIC_TYPE = "issues" async def test_nr_of_issues(self): """Test that the number of issues is returned.""" response = await self.collect( get_request_json_return_value=dict(value=[self.work_item, self.work_item]), post_request_json_return_value=dict(workItems=[dict(id="id1"), dict(id="id2")]), ) self.assert_measurement(response, value="2") async def test_no_issues(self): """Test zero issues.""" response = await self.collect(post_request_json_return_value=dict(workItems=[])) self.assert_measurement(response, value="0", entities=[]) async def test_issues(self): """Test that the issues are returned.""" response = await self.collect( get_request_json_return_value=dict(value=[self.work_item]), post_request_json_return_value=dict(workItems=[dict(id="id")]), ) self.assert_measurement( response, entities=[ dict( key="id", project="Project", title="Title", work_item_type="Task", state="New", url=self.work_item_url, ) ], )
ICTU/quality-time
components/collector/tests/source_collectors/azure_devops/test_issues.py
Python
apache-2.0
1,498
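The base class's collect() and assert_measurement() helpers make additional cases cheap to add. A sketch of one more, meant to sit inside AzureDevopsIssuesTest and reusing only the fixtures already visible in this file:

    async def test_single_issue_count(self):
        """Sketch: a single matching work item should yield a measurement of 1."""
        response = await self.collect(
            get_request_json_return_value=dict(value=[self.work_item]),
            post_request_json_return_value=dict(workItems=[dict(id="id")]),
        )
        self.assert_measurement(response, value="1")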
# Copyright 2020 Google LLC All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys import unittest class TestUtils(unittest.TestCase): skip_condition = sys.version_info[0] < 3 skip_message = "Subtests are not supported in Python 2" @unittest.skipIf(skip_condition, skip_message) def test_PeekIterator(self): from google.cloud.spanner_dbapi.utils import PeekIterator cases = [ ("list", [1, 2, 3, 4, 6, 7], [1, 2, 3, 4, 6, 7]), ("iter_from_list", iter([1, 2, 3, 4, 6, 7]), [1, 2, 3, 4, 6, 7]), ("tuple", ("a", 12, 0xFF), ["a", 12, 0xFF]), ("iter_from_tuple", iter(("a", 12, 0xFF)), ["a", 12, 0xFF]), ("no_args", (), []), ] for name, data_in, expected in cases: with self.subTest(name=name): pitr = PeekIterator(data_in) actual = list(pitr) self.assertEqual(actual, expected) @unittest.skipIf(skip_condition, "Python 2 has an outdated iterator definition") def test_peekIterator_list_rows_converted_to_tuples(self): from google.cloud.spanner_dbapi.utils import PeekIterator # Cloud Spanner returns results in lists e.g. [result]. # PeekIterator is used by BaseCursor in its fetch* methods. # This test ensures that anything passed into PeekIterator # will be returned as a tuple. pit = PeekIterator([["a"], ["b"], ["c"], ["d"], ["e"]]) got = list(pit) want = [("a",), ("b",), ("c",), ("d",), ("e",)] self.assertEqual(got, want, "Rows of type list must be returned as tuples") seventeen = PeekIterator([[17]]) self.assertEqual(list(seventeen), [(17,)]) pit = PeekIterator([["%", "%d"]]) self.assertEqual(next(pit), ("%", "%d")) pit = PeekIterator([("Clark", "Kent")]) self.assertEqual(next(pit), ("Clark", "Kent")) @unittest.skipIf(skip_condition, "Python 2 has an outdated iterator definition") def test_peekIterator_nonlist_rows_unconverted(self): from google.cloud.spanner_dbapi.utils import PeekIterator pi = PeekIterator(["a", "b", "c", "d", "e"]) got = list(pi) want = ["a", "b", "c", "d", "e"] self.assertEqual(got, want, "Values should be returned unchanged") @unittest.skipIf(skip_condition, skip_message) def test_backtick_unicode(self): from google.cloud.spanner_dbapi.utils import backtick_unicode cases = [ ("SELECT (1) as foo WHERE 1=1", "SELECT (1) as foo WHERE 1=1"), ("SELECT (1) as föö", "SELECT (1) as `föö`"), ("SELECT (1) as `föö`", "SELECT (1) as `föö`"), ("SELECT (1) as `föö` `umläut", "SELECT (1) as `föö` `umläut"), ("SELECT (1) as `föö", "SELECT (1) as `föö"), ] for sql, want in cases: with self.subTest(sql=sql): got = backtick_unicode(sql) self.assertEqual(got, want) @unittest.skipIf(skip_condition, skip_message) def test_StreamedManyResultSets(self): from google.cloud.spanner_dbapi.utils import StreamedManyResultSets cases = [ ("iter_from_list", iter([1, 2, 3, 4, 6, 7]), [1, 2, 3, 4, 6, 7]), ("iter_from_tuple", iter(("a", 12, 0xFF)), ["a", 12, 0xFF]), ] for name, data_in, expected in cases: with self.subTest(name=name): stream_result = StreamedManyResultSets() stream_result._iterators.append(data_in) actual = list(stream_result) self.assertEqual(actual, expected)
googleapis/python-spanner
tests/unit/spanner_dbapi/test_utils.py
Python
apache-2.0
4,189
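The tests above pin down two guarantees: list rows come back as tuples, and everything else passes through unchanged. Both fall out of a small peek-ahead wrapper that buffers one element so a cursor can inspect the first row before iteration starts. A standalone sketch of that pattern, not the library's actual implementation:

class SimplePeekIterator(object):
    """Peek-ahead sketch: inspect the first item without consuming it,
    and normalize list rows to tuples on the way out."""

    def __init__(self, source):
        self._it = iter(source)
        self._head = []
        try:
            self._head.append(next(self._it))  # buffer one item up front
        except StopIteration:
            pass

    def peek(self):
        return self._head[0]  # raises IndexError if the source was empty

    def __iter__(self):
        for item in self._head:
            yield tuple(item) if isinstance(item, list) else item
        for item in self._it:
            yield tuple(item) if isinstance(item, list) else item


rows = SimplePeekIterator([["a"], ["b"]])
assert rows.peek() == ["a"]            # raw head, nothing consumed
assert list(rows) == [("a",), ("b",)]  # full stream, list rows as tuples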
# Exercise 6
import yaml
import json


def main():
    """Write a list to both YAML and JSON files."""
    yaml_file = 'test_file.yml'
    json_file = 'jsontest.json'

    # Renamed from 'dict'/'list' to avoid shadowing the built-in names.
    # The device dictionary is sample data and is not written to disk here.
    device = {
        'ip_add': '192.168.1.100',
        'vendor': 'cisco'
    }

    data_list = ['week one', 99, 18]

    # Serialize the list to YAML (block style) and to JSON.
    with open(yaml_file, "w") as f:
        f.write(yaml.dump(data_list, default_flow_style=False))

    with open(json_file, "w") as f:
        json.dump(data_list, f)


if __name__ == "__main__":
    main()
ttomasello/pynetclass
ttexercise6.py
Python
apache-2.0
406
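A natural check on the exercise above is to read both files back and confirm the round trip. This sketch assumes it runs in the same directory after main() has written the files; it uses only the same two libraries.

import json

import yaml


def verify_round_trip():
    """Confirm that both serializations deserialize to the original list."""
    with open('test_file.yml') as f:
        from_yaml = yaml.safe_load(f)
    with open('jsontest.json') as f:
        from_json = json.load(f)
    assert from_yaml == from_json == ['week one', 99, 18]


if __name__ == "__main__":
    verify_round_trip()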
"""Base OpHandler for ops that use group lasso regularizer. This OpHandler should not be called directly. It is a virtual base class for regularization source OpHandlers that use Group Lasso as their regularizer. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import abc from morph_net.framework import op_handler from morph_net.framework import op_handler_util from morph_net.framework import tpu_util from morph_net.op_regularizers import group_lasso_regularizer class GroupLassoBaseSourceOpHandler(op_handler.OpHandler): """Base OpHandler for source ops that use Group Lasso.""" __metaclass__ = abc.ABCMeta def __init__(self, threshold, l1_fraction=0.0): """Instantiate an instance. Args: threshold: Float scalar used as threshold for GroupLassoRegularizer. l1_fraction: Float scalar used as l1_fraction for GroupLassoRegularizer. """ self._threshold = threshold self._l1_fraction = l1_fraction @abc.abstractmethod def _reduce_dims(self, op): # Reduction dimensions for Group Lasso. pass @property def is_source_op(self): return True @property def is_passthrough(self): return False def assign_grouping(self, op, op_reg_manager): """Assign grouping to the given op and updates the manager. Args: op: tf.Operation to assign grouping to. op_reg_manager: OpRegularizerManager to keep track of the grouping. """ # This is a source op so begin by getting the OpGroup or creating one. op_slices = op_reg_manager.get_op_slices(op) for op_slice in op_slices: op_group = op_reg_manager.get_op_group(op_slice) if op_group is None: op_reg_manager.create_op_group_for_op_slice(op_slice) # Check if all input ops have groups, or tell the manager to process them. input_ops = op_handler_util.get_input_ops(op, op_reg_manager) input_ops_without_group = op_handler_util.get_ops_without_groups( input_ops, op_reg_manager) # Check if all output ops have groups, or tell the manager to process them. output_ops = op_handler_util.get_output_ops(op, op_reg_manager) output_ops_without_group = op_handler_util.get_ops_without_groups( output_ops, op_reg_manager) # Remove non-passthrough ops from outputs ops to group with. output_ops = op_handler_util.remove_non_passthrough_ops( output_ops, op_reg_manager) # Only group with ops that have the same size. Process the ops that have # mismatched size. output_ops_to_group, output_ops_to_process = ( op_handler_util.separate_same_size_ops(op, output_ops)) # Also process ungrouped ops. input_ops_to_process = input_ops_without_group output_ops_to_process.extend(output_ops_without_group) # Align op slice sizes if needed. output_op_slices = op_handler_util.get_op_slices( output_ops_to_group, op_reg_manager) aligned_op_slice_sizes = op_handler_util.get_aligned_op_slice_sizes( op_slices, [], output_op_slices) op_handler_util.reslice_ops([op] + output_ops_to_group, aligned_op_slice_sizes, op_reg_manager) # Repopulate OpSlice data, as ops may have been resliced. output_op_slices = op_handler_util.get_op_slices( output_ops_to_group, op_reg_manager) # Group with outputs. op_handler_util.group_op_with_inputs_and_outputs( op, [], output_op_slices, aligned_op_slice_sizes, op_reg_manager) # Reprocess ops. op_reg_manager.process_ops(output_ops_to_process + input_ops_to_process) def create_regularizer(self, op_slice): """Create a regularizer for this conv2d OpSlice. Args: op_slice: op_regularizer_manager.OpSlice that is a conv2d OpSlice. Returns: OpRegularizer for this conv2d op. 
""" start_index = op_slice.slice.start_index size = op_slice.slice.size weights = op_slice.op.inputs[1] # Input 1 are the weights. weights = tpu_util.maybe_convert_to_variable(weights) reduce_dims = self._reduce_dims(op_slice.op) rank = len(weights.shape.as_list()) if rank != len(reduce_dims) + 1: raise ValueError('Rank %d incompatible with reduce_dims %s for op %s' % (rank, reduce_dims, op_slice.op.name)) def _slice_weights(): """Slices the weight tensor according to op_slice information.""" if rank == 2: if reduce_dims[0] == 0: return weights[:, start_index:start_index + size] else: return weights[start_index:start_index + size, :] if rank == 3: if 2 not in reduce_dims: return weights[:, :, start_index:start_index + size] if 1 not in reduce_dims: return weights[:, start_index:start_index + size, :] if 0 not in reduce_dims: return weights[start_index:start_index + size, :, :] if rank == 4: if 3 not in reduce_dims: return weights[:, :, :, start_index:start_index + size] if 2 not in reduce_dims: return weights[:, :, start_index:start_index + size, :] if 1 not in reduce_dims: return weights[:, start_index:start_index + size, :, :] if 0 not in reduce_dims: return weights[start_index:start_index + size, :, :, :] if rank == 5: if 4 not in reduce_dims: return weights[:, :, :, :, start_index:start_index + size] raise ValueError('Unsupported reduce_dim for rank 5 tensors (Conv3D)') raise ValueError('Unsupported rank or bad reduce_dim') weight_tensor = _slice_weights() # If OpSlice size matches tensor size, use the entire tensor. Otherwise, # slice the tensor accordingly. return group_lasso_regularizer.GroupLassoRegularizer( weight_tensor=weight_tensor, reduce_dims=self._reduce_dims(op_slice.op), threshold=self._threshold, l1_fraction=self._l1_fraction)
google-research/morph-net
morph_net/framework/group_lasso_base_op_handler.py
Python
apache-2.0
5,974
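Downstream of create_regularizer(), the group lasso idea is one norm per output activation taken over the reduce dims, blending L2 and L1 by l1_fraction, with the threshold deciding which activations count as alive. A numpy sketch of that idea; morph_net's exact normalization may differ, so treat the scaling here as an assumption.

import numpy as np


def group_norms(weights, reduce_dims, l1_fraction=0.0):
    """One blended L1/L2 norm per surviving axis (e.g. conv output channel)."""
    group_size = np.prod([weights.shape[d] for d in reduce_dims])
    l2 = np.sqrt(np.sum(weights ** 2, axis=tuple(reduce_dims)) / group_size)
    l1 = np.sum(np.abs(weights), axis=tuple(reduce_dims)) / group_size
    return l1_fraction * l1 + (1.0 - l1_fraction) * l2


kernel = np.random.randn(3, 3, 8, 16)             # HWIO conv2d weights
norms = group_norms(kernel, reduce_dims=(0, 1, 2))  # shape (16,), one per channel
alive = norms > 0.1                               # the handler's threshold
print(int(alive.sum()), 'of', kernel.shape[-1], 'output channels alive')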
# Copyright 2020 Makani Technologies LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """A user-friendly interface for program.py.""" import collections import logging import os import socket import sys import tempfile import time import gflags import makani from makani.avionics.bootloader import generate_image from makani.avionics.common import aio from makani.avionics.common import aio_version from makani.avionics.linux.provision import process_helper from makani.avionics.linux.provision import ui_helper from makani.avionics.network import network_config from makani.lib.bazel import bazel_util from makani.lib.python import c_helpers import program _NETWORK_CONFIG = network_config.NetworkConfig() def IpToAioNode(ip): assert ip.startswith('192.168.1.'), ('Ip {} not in AIO range ' '192.168.1.0/24.'.format(ip)) final_octet = int(ip.split('.')[-1]) node_num = aio.aio_node_to_ip_address.FinalOctetToAioNode(final_octet) return _NETWORK_CONFIG.aio_nodes[node_num] def DetectAio(timeout=1.1): """Detect AIO nodes on the network, present all options if none detected.""" sources = aio.aio_node_helper.Names() types = aio.message_type_helper.Names() client = aio.AioClient(types, timeout=0.1, allowed_sources=sources) ip_list = [] version_list = [] timer_start = time.time() while time.time() - timer_start < timeout: try: ip, header, _ = client.Recv(accept_invalid=True) ip_list.append(ip) version_list.append(header.version) except socket.error: pass client.Close() if ip_list and version_list: # De-duplication using set conversion. 
ip_tuple, version_tuple = zip(*set(zip(ip_list, version_list))) return tuple([IpToAioNode(ip) for ip in ip_tuple]), version_tuple return tuple(), tuple() def Tms570NodeDict(): my_dict = collections.defaultdict(list) for node in _NETWORK_CONFIG.aio_nodes: if node.tms570_node or node.label_name == 'unknown': my_dict[node.label_name].append(node) return my_dict def NodeSelectMenu(dialog_ui, dialog_kwargs=None): """Ask the user to select and AIO node using a menu and sub-menu.""" kwargs = dialog_kwargs or {} aio_node_tree = Tms570NodeDict() top_menu = sorted([(key, key) for key in aio_node_tree]) sub_ok, select = dialog_ui.Menu('Select AIO node label:', top_menu, **kwargs) if not sub_ok: return None sub_menu = [(str(val.enum_value), val.snake_name) for val in aio_node_tree[select]] sub_ok, select = dialog_ui.Menu('Select AIO node:', sub_menu, **kwargs) if not sub_ok: return None return select def GetSerialRevisions(serial_type='aio'): yaml_file = program.SERIAL_PARAM_FILE_TEMPLATE.format(serial_type) return program.codec.DecodeYaml(open(yaml_file).read()).keys() def GetSerialTypes(): serial_type = [] filename_suffix = os.path.basename( program.SERIAL_PARAM_FILE_TEMPLATE.format('')) filename_dirname = os.path.dirname(program.SERIAL_PARAM_FILE_TEMPLATE) for filename in os.listdir(filename_dirname): if filename.endswith(filename_suffix): serial_type.append(filename.replace(filename_suffix, '')) return serial_type class MenuFunction(object): """A mapping dialog menu entries to functions.""" def __init__(self, dialog_ui, title=None): self.dialog_ui = dialog_ui self.title = title self.tag = [] self.name = [] self.func = [] self.args = [] def Register(self, name, func, func_args=None, tag=None): """Creates a mapping from a dialog menu entry to func.""" if not tag: tag = str(len(self.tag)) assert isinstance(tag, str), 'Invalid parameter: tag must be a str.' assert isinstance(name, str), 'Invalid parameter: name must be a str.' assert hasattr(func, '__call__'), ('Invalid parameter: func must have ' '__call__ attribute.') self.tag.append(tag) self.name.append(name) self.func.append(func) self.args.append(func_args or []) def Run(self, menu_text, dialog_kwargs=None): """Presents a dialog menu and runs the user-selected entry.""" kwargs = dialog_kwargs or {} if self.title: kwargs['title'] = self.title assert isinstance(menu_text, str), ('Invalid parameter: menu_text must be ' 'a str.') sub_ok, tag = self.dialog_ui.Menu(menu_text, zip(self.tag, self.name), **kwargs) if sub_ok: index = self.tag.index(tag) return sub_ok, self.func[index](*self.args[index]) return sub_ok, None class ProgramWrapper(object): """A wrapper for program.py operations which provides a UI using dialog.""" program_py_command = [ 'python', os.path.join(makani.HOME, 'avionics', 'bootloader', 'program.py')] def __init__(self): self.dialog_ui = ui_helper.UserInterface() self.AutoDetect() def AutoDetect(self): timeout = 1.1 while True: if self.DetectNodeAndVersion(detection_timeout=timeout): break else: ok = self.dialog_ui.YesNo('Please select a node from the list.', title='Node Scanner', yes_label='OK', no_label='Quit') if not ok: break if self.SelectNode(): break # User selected cancel in SelectNode menu, try harder to detect. 
else: timeout = 3.3 def DetectNodeAndVersion(self, detection_timeout=1.1): """Detect an AIO nodes, offer selection if more than one is found.""" ok = False while not ok: self.dialog_ui.Info('Scanning for AIO nodes...') try: detected_nodes, versions = DetectAio(timeout=detection_timeout) if detected_nodes: select_node_menu = [(str(node.enum_value), str(node.camel_name)) for node in detected_nodes] sentinel = str(detected_nodes[-1].enum_value + 1) select_node_menu.append((sentinel, 'Select from complete list')) ok, node_ind = self.dialog_ui.Menu('Please select an AIO node:', options=select_node_menu, cancel_label='Rescan') if node_ind == sentinel: return False else: return False except socket.error, e: self.dialog_ui.Message('Socket error: {}'.format(e), title='Node Scanner') return False self.select_node = detected_nodes[int(node_ind)] self.version = versions[int(node_ind)] return detected_nodes and ok def SelectNode(self): node_ind = NodeSelectMenu(self.dialog_ui, dialog_kwargs={'cancel_label': 'Re-scan'}) if node_ind: self.select_node = _NETWORK_CONFIG.aio_nodes[int(node_ind)] self.version = None return True else: return False def HasCarrierBoard(self): node_prefix = self.select_node.snake_name.partition('_')[0] return node_prefix not in ('cs', 'motor') def SerialParamSelect(self, hardware_type): """Present menu to select serial parameter revision, and serial_number.""" rev_list = sorted(GetSerialRevisions(hardware_type)) if 'common' in rev_list: rev_list.remove('common') # 'common' is not to be used on hardware. sub_ok, rev = self.dialog_ui.Menu('Select the hardware revision:', zip(rev_list, rev_list)) if not sub_ok: return None, None sub_ok, serial_number = self.dialog_ui.Input('Please enter the' ' serial number:') if not sub_ok: return None, None return rev, serial_number def RunDialogProcess(self, process, title): """Run process in an dialog progress-box with munged stdout & stderr.""" with tempfile.NamedTemporaryFile() as temp_file: def WriteAndDisplay(line): temp_file.write(line) temp_file.flush() self.dialog_ui.dialog_instance.progressbox(file_path=temp_file.name, title=title, width=100, height=30) return process_helper.RunProcess(process, parse_stdout=WriteAndDisplay, parse_stderr=WriteAndDisplay) def GetFirmwareFiles(self, file_suffix): """Returns a list of firmware files with file_suffix removed.""" assert isinstance(self.select_node, network_config.AioNode) firmware_path = os.path.join( bazel_util.GetTms570BinDirectory(), 'avionics', os.path.dirname(self.select_node.application_path)) matching_files = [] for filename in os.listdir(firmware_path): if filename.endswith(file_suffix): matching_files.append(filename.replace(file_suffix, '')) return matching_files def GetConfigNames(self): """Returns a list of config file short-names compiled for select_node.""" return self.GetFirmwareFiles('_config_params.bin') def GetCalibNames(self): """Returns a list of calib file short-names compiled for select_node.""" return self.GetFirmwareFiles('_calib_params.bin') def RenameNode(self): """Rename the node from node to a user-selected node type.""" rename_to = NodeSelectMenu(self.dialog_ui) if rename_to: rename_to = _NETWORK_CONFIG.aio_nodes[int(rename_to)] rc, stdout, stderr = self.RunDialogProcess( self.program_py_command + [self.select_node.snake_name, '--rename_to', rename_to.snake_name], title='Renaming node...') if rc == 0: self.select_node = rename_to self.UpdateVersion(timeout=3) return rc, stdout, stderr return -1, '', 'User cancelled.' 
def ProgramApplication(self): """Program the application on select_node.""" rc, stdout, stderr = self.RunDialogProcess( self.program_py_command + [self.select_node.snake_name], title='Programming application...') return rc, stdout, stderr def ProgramBootloader(self): """Program the bootloader on select_node.""" rc, stdout, stderr = self.RunDialogProcess( self.program_py_command + [self.select_node.snake_name, '--bootloader'], title='Programming bootloader...') return rc, stdout, stderr def ProgramSerial(self): """Program the serial number into select_node.""" hardware_type = self.SerialTypeMenu(is_carrier=False) if not hardware_type: return -1, '', 'User cancelled' rev, serial_number = self.SerialParamSelect(hardware_type) if not rev or not serial_number: return -1, '', 'User cancelled.' rc, stdout, stderr = self.RunDialogProcess( self.program_py_command + [self.select_node.snake_name, '--serial', hardware_type, rev, serial_number], title='Programming serial...') return rc, stdout, stderr def ProgramCarrierSerial(self): """Program the carrier serial board onto select_node.""" node_prefix = self.select_node.snake_name.partition('_')[0] if node_prefix in ['cs', 'motor']: self.dialog_ui.Message('{} type nodes do not have carriers.'.format( node_prefix), title='Carrier Serial Programmer') else: carrier_type = self.SerialTypeMenu(is_carrier=True) if not carrier_type: return -1, '', 'User cancelled.' rev, serial_number = self.SerialParamSelect(carrier_type) if not rev or not serial_number: return -1, '', 'User cancelled.' rc, stdout, stderr = self.RunDialogProcess( self.program_py_command + [self.select_node.snake_name, '--carrier_serial', carrier_type, rev, serial_number], title='Programming carrier serial...') return rc, stdout, stderr def ProgramCalib(self): """Program the calib params into select_node.""" calibs = self.GetCalibNames() if not calibs: return (-1, '', 'No valid calibrations for ' '{}.'.format(self.select_node.snake_name)) sub_ok, calib_name = self.dialog_ui.Menu('Select calibration:', zip(calibs, calibs)) if sub_ok: return self.RunDialogProcess( self.program_py_command + [self.select_node.snake_name, '--calib', calib_name], title='Programming calib...') return -1, '', 'User cancelled.' def ProgramConfig(self): """Program the config params into select_node.""" configs = self.GetConfigNames() if not configs: return (-1, '', 'No valid configs for {}.'.format(self.select_node.snake_name)) sub_ok, config_name = self.dialog_ui.Menu('Select config:', zip(configs, configs)) if sub_ok: return self.RunDialogProcess( self.program_py_command + [self.select_node.snake_name, '--config', config_name], title='Programming config...') return -1, '', 'User cancelled.' def UpgradeBootloader(self): """Upgrade the bootloader on select_node.""" return self.RunDialogProcess( self.program_py_command + [self.select_node.snake_name, '--upgrade_bootloader'], title='Upgrading bootloader...') def CheckConsole(self): """Display the Stdio and SelfTest messages from select_node using dialog.""" sources = [self.select_node.enum_name] types = ['kMessageTypeStdio', 'kMessageTypeSelfTest'] self.dialog_ui.Info('Watching for Stdio messages...(3s)', title='Console Checker') try: client = aio.AioClient(types, timeout=3, allowed_sources=sources) _, _, message = client.Recv(accept_invalid=True) self.dialog_ui.Message(getattr(message, 'text', repr(message)), title='Console Message') except socket.timeout, e: return -1, '', 'No Stdio messages found.' 
except socket.error, e: if str(e) == 'timed out': return -1, '', 'No Stdio messages found.' raise finally: client.Close() return 0, '', '' def UpdateVersion(self, timeout=1): """Get AIO version from header of any packet from select_node.""" sources = [self.select_node.enum_name] types = aio.message_type_helper.Names() self.dialog_ui.Info('Watching for messages...({}s)'.format(timeout), title='Version Checker') try: client = aio.AioClient(types, timeout=timeout, allowed_sources=sources) _, header, _ = client.Recv(accept_invalid=True) except socket.timeout as e: self.version = None return -1, '', str(e) except socket.error as e: if str(e) == 'timed out': self.version = None return -1, '', str(e) else: raise else: self.version = str(header.version) finally: client.Close() return 0, '', '' def SerialTypeMenu(self, is_carrier=False): """Ask the user to select a hardware type of select_node. The menu options are derived from serial_params files. Args: is_carrier: Set to True if flashing serial params to a carrier board. Returns: Selected hardware type or None if the user cancelled. """ default = 'aio' serial_types = GetSerialTypes() node_prefix = self.select_node.snake_name.partition('_')[0] if is_carrier: menu_text = 'Select carrier hardware type:' serial_types.remove('aio') # Carrier cannot be AIO node. serial_types.remove('motor') serial_types.remove('cs') if node_prefix in serial_types: default = node_prefix else: if (node_prefix in serial_types and not self.HasCarrierBoard()): default = node_prefix menu_text = 'Select hardware type:' serial_types.sort() sub_ok, serial_type = self.dialog_ui.Menu(menu_text, zip(serial_types, serial_types), default_item=default) if sub_ok: return serial_type def HardwareIdentityMenu(self): """Ask the user to select a hardware identity of select_node. The menu options are derived from identity_types.h. Returns: Selected hardware identity or None if the user cancelled. 
""" default = 'aio' node_prefix = self.select_node.snake_name.partition('_')[0] hardware_types = [c_helpers.CamelToSnake(camel) for camel in generate_image.hardware_type_helper.ShortNames()] hardware_types.remove('unknown') if (node_prefix in hardware_types and not self.HasCarrierBoard()): default = node_prefix rc, hardware_type = self.dialog_ui.Menu( 'Select hardware type:', zip(hardware_types, hardware_types), default_item=default) if rc: return hardware_type def ToggleJlink(self): """Toggles the --jlink flag for program.py, creates a new menu.""" if '--jlink' not in self.program_py_command: hardware_type = self.HardwareIdentityMenu() if hardware_type: self.program_py_command.append('--jlink') self.program_py_command += ['--force_hardware', hardware_type] else: self.program_py_command = [ 'python', os.path.join(makani.HOME, 'avionics', 'bootloader', 'program.py')] return 0, '', '' def CreateMenu(program_wrapper): """Creates a MenuFunction instance and returns it.""" jlink_mode = '--jlink' in program_wrapper.program_py_command new_menu = MenuFunction(program_wrapper.dialog_ui, 'Program.py Menu') new_menu.Register('Program Application', program_wrapper.ProgramApplication) new_menu.Register('Program Bootloader', program_wrapper.ProgramBootloader) if not jlink_mode: new_menu.Register('Rename Node', program_wrapper.RenameNode) new_menu.Register('Program Serial', program_wrapper.ProgramSerial) if not jlink_mode and program_wrapper.HasCarrierBoard(): new_menu.Register('Program Carrier Serial', program_wrapper.ProgramCarrierSerial) new_menu.Register('Program Config', program_wrapper.ProgramConfig) new_menu.Register('Program Calib', program_wrapper.ProgramCalib) if not jlink_mode: new_menu.Register('Upgrade Bootloader', program_wrapper.UpgradeBootloader) new_menu.Register('Check Console', program_wrapper.CheckConsole) new_menu.Register('Check Version', program_wrapper.UpdateVersion) new_menu.Register('Toggle Jlink', program_wrapper.ToggleJlink) return new_menu def main(): logging.basicConfig(level=logging.WARNING) flags = gflags.FLAGS try: _ = flags(sys.argv) except gflags.FlagsError, e: print ('%s\nUsage: %s ARGS\n%s' % (e, sys.argv[0], flags)) sys.exit(1) program_py = ProgramWrapper() assert hasattr(program_py, 'select_node'), 'No node was selected.' ok = True while ok: menu_title = 'Node: {}\n'.format(program_py.select_node.snake_name) if program_py.version: menu_title += 'Detected AIO Version: {}\n'.format(program_py.version) menu_title += 'Compiled AIO Version: {}\n'.format(aio_version.AIO_VERSION) menu_title += 'Using jlink: {}'.format('--jlink' in program_py.program_py_command) ok, result = CreateMenu(program_py).Run(menu_title, dialog_kwargs={'cancel_label': 'Quit'}) if ok and result[0] != 0: # Replace \\n with spaces, or dialog will assume that all newlines are # escaped twice and the output won't have newlines where it should. program_py.dialog_ui.Message(('Failure (process returned {}):\n' '{}').format(result[0], result[2].replace('\\n', ' ')), title='Error') if __name__ == '__main__': main()
google/makani
avionics/bootloader/program_ui.py
Python
apache-2.0
20,711
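The MenuFunction class above is a small dispatch table from dialog entries to callables. A sketch of it in isolation, with a stub standing in for ui_helper.UserInterface; the stub is hypothetical and only the Menu() call signature is taken from the code above.

class StubDialog(object):
    """Pretends the user always picks the first menu entry."""

    def Menu(self, text, options, **kwargs):
        options = list(options)       # Run() passes a zip of tags and names.
        return True, options[0][0]    # (ok, selected_tag)


menu = MenuFunction(StubDialog(), title='Demo')
menu.Register('Say hello', lambda: 'hello')
menu.Register('Quit', lambda: 'bye')
ok, result = menu.Run('Pick an action:')
assert (ok, result) == (True, 'hello')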