Dataset schema:
  code        string   (length 2 to 1.05M)
  repo_name   string   (length 5 to 104)
  path        string   (length 4 to 251)
  language    string   (1 distinct value)
  license     string   (15 distinct values)
  size        int32    (2 to 1.05M)
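The records that follow conform to this schema. As a minimal sketch of how a column layout like this might be consumed (assuming the data is published as a Hugging Face dataset; the dataset identifier below is a hypothetical placeholder, not the real one):

from datasets import load_dataset

# "user/python-apache2-files" is a placeholder id used for illustration only.
ds = load_dataset("user/python-apache2-files", split="train", streaming=True)

for record in ds.take(3):
    # Each record exposes the six columns listed above.
    print(record["repo_name"], record["path"], record["license"], record["size"])
    print(record["code"][:200])  # the file contents are stored as one string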
import unittest

import mock

from greenpithumb import light_sensor


class LightSensorTest(unittest.TestCase):

    def setUp(self):
        self.mock_adc = mock.Mock()
        channel = 1
        self.light_sensor = light_sensor.LightSensor(self.mock_adc, channel)

    def test_light_50_pct(self):
        """Near midpoint light sensor value should return near 50."""
        self.mock_adc.read_adc.return_value = 511
        self.assertAlmostEqual(self.light_sensor.light(), 50.0, places=1)

    def test_ambient_light_too_low(self):
        """Light sensor value less than min should raise a ValueError."""
        with self.assertRaises(light_sensor.LightSensorLowError):
            self.mock_adc.read_adc.return_value = (
                light_sensor._LIGHT_SENSOR_MIN_VALUE - 1)
            self.light_sensor.light()
JeetShetty/GreenPiThumb
tests/test_light_sensor.py
Python
apache-2.0
826
#!/usr/bin/env python # encoding: utf-8 """ Copyright 2011, 2012 Google Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import os, time, logging, threading, Queue class Threadable: def __init__(self,name="tvod.util.Threadable"): self._exit = False self._name = name # properties def get_exit(self): return self._exit def set_exit(self,value): assert isinstance(value,bool) self._exit = value def get_name(self): return self._name exit = property(get_exit,set_exit) name = property(get_name) # methods def runloop(self): while not self.exit: self.dowork() time.sleep(0.5) def dowork(self): pass class WatchFolder(Threadable): def __init__(self,path,timedelta=10.0): assert os.path.isdir(path) assert timedelta > 0 Threadable.__init__(self,name="<WatchFolder %s>" % os.path.basename(path)) self._path = path self._files = dict() self._timedelta = timedelta self._scantime = None # properties def get_path(self): return self._path path = property(get_path) # methods def scan(self): # scan for new files new_files = dict([(f, None) for f in os.listdir(self._path)]) added_files = [f for f in new_files if (not f in self._files and self.valid_filename(f))] deleted_files = [f for f in self._files if (not f in new_files and self.valid_filename(f))] # report on changes if added_files: self.added(added_files) if deleted_files: self.deleted(deleted_files) # reset files self._files = new_files def dowork(self): if self._scantime==None or (time.time() - self._scantime) > self._timedelta: logging.debug("SCANNING") self.scan() self._scantime = time.time() # These are the messages which need to be overridden def valid_filename(self,filename): if filename.startswith('.') or filename.endswith('~'): return False return True def added(self,paths): pass def deleted(self,paths): pass class XMLWatchFolder(WatchFolder): def __init__(self,path,queue): assert isinstance(queue,Queue.Queue) WatchFolder.__init__(self,path) self.queue = queue def valid_filename(self,filename): if not WatchFolder.valid_filename(self,filename): return False (basename,extension) = os.path.splitext(filename) if extension.lower() != ".xml": return False return True def added(self,filenames): for f in filenames: path = os.path.join(self.path,f) if not (os.path.exists(path) and os.path.getsize(path)): logging.debug("Ignoring non-existent or empty file: %s" % path) continue if not os.path.isfile(path): continue logging.debug("ADDED: %s/%s" % (os.path.basename(self.path),f)) self.queue.put(WorkerItem(path)) def deleted(self,filenames): for f in filenames: logging.debug("DELETED: %s/%s" % (os.path.basename(self.path),f)) class Worker(Threadable): def __init__(self,queue,number=0): assert isinstance(queue,Queue.Queue) Threadable.__init__(self,name="<Worker %d>" % number) self._queue = queue # methods def dowork(self): try: item = self._queue.get(True,0.5) assert isinstance(item,WorkerItem) self.process(item) except Queue.Empty, e: pass def process(self,item): pass class WorkerItem: def __init__(self,path): assert path self._path = path # properties def get_path(self): 
return self._path path = property(get_path) # convert into a string def __str__(self): return "<WorkerItem %s>" % self._path
tectronics/tvod-integrate
lib/python/tvod/util.py
Python
apache-2.0
3,896
import unreal_engine as ue
import json


class FilmActor:

    def begin_play(self):
        self.pawn = self.uobject.get_owner()

    def getjson(self):
        ue.log("@@@@video getting json:")
        loc = self.uobject.get_actor_location()
        rot = self.uobject.get_actor_forward()
        data = {
            "x": loc.x, "y": loc.y, "z": loc.z,
            "rx": rot.x, "ry": rot.y, "rz": rot.z
        }
        return json.dumps(data)

    def addtoworld(self):
        ue.log("@@@@video add to world")
        return ""

    def setjson(self, js):
        ue.log("@@@@video setting json:")
        data = json.loads(js)
        loc = self.uobject.get_actor_location()
        loc.x = data["x"]
        loc.y = data["y"]
        loc.z = data["z"]
        self.uobject.set_actor_location(loc)
        rot = self.uobject.get_actor_forward()
        return True

    def tick(self, delta_time):
        pass
meahmadi/ThreeDHighway
Content/Scripts/FilmActor.py
Python
apache-2.0
767
#!/usr/bin/python
"""
Example from pybedtools documentation (:ref:`third example`) to count \
reads in introns and exons using multiple CPUs.
"""

from __future__ import print_function
import pybedtools
import argparse
import os
import sys
import multiprocessing


def featuretype_filter(feature, featuretype):
    """
    Only passes features with the specified *featuretype*
    """
    if feature[2] == featuretype:
        return True
    return False


def subset_featuretypes(featuretype):
    return g.filter(featuretype_filter, featuretype).saveas()


def count_reads_in_features(features):
    """
    Callback function to count reads in features
    """
    return features.intersect(abam=bam,
                              b=features.fn,
                              s=stranded,
                              bed=True,
                              stream=True).count()


def main():
    """
    Third quick example from the documentation -- count reads introns and
    exons, in parallel
    """
    ap = argparse.ArgumentParser(prog=os.path.basename(sys.argv[0]),
                                 usage=__doc__)
    ap.add_argument('--gff', required=True,
                    help='GFF or GTF file containing annotations')
    ap.add_argument('--bam', required=True,
                    help='BAM file containing reads to be counted')
    ap.add_argument('--stranded', action='store_true',
                    help='Use strand-specific merging and overlap. '
                         'Default is to ignore strand')
    ap.add_argument('--no-parallel', dest='noparallel', action='store_true',
                    help='Disables parallel computation')
    ap.add_argument('-o', '--output',
                    help='Optional file to which results will be written; '
                         'default is stdout')
    ap.add_argument('-v', '--verbose', action='store_true',
                    help='Verbose (goes to stderr)')
    args = ap.parse_args()

    gff = args.gff
    bam = args.bam
    stranded = args.stranded
    parallel = not args.noparallel

    # Some GFF files have invalid entries -- like chromosomes with negative
    # coords or features of length = 0. This line removes them and saves the
    # result in a tempfile
    g = pybedtools.BedTool(gff).remove_invalid().saveas()

    # Decide which version of map to use. If parallel, we only need 3
    # processes.
    pool = multiprocessing.Pool(processes=3)

    # Get separate files for introns and exons in parallel (if specified)
    featuretypes = ('intron', 'exon')
    introns, exons = pool.map(subset_featuretypes, featuretypes)

    # Perform some genome algebra to get unique and shared regions
    exon_only = exons.subtract(introns).merge().remove_invalid().saveas()
    intron_only = introns.subtract(exons).merge().remove_invalid().saveas()
    intron_and_exon = exons\
        .intersect(introns).merge().remove_invalid().saveas()

    # Do intersections with BAM file in parallel
    features = (exon_only, intron_only, intron_and_exon)
    results = pool.map(count_reads_in_features, features)

    labels = (' exon only:', ' intron only:', 'intron and exon:')

    for label, reads in zip(labels, results):
        print('%s %s' % (label, reads))

    pybedtools.cleanup(verbose=False)


if __name__ == "__main__":
    main()
poojavade/Genomics_Docker
Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/pybedtools-0.7.6-py2.7-linux-x86_64.egg/EGG-INFO/scripts/intron_exon_reads.py
Python
apache-2.0
3,362
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # @Author: mcxiaoke # @Date: 2018-01-26 from __future__ import print_function, unicode_literals, absolute_import import json import sys import os import re import time import shutil import random import mimetypes import imghdr import traceback import json import redis import logging import requests from requests.exceptions import RequestException TYPE_CAT = 'cats' TYPE_DOG = 'dogs' TYPE_OTHER = 'others' SOURCE_ROOT = os.path.join('..', 'images') TWO_HOUR_EXPIRE = 60*60*2 # in seconds MEDIA_ID_EXPIRE = TWO_HOUR_EXPIRE * 35 # in seconds ACCESS_TOKEN_KEY = 'wechat:token:v1:%s' MEDIA_ID_KEY = 'wechat:media_ids:v1:%s' MEDIA_ID_OUTPUT = 'data' MEDIA_ID_USER_KEY = 'wechat:media_ids:user:v1:%s:%s' MEDIA_ID_FILE = 'media_ids_v1_%s.txt' UPLOAD_IMAGE_URL = 'https://api.weixin.qq.com/cgi-bin/media/upload?access_token=%s&type=image' GET_TOKEN_URL = 'https://api.weixin.qq.com/cgi-bin/token?grant_type=client_credential&appid=%s&secret=%s' logging.basicConfig(level=logging.INFO) logger = logging.getLogger('MediaStore') def get_wechat_access_token(app_id, app_secret): url = GET_TOKEN_URL % (app_id, app_secret) logger.info('get_wechat_access_token url=%s' % url) response = requests.get(url) response.encoding = 'utf-8' logger.info('get_wechat_access_token result=%s' % response.json()) return response.json()['access_token'] class MediaStore(object): _redis = redis.StrictRedis(decode_responses=True) def __init__(self, name, app_id, app_secret, r=_redis, expire=MEDIA_ID_EXPIRE): assert name, 'name can not be None' assert app_id, 'app_id can not be None' assert app_secret, 'app_secret can not be None' self.name = name self.app_id = app_id self.app_secret = app_secret self.expire = expire self.r = r logger.debug('__init__ name=%s app_id=%s, app_secret=%s' % (name, app_id, app_secret)) def _get_media_key(self, type_name=''): return MEDIA_ID_KEY % type_name def _get_media_file(self, type_name=''): return os.path.join(MEDIA_ID_OUTPUT, MEDIA_ID_FILE % type_name) def _get_user_key(self, user_id, type_name=''): return MEDIA_ID_USER_KEY % (type_name, user_id) def _get_access_token(self): token = self.r.get(ACCESS_TOKEN_KEY % self.app_id) if not token: token = get_wechat_access_token(self.app_id, self.app_secret) logger.info('get_wechat_access_token token=%s' % token) if token: self.r.set(ACCESS_TOKEN_KEY % self.app_id, token) self.r.expire(ACCESS_TOKEN_KEY % self.app_id, TWO_HOUR_EXPIRE) return token def clear_media_ids(self, type_name=''): logger.info('clear_media_ids type=%s' % type_name) self.r.delete(self._get_media_key(type_name)) def save_media_ids(self, media_ids, type_name='', replace=True): if media_ids: with open(self._get_media_file(type_name), 'w') as f: f.write('\n'.join(media_ids)) key = self._get_media_key(type_name) if replace: self.r.delete(key) rt = self.r.sadd(key, *media_ids) self.r.expire(key, self.expire) logger.info('save_media_ids %s media ids saved %s' % (self.media_ids_length(type_name), rt)) return media_ids def upload_image(self, filepath): token = self._get_access_token() if not token: raise IOError('token is None') url = UPLOAD_IMAGE_URL % token files = {'media': open(filepath, 'rb')} try: response = requests.post(url, files=files) response.encoding = 'utf-8' return response.json()['media_id'] except RequestException as e: logger.error('upload_image error=%s' % e) def upload_images(self, source_dir, type_name='', max_count=100): if not source_dir or not os.path.isdir(source_dir): return logger.info('upload_images [%s] for type [%s]' % 
(source_dir, type_name)) names = os.listdir(source_dir) if len(names) > max_count: names = random.sample(names, max_count) count = 0 mids = [] for name in names: filepath = os.path.join(source_dir, name) filepath = os.path.abspath(filepath) mime_type, _ = mimetypes.guess_type(name) if mime_type not in ['image/jpeg', 'image/png', 'image/gif']: logger.warning('upload_images invalid=%s' % filepath) continue logger.info('upload_images file=%s' % filepath) media_id = self.upload_image(filepath) if media_id: logger.info('upload_images result=%s' % media_id) mids.append(media_id) count += 1 if count > max_count: break self.save_media_ids(mids, type_name) def random_user_media_id(self, user_id=None, type_name=''): if not user_id: return self.random_media_id(type_name) media_key = self._get_media_key(type_name) user_key = self._get_user_key(user_id, type_name) mids = self.r.sdiff(media_key, user_key) mid = None if mids: mid = random.choice(list(mids)) if mid: self.r.sadd(user_key, mid) self.r.expire(user_key, self.expire) if not mid: self.r.delete(user_key) mid = self.random_media_id(type_name) logger.debug('random_user_media_id user_id=%s result=%s' % (user_id, mid)) return mid def all_media_ids(self, type_name=''): return self.r.smembers(self._get_media_key(type_name)) def media_ids_length(self, type_name=''): return self.r.scard(self._get_media_key(type_name)) def random_media_id(self, type_name=''): return self.r.srandmember(self._get_media_key(type_name)) from config import WECHAT_APPID, WECHAT_APPSECRET, WECHAT2_APPID, WECHAT2_APPSECRET store1 = MediaStore('Cat', WECHAT_APPID, WECHAT_APPSECRET) store2 = MediaStore('Miu', WECHAT2_APPID, WECHAT2_APPSECRET) def update_app(store, root=SOURCE_ROOT): for type_name in (TYPE_CAT, TYPE_DOG, TYPE_OTHER): source_dir = os.path.join(root, type_name) store.upload_images(source_dir, type_name) def update_all(root=SOURCE_ROOT): check_all(root) update_app(store1, root) update_app(store2, root) def check_all(root=SOURCE_ROOT): for type_name in (TYPE_CAT, TYPE_DOG, TYPE_OTHER): source_dir = os.path.abspath(os.path.join(root, type_name)) if not os.path.exists(source_dir): print('ERROR: check_all source dir [%s] not exists' % source_dir) exit(1) if not os.path.isdir(source_dir): print('ERROR: check_all source dir [%s] not directory' % source_dir) exit(2) if not os.listdir(source_dir): print('ERROR: check_all source dir [%s] is empty' % source_dir) exit(2) print('all directories exists, check passed.') def test_all(): for store in [store1, store2]: for type_name in (TYPE_CAT, TYPE_DOG, TYPE_OTHER): print('\n[Store:%s] found %s values for type %s, read test:' % (store.name, store.media_ids_length(type_name), type_name)) for i in range(0, 10): print(store1.random_user_media_id('test', type_name)) for i in range(0,10): assert store1.random_user_media_id('test', type_name), 'No media id found' assert store1.random_media_id(type_name), 'No media id found' print('all tests passed.') if __name__ == '__main__': import argparse parser = argparse.ArgumentParser( prog='wechat_uploader', description='WeChat Images Uploader v0.1.0') parser.add_argument('-c', '--check', action="store_true", help='check source dir') parser.add_argument('-t', '--test', action="store_true", help='test read media id') parser.add_argument('-u', '--upload', action="store_true", help='upload all images') parser.add_argument('-s', '--source', help='images source directory') args = parser.parse_args() # print(args) source_dir = args.source or SOURCE_ROOT if args.check: check_all(source_dir) elif 
args.upload: update_all(source_dir) elif args.test: test_all() else: parser.print_help()
mcxiaoke/python-labs
scripts/wechat_upload.py
Python
apache-2.0
8,692
"""The auto-tuning module of tvm This module includes: * Tuning space definition API * Efficient auto-tuners * Tuning result and database support * Distributed measurement to scale up tuning """ from . import database from . import feature from . import measure from . import record from . import task from . import tuner from . import util from . import env from . import tophub # some shortcuts from .measure import measure_option, MeasureInput, MeasureResult, MeasureErrorNo, \ LocalBuilder, LocalRunner, RPCRunner from .tuner import callback from .task import template, get_config, create, ConfigSpace, ConfigEntity, \ register_topi_compute, register_topi_schedule, \ DispatchContext, FallbackContext, ApplyHistoryBest as apply_history_best from .env import GLOBAL_SCOPE
mlperf/training_results_v0.6
Fujitsu/benchmarks/resnet/implementations/mxnet/3rdparty/tvm/python/tvm/autotvm/__init__.py
Python
apache-2.0
794
sum = 0
for x in range(0, 1000):
    if(x % 3 == 0 or x % 5 == 0):
        sum += x
print(sum)
scottnm/ProjectEuler
python/Problem1-Multiples_of_3_&_5.py
Python
apache-2.0
82
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). from __future__ import annotations import hashlib import json from dataclasses import dataclass from enum import Enum from typing import Any, Callable, Iterable, Set, TypeVar from pkg_resources import Requirement from pants.backend.python.util_rules.interpreter_constraints import InterpreterConstraints from pants.util.ordered_set import FrozenOrderedSet BEGIN_LOCKFILE_HEADER = b"# --- BEGIN PANTS LOCKFILE METADATA: DO NOT EDIT OR REMOVE ---" END_LOCKFILE_HEADER = b"# --- END PANTS LOCKFILE METADATA ---" _concrete_metadata_classes: dict[int, type[LockfileMetadata]] = {} def _lockfile_metadata_version( version: int, ) -> Callable[[type[LockfileMetadata]], type[LockfileMetadata]]: """Decorator to register a Lockfile metadata version subclass with a given version number. The class must be a frozen dataclass """ def _dec(cls: type[LockfileMetadata]) -> type[LockfileMetadata]: # Only frozen dataclasses may be registered as lockfile metadata: cls_dataclass_params = getattr(cls, "__dataclass_params__", None) if not cls_dataclass_params or not cls_dataclass_params.frozen: raise ValueError( "Classes registered with `_lockfile_metadata_version` may only be " "frozen dataclasses" ) _concrete_metadata_classes[version] = cls return cls return _dec class InvalidLockfileError(Exception): pass @dataclass(frozen=True) class LockfileMetadata: """Base class for metadata that is attached to a given lockfiles. This class, and provides the external API for serializing, deserializing, and validating the contents of individual lockfiles. New versions of metadata implement a concrete subclass and provide deserialization and validation logic, along with specialist serialization logic. To construct an instance of the most recent concrete subclass, call `LockfileMetadata.new()`. """ _LockfileMetadataSubclass = TypeVar("_LockfileMetadataSubclass", bound="LockfileMetadata") valid_for_interpreter_constraints: InterpreterConstraints @staticmethod def new( valid_for_interpreter_constraints: InterpreterConstraints, requirements: set[Requirement], ) -> LockfileMetadata: """Call the most recent version of the `LockfileMetadata` class to construct a concrete instance. This static method should be used in place of the `LockfileMetadata` constructor. This gives calling sites a predictable method to call to construct a new `LockfileMetadata` for writing, while still allowing us to support _reading_ older, deprecated metadata versions. """ return LockfileMetadataV2(valid_for_interpreter_constraints, requirements) @staticmethod def from_lockfile( lockfile: bytes, lockfile_path: str | None = None, resolve_name: str | None = None ) -> LockfileMetadata: """Parse all relevant metadata from the lockfile's header.""" in_metadata_block = False metadata_lines = [] for line in lockfile.splitlines(): if line == BEGIN_LOCKFILE_HEADER: in_metadata_block = True elif line == END_LOCKFILE_HEADER: break elif in_metadata_block: metadata_lines.append(line[2:]) error_suffix = ( "To resolve this error, you will need to regenerate the lockfile by running " "`./pants generate-lockfiles" ) if resolve_name: error_suffix += " --resolve={tool_name}" error_suffix += "`." 
if lockfile_path is not None and resolve_name is not None: lockfile_description = f"the lockfile `{lockfile_path}` for `{resolve_name}`" elif lockfile_path is not None: lockfile_description = f"the lockfile `{lockfile_path}`" elif resolve_name is not None: lockfile_description = f"the lockfile for `{resolve_name}`" else: lockfile_description = "this lockfile" if not metadata_lines: raise InvalidLockfileError( f"Could not find a Pants metadata block in {lockfile_description}. {error_suffix}" ) try: metadata = json.loads(b"\n".join(metadata_lines)) except json.decoder.JSONDecodeError: raise InvalidLockfileError( f"Metadata header in {lockfile_description} is not a valid JSON string and can't " "be decoded. " + error_suffix ) concrete_class = _concrete_metadata_classes[metadata["version"]] return concrete_class._from_json_dict(metadata, lockfile_description, error_suffix) @classmethod def _from_json_dict( cls: type[_LockfileMetadataSubclass], json_dict: dict[Any, Any], lockfile_description: str, error_suffix: str, ) -> _LockfileMetadataSubclass: """Construct a `LockfileMetadata` subclass from the supplied JSON dict. *** Not implemented. Subclasses should override. *** `lockfile_description` is a detailed, human-readable description of the lockfile, which can be read by the user to figure out which lockfile is broken in case of an error. `error_suffix` is a string describing how to fix the lockfile. """ raise NotImplementedError( "`LockfileMetadata._from_json_dict` should not be directly " "called." ) def add_header_to_lockfile(self, lockfile: bytes, *, regenerate_command: str) -> bytes: metadata_dict = self._header_dict() metadata_json = json.dumps(metadata_dict, ensure_ascii=True, indent=2).splitlines() metadata_as_a_comment = "\n".join(f"# {l}" for l in metadata_json).encode("ascii") header = b"%b\n%b\n%b" % (BEGIN_LOCKFILE_HEADER, metadata_as_a_comment, END_LOCKFILE_HEADER) regenerate_command_bytes = ( f"# This lockfile was autogenerated by Pants. To regenerate, run:\n#\n" f"# {regenerate_command}" ).encode() return b"%b\n#\n%b\n\n%b" % (regenerate_command_bytes, header, lockfile) def _header_dict(self) -> dict[Any, Any]: """Produce a dictionary to be serialized into the lockfile header. Subclasses should call `super` and update the resulting dictionary. 
""" version: int for ver, cls in _concrete_metadata_classes.items(): if isinstance(self, cls): version = ver break else: raise ValueError("Trying to serialize an unregistered `LockfileMetadata` subclass.") return { "version": version, "valid_for_interpreter_constraints": [ str(ic) for ic in self.valid_for_interpreter_constraints ], } def is_valid_for( self, expected_invalidation_digest: str | None, user_interpreter_constraints: InterpreterConstraints, interpreter_universe: Iterable[str], user_requirements: Iterable[Requirement] | None, ) -> LockfileMetadataValidation: """Returns Truthy if this `LockfileMetadata` can be used in the current execution context.""" raise NotImplementedError("call `is_valid_for` on subclasses only") @_lockfile_metadata_version(1) @dataclass(frozen=True) class LockfileMetadataV1(LockfileMetadata): requirements_invalidation_digest: str @classmethod def _from_json_dict( cls: type[LockfileMetadataV1], json_dict: dict[Any, Any], lockfile_description: str, error_suffix: str, ) -> LockfileMetadataV1: metadata = _get_metadata(json_dict, lockfile_description, error_suffix) interpreter_constraints = metadata( "valid_for_interpreter_constraints", InterpreterConstraints, InterpreterConstraints ) requirements_digest = metadata("requirements_invalidation_digest", str, None) return LockfileMetadataV1(interpreter_constraints, requirements_digest) def _header_dict(self) -> dict[Any, Any]: d = super()._header_dict() d["requirements_invalidation_digest"] = self.requirements_invalidation_digest return d def is_valid_for( self, expected_invalidation_digest: str | None, user_interpreter_constraints: InterpreterConstraints, interpreter_universe: Iterable[str], _: Iterable[Requirement] | None, # User requirements are not used by V1 ) -> LockfileMetadataValidation: failure_reasons: set[InvalidLockfileReason] = set() if expected_invalidation_digest is None: return LockfileMetadataValidation(failure_reasons) if self.requirements_invalidation_digest != expected_invalidation_digest: failure_reasons.add(InvalidLockfileReason.INVALIDATION_DIGEST_MISMATCH) if not self.valid_for_interpreter_constraints.contains( user_interpreter_constraints, interpreter_universe ): failure_reasons.add(InvalidLockfileReason.INTERPRETER_CONSTRAINTS_MISMATCH) return LockfileMetadataValidation(failure_reasons) @_lockfile_metadata_version(2) @dataclass(frozen=True) class LockfileMetadataV2(LockfileMetadata): """Lockfile version that permits specifying a requirements as a set rather than a digest. Validity is tested by the set of requirements strings being the same in the user requirements as those in the stored requirements. """ requirements: set[Requirement] @classmethod def _from_json_dict( cls: type[LockfileMetadataV2], json_dict: dict[Any, Any], lockfile_description: str, error_suffix: str, ) -> LockfileMetadataV2: metadata = _get_metadata(json_dict, lockfile_description, error_suffix) requirements = metadata( "generated_with_requirements", Set[Requirement], lambda l: {Requirement.parse(i) for i in l}, ) interpreter_constraints = metadata( "valid_for_interpreter_constraints", InterpreterConstraints, InterpreterConstraints ) return LockfileMetadataV2(interpreter_constraints, requirements) def _header_dict(self) -> dict[Any, Any]: out = super()._header_dict() # Requirements need to be stringified then sorted so that tests are deterministic. Sorting # followed by stringifying does not produce a meaningful result. 
out["generated_with_requirements"] = ( sorted(str(i) for i in self.requirements) if self.requirements is not None else None ) return out def is_valid_for( self, _: str | None, # Validation digests are not used by V2; this param will be deprecated user_interpreter_constraints: InterpreterConstraints, interpreter_universe: Iterable[str], user_requirements: Iterable[Requirement] | None, ) -> LockfileMetadataValidation: failure_reasons: set[InvalidLockfileReason] = set() if user_requirements is None: return LockfileMetadataValidation(failure_reasons) if self.requirements != set(user_requirements): failure_reasons.add(InvalidLockfileReason.REQUIREMENTS_MISMATCH) if not self.valid_for_interpreter_constraints.contains( user_interpreter_constraints, interpreter_universe ): failure_reasons.add(InvalidLockfileReason.INTERPRETER_CONSTRAINTS_MISMATCH) return LockfileMetadataValidation(failure_reasons) def calculate_invalidation_digest(requirements: Iterable[str]) -> str: """Returns an invalidation digest for the given requirements.""" m = hashlib.sha256() inputs = { # `FrozenOrderedSet` deduplicates while keeping ordering, which speeds up the sorting if # the input was already sorted. "requirements": sorted(FrozenOrderedSet(requirements)), } m.update(json.dumps(inputs).encode("utf-8")) return m.hexdigest() class InvalidLockfileReason(Enum): INVALIDATION_DIGEST_MISMATCH = "invalidation_digest_mismatch" INTERPRETER_CONSTRAINTS_MISMATCH = "interpreter_constraints_mismatch" REQUIREMENTS_MISMATCH = "requirements_mismatch" class LockfileMetadataValidation: """Boolean-like value which additionally carries reasons why a validation failed.""" failure_reasons: set[InvalidLockfileReason] def __init__(self, failure_reasons: Iterable[InvalidLockfileReason] = ()): self.failure_reasons = set(failure_reasons) def __bool__(self): return not self.failure_reasons T = TypeVar("T") def _get_metadata( metadata: dict[Any, Any], lockfile_description: str, error_suffix: str, ) -> Callable[[str, type[T], Callable[[Any], T] | None], T]: """Returns a function that will get a given key from the `metadata` dict, and optionally do some verification and post-processing to return a value of the correct type.""" def get_metadata(key: str, type_: type[T], coerce: Callable[[Any], T] | None) -> T: val: Any try: val = metadata[key] except KeyError: raise InvalidLockfileError( f"Required key `{key}` is not present in metadata header for " f"{lockfile_description}. {error_suffix}" ) if not coerce: if isinstance(val, type_): return val raise InvalidLockfileError( f"Metadata value `{key}` in {lockfile_description} must " f"be a {type(type_).__name__}. {error_suffix}" ) else: try: return coerce(val) except Exception: raise InvalidLockfileError( f"Metadata value `{key}` in {lockfile_description} must be able to " f"be converted to a {type(type_).__name__}. {error_suffix}" ) return get_metadata
patricklaw/pants
src/python/pants/backend/python/util_rules/lockfile_metadata.py
Python
apache-2.0
14,210
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import distutils.version as dist_version import os import sys from dragon.db.sqlalchemy.session import get_engine from dragon.db import migration import sqlalchemy import migrate from migrate.versioning import util as migrate_util from dragon.openstack.common import exception from dragon.openstack.common.gettextutils import _ _REPOSITORY = None @migrate_util.decorator def patched_with_engine(f, *a, **kw): url = a[0] engine = migrate_util.construct_engine(url, **kw) try: kw['engine'] = engine return f(*a, **kw) finally: if isinstance(engine, migrate_util.Engine) and engine is not url: migrate_util.log.debug('Disposing SQLAlchemy engine %s', engine) engine.dispose() # TODO(jkoelker) When migrate 0.7.3 is released and nova depends # on that version or higher, this can be removed MIN_PKG_VERSION = dist_version.StrictVersion('0.7.3') if (not hasattr(migrate, '__version__') or dist_version.StrictVersion(migrate.__version__) < MIN_PKG_VERSION): migrate_util.with_engine = patched_with_engine # NOTE(jkoelker) Delay importing migrate until we are patched from migrate.versioning import api as versioning_api from migrate.versioning.repository import Repository try: from migrate.versioning import exceptions as versioning_exceptions except ImportError: try: from migrate import exceptions as versioning_exceptions except ImportError: sys.exit(_("python-migrate is not installed. Exiting.")) #_REPOSITORY = None def db_sync(version=None): if version is not None: try: version = int(version) except ValueError: raise exception.Error(_("version should be an integer")) current_version = db_version() repository = _find_migrate_repo() if version is None or version > current_version: return versioning_api.upgrade(get_engine(), repository, version) else: return versioning_api.downgrade(get_engine(), repository, version) def db_version(): repository = _find_migrate_repo() try: return versioning_api.db_version(get_engine(), repository) except versioning_exceptions.DatabaseNotControlledError as exc: # If we aren't version controlled there may be an existing, # non-version controlled database present. meta = sqlalchemy.MetaData() engine = get_engine() meta.reflect(bind=engine) tables = meta.tables if len(tables): raise exc db_version_control(migration.INIT_VERSION) return versioning_api.db_version(get_engine(), repository) def db_version_control(version=None): repository = _find_migrate_repo() versioning_api.version_control(get_engine(), repository, version) return version def _find_migrate_repo(): """Get the path for the migrate repository.""" path = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'migrate_repo') assert os.path.exists(path) global _REPOSITORY if _REPOSITORY is None: _REPOSITORY = Repository(path) return _REPOSITORY
os-cloud-storage/openstack-workload-disaster-recovery
dragon/db/sqlalchemy/migration.py
Python
apache-2.0
3,810
# python3 # pylint: disable=g-bad-file-header # Copyright 2019 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Analysis for discounting_chain.""" from typing import Optional, Sequence from bsuite.experiments.discounting_chain import sweep from bsuite.utils import plotting import numpy as np import pandas as pd import plotnine as gg NUM_EPISODES = sweep.NUM_EPISODES BASE_REGRET = 0.08 TAGS = sweep.TAGS _HORIZONS = np.array([1, 3, 10, 30, 100]) def score(df: pd.DataFrame) -> float: """Output a single score for discounting_chain.""" n_eps = np.minimum(df.episode.max(), sweep.NUM_EPISODES) ave_return = df.loc[df.episode == n_eps, 'total_return'].mean() / n_eps raw_score = 1. - 10. * (1.1 - ave_return) return np.clip(raw_score, 0, 1) def _mapping_seed_compatibility(df: pd.DataFrame) -> pd.DataFrame: """Utility function to maintain compatibility with old bsuite runs.""" # Discounting chain kwarg "seed" was renamed to "mapping_seed" if 'mapping_seed' in df.columns: nan_seeds = df.mapping_seed.isna() if np.any(nan_seeds): df.loc[nan_seeds, 'mapping_seed'] = df.loc[nan_seeds, 'seed'] print('WARNING: seed renamed to "mapping_seed" for compatibility.') else: if 'seed' in df.columns: print('WARNING: seed renamed to "mapping_seed" for compatibility.') df['mapping_seed'] = df.seed else: print('ERROR: outdated bsuite run, please relaunch.') return df def dc_preprocess(df_in: pd.DataFrame) -> pd.DataFrame: """Preprocess discounting chain data for use with regret metrics.""" df = df_in.copy() df = _mapping_seed_compatibility(df) df['optimal_horizon'] = _HORIZONS[ (df.mapping_seed % len(_HORIZONS)).astype(int)] df['total_regret'] = 1.1 * df.episode - df.total_return df['optimal_horizon'] = df.optimal_horizon.astype('category') return df def plot_learning(df: pd.DataFrame, sweep_vars: Optional[Sequence[str]] = None) -> gg.ggplot: """Plots the average regret through time by optimal_horizon.""" df = dc_preprocess(df_in=df) p = plotting.plot_regret_learning( df_in=df, group_col='optimal_horizon', sweep_vars=sweep_vars, max_episode=sweep.NUM_EPISODES ) p += gg.geom_hline(gg.aes(yintercept=BASE_REGRET), linetype='dashed', alpha=0.4, size=1.75) p += gg.coord_cartesian(ylim=(0, 0.1)) return p def plot_average(df: pd.DataFrame, sweep_vars: Optional[Sequence[str]] = None) -> gg.ggplot: """Plots the average regret at 1k episodes by optimal_horizon.""" df = dc_preprocess(df_in=df) p = plotting.plot_regret_average( df_in=df, group_col='optimal_horizon', episode=sweep.NUM_EPISODES, sweep_vars=sweep_vars ) p += gg.geom_hline(gg.aes(yintercept=BASE_REGRET), linetype='dashed', alpha=0.4, size=1.75) return p def plot_seeds(df_in: pd.DataFrame, sweep_vars: Optional[Sequence[str]] = None) -> gg.ggplot: """Plot the returns through time individually by run.""" df = dc_preprocess(df_in) df['average_return'] = 1.1 - (df.total_regret.diff() / df.episode.diff()) p = plotting.plot_individual_returns( df_in=df, 
max_episode=NUM_EPISODES, return_column='average_return', colour_var='optimal_horizon', yintercept=1.1, sweep_vars=sweep_vars, ) return p + gg.ylab('average episodic return')
deepmind/bsuite
bsuite/experiments/discounting_chain/analysis.py
Python
apache-2.0
4,042
# coding: utf-8 """ Copyright 2015 SmartBear Software Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Ref: https://github.com/swagger-api/swagger-codegen """ from pprint import pformat from six import iteritems class Thermal100Temperature(object): """ NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. """ def __init__(self): """ Thermal100Temperature - a model defined in Swagger :param dict swaggerTypes: The key is attribute name and the value is attribute type. :param dict attributeMap: The key is attribute name and the value is json key in definition. """ self.swagger_types = { 'member_id': 'str', 'oem': 'ResourceOem', 'physical_context': 'PhysicalContext100PhysicalContext', 'related_item': 'list[Odata400IdRef]', 'related_itemodata_count': 'Odata400Count', 'related_itemodata_navigation_link': 'Odata400IdRef', 'status': 'ResourceStatus' } self.attribute_map = { 'member_id': 'MemberId', 'oem': 'Oem', 'physical_context': 'PhysicalContext', 'related_item': 'RelatedItem', 'related_itemodata_count': '[email protected]', 'related_itemodata_navigation_link': '[email protected]', 'status': 'Status' } self._member_id = None self._oem = None self._physical_context = None self._related_item = None self._related_itemodata_count = None self._related_itemodata_navigation_link = None self._status = None @property def member_id(self): """ Gets the member_id of this Thermal100Temperature. This is the identifier for the member within the collection. :return: The member_id of this Thermal100Temperature. :rtype: str """ return self._member_id @member_id.setter def member_id(self, member_id): """ Sets the member_id of this Thermal100Temperature. This is the identifier for the member within the collection. :param member_id: The member_id of this Thermal100Temperature. :type: str """ self._member_id = member_id @property def oem(self): """ Gets the oem of this Thermal100Temperature. This is the manufacturer/provider specific extension moniker used to divide the Oem object into sections. :return: The oem of this Thermal100Temperature. :rtype: ResourceOem """ return self._oem @oem.setter def oem(self, oem): """ Sets the oem of this Thermal100Temperature. This is the manufacturer/provider specific extension moniker used to divide the Oem object into sections. :param oem: The oem of this Thermal100Temperature. :type: ResourceOem """ self._oem = oem @property def physical_context(self): """ Gets the physical_context of this Thermal100Temperature. Describes the area or device to which this temperature measurement applies. :return: The physical_context of this Thermal100Temperature. :rtype: PhysicalContext100PhysicalContext """ return self._physical_context @physical_context.setter def physical_context(self, physical_context): """ Sets the physical_context of this Thermal100Temperature. Describes the area or device to which this temperature measurement applies. :param physical_context: The physical_context of this Thermal100Temperature. 
:type: PhysicalContext100PhysicalContext """ self._physical_context = physical_context @property def related_item(self): """ Gets the related_item of this Thermal100Temperature. Describes the areas or devices to which this temperature measurement applies. :return: The related_item of this Thermal100Temperature. :rtype: list[Odata400IdRef] """ return self._related_item @related_item.setter def related_item(self, related_item): """ Sets the related_item of this Thermal100Temperature. Describes the areas or devices to which this temperature measurement applies. :param related_item: The related_item of this Thermal100Temperature. :type: list[Odata400IdRef] """ self._related_item = related_item @property def related_itemodata_count(self): """ Gets the related_itemodata_count of this Thermal100Temperature. :return: The related_itemodata_count of this Thermal100Temperature. :rtype: Odata400Count """ return self._related_itemodata_count @related_itemodata_count.setter def related_itemodata_count(self, related_itemodata_count): """ Sets the related_itemodata_count of this Thermal100Temperature. :param related_itemodata_count: The related_itemodata_count of this Thermal100Temperature. :type: Odata400Count """ self._related_itemodata_count = related_itemodata_count @property def related_itemodata_navigation_link(self): """ Gets the related_itemodata_navigation_link of this Thermal100Temperature. :return: The related_itemodata_navigation_link of this Thermal100Temperature. :rtype: Odata400IdRef """ return self._related_itemodata_navigation_link @related_itemodata_navigation_link.setter def related_itemodata_navigation_link(self, related_itemodata_navigation_link): """ Sets the related_itemodata_navigation_link of this Thermal100Temperature. :param related_itemodata_navigation_link: The related_itemodata_navigation_link of this Thermal100Temperature. :type: Odata400IdRef """ self._related_itemodata_navigation_link = related_itemodata_navigation_link @property def status(self): """ Gets the status of this Thermal100Temperature. :return: The status of this Thermal100Temperature. :rtype: ResourceStatus """ return self._status @status.setter def status(self, status): """ Sets the status of this Thermal100Temperature. :param status: The status of this Thermal100Temperature. :type: ResourceStatus """ self._status = status def to_dict(self): """ Returns the model properties as a dict """ result = {} for attr, _ in iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() else: result[attr] = value return result def to_str(self): """ Returns the string representation of the model """ return pformat(self.to_dict()) def __repr__(self): """ For `print` and `pprint` """ return self.to_str() def __eq__(self, other): """ Returns true if both objects are equal """ return self.__dict__ == other.__dict__ def __ne__(self, other): """ Returns true if both objects are not equal """ return not self == other
jlongever/redfish-client-python
on_http_redfish_1_0/models/thermal_1_0_0_temperature.py
Python
apache-2.0
8,295
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-05-04 12:05
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('main', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='notification',
            name='queued',
            field=models.BooleanField(db_index=True, default=False),
        ),
    ]
CSchool/SchoolSite
CSchoolSite/main/migrations/0002_notification_queued.py
Python
apache-2.0
457
from __future__ import unicode_literals

from django import template
from django.conf import settings
from django.utils import timezone

from ads.models import Ad

register = template.Library()


@register.inclusion_tag('ads/tags/render_ads_zone.html', takes_context=True)
def render_ads_zone(context, zone):
    """
    Returns an advertise for a ``zone``.

    Tag usage:
    {% load ads_tags %}
    {% render_zone 'zone' %}
    """
    context.update({
        'google_adsense_client': settings.ADS_GOOGLE_ADSENSE_CLIENT,
        'zone': zone,
        'zone_info': settings.ADS_ZONES.get(zone, None)
    })
    return context


@register.simple_tag
def get_ads_count(zone):
    """
    Returns ads count for ``zone``.

    Tag usage:
    {% load ads_tags %}
    {% get_ads_count 'zone' as ads_count %}
    {% get_ads_count 'zone1,zone2,zone3' as ads_count %}
    """
    zone = zone.split(',')
    return Ad.objects.public().filter(zone__in=zone).count()
razisayyed/django-ads
ads/templatetags/ads_tags.py
Python
apache-2.0
956
''' Created on Sep 15, 2012 Agent classes. Contains references to instances of classes containing observer handlers and code Agent Instances are created automatically. Create a named Handler instance under the Agent, as an instance of the desired handler class, by create (POST) of a JSON object containing a dictionary of settings for example Agent.create({'resourceCName': 'addHandler_1','resourceClass': 'addHandler'}) @author: mjkoster ''' from RESTfulResource import RESTfulResource from LinkFormatProxy import LinkFormatProxy import subprocess class Handler(RESTfulResource): # single base class for handlers to extend directly, contains convenience methods for linking resources def __init__(self, parentObject=None, resourceDescriptor = {}): RESTfulResource.__init__(self, parentObject, resourceDescriptor) self._settings = self._resourceDescriptor # use the constructor descriptor for the initial settings # link cache keeps endpoints hashed by pathFromBase string, only need to walk the path one time self._linkBaseDict = self.Resources.get('baseObject').resources self._linkCache = {} self._init() def _init(self): pass def get(self, Key=None): if Key != None : return self._settings[Key] else : return self._settings def set(self, newSettings): # create an instance of a handler from settings dictionary self._settings.update(newSettings) def handleNotify(self, updateRef=None): # external method to call from Observer-Notifier self._handleNotify(updateRef) def _handleNotify(self, updateRef=None ): # override this for handling state changes from an observer pass def linkToRef(self, linkPath): ''' takes a path string and walks the object tree from a base dictionary returns a ref to the resource at the path endpoint store translations in a hash cache for fast lookup after the first walk ''' self._linkPath = linkPath if self._linkPath in self._linkCache.keys() : return self._linkCache[self._linkPath] # cache miss, walk path and update cache at end self._currentDict = self._linkBaseDict self._pathElements = linkPath.split('/') for pathElement in self._pathElements[:-1] : # all but the last, which should be the endpoint self._currentDict = self._currentDict[pathElement].resources self._resource = self._currentDict[self._pathElements[-1] ] self._linkCache.update({ self._linkPath : self._resource }) return self._resource def getByLink(self, linkPath): return self.linkToRef(linkPath).get() def setByLink(self, linkPath, newValue): self.linkToRef(linkPath).set(newValue) class addHandler(Handler): # an example appHandler that adds two values together and stores the result # define a method for handling state changes in observed resources def _handleNotify(self, updateRef = None ): # get the 2 addends, add them, and set the sum location self._addend1 = self.getByLink(self._settings['addendLink1']) self._addend2 = self.getByLink(self._settings['addendLink2']) self.setByLink( self._settings['sumOutLink'], self._addend1 + self._addend2 ) # simple print handler that echoes the value each time an observed resource is updated class logPrintHandler(Handler): def _handleNotify(self, resource) : print resource.Properties.get('resourceName'), ' = ', resource.get() class BLE_ColorLED_handler(Handler): def _handleNotify(self, resource = None ): subprocess.call([("/usr/local/bin/gatttool"),\ ("--device="+self._settings['MACaddress']),\ ("--addr-type="+self._settings['MACtype']),\ ("--char-write"),\ ("--handle="+self._settings['charHandle']),\ ("--value=0x"+resource.get())]) class Agent(RESTfulResource): # Agent is a container 
for Handlers and daemons, instantiated as a resource of a SmartObject def __init__(self, parentObject=None, resourceDescriptor = {}): RESTfulResource.__init__(self, parentObject, resourceDescriptor) self._handlers = {} def get(self, handlerName=None): if handlerName == None: return self._handlers # to get the list of names else: if self._handlers.has_key(handlerName) : return self._handlers[handlerName] # to get reference to handler resources by handler name return None # new create takes dictionary built from JSON object POSTed to parent resource def create(self, resourceDescriptor): resourceName = resourceDescriptor['resourceName'] resourceClass = resourceDescriptor['resourceClass'] # import the module if it's specified in the descriptor if resourceDescriptor.has_key('resourceClassPath') : resourceClassPath = resourceDescriptor['resourceClassPath'] self.importByPath(resourceClassPath) if resourceName not in self.resources: # create new instance of the named class and add to resources directory, return the ref self.resources.update({resourceName : globals()[resourceClass](self, resourceDescriptor)}) #pass the constructor the entire descriptor for creating the properties object #self.resources.update({resourceName : globals()[resourceClass](self, resourceDescriptor)}) self._handlers.update({resourceName: resourceClass}) return self.resources[resourceName] # returns a reference to the created instance # need to destroy instance of code module # FIXME Doesn't seem to work. Need to look at this and recursive import issue, devise dynamic import system def importByPath(self,classPath): # separate the module path from the class,import the module, and return the class name self._components = classPath.split('.') self._module = __import__( '.'.join(self._components[:-1]) ) return self._module
connectIOT/iottoolkit
iottoolkit/core/Agent.py
Python
apache-2.0
6,297
# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Deploy a model in AI Platform.""" import logging import json import time import subprocess from googleapiclient import discovery from googleapiclient import errors _WAIT_FOR_COMPLETION_SLEEP_SECONDS = 10 _PYTHON_VERSION = '3.5' _RUN_TIME_VERSION = '1.15' def _create_service(): """Gets service instance to start API searches. :return: """ return discovery.build('ml', 'v1') def copy_artifacts(source_path, destination_path): """ :param source_path: :param destination_path: :return: """ logging.info( 'Moving model directory from {} to {}'.format(source_path, destination_path)) subprocess.call( "gsutil -m cp -r {} {}".format(source_path, destination_path), shell=True) class AIPlatformModel(object): def __init__(self, project_id): self._project_id = project_id self._service = _create_service() def model_exists(self, model_name): """ :param model_name: :return: """ models = self._service.projects().models() try: response = models.list( parent='projects/{}'.format(self._project_id)).execute() if response: for model in response['models']: if model['name'].rsplit('/', 1)[1] == model_name: return True else: return False except errors.HttpError as err: logging.error('%s', json.loads(err.content)['error']['message']) def _list_model_versions(self, model_name): """Lists existing model versions in the project. Args: model_name: Model name to list versions for. Returns: Dictionary of model versions. """ versions = self._service.projects().models().versions() try: return versions.list( parent='projects/{}/models/{}'.format(self._project_id, model_name)).execute() except errors.HttpError as err: logging.error('%s', json.loads(err.content)['error']['message']) def create_model(self, model_name, model_region='us-central1'): """ :param model_name: :param model_region: :return: """ if not self.model_exists(model_name): body = { 'name': model_name, 'regions': model_region, 'description': 'MLflow model' } parent = 'projects/{}'.format(self._project_id) try: self._service.projects().models().create( parent=parent, body=body).execute() logging.info('Model "%s" has been created.', model_name) except errors.HttpError as err: logging.error('"%s". Skipping model creation.', json.loads(err.content)['error']['message']) else: logging.warning('Model "%s" already exists.', model_name) def deploy_model(self, bucket_name, model_name, model_version, runtime_version=_RUN_TIME_VERSION): """Deploys model on AI Platform. Args: bucket_name: Cloud Storage Bucket name that stores saved model. model_name: Model name to deploy. model_version: Model version. runtime_version: Runtime version. Raises: RuntimeError if deployment completes with errors. 
""" # For details on request body, refer to: # https://cloud.google.com/ml-engine/reference/rest/v1/projects # .models.versions/create model_version_exists = False model_versions_list = self._list_model_versions(model_name) # Field: version.name Error: A name should start with a letter and # contain only letters, numbers and underscores model_version = 'mlflow_{}'.format(model_version) if model_versions_list: for version in model_versions_list['versions']: if version['name'].rsplit('/', 1)[1] == model_version: model_version_exists = True if not model_version_exists: request_body = { 'name': model_version, 'deploymentUri': '{}'.format(bucket_name), 'framework': 'TENSORFLOW', 'runtimeVersion': runtime_version, 'pythonVersion': _PYTHON_VERSION } parent = 'projects/{}/models/{}'.format(self._project_id, model_name) response = self._service.projects().models().versions().create( parent=parent, body=request_body).execute() op_name = response['name'] while True: deploy_status = ( self._service.projects().operations().get( name=op_name).execute()) if deploy_status.get('done'): logging.info('Model "%s" with version "%s" deployed.', model_name, model_version) break if deploy_status.get('error'): logging.error(deploy_status['error']) raise RuntimeError( 'Failed to deploy model for serving: {}'.format( deploy_status['error'])) logging.info( 'Waiting for %d seconds for "%s" with "%s" version to be ' 'deployed.', _WAIT_FOR_COMPLETION_SLEEP_SECONDS, model_name, model_version) time.sleep(_WAIT_FOR_COMPLETION_SLEEP_SECONDS) else: logging.info('Model "%s" with version "%s" already exists.', model_name, model_version)
GoogleCloudPlatform/ml-on-gcp
tutorials/tensorflow/mlflow_gcp/trainer/model_deployment.py
Python
apache-2.0
6,618
from typing import Any, Callable, Tuple, Union

from packed import pack, unpack

import jj
from jj import default_app, default_handler
from jj.apps import BaseApp, create_app
from jj.http.codes import BAD_REQUEST, OK
from jj.http.methods import ANY, DELETE, GET, POST
from jj.matchers import LogicalMatcher, RequestMatcher, ResolvableMatcher, exists
from jj.requests import Request
from jj.resolvers import Registry, Resolver
from jj.responses import RelayResponse, Response, StreamResponse

from ._history import HistoryRepository
from ._remote_response import RemoteResponseType

__all__ = ("Mock",)

MatcherType = Union[RequestMatcher, LogicalMatcher]


class Mock(jj.App):
    def __init__(self,
                 app_factory: Callable[..., BaseApp] = create_app,
                 resolver_factory: Callable[..., Resolver] = Resolver) -> None:
        self._resolver = resolver_factory(Registry(), default_app, default_handler)
        self._app = app_factory(resolver=self._resolver)
        self._repo = HistoryRepository()

    def _decode(self, payload: bytes) -> Tuple[str, MatcherType, RemoteResponseType]:
        def resolver(cls: Any, **kwargs: Any) -> Any:
            return cls.__unpacked__(**kwargs, resolver=self._resolver)

        decoded = unpack(payload, {ResolvableMatcher: resolver})

        handler_id = decoded.get("id")
        assert isinstance(handler_id, str)

        matcher = decoded.get("request")
        assert isinstance(matcher, (RequestMatcher, LogicalMatcher))

        response = decoded.get("response")
        assert isinstance(response, (Response, RelayResponse))

        return handler_id, matcher, response

    @jj.match(POST, headers={"x-jj-remote-mock": exists})
    async def register(self, request: Request) -> Response:
        payload = await request.read()
        try:
            handler_id, matcher, response = self._decode(payload)
        except Exception:
            return Response(status=BAD_REQUEST, json={"status": BAD_REQUEST})

        async def handler(request: Request) -> RemoteResponseType:
            return response.copy()

        self._resolver.register_attribute("handler_id", handler_id, handler)
        setattr(self._app.__class__, handler_id, matcher(handler))

        return Response(status=OK, json={"status": OK})

    @jj.match(DELETE, headers={"x-jj-remote-mock": exists})
    async def deregister(self, request: Request) -> Response:
        payload = await request.read()
        try:
            handler_id, *_ = self._decode(payload)
        except Exception:
            return Response(status=BAD_REQUEST, json={"status": BAD_REQUEST})

        try:
            delattr(self._app.__class__, handler_id)
        except AttributeError:
            pass
        await self._repo.delete_by_tag(handler_id)

        return Response(status=OK, json={"status": OK})

    @jj.match(GET, headers={"x-jj-remote-mock": exists})
    async def history(self, request: Request) -> Response:
        payload = await request.read()
        try:
            handler_id, *_ = self._decode(payload)
        except Exception:
            return Response(status=BAD_REQUEST, json={"status": BAD_REQUEST})

        history = await self._repo.get_by_tag(handler_id)
        packed = pack(history)

        return Response(status=OK, body=packed)

    @jj.match(ANY)
    async def resolve(self, request: Request) -> StreamResponse:
        handler = await self._resolver.resolve(request, self._app)
        response = await handler(request)

        handler_id = self._resolver.get_attribute("handler_id", handler, default=None)
        if handler_id:
            await self._repo.add(request, response, tags=[handler_id])

        return response
nikitanovosibirsk/jj
jj/mock/_mock.py
Python
apache-2.0
3,717
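The Mock app above exposes register, deregister and history endpoints guarded by the x-jj-remote-mock header, records matched requests in its history repository, and relays everything else through the resolver. A minimal sketch of standing the server up, assuming jj's top-level serve() helper accepts an app instance and a port (as in the project's README examples; host and port here are illustrative):

import jj
from jj.mock import Mock

# Start the remote mock server; remote clients register handlers over HTTP
# by sending packed matcher/response payloads with the x-jj-remote-mock header.
jj.serve(Mock(), port=8080)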
""" EPYNET Classes """ from . import epanet2 from .objectcollection import ObjectCollection from .baseobject import BaseObject, lazy_property from .pattern import Pattern class Node(BaseObject): """ Base EPANET Node class """ static_properties = {'elevation': epanet2.EN_ELEVATION} properties = {'head': epanet2.EN_HEAD, 'pressure': epanet2.EN_PRESSURE} def __init__(self, uid, network): super(Node, self).__init__(uid, network) self.links = ObjectCollection() def get_index(self, uid): if not self._index: self._index = self.network().ep.ENgetnodeindex(uid) return self._index def set_object_value(self, code, value): return self.network().ep.ENsetnodevalue(self.index, code, value) def get_object_value(self, code): return self.network().ep.ENgetnodevalue(self.index, code) @property def index(self): return self.get_index(self.uid) @lazy_property def coordinates(self): return self.network().ep.ENgetcoord(self.index) # extra functionality @lazy_property def upstream_links(self): """ return a list of upstream links """ if self.results != {}: raise ValueError("This method is only supported for steady state simulations") links = ObjectCollection() for link in self.links: if (link.to_node == self and link.flow >= 1e-3) or (link.from_node == self and link.flow < -1e-3): links[link.uid] = link return links @lazy_property def downstream_links(self): """ return a list of downstream nodes """ if self.results != {}: raise ValueError("This method is only supported for steady state simulations") links = ObjectCollection() for link in self.links: if (link.from_node == self and link.flow >= 1e-3) or (link.to_node == self and link.flow < 1e-3): links[link.uid] = link return links @lazy_property def inflow(self): outflow = 0 for link in self.upstream_links: outflow += abs(link.flow) return outflow @lazy_property def outflow(self): outflow = 0 for link in self.downstream_links: outflow += abs(link.flow) return outflow """ calculates all the water flowing out of the node """ class Reservoir(Node): """ EPANET Reservoir Class """ node_type = "Reservoir" class Junction(Node): """ EPANET Junction Class """ static_properties = {'elevation': epanet2.EN_ELEVATION, 'basedemand': epanet2.EN_BASEDEMAND, 'emitter': epanet2.EN_EMITTER} properties = {'head': epanet2.EN_HEAD, 'pressure': epanet2.EN_PRESSURE, 'demand': epanet2.EN_DEMAND} node_type = "Junction" @property def pattern(self): pattern_index = int(self.get_property(epanet2.EN_PATTERN)) uid = self.network().ep.ENgetpatternid(pattern_index) return Pattern(uid, self.network()) @pattern.setter def pattern(self, value): if isinstance(value, int): pattern_index = value elif isinstance(value, str): pattern_index = self.network().ep.ENgetpatternindex(value) else: pattern_index = value.index self.network().solved = False self.set_object_value(epanet2.EN_PATTERN, pattern_index) class Tank(Node): """ EPANET Tank Class """ node_type = "Tank" static_properties = {'elevation': epanet2.EN_ELEVATION, 'basedemand': epanet2.EN_BASEDEMAND, 'initvolume': epanet2.EN_INITVOLUME, 'diameter': epanet2.EN_TANKDIAM, 'minvolume': epanet2.EN_MINVOLUME, 'minlevel': epanet2.EN_MINLEVEL, 'maxlevel': epanet2.EN_MAXLEVEL, 'maxvolume': 25, 'tanklevel': epanet2.EN_TANKLEVEL} properties = {'head': epanet2.EN_HEAD, 'pressure': epanet2.EN_PRESSURE, 'demand': epanet2.EN_DEMAND, 'volume': 24, 'level': epanet2.EN_TANKLEVEL}
VitensTC/epynet
epynet/node.py
Python
apache-2.0
4,030
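These node classes are normally reached through epynet's Network wrapper rather than instantiated directly. A small usage sketch, assuming the package's Network loader, its junction/tank collections, and a valid EPANET .inp file at the placeholder path below:

from epynet import Network

# Load a network, run a steady-state simulation, then read node results.
network = Network('network.inp')   # placeholder path to an EPANET input file
network.solve()

for junction in network.junctions:
    print(junction.uid, junction.pressure, junction.demand)

# Tanks expose the Tank static properties and results shown above.
for tank in network.tanks:
    print(tank.uid, tank.level)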
import numpy as np import random import os import shutil import platform import pytest import ray from ray.test_utils import wait_for_condition from ray.internal.internal_api import memory_summary MB = 1024 * 1024 def _init_ray(): return ray.init( num_cpus=2, object_store_memory=700e6, _system_config={"plasma_unlimited": True}) def _check_spilled_mb(address, spilled=None, restored=None, fallback=None): def ok(): s = memory_summary(address=address["redis_address"], stats_only=True) print(s) if restored: if "Restored {} MiB".format(restored) not in s: return False else: if "Restored" in s: return False if spilled: if "Spilled {} MiB".format(spilled) not in s: return False else: if "Spilled" in s: return False if fallback: if "Plasma filesystem mmap usage: {} MiB".format( fallback) not in s: return False else: if "Plasma filesystem mmap usage:" in s: return False return True wait_for_condition(ok, timeout=3, retry_interval_ms=1000) @pytest.mark.skipif( platform.system() == "Windows", reason="Need to fix up for Windows.") def test_fallback_when_spilling_impossible_on_put(): try: address = _init_ray() x1 = ray.put(np.zeros(400 * MB, dtype=np.uint8)) x1p = ray.get(x1) # x2 will be fallback allocated on the filesystem. x2 = ray.put(np.zeros(400 * MB, dtype=np.uint8)) x2p = ray.get(x2) del x1p del x2p _check_spilled_mb(address, spilled=None, fallback=400) finally: ray.shutdown() @pytest.mark.skipif( platform.system() == "Windows", reason="Need to fix up for Windows.") def test_spilling_when_possible_on_put(): try: address = _init_ray() results = [] for _ in range(5): results.append(ray.put(np.zeros(400 * MB, dtype=np.uint8))) _check_spilled_mb(address, spilled=1600) finally: ray.shutdown() @pytest.mark.skipif( platform.system() == "Windows", reason="Need to fix up for Windows.") def test_fallback_when_spilling_impossible_on_get(): try: address = _init_ray() x1 = ray.put(np.zeros(400 * MB, dtype=np.uint8)) # x1 will be spilled. x2 = ray.put(np.zeros(400 * MB, dtype=np.uint8)) _check_spilled_mb(address, spilled=400) # x1 will be restored, x2 will be spilled. x1p = ray.get(x1) _check_spilled_mb(address, spilled=800, restored=400) # x2 will be restored, triggering a fallback allocation. x2p = ray.get(x2) _check_spilled_mb(address, spilled=800, restored=800, fallback=400) del x1p del x2p finally: ray.shutdown() @pytest.mark.skipif( platform.system() == "Windows", reason="Need to fix up for Windows.") def test_spilling_when_possible_on_get(): try: address = _init_ray() x1 = ray.put(np.zeros(400 * MB, dtype=np.uint8)) # x1 will be spilled. x2 = ray.put(np.zeros(400 * MB, dtype=np.uint8)) _check_spilled_mb(address, spilled=400) # x1 will be restored, x2 will be spilled. ray.get(x1) _check_spilled_mb(address, spilled=800, restored=400) # x2 will be restored, spilling x1. ray.get(x2) _check_spilled_mb(address, spilled=800, restored=800) finally: ray.shutdown() @pytest.mark.skipif( platform.system() == "Windows", reason="Need to fix up for Windows.") def test_task_unlimited(): try: address = _init_ray() x1 = ray.put(np.zeros(400 * MB, dtype=np.uint8)) refs = [x1] # x1 is spilled. x2 = ray.put(np.zeros(400 * MB, dtype=np.uint8)) x2p = ray.get(x2) sentinel = ray.put(np.zeros(100 * MB, dtype=np.uint8)) _check_spilled_mb(address, spilled=400) @ray.remote def consume(refs): # triggers fallback allocation, spilling of the sentinel ray.get(refs[0]) # triggers fallback allocation. 
return ray.put(np.zeros(400 * MB, dtype=np.uint8)) # round 1 ray.get(consume.remote(refs)) _check_spilled_mb(address, spilled=500, restored=400, fallback=400) del x2p del sentinel finally: ray.shutdown() @pytest.mark.skipif( platform.system() == "Windows", reason="Need to fix up for Windows.") def test_task_unlimited_multiget_args(): try: address = _init_ray() # Too many refs to fit into memory. refs = [] for _ in range(10): refs.append(ray.put(np.zeros(200 * MB, dtype=np.uint8))) x2 = ray.put(np.zeros(600 * MB, dtype=np.uint8)) x2p = ray.get(x2) _check_spilled_mb(address, spilled=2000) @ray.remote def consume(refs): # Should work without thrashing. ray.get(refs) return os.getpid() ray.get([consume.remote(refs) for _ in range(1000)]) _check_spilled_mb(address, spilled=2000, restored=2000, fallback=2000) del x2p finally: ray.shutdown() @pytest.mark.skipif( platform.system() == "Windows", reason="Need to fix up for Windows.") def test_fd_reuse_no_memory_corruption(shutdown_only): @ray.remote class Actor: def produce(self, i): s = int(random.random() * 200) z = np.ones(s * 1024 * 1024) z[0] = i return z def consume(self, x, i): print(x) assert x[0] == i, x ray.init(object_store_memory=100e6) a = Actor.remote() b = Actor.remote() for i in range(20): x_id = a.produce.remote(i) ray.get(b.consume.remote(x_id, i)) @pytest.mark.skipif( platform.system() != "Linux", reason="Only Linux handles fallback allocation disk full error.") def test_fallback_allocation_failure(shutdown_only): ray.init( object_store_memory=100e6, _temp_dir="/dev/shm", _system_config={"plasma_unlimited": True}) shm_size = shutil.disk_usage("/dev/shm").total object_size = max(100e6, shm_size // 5) num_exceptions = 0 refs = [] for i in range(8): try: refs.append(ray.put(np.zeros(object_size, dtype=np.uint8))) except ray.exceptions.ObjectStoreFullError: num_exceptions = num_exceptions + 1 assert num_exceptions > 0 # TODO(ekl) enable this test once we implement this behavior. # @pytest.mark.skipif( # platform.system() == "Windows", reason="Need to fix up for Windows.") # def test_task_unlimited_huge_args(): # try: # address = _init_ray() # # # PullManager should raise an error, since the set of task args is # # too huge to fit into memory. # @ray.remote # def consume(*refs): # return "ok" # # # Too many refs to fit into memory. # refs = [] # for _ in range(10): # refs.append(ray.put(np.zeros(200 * MB, dtype=np.uint8))) # # with pytest.raises(Exception): # ray.get(consume.remote(*refs)) # finally: # ray.shutdown() if __name__ == "__main__": import sys sys.exit(pytest.main(["-v", __file__]))
pcmoritz/ray-1
python/ray/tests/test_plasma_unlimited.py
Python
apache-2.0
7,447
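The tests above exercise Ray's plasma_unlimited mode, in which the object store spills objects to disk and falls back to filesystem-backed allocations once shared memory fills up. A condensed sketch of the same check outside pytest, reusing only calls that appear in the test file (the store size and object sizes are illustrative, and this mirrors the Ray version the tests target):

import numpy as np
import ray
from ray.internal.internal_api import memory_summary

MB = 1024 * 1024

address = ray.init(
    num_cpus=2,
    object_store_memory=700e6,
    _system_config={"plasma_unlimited": True})

# Filling the 700 MB store with two 400 MB objects forces spilling/fallback.
refs = [ray.put(np.zeros(400 * MB, dtype=np.uint8)) for _ in range(2)]

# The summary reports lines such as "Spilled ... MiB" and
# "Plasma filesystem mmap usage: ... MiB", which the tests grep for.
print(memory_summary(address=address["redis_address"], stats_only=True))

ray.shutdown()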
from __future__ import print_function

import pandas as pd
from sklearn.base import TransformerMixin


class FamilyCounter(TransformerMixin):
    def __init__(self, use=True):
        self.use = use

    def transform(self, features_raw, **transform_params):
        if self.use:
            features = features_raw.copy(deep=True)
            family = features_raw[['SibSp', 'Parch']]\
                .apply(lambda x: x[0] + x[1], axis=1)
            features.drop('SibSp', axis=1, inplace=True)
            features.drop('Parch', axis=1, inplace=True)
            return pd.concat([features,
                              pd.DataFrame({'Family': family})],
                             axis=1)
        return features_raw

    def fit(self, X, y=None, **fit_params):
        return self

    def get_params(self, *args, **kwargs):
        return {
            'use': self.use
        }

    def set_params(self, **params):
        if 'use' in params:
            self.use = params.get('use')
wojtekwalczak/kaggle_titanic
titanic/transformers/FamilyCounter.py
Python
apache-2.0
946
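FamilyCounter collapses the SibSp and Parch columns of a Titanic-style frame into a single Family count, and its use flag lets the step be switched on and off in a grid search. A quick usage sketch against a toy DataFrame (the import path follows the file location listed above and is otherwise an assumption):

import pandas as pd

from titanic.transformers.FamilyCounter import FamilyCounter

# Toy Titanic-style features: two siblings/spouses and one parent/child
# in the first row, none in the second.
features = pd.DataFrame({'Pclass': [3, 1], 'SibSp': [2, 0], 'Parch': [1, 0]})

transformed = FamilyCounter(use=True).transform(features)
print(transformed)                                    # SibSp/Parch replaced by Family
print(FamilyCounter(use=False).transform(features))   # passthrough when disabled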
#coding: utf-8 __author__ = 'horacioibrahim' import unittest, os import datetime from time import sleep, ctime, time from random import randint from hashlib import md5 from types import StringType # python-iugu package modules import merchant, customers, config, invoices, errors, plans, subscriptions def check_tests_environment(): """ For tests is need environment variables to instantiate merchant. Or Edit tests file to instantiate merchant.IuguMerchant(account_id=YOUR_ID) """ try: global ACCOUNT_ID ACCOUNT_ID = os.environ["ACCOUNT_ID"] except KeyError: raise errors.IuguConfigTestsErrors("Only for tests is required an environment " \ "variable ACCOUNT_ID or edit file tests.py") class TestMerchant(unittest.TestCase): check_tests_environment() # Checks if enviroment variables defined def setUp(self): self.EMAIL_CUSTOMER = "[email protected]" self.client = merchant.IuguMerchant(account_id=ACCOUNT_ID, api_mode_test=True) def tearDown(self): pass def test_create_payment_token_is_test(self): response = self.client.create_payment_token('4111111111111111', 'JA', 'Silva', '12', '2010', '123') self.assertTrue(response.is_test) def test_create_payment_token(self): response = self.client.create_payment_token('4111111111111111', 'JA', 'Silva', '12', '2010', '123') self.assertEqual(response.status, 200) def test_create_charge_credit_card(self): item = merchant.Item("Produto My Test", 1, 10000) token = self.client.create_payment_token('4111111111111111', 'JA', 'Silva', '12', '2010', '123') charge = self.client.create_charge(self.EMAIL_CUSTOMER, item, token=token) self.assertEqual(charge.is_success(), True) def test_create_charge_bank_slip(self): item = merchant.Item("Produto Bank Slip", 1, 1000) charge = self.client.create_charge(self.EMAIL_CUSTOMER, item) self.assertEqual(charge.is_success(), True) class TestCustomer(unittest.TestCase): def setUp(self): hash_md5 = md5() number = randint(1, 50) hash_md5.update(str(number)) email = "{email}@test.com".format(email=hash_md5.hexdigest()) self.random_user_email = email def test_create_customer_basic_info(self): consumer = customers.IuguCustomer(api_mode_test=True, email=self.random_user_email) c = consumer.create() c.remove() self.assertEqual(consumer.email, c.email) def test_create_customer_basic_email(self): consumer = customers.IuguCustomer() c = consumer.create(email=self.random_user_email) c.remove() self.assertEqual(consumer.email, c.email) def test_create_customer_extra_attrs(self): consumer = customers.IuguCustomer(api_mode_test=True, email=self.random_user_email) c = consumer.create(name="Mario Lago", notes="It's the man", custom_variables={'local':'cup'}) c.remove() self.assertEqual(c.custom_variables[0]['name'], "local") self.assertEqual(c.custom_variables[0]['value'], "cup") def test_get_customer(self): consumer = customers.IuguCustomer(api_mode_test=True, email=self.random_user_email) consumer_new = consumer.create() c = consumer.get(customer_id=consumer_new.id) consumer_new.remove() self.assertEqual(consumer.email, c.email) def test_set_customer(self): consumer = customers.IuguCustomer(api_mode_test=True, email=self.random_user_email) consumer_new = consumer.create(name="Mario Lago", notes="It's the man", custom_variables={'local':'cup'}) c = consumer.set(consumer_new.id, name="Lago Mario") consumer_new.remove() self.assertEqual(c.name, "Lago Mario") def test_customer_save(self): consumer = customers.IuguCustomer(api_mode_test=True, email=self.random_user_email) consumer_new = consumer.create(name="Mario Lago", notes="It's the man", 
custom_variables={'local':'cup'}) # Edit info consumer_new.name = "Ibrahim Horacio" # Save as instance consumer_new.save() # verify results check_user = consumer.get(consumer_new.id) consumer_new.remove() self.assertEqual(check_user.name, "Ibrahim Horacio") def test_customer_delete_by_id(self): consumer = customers.IuguCustomer(api_mode_test=True, email=self.random_user_email) consumer_new = consumer.create(name="Mario Lago", notes="It's the man", custom_variables={'local':'cup'}) consumer.delete(consumer_new.id) self.assertRaises(errors.IuguGeneralException, consumer.get, consumer_new.id) def test_customer_delete_instance(self): consumer = customers.IuguCustomer(api_mode_test=True, email=self.random_user_email) consumer_new = consumer.create(name="Mario Lago", notes="It's the man", custom_variables={'local':'cup'}) r = consumer_new.remove() self.assertRaises(errors.IuguGeneralException, consumer.get, consumer_new.id) class TestCustomerLists(unittest.TestCase): def setUp(self): hash_md5 = md5() number = randint(1, 50) hash_md5.update(str(number)) email = "{email}@test.com".format(email=hash_md5.hexdigest()) self.random_user_email = email self.c = customers.IuguCustomer(api_mode_test=True, email=self.random_user_email) # creating customers for tests with lists p1, p2, p3 = "Andrea", "Bruna", "Carol" self.one = self.c.create(name=p1, notes="It's the man", custom_variables={'local':'cup'}) # I'm not happy with it (sleep), but was need. This certainly occurs because # time data is not a timestamp. sleep(1) self.two = self.c.create(name=p2, notes="It's the man", custom_variables={'local':'cup'}) sleep(1) self.three = self.c.create(name=p3, notes="It's the man", custom_variables={'local':'cup'}) sleep(1) self.p1, self.p2, self.p3 = p1, p2, p3 def tearDown(self): self.one.remove() self.two.remove() self.three.remove() def test_getitems(self): customers_list = self.c.getitems() self.assertEqual(type(customers_list), list) def test_getitems_limit(self): # get items with auto DESC order customers_list = self.c.getitems(limit=2) self.assertEqual(len(customers_list), 2) def test_getitems_start(self): # get items with auto DESC order sleep(2) customers_list = self.c.getitems(limit=3) # get latest three customers reference_customer = customers_list[2].name customers_list = self.c.getitems(skip=2) self.assertEqual(customers_list[0].name, reference_customer) def test_getitems_query_by_match_in_name(self): hmd5 = md5() hmd5.update(ctime(time())) salt = hmd5.hexdigest() term = 'name_inexistent_or_improbable_here_{salt}'.format(salt=salt) # test value/term in >>name<< customer = self.c.create(name=term) sleep(2) items = self.c.getitems(query=term) # assert valid because name customer.remove() self.assertEqual(items[0].name, term) def test_getitems_query_by_match_in_notes(self): hmd5 = md5() hmd5.update(ctime(time())) salt = hmd5.hexdigest() term = 'name_inexistent_or_improbable_here_{salt}'.format(salt=salt) # test value/term in >>notes<< customer = self.c.create(name="Sub Zero", notes=term) sleep(2) items = self.c.getitems(query=term) customer.remove() self.assertEqual(items[0].notes, term) def test_getitems_query_by_match_in_email(self): hmd5 = md5() hmd5.update(ctime(time())) salt = hmd5.hexdigest() term = 'name_inexistent_or_improbable_here_{salt}'.format(salt=salt) # test value/term in >>email<< email = term + '@email.com' self.c.email = email customer = self.c.create() sleep(2) items = self.c.getitems(query=term) customer.remove() self.assertIn(term, items[0].email) # Uncomment/comment the next one 
line to disable/enable the test @unittest.skip("Database of webservice is not empty") def test_getitems_sort(self): sleep(1) # Again. It's need # Useful to test database with empty data (previous data, old tests) customers_list = self.c.getitems(sort="name") # monkey skip if len(customers_list) < 4: self.assertEqual(customers_list[0].name, self.p1) else: raise TypeError("API Database is not empty. This test isn't useful. " \ "Use unittest.skip() before this method.") def test_getitems_created_at_from(self): sleep(1) customers_list = self.c.getitems(created_at_from=self.three.created_at) self.assertEqual(customers_list[0].id, self.three.id) # Uncomment the next one line to disable the test # @unittest.skip("Real-time interval not reached") def test_getitems_created_at_to(self): sleep(1) customers_list = self.c.getitems(created_at_to=self.one.created_at) self.assertEqual(customers_list[0].id, self.three.id) def test_getitems_updated_since(self): # get items with auto DESC order sleep(1) customers_list = self.c.getitems(updated_since=self.three.created_at) self.assertEqual(customers_list[0].id, self.three.id) class TestCustomerPayments(unittest.TestCase): def setUp(self): hash_md5 = md5() number = randint(1, 50) hash_md5.update(str(number)) email = "{email}@test.com".format(email=hash_md5.hexdigest()) self.random_user_email = email self.client = customers.IuguCustomer(email="[email protected]") self.customer = self.client.create() self.instance_payment = self.customer.payment.create(description="New payment method", number='4111111111111111', verification_value=123, first_name="Joao", last_name="Maria", month=12, year=2014) def tearDown(self): self.instance_payment.remove() self.customer.remove() # if you remove customer also get payment def test_create_payment_method_new_user_by_create(self): """ Test create payment method to new recent user returned by create() of IuguCustomer """ instance_payment = self.customer.payment.create(description="New payment method", number='4111111111111111', verification_value=123, first_name="Joao", last_name="Maria", month=12, year=2014) instance_payment.remove() self.assertTrue(isinstance(instance_payment, customers.IuguPaymentMethod)) def test_create_payment_method_existent_user_by_get(self): """ Test create payment method of existent user returned by get() of IuguCustomer. """ new_customer = self.client.create() # Test with user from get() existent_customer = self.client.get(new_customer.id) instance_payment = existent_customer.payment.create(description="New payment method", number='4111111111111111', verification_value=123, first_name="Joao", last_name="Maria", month=12, year=2015) instance_payment.remove() self.assertTrue(isinstance(instance_payment, customers.IuguPaymentMethod)) def test_create_payment_method_existent_user_by_getitems(self): """ Test create payment method of existent user returned by getitems() of IuguCustomer """ # Test with user from getitems() customers_list = self.client.getitems() c_0 = customers_list[0] instance_payment = c_0.payment.create(description="New payment method", number='4111111111111111', verification_value=123, first_name="Joao", last_name="Maria", month=12, year=2016) instance_payment.remove() self.assertTrue(isinstance(instance_payment, customers.IuguPaymentMethod)) def test_create_payment_method_non_existent_user_by_instance(self): """ Test create payment method to instance's user before it was created in API. So without ID. 
""" create = self.client.payment.create self.assertRaises(errors.IuguPaymentMethodException, create, description="New payment method", number='4111111111111111', verification_value=123, first_name="Joao", last_name="Maria", month=12, year=2016) def test_create_payment_method_raise_general(self): # Create payment method without data{} where API returns error. customer = self.client.create() self.assertRaises(errors.IuguGeneralException, customer.payment.create, description="Second payment method") customer.remove() def test_get_payment_method_by_payment_id_customer_id(self): # Test get payment based payment_id and customer_id id = self.instance_payment.id # two args passed payment = self.client.payment.get(id, customer_id=self.customer.id) self.assertTrue(isinstance(payment, customers.IuguPaymentMethod)) def test_get_payment_by_customer(self): # Test get payment by instance's customer (existent in API) id = self.instance_payment.id # one arg passed. user is implicit to customer payment = self.customer.payment.get(id) self.assertTrue(isinstance(payment, customers.IuguPaymentMethod)) def test_set_payment_by_payment_id_customer_id(self): # Changes payment method base payment_id and customer_id id = self.instance_payment.id # two args passed payment = self.client.payment.set(id, "New Card Name", customer_id=self.customer.id) self.assertTrue(isinstance(payment, customers.IuguPaymentMethod)) payment_test = self.customer.payment.get(payment.id) self.assertEqual(payment_test.description, payment.description) def test_set_payment_by_customer(self): # Changes payment method base payment_id of an intance's customer id = self.instance_payment.id # one arg passed. user is implicit to customer payment = self.customer.payment.set(id, "New Card Name") self.assertTrue(isinstance(payment, customers.IuguPaymentMethod)) payment_test = self.customer.payment.get(payment.id) self.assertEqual(payment_test.description, payment.description) def test_set_payment_by_customer_by_save(self): """ Changes payment method of an instance's payment no payment_id or no customer_id is need""" self.instance_payment.description = "New Card Name" # no args passed. 
To payment method instance this is implicit payment = self.instance_payment.save() self.assertTrue(isinstance(payment, customers.IuguPaymentMethod)) payment_test = self.customer.payment.get(payment.id) self.assertEqual(payment_test.description, payment.description) def test_set_payment_remove(self): """ Changes payment method of an instance's payment no payment_id or no customer_id is need""" instance_payment = self.customer.payment.create(description="New payment method", number='4111111111111111', verification_value=123, first_name="Joao", last_name="Maria", month=12, year=2014) instance_payment.remove() # Try get payment already removed payment_test = self.customer.payment.get # copy method self.assertRaises(errors.IuguGeneralException, payment_test, instance_payment.id) def test_set_payment_remove_by_attrs(self): """ """ instance_payment = self.customer.payment instance_payment.payment_data.description = "New payment method" instance_payment.payment_data.number = number='4111111111111111' instance_payment.payment_data.verification_value = 123 instance_payment.payment_data.first_name = "Joao" instance_payment.payment_data.last_name = "Silva" instance_payment.payment_data.month = 12 instance_payment.payment_data.year = 2015 instance_payment = instance_payment.create(description="Meu cartao") instance_payment.remove() self.assertRaises(errors.IuguGeneralException, instance_payment.get, instance_payment.id) def test_getitems_payments(self): payment_one = self.customer.payment.create(description="New payment One", number='4111111111111111', verification_value=123, first_name="World", last_name="Cup", month=12, year=2014) payment_two = self.customer.payment.create(description="New payment Two", number='4111111111111111', verification_value=123, first_name="Is a ", last_name="Problem", month=12, year=2015) payment_three = self.customer.payment.create(description="New payment Three", number='4111111111111111', verification_value=123, first_name="To Brazil", last_name="Worry", month=12, year=2015) list_of_payments = self.customer.payment.getitems() self.assertTrue(isinstance(list_of_payments, list)) self.assertTrue(isinstance(list_of_payments[0], customers.IuguPaymentMethod)) class TestInvoice(unittest.TestCase): TODAY = datetime.date.today().strftime("%d/%m/%Y") check_tests_environment() # Checks if enviroment variables defined def setUp(self): hash_md5 = md5() number = randint(1, 50) hash_md5.update(str(number)) email = "{email}@test.com".format(email=hash_md5.hexdigest()) self.customer_email = email # create a customer for tests c = customers.IuguCustomer() self.consumer = c.create(email="[email protected]") # create a invoice item = merchant.Item("Prod 1", 1, 1190) self.item = item self.invoice_obj = invoices.IuguInvoice(email=self.customer_email, item=item, due_date=self.TODAY) self.invoice = self.invoice_obj.create(draft=True) # to tests for refund self.EMAIL_CUSTOMER = "[email protected]" self.client = merchant.IuguMerchant(account_id=ACCOUNT_ID, api_mode_test=True) def tearDown(self): if self.invoice.id: # if id is None already was removed self.invoice.remove() self.consumer.remove() def test_invoice_raise_required_email(self): i = invoices.IuguInvoice() self.assertRaises(errors.IuguInvoiceException, i.create, due_date="30/11/2020", items=self.item) def test_invoice_raise_required_due_date(self): i = invoices.IuguInvoice() self.assertRaises(errors.IuguInvoiceException, i.create, email="[email protected]", items=self.item) def test_invoice_raise_required_items(self): i = 
invoices.IuguInvoice() self.assertRaises(errors.IuguInvoiceException, i.create, due_date="30/11/2020", email="[email protected]") def test_invoice_create_basic(self): self.assertTrue(isinstance(self.invoice, invoices.IuguInvoice)) def test_invoice_with_customer_id(self): res = self.invoice_obj.create(customer_id=self.consumer.id) self.assertEqual(res.customer_id, self.consumer.id) def test_invoice_create_all_fields_as_draft(self): response = self.invoice_obj.create(draft=True, return_url='http://hipy.co/success', expired_url='http://hipy.co/expired', notification_url='http://hipy.co/webhooks', tax_cents=200, discount_cents=500, customer_id=self.consumer.id, ignore_due_email=True) self.assertTrue(isinstance(response, invoices.IuguInvoice)) existent_invoice = invoices.IuguInvoice.get(response.id) self.assertEqual(existent_invoice.expiration_url, response.expiration_url) response.remove() def test_invoice_create_all_fields_as_pending(self): response = self.invoice_obj.create(draft=False, return_url='http://example.com/success', expired_url='http://example.com/expired', notification_url='http://example.com/webhooks', tax_cents=200, discount_cents=500, customer_id=self.consumer.id, ignore_due_email=True) self.assertTrue(isinstance(response, invoices.IuguInvoice)) # The comments below was put because API don't exclude # an invoice that was paid. So only does refund. # response.remove() def test_invoice_created_check_id(self): self.assertIsNotNone(self.invoice.id) def test_invoice_create_with_custom_variables_in_create(self): invoice = self.invoice_obj.create(draft=True, custom_variables={'city': 'Brasilia'}) self.assertEqual(invoice.custom_variables[0]["name"], "city") self.assertEqual(invoice.custom_variables[0]["value"], "Brasilia") invoice.remove() def test_invoice_create_with_custom_variables_in_set(self): invoice = self.invoice_obj.set(invoice_id=self.invoice.id, custom_variables={'city': 'Brasilia'}) self.assertEqual(invoice.custom_variables[0]["name"], "city") self.assertEqual(invoice.custom_variables[0]["value"], "Brasilia") def test_invoice_get_one(self): # test start here res = invoices.IuguInvoice.get(self.invoice.id) self.assertEqual(res.items[0].description, "Prod 1") def test_invoice_create_as_draft(self): self.assertEqual(self.invoice.status, 'draft') def test_invoice_edit_email_with_set(self): id = self.invoice.id invoice_edited = self.invoice_obj.set(invoice_id=id, email="[email protected]") self.assertEqual(invoice_edited.email, u"[email protected]") def test_invoice_edit_return_url_with_set(self): return_url = "http://hipy.co" id = self.invoice.id invoice_edited = self.invoice_obj.set(invoice_id=id, return_url=return_url) self.assertEqual(invoice_edited.return_url, return_url) @unittest.skip("It isn't support by API") def test_invoice_edit_expired_url_with_set(self): expired_url = "http://hipy.co" id = self.invoice.id invoice_edited = self.invoice_obj.set(invoice_id=id, expired_url=expired_url) self.assertEqual(invoice_edited.expiration_url, expired_url) def test_invoice_edit_notification_url_with_set(self): notification_url = "http://hipy.co" id = self.invoice.id invoice_edited = self.invoice_obj.set(invoice_id=id, notification_url=notification_url) self.assertEqual(invoice_edited.notification_url, notification_url) def test_invoice_edit_tax_cents_with_set(self): tax_cents = 200 id = self.invoice.id invoice_edited = self.invoice_obj.set(invoice_id=id, tax_cents=tax_cents) self.assertEqual(invoice_edited.tax_cents, tax_cents) def 
test_invoice_edit_discount_cents_with_set(self): discount_cents = 500 id = self.invoice.id invoice_edited = self.invoice_obj.set(invoice_id=id, discount_cents=discount_cents) self.assertEqual(invoice_edited.discount_cents, discount_cents) def test_invoice_edit_customer_id_with_set(self): customer_id = self.consumer.id id = self.invoice.id invoice_edited = self.invoice_obj.set(invoice_id=id, customer_id=customer_id) self.assertEqual(invoice_edited.customer_id, customer_id) @unittest.skip("without return from API of the field/attribute ignore_due_email") def test_invoice_edit_ignore_due_email_with_set(self): ignore_due_email = True id = self.invoice.id invoice_edited = self.invoice_obj.set(invoice_id=id, ignore_due_email=ignore_due_email) self.assertEqual(invoice_edited.ignore_due_email, ignore_due_email) # TODO: def test_invoice_edit_subscription_id_with_set(self): # TODO: test_invoice_edit_credits_with_set(self): def test_invoice_edit_due_date_with_set(self): due_date = self.TODAY response_from_api = str(datetime.date.today()) id = self.invoice.id invoice_edited = self.invoice_obj.set(invoice_id=id, due_date=due_date) self.assertEqual(invoice_edited.due_date, response_from_api) def test_invoice_edit_items_with_set(self): self.invoice.items[0].description = "Prod Fixed Text and Value" id = self.invoice.id items = self.invoice.items[0] invoice_edited = self.invoice_obj.set(invoice_id=id, items=items) self.assertEqual(invoice_edited.items[0].description, "Prod Fixed Text and Value") def test_invoice_changed_items_with_save(self): self.invoice.items[0].description = "Prod Saved by Instance" # inv_one is instance not saved. Now, we have invoice saved # and invoice_edited that is the response of webservice res = self.invoice.save() self.assertEqual(res.items[0].description, "Prod Saved by Instance") def test_invoice_destroy_item(self): # Removes one item, the unique, created in invoice self.invoice.items[0].remove() re_invoice = self.invoice.save() self.assertEqual(re_invoice.items, None) def test_invoice_remove(self): # wait webservice response time sleep(3) self.invoice.remove() self.assertEqual(self.invoice.id, None) def test_invoice_get_and_save(self): inv = invoices.IuguInvoice.get(self.invoice.id) inv.email = "[email protected]" obj = inv.save() self.assertEqual(obj.email, inv.email) def test_invoice_getitems_and_save(self): sleep(2) # wating...API to persist data inv = None invs = invoices.IuguInvoice.getitems() for i in invs: if i.id == self.invoice.id: inv = i inv.email = "[email protected]" obj = inv.save() self.assertEqual(obj.email, inv.email) def test_invoice_cancel(self): invoice = self.invoice_obj.create(draft=False) re_invoice = invoice.cancel() self.assertEqual(re_invoice.status, "canceled") invoice.remove() #@unittest.skip("Support only invoice paid") # TODO def test_invoice_refund(self): item = merchant.Item("Produto My Test", 1, 10000) token = self.client.create_payment_token('4111111111111111', 'JA', 'Silva', '12', '2010', '123') charge = self.client.create_charge(self.EMAIL_CUSTOMER, item, token=token) invoice = invoices.IuguInvoice.get(charge.invoice_id) re_invoice = invoice.refund() self.assertEqual(re_invoice.status, "refunded") def test_invoice_getitems(self): # wait webservice response time sleep(3) l = invoices.IuguInvoice.getitems() self.assertIsInstance(l, list) self.assertIsInstance(l[0], invoices.IuguInvoice) def test_invoice_getitems_limit(self): invoice_2 = self.invoice_obj.create() sleep(3) l = invoices.IuguInvoice.getitems(limit=2) # The comments below was put 
because API don't exclude # an invoice that was paid. So only does refund. # invoice_2.remove() self.assertEqual(len(l), 2) def test_invoice_getitems_skip(self): invoice_1 = self.invoice_obj.create() invoice_2 = self.invoice_obj.create() invoice_3 = self.invoice_obj.create() sleep(3) l1 = invoices.IuguInvoice.getitems(limit=3) keep_checker = l1[2] l2 = invoices.IuguInvoice.getitems(skip=2) skipped = l2[0] # after skip 2 the first must be keep_checker # The comments below was put because API don't exclude # an invoice that was paid. So only does refund. # invoice_1.remove() # invoice_2.remove() # invoice_3.remove() self.assertEqual(keep_checker.id, skipped.id) # TODO: def test_invoice_getitems_created_at_from(self): # TODO:def test_invoice_getitems_created_at_to(self): # TODO: def test_invoice_getitems_updated_since(self): def test_invoice_getitems_query(self): res = self.invoice_obj.create(customer_id=self.consumer.id) sleep(3) queryset = invoices.IuguInvoice.getitems(query=res.id) self.assertEqual(queryset[0].customer_id, res.customer_id) # The comments below was put because API don't exclude # an invoice that was paid. So only does refund. # res.remove() def test_invoice_getitems_customer_id(self): res = self.invoice_obj.create(customer_id=self.consumer.id) sleep(3) queryset = invoices.IuguInvoice.getitems(query=res.id) self.assertEqual(queryset[0].customer_id, res.customer_id) # The comments below was put because API don't exclude # an invoice that was paid. So only does refund. # res.remove() @unittest.skip("API no support sort (in moment)") def test_invoice_getitems_sort(self): invoice_1 = self.invoice_obj.create() invoice_2 = self.invoice_obj.create() invoice_3 = self.invoice_obj.create() sleep(3) l1 = invoices.IuguInvoice.getitems(limit=3) keep_checker = l1[2] l2 = invoices.IuguInvoice.getitems(limit=3, sort="id") skipped = l2[0] # after skip 2 the first must be keep_checker # The comments below was put because API don't exclude # an invoice that was paid. So only does refund. 
# invoice_1.remove() # invoice_2.remove() # invoice_3.remove() self.assertEqual(keep_checker.id, skipped.id) class TestPlans(unittest.TestCase): def setUp(self): hash_md5 = md5() seed = randint(1, 199) variation = randint(4, 8) hash_md5.update(str(seed)) identifier = hash_md5.hexdigest()[:variation] self.identifier = identifier # random because can't be repeated plan = plans.IuguPlan() self.plan = plan.create(name="My SetUp Plan", identifier=self.identifier, interval=1, interval_type="months", currency="BRL", value_cents=1500) # features self.features = plans.Feature() self.features.name = "Add feature %s" % self.identifier self.features.identifier = self.identifier self.features.value = 11 def tearDown(self): self.plan.remove() def test_plan_create(self): plan = plans.IuguPlan() identifier = self.identifier + "salt" new_plan = plan.create(name="My first lib Plan", identifier=identifier, interval=1, interval_type="months", currency="BRL", value_cents=1000) self.assertIsInstance(new_plan, plans.IuguPlan) self.assertTrue(new_plan.id) new_plan.remove() def test_plan_create_without_required_fields(self): plan = plans.IuguPlan() self.assertRaises(errors.IuguPlansException, plan.create) def test_plan_create_features(self): salt = randint(1, 99) identifier = self.identifier + str(salt) # init object plan = plans.IuguPlan(name="Plan with features", identifier=identifier, interval=1, interval_type="months", currency="BRL", value_cents=1000) plan.features = [self.features,] new_plan_with_features = plan.create() self.assertIsInstance(new_plan_with_features.features[0], plans.Feature) self.assertEqual(new_plan_with_features.features[0].value, self.features.value) new_plan_with_features.remove() def test_plan_get(self): plan_id = self.plan.id plan = plans.IuguPlan.get(plan_id) self.assertEqual(self.identifier, plan.identifier) def test_plan_get_identifier(self): plan = plans.IuguPlan.get_by_identifier(self.identifier) self.assertEqual(self.identifier, plan.identifier) def test_plan_remove(self): plan = plans.IuguPlan() new_plan = plan.create(name="Remove me", identifier="to_remove", interval=1, interval_type="months", currency="BRL", value_cents=2000) removed_id = new_plan.id new_plan.remove() self.assertRaises(errors.IuguGeneralException, plans.IuguPlan.get, removed_id) def test_plan_edit_changes_name_by_set(self): plan_id = self.plan.id new_name = "New name %s" % self.identifier modified_plan = self.plan.set(plan_id, name=new_name) self.assertEqual(new_name, modified_plan.name) def test_plan_edit_changes_identifier_by_set(self): plan_id = self.plan.id new_identifier = "New identifier %s" % self.identifier modified_plan = self.plan.set(plan_id, identifier=new_identifier) self.assertEqual(new_identifier, modified_plan.identifier) def test_plan_edit_changes_interval_by_set(self): plan_id = self.plan.id new_interval = 3 modified_plan = self.plan.set(plan_id, interval=new_interval) self.assertEqual(new_interval, modified_plan.interval) def test_plan_edit_changes_currency_by_set(self): plan_id = self.plan.id new_currency = "US" self.assertRaises(errors.IuguPlansException, self.plan.set, plan_id, currency=new_currency) def test_plan_edit_changes_value_cents_by_set(self): plan_id = self.plan.id value_cents = 3000 modified_plan = self.plan.set(plan_id, value_cents=value_cents) self.assertEqual(value_cents, modified_plan.prices[0].value_cents) def test_plan_edit_changes_features_name_by_set(self): salt = randint(1, 99) identifier = self.identifier + str(salt) # creating a plan with features plan = 
plans.IuguPlan() plan.features = [self.features,] plan.name = "Changes Features Name" plan.identifier = identifier # workaround: setUp already creates plan.interval = 2 plan.interval_type = "weeks" plan.currency = "BRL" plan.value_cents = 3000 plan_returned = plan.create() # to change features name where features already has an id changed_features = plan_returned.features changed_features[0].name = "Changed Name of Features" # return plan changed plan_changed = plan.set(plan_returned.id, features=[changed_features[0]]) self.assertEqual(plan_changed.features[0].name, plan_returned.features[0].name) plan_returned.remove() def test_plan_edit_changes_features_identifier_by_set(self): salt = randint(1, 99) identifier = self.identifier + str(salt) # creating a plan with features plan = plans.IuguPlan() plan.features = [self.features,] plan.name = "Changes Features Identifier" plan.identifier = identifier # workaround: setUp already creates plan.interval = 2 plan.interval_type = "weeks" plan.currency = "BRL" plan.value_cents = 3000 plan_returned = plan.create() # to change features name where features already has an id changed_features = plan_returned.features changed_features[0].identifier = "Crazy_Change" # return plan changed plan_changed = plan.set(plan_returned.id, features=[changed_features[0]]) self.assertEqual(plan_changed.features[0].identifier, plan_returned.features[0].identifier) plan_returned.remove() def test_plan_edit_changes_features_value_by_set(self): salt = randint(1, 99) identifier = self.identifier + str(salt) # creating a plan with features plan = plans.IuguPlan() plan.features = [self.features,] plan.name = "Changes Features Identifier" plan.identifier = identifier # workaround: setUp already creates plan.interval = 2 plan.interval_type = "weeks" plan.currency = "BRL" plan.value_cents = 3000 plan_returned = plan.create() # to change features name where features already has an id changed_features = plan_returned.features changed_features[0].value = 10000 # return plan changed plan_changed = plan.set(plan_returned.id, features=[changed_features[0]]) self.assertEqual(plan_changed.features[0].value, plan_returned.features[0].value) plan_returned.remove() def test_plan_edit_changes_name_by_save(self): self.plan.name = "New name %s" % self.identifier response = self.plan.save() self.assertEqual(response.name, self.plan.name) def test_plan_edit_changes_identifier_by_save(self): seed = randint(1, 999) self.plan.identifier = "New_identifier_%s_%s" % (self.identifier, seed) response = self.plan.save() self.assertEqual(response.identifier, self.plan.identifier) def test_plan_edit_changes_interval_by_save(self): self.plan.interval = 4 response = self.plan.save() self.assertEqual(response.interval, 4) def test_plan_edit_changes_currency_by_save(self): # API only support BRL self.plan.currency = "US" # response = self.plan.save() self.assertRaises(errors.IuguPlansException, self.plan.save) def test_plan_edit_changes_value_cents_by_save(self): self.plan.value_cents = 4000 response = self.plan.save() self.assertEqual(response.prices[0].value_cents, 4000) # TODO: test prices attribute of plan in level one def test_plan_edit_changes_features_name_by_save(self): salt = randint(1, 99) identifier = self.identifier + str(salt) # creating a plan with features plan = plans.IuguPlan() plan.features = [self.features,] plan.name = "Changes Features by Save" plan.identifier = identifier # workaround: setUp already creates plan.interval = 2 plan.interval_type = "weeks" plan.currency = "BRL" 
plan.value_cents = 3000 plan_returned = plan.create() # to change features name where features already has an id to_change_features = plan_returned.features to_change_features[0].name = "Features New by Save" # return plan changed and to save instance plan_returned.features = [to_change_features[0]] plan_changed = plan_returned.save() self.assertEqual(plan_changed.features[0].name, "Features New by Save") plan_returned.remove() def test_plan_edit_changes_features_identifier_by_save(self): salt = randint(1, 99) identifier = self.identifier + str(salt) # creating a plan with features plan = plans.IuguPlan() plan.features = [self.features,] plan.name = "Changes Features by Save" plan.identifier = identifier # workaround: setUp already creates plan.interval = 2 plan.interval_type = "weeks" plan.currency = "BRL" plan.value_cents = 3000 plan_returned = plan.create() # to change features name where features already has an id to_change_features = plan_returned.features to_change_features[0].identifier = "Crazy_Changed" # return plan changed and to save instance plan_returned.features = [to_change_features[0]] plan_changed = plan_returned.save() self.assertEqual(plan_changed.features[0].identifier, "Crazy_Changed") plan_returned.remove() def test_plan_edit_changes_features_value_by_save(self): salt = randint(1, 99) identifier = self.identifier + str(salt) # creating a plan with features plan = plans.IuguPlan() plan.features = [self.features,] plan.name = "Changes Features by Save" plan.identifier = identifier # workaround: setUp already creates plan.interval = 2 plan.interval_type = "weeks" plan.currency = "BRL" plan.value_cents = 3000 plan_returned = plan.create() # to change features name where features already has an id to_change_features = plan_returned.features to_change_features[0].value = 8000 # return plan changed and to save instance plan_returned.features = [to_change_features[0]] plan_changed = plan_returned.save() self.assertEqual(plan_changed.features[0].value, 8000) plan_returned.remove() def test_plan_getitems_filter_limit(self): # creating a plan with features salt = str(randint(1, 199)) + self.identifier plan = plans.IuguPlan() plan_a = plan.create(name="Get Items...", identifier=salt, interval=2, interval_type="weeks", currency="BRL", value_cents=1000) salt = str(randint(1, 199)) + self.identifier plan_b = plan.create(name="Get Items...", identifier=salt, interval=2, interval_type="weeks", currency="BRL", value_cents=2000) salt = str(randint(1, 199)) + self.identifier plan_c = plan.create(name="Get Items...", identifier=salt, interval=2, interval_type="weeks", currency="BRL", value_cents=3000) all_plans = plans.IuguPlan.getitems(limit=3) self.assertEqual(len(all_plans), 3) plan_a.remove() plan_b.remove() plan_c.remove() def test_plan_getitems_filter_skip(self): # creating a plan with features salt = str(randint(1, 199)) + self.identifier plan = plans.IuguPlan() plan_a = plan.create(name="Get Items...", identifier=salt, interval=2, interval_type="weeks", currency="BRL", value_cents=1000) salt = str(randint(1, 199)) + self.identifier plan_b = plan.create(name="Get Items...", identifier=salt, interval=2, interval_type="weeks", currency="BRL", value_cents=2000) salt = str(randint(1, 199)) + self.identifier plan_c = plan.create(name="Get Items...", identifier=salt, interval=2, interval_type="weeks", currency="BRL", value_cents=3000) sleep(2) all_plans_limit = plans.IuguPlan.getitems(limit=3) all_plans_skip = plans.IuguPlan.getitems(skip=2, limit=3) 
self.assertEqual(all_plans_limit[2].id, all_plans_skip[0].id) plan_a.remove() plan_b.remove() plan_c.remove() def test_plan_getitems_filter_query(self): salt = str(randint(1, 199)) + self.identifier name_repeated = salt plan = plans.IuguPlan() plan_a = plan.create(name=name_repeated, identifier=salt, interval=2, interval_type="weeks", currency="BRL", value_cents=1000) salt = str(randint(1, 199)) + self.identifier plan_b = plan.create(name=name_repeated, identifier=salt, interval=2, interval_type="weeks", currency="BRL", value_cents=2000) salt = str(randint(1, 199)) + self.identifier plan_c = plan.create(name=name_repeated, identifier=salt, interval=2, interval_type="weeks", currency="BRL", value_cents=3000) sleep(3) # waiting API to keep data all_filter_query = plans.IuguPlan.getitems(query=name_repeated) self.assertEqual(all_filter_query[0].name, name_repeated) self.assertEqual(len(all_filter_query), 3) plan_a.remove() plan_b.remove() plan_c.remove() #@unittest.skip("TODO support this test") # TODO: def test_plan_getitems_filter_updated_since(self): #@unittest.skip("Sort not work fine. Waiting support of API providers") #def test_plan_getitems_filter_sort(self): class TestSubscriptions(unittest.TestCase): def clean_invoices(self, recent_invoices): """ Removes invoices created in backgrounds of tests """ # The comments below was put because API don't exclude # an invoice that was paid. So only does refund. (API CHANGED) # #if recent_invoices: # invoice = recent_invoices[0] # invoices.IuguInvoice().remove(invoice_id=invoice["id"]) pass def setUp(self): # preparing object... seed = randint(1, 10000) md5_hash = md5() md5_hash.update(str(seed)) plan_id_random = md5_hash.hexdigest()[:12] plan_name = "Subs Plan %s" % plan_id_random name = "Ze %s" % plan_id_random email = "{name}@example.com".format(name=plan_id_random) # plans for multiple tests self.plan_new = plans.IuguPlan().create(name=plan_name, identifier=plan_id_random, interval=1, interval_type="weeks", currency="BRL", value_cents=9900) plan_identifier = "plan_for_changes_%s" % plan_id_random self.plan_two = plans.IuguPlan().create(name="Plan Two", identifier=plan_identifier, interval=1, interval_type="weeks", currency="BRL", value_cents=8800) # one client self.customer = customers.IuguCustomer().create(name=name, email=email) # for tests to edit subscriptions subs_obj = subscriptions.IuguSubscription() self.subscription = subs_obj.create(customer_id=self.customer.id, plan_identifier=self.plan_two.identifier) def tearDown(self): # Attempt to delete the invoices created by subscriptions cases # But this not remove all invoices due not recognizable behavior # as the API no forever return recents_invoices for created # invoices # The comments below was put because API don't exclude # an invoice that was paid. So only does refund. (API CHANGED) #### if self.subscription.recent_invoices: #### invoice = self.subscription.recent_invoices[0] #### # to instanciate invoice from list of the invoices returned by API #### invoice_obj = invoices.IuguInvoice.get(invoice["id"]) #### # The comments below was put because API don't exclude #### # an invoice that was paid. So only does refund. 
#### invoice_obj.remove() self.plan_new.remove() self.plan_two.remove() self.subscription.remove() self.customer.remove() def test_subscription_create(self): # Test to create a subscription only client_id and plan_identifier p_obj = subscriptions.IuguSubscription() subscription_new = p_obj.create(self.customer.id, self.plan_new.identifier) self.assertIsInstance(subscription_new, subscriptions.IuguSubscription) self.assertEqual(subscription_new.plan_identifier, self.plan_new.identifier) self.clean_invoices(subscription_new.recent_invoices) subscription_new.remove() def test_subscription_create_with_custom_variables(self): p_obj = subscriptions.IuguSubscription() subscription_new = p_obj.create(self.customer.id, self.plan_new.identifier, custom_variables={'city':'Recife'}) self.assertEqual(subscription_new.custom_variables[0]["name"], "city") self.assertEqual(subscription_new.custom_variables[0]["value"], "Recife") self.clean_invoices(subscription_new.recent_invoices) subscription_new.remove() def test_subscription_set_with_custom_variables(self): p_obj = subscriptions.IuguSubscription() subscription_new = p_obj.set(sid=self.subscription.id, custom_variables={'city':'Recife'}) self.assertEqual(subscription_new.custom_variables[0]["name"], "city") self.assertEqual(subscription_new.custom_variables[0]["value"], "Recife") # self.clean_invoices(subscription_new.recent_invoices) @unittest.skip("API does not support this only_on_charge_success. CHANGED") def test_subscription_create_only_on_charge_success_with_payment(self): # Test to create subscriptions with charge only customer = customers.IuguCustomer().create(name="Pay now", email="[email protected]") pay = customer.payment.create(description="Payment X", number="4111111111111111", verification_value='123', first_name="Romario", last_name="Baixo", month=12, year=2018) p_obj = subscriptions.IuguSubscription() new_subscription = p_obj.create(customer.id, self.plan_new.identifier, only_on_charge_success=True) self.assertEqual(new_subscription.recent_invoices[0]["status"], "paid") self.clean_invoices(new_subscription.recent_invoices) new_subscription.remove() customer.remove() def test_subscription_create_only_on_charge_success_less_payment(self): # Test to create subscriptions with charge only p_obj = subscriptions.IuguSubscription() self.assertRaises(errors.IuguGeneralException, p_obj.create, self.customer.id, self.plan_new.identifier, only_on_charge_success=True) def test_subscription_remove(self): # Test to remove subscription p_obj = subscriptions.IuguSubscription() subscription_new = p_obj.create(self.customer.id, self.plan_new.identifier) sid = subscription_new.id self.clean_invoices(subscription_new.recent_invoices) subscription_new.remove() self.assertRaises(errors.IuguGeneralException, subscriptions.IuguSubscription.get, sid) def test_subscription_get(self): subscription = subscriptions.IuguSubscription.get(self.subscription.id) self.assertIsInstance(subscription, subscriptions.IuguSubscription) def test_subscription_getitems(self): subscription_list = subscriptions.IuguSubscription.getitems() self.assertIsInstance(subscription_list[0], subscriptions.IuguSubscription) def test_subscription_getitem_limit(self): client_subscriptions = subscriptions.IuguSubscription() sub_1 = client_subscriptions.create(self.customer.id, self.plan_new.identifier) sub_2 = client_subscriptions.create(self.customer.id, self.plan_new.identifier) sub_3 = client_subscriptions.create(self.customer.id, self.plan_new.identifier) sub_4 = 
client_subscriptions.create(self.customer.id, self.plan_new.identifier) sleep(3) # slower API subscriptions_list = subscriptions.IuguSubscription.getitems(limit=1) self.assertEqual(len(subscriptions_list), 1) self.assertEqual(subscriptions_list[0].id, sub_4.id) self.clean_invoices(sub_1.recent_invoices) self.clean_invoices(sub_2.recent_invoices) self.clean_invoices(sub_3.recent_invoices) self.clean_invoices(sub_4.recent_invoices) a, b, c, d = sub_1.remove(), sub_2.remove(), sub_3.remove(), sub_4.remove() def test_subscription_getitem_skip(self): client_subscriptions = subscriptions.IuguSubscription() sub_1 = client_subscriptions.create(self.customer.id, self.plan_new.identifier) sub_2 = client_subscriptions.create(self.customer.id, self.plan_new.identifier) sub_3 = client_subscriptions.create(self.customer.id, self.plan_new.identifier) sub_4 = client_subscriptions.create(self.customer.id, self.plan_new.identifier) sleep(2) subscriptions_list = subscriptions.IuguSubscription.getitems(skip=1) self.assertEqual(subscriptions_list[0].id, sub_3.id) self.clean_invoices(sub_1.recent_invoices) self.clean_invoices(sub_2.recent_invoices) self.clean_invoices(sub_3.recent_invoices) self.clean_invoices(sub_4.recent_invoices) a, b, c, d = sub_1.remove(), sub_2.remove(), sub_3.remove(), sub_4.remove() # TODO: def test_subscription_getitem_created_at_from(self): def test_subscription_getitem_query(self): term = self.customer.name sleep(3) # very slow API! waiting... subscriptions_list = subscriptions.IuguSubscription.getitems(query=term) self.assertGreaterEqual(len(subscriptions_list), 1) # TODO: def test_subscription_getitem_updated_since(self): @unittest.skip("API not support this. No orders is changed") def test_subscription_getitem_sort(self): client_subscriptions = subscriptions.IuguSubscription() sub_1 = client_subscriptions.create(self.customer.id, self.plan_new.identifier) sub_2 = client_subscriptions.create(self.customer.id, self.plan_new.identifier) sub_3 = client_subscriptions.create(self.customer.id, self.plan_new.identifier) sub_4 = client_subscriptions.create(self.customer.id, self.plan_new.identifier) subscriptions_list = subscriptions.IuguSubscription.getitems(sort="-created_at") #self.assertEqual(subscriptions_list[0].id, sub_3.id) self.clean_invoices(sub_1.recent_invoices) self.clean_invoices(sub_2.recent_invoices) self.clean_invoices(sub_3.recent_invoices) self.clean_invoices(sub_4.recent_invoices) a, b, c, d = sub_1.remove(), sub_2.remove(), sub_3.remove(), sub_4.remove() def test_subscription_getitem_customer_id(self): client_subscriptions = subscriptions.IuguSubscription() # previous subscription was created in setUp sub_1 = client_subscriptions.create(self.customer.id, self.plan_new.identifier) sub_2 = client_subscriptions.create(self.customer.id, self.plan_new.identifier) sleep(3) subscriptions_list = subscriptions.IuguSubscription.\ getitems(customer_id=self.customer.id) self.assertEqual(len(subscriptions_list), 3) # sub_1 + sub_2 + setUp self.clean_invoices(sub_1.recent_invoices) self.clean_invoices(sub_2.recent_invoices) a, b = sub_1.remove(), sub_2.remove() def test_subscription_set_plan(self): # Test to change an existent plan in subscription subs = subscriptions.IuguSubscription() subscription = subs.create(self.customer.id, self.plan_new.identifier) sid = subscription.id plan_identifier = self.plan_new.identifier + str("_Newest_ID") # changes to this new plan plan_newest = plans.IuguPlan().create("Plan Name: Newest", plan_identifier, 1, "months", "BRL", 5000) # editing... 
subscription = subscriptions.IuguSubscription().set(sid, plan_identifier=plan_newest.identifier) self.assertEqual(subscription.plan_identifier, plan_identifier) self.clean_invoices(subscription.recent_invoices) subscription.remove() plan_newest.remove() @unittest.skip("API does not support. It returns error 'Subscription Not Found'") def test_subscription_set_customer_id(self): # Test if customer_id changed. Iugu's support (number 782) customer = customers.IuguCustomer().create(name="Cortella", email="[email protected]") subscription = subscriptions.IuguSubscription().\ set(self.subscription.id, customer_id=customer.id) self.assertEqual(subscription.customer_id, customer.id) customer.remove() def test_subscription_set_expires_at(self): # Test if expires_at was changed subscription = subscriptions.IuguSubscription().\ set(self.subscription.id, expires_at="12/12/2014") self.assertEqual(subscription.expires_at, "2014-12-12") def test_subscription_set_suspended(self): # Test if suspended was changed subscription = subscriptions.IuguSubscription().\ set(self.subscription.id, suspended=True) self.assertEqual(subscription.suspended, True) @unittest.skip("Waiting API developers to support this question") def test_subscription_set_skip_charge(self): # Test if skip_charge was marked print self.subscription.id subscription = subscriptions.IuguSubscription().\ set(self.subscription.id, skip_charge=True) self.assertEqual(subscription.suspended, True) def test_subscription_set_subitems(self): # Test if to insert a new item subitem = merchant.Item("Subitems", 1, 2345) subscription = subscriptions.IuguSubscription().\ set(self.subscription.id, subitems=[subitem,]) self.assertEqual(subscription.subitems[0].description, subitem.description) def test_subscription_set_subitems_description(self): # Test if subitem/item descriptions was changed subitem = merchant.Item("Subitems", 1, 2345) subscription = subscriptions.IuguSubscription().\ set(self.subscription.id, subitems=[subitem,]) item_with_id = subscription.subitems[0] item_with_id.description = "Subitems Edited" subscription = subscriptions.IuguSubscription().\ set(self.subscription.id, subitems=[item_with_id,] ) self.assertEqual(subscription.subitems[0].description, item_with_id.description) def test_subscription_set_subitems_price_cents(self): # Test if subitem/item price_cents was changed subitem = merchant.Item("Subitems", 1, 2345) subscription = subscriptions.IuguSubscription().\ set(self.subscription.id, subitems=[subitem,]) item_with_id = subscription.subitems[0] item_with_id.price_cents = 2900 subscription = subscriptions.IuguSubscription().\ set(self.subscription.id, subitems=[item_with_id,] ) self.assertEqual(subscription.subitems[0].price_cents, item_with_id.price_cents) def test_subscription_set_subitems_quantity(self): # Test if subitem/item quantity was changed subitem = merchant.Item("Subitems", 1, 2345) subscription = subscriptions.IuguSubscription().\ set(self.subscription.id, subitems=[subitem,]) item_with_id = subscription.subitems[0] item_with_id.quantity = 4 subscription = subscriptions.IuguSubscription().\ set(self.subscription.id, subitems=[item_with_id,] ) self.assertEqual(subscription.subitems[0].quantity, item_with_id.quantity) def test_subscription_set_subitems_recurrent(self): # Test if subitem/item recurrent was changed subitem = merchant.Item("Subitems", 1, 2345) subscription = subscriptions.IuguSubscription().\ set(self.subscription.id, subitems=[subitem,]) item_with_id = subscription.subitems[0] item_with_id.recurrent = True 
subscription = subscriptions.IuguSubscription().\ set(self.subscription.id, subitems=[item_with_id,]) self.assertEqual(subscription.subitems[0].recurrent, item_with_id.recurrent) def test_subscription_set_subitems_destroy(self): # Test if subitem/item was erased subitem = merchant.Item("Subitems", 1, 2345) subscription = subscriptions.IuguSubscription().\ set(self.subscription.id, subitems=[subitem,]) item_with_id = subscription.subitems[0] item_with_id.destroy = True subscription = subscriptions.IuguSubscription().\ set(self.subscription.id, subitems=[item_with_id,]) self.assertEqual(subscription.subitems, []) def test_subscription_create_credit_based_with_custom_variables(self): # Test if price_cents changed subscription = subscriptions.SubscriptionCreditsBased().\ create(self.customer.id, credits_cycle=2, price_cents=10, custom_variables={'city':"Recife"}) self.assertEqual(subscription.custom_variables[0]['name'], "city") self.assertEqual(subscription.custom_variables[0]['value'], "Recife") self.clean_invoices(subscription.recent_invoices) subscription.remove() def test_subscription_set_credit_based_with_custom_variables(self): # Test if price_cents changed subscription = subscriptions.SubscriptionCreditsBased().\ create(self.customer.id, credits_cycle=2, price_cents=10) subscription = subscriptions.SubscriptionCreditsBased().\ set(subscription.id, custom_variables={'city':"Madrid"}) self.assertEqual(subscription.custom_variables[0]['name'], "city") self.assertEqual(subscription.custom_variables[0]['value'], "Madrid") self.clean_invoices(subscription.recent_invoices) subscription.remove() def test_subscription_create_credit_based(self): # Test if price_cents changed subscription = subscriptions.SubscriptionCreditsBased().\ create(self.customer.id, credits_cycle=2, price_cents=10) self.assertIsInstance(subscription, subscriptions.SubscriptionCreditsBased) self.clean_invoices(subscription.recent_invoices) subscription.remove() def test_subscription_create_credit_based_error_price_cents(self): # Test if price_cents changed subscription = subscriptions.SubscriptionCreditsBased() self.assertRaises(errors.IuguSubscriptionsException, subscription.create, self.customer.id, credits_cycle=2, price_cents=0) def test_subscription_create_credit_based_error_price_cents_empty(self): # Test if price_cents changed subscription = subscriptions.SubscriptionCreditsBased() self.assertRaises(errors.IuguSubscriptionsException, subscription.create, self.customer.id, credits_cycle=2, price_cents=None) def test_subscription_create_credit_based_price_cents(self): # Test if price_cents changed subscription = subscriptions.SubscriptionCreditsBased().\ create(self.customer.id, credits_cycle=2, price_cents=2000) self.assertEqual(subscription.price_cents, 2000) self.clean_invoices(subscription.recent_invoices) subscription.remove() def test_subscription_create_credit_based_credits_cycle(self): # Test if price_cents changed subscription = subscriptions.SubscriptionCreditsBased().\ create(self.customer.id, credits_cycle=2, price_cents=2000) self.assertEqual(subscription.credits_cycle, 2) self.clean_invoices(subscription.recent_invoices) subscription.remove() def test_subscription_create_credit_based_credits_min(self): # Test if price_cents changed subscription = subscriptions.SubscriptionCreditsBased().\ create(self.customer.id, credits_cycle=2, price_cents=2000, credits_min=4000) self.assertEqual(subscription.credits_min, 4000) self.clean_invoices(subscription.recent_invoices) subscription.remove() def 
test_subscription_set_credit_based_price_cents(self): # Test if price_cents changed subscription = subscriptions.SubscriptionCreditsBased().\ create(self.customer.id, credits_cycle=2, price_cents=1200) subscription = subscriptions.SubscriptionCreditsBased().\ set(subscription.id, price_cents=3249) self.assertEqual(subscription.price_cents, 3249) self.clean_invoices(subscription.recent_invoices) subscription.remove() def test_subscription_set_credits_cycle(self): # Test if credits_cycle changed subscription = subscriptions.SubscriptionCreditsBased().\ create(self.customer.id, credits_cycle=2, price_cents=1300) subscription = subscriptions.SubscriptionCreditsBased().\ set(subscription.id, credits_cycle=10) self.assertEqual(subscription.credits_cycle, 10) self.clean_invoices(subscription.recent_invoices) subscription.remove() def test_subscription_set_credits_min(self): # Test if credits_min changed subscription = subscriptions.SubscriptionCreditsBased().\ create(self.customer.id, credits_cycle=2, price_cents=1400) subscription = subscriptions.SubscriptionCreditsBased().\ set(subscription.id, credits_min=2000) self.assertEqual(subscription.credits_min, 2000) self.clean_invoices(subscription.recent_invoices) subscription.remove() def test_subscription_credit_based_get(self): # Test if credits_min changed subscription = subscriptions.SubscriptionCreditsBased().\ create(self.customer.id, credits_cycle=2, price_cents=2000) subscription = subscriptions.SubscriptionCreditsBased().\ get(subscription.id) self.assertIsInstance(subscription, subscriptions.SubscriptionCreditsBased) self.clean_invoices(subscription.recent_invoices) subscription.remove() def test_subscription_credit_based_getitems(self): # Test if credits_min changed subscription = subscriptions.SubscriptionCreditsBased().\ create(self.customer.id, credits_cycle=2, price_cents=2000) sleep(2) subscription_list = subscriptions.SubscriptionCreditsBased().\ getitems() self.assertIsInstance(subscription_list[0], subscriptions.SubscriptionCreditsBased) self.clean_invoices(subscription.recent_invoices) subscription.remove() # Test save method @unittest.skip("This is not support by API. 
Return not found") def test_subscription_save_customer_id(self): # Iugu's support (number 782) customer = customers.IuguCustomer().create(name="Subs save", email="[email protected]") self.subscription.customer_id = customer.id obj = self.subscription.save() self.assertEqual(customer.id, obj.customer_id) customer.remove() def test_subscription_save_expires_at(self): self.subscription.expires_at = "12/12/2020" obj = self.subscription.save() self.assertEqual(obj.expires_at, "2020-12-12") def test_subscription_save_subitems(self): # Test if to save a new item subitem = merchant.Item("Subitems", 1, 2345) self.subscription.subitems = [subitem,] obj = self.subscription.save() self.assertEqual(obj.subitems[0].description, subitem.description) def test_subscription_save_subitems_description(self): # Test if subitem/item descriptions was changed subitem = merchant.Item("Subitems", 1, 2345) self.subscription.subitems = [subitem,] new_subscription = self.subscription.save() item_with_id = new_subscription.subitems[0] item_with_id.description = "Subitems Edited" self.subscription.subitems = [item_with_id] obj = self.subscription.save() self.assertEqual(obj.subitems[0].description, item_with_id.description) def test_subscription_save_subitems_price_cents(self): # Test if subitem/item price_cents was changed subitem = merchant.Item("Subitems", 1, 2345) self.subscription.subitems = [subitem,] new_subscription = self.subscription.save() item_with_id = new_subscription.subitems[0] item_with_id.price_cents = 2900 self.subscription.subitems = [item_with_id,] obj = self.subscription.save() self.assertEqual(obj.subitems[0].price_cents, item_with_id.price_cents) def test_subscription_save_subitems_quantity(self): # Test if subitem/item quantity was changed subitem = merchant.Item("Subitems", 1, 2345) self.subscription.subitems = [subitem,] new_subscription = self.subscription.save() item_with_id = new_subscription.subitems[0] item_with_id.quantity = 4 self.subscription.subitems = [item_with_id,] obj = self.subscription.save() self.assertEqual(obj.subitems[0].quantity, item_with_id.quantity) def test_subscription_save_subitems_recurrent(self): # Test if subitem/item recurrent was changed subitem = merchant.Item("Subitems", 1, 2345) self.subscription.subitems = [subitem,] new_subscription = self.subscription.save() item_with_id = new_subscription.subitems[0] item_with_id.recurrent = True self.subscription.subitems = [item_with_id,] obj = self.subscription.save() self.assertEqual(obj.subitems[0].recurrent, item_with_id.recurrent) def test_subscription_save_subitems__destroy(self): # Test if subitem/item was erased subitem = merchant.Item("Subitems", 1, 2345) self.subscription.subitems = [subitem,] new_subscription = self.subscription.save() item_with_id = new_subscription.subitems[0] item_with_id.destroy = True self.subscription.subitems = [item_with_id,] obj = self.subscription.save() self.assertEqual(obj.subitems, []) def test_subscription_save_suspended(self): self.subscription.suspended = True obj = self.subscription.save() self.assertEqual(obj.suspended, True) # @unittest.skip("Waiting API developers to support this question") # TODO: def test_subscription_save_skip_charge(self): def test_subscription_save_price_cents(self): subscription = subscriptions.SubscriptionCreditsBased() subscription = subscription.create(customer_id=self.customer.id, credits_cycle=2, price_cents=1000) subscription.price_cents = 8188 obj = subscription.save() self.assertEqual(obj.price_cents, 8188) 
self.clean_invoices(subscription.recent_invoices) subscription.remove() def test_subscription_save_credits_cycle(self): subscription = subscriptions.SubscriptionCreditsBased() subscription = subscription.create(customer_id=self.customer.id, credits_cycle=2, price_cents=1000) subscription.credits_cycle = 5 obj = subscription.save() self.assertEqual(obj.credits_cycle, 5) self.clean_invoices(subscription.recent_invoices) subscription.remove() def test_subscription_save_credits_min(self): subscription = subscriptions.SubscriptionCreditsBased() subscription = subscription.create(customer_id=self.customer.id, credits_cycle=2, price_cents=1100) subscription.credits_min = 9000 obj = subscription.save() self.assertEqual(obj.credits_min, 9000) self.clean_invoices(subscription.recent_invoices) subscription.remove() def test_subscription_suspend(self): obj = subscriptions.IuguSubscription().suspend(self.subscription.id) self.assertEqual(obj.suspended, True) @unittest.skip("API not support this activate by REST .../activate") def test_subscription_activate(self): obj = subscriptions.IuguSubscription().suspend(self.subscription.id) self.subscription.suspended = True self.subscription.save() obj = subscriptions.IuguSubscription().activate(self.subscription.id) self.assertEqual(obj.suspended, False) def test_subscription_change_plan(self): seed = randint(1, 999) identifier = "%s_%s" % (self.plan_new.identifier, str(seed)) plan_again_change = plans.IuguPlan().create(name="Change Test", identifier=identifier, interval=1, interval_type="months", currency="BRL", value_cents=1111) obj = subscriptions.IuguSubscription().change_plan( plan_again_change.identifier, sid=self.subscription.id) self.assertEqual(obj.plan_identifier, identifier) self.clean_invoices(obj.recent_invoices) plan_again_change.remove() def test_subscription_change_plan_by_instance(self): seed = randint(1, 999) identifier = "%s_%s" % (self.plan_new.identifier, str(seed)) plan_again_change = plans.IuguPlan().create(name="Change Test", identifier=identifier, interval=1, interval_type="months", currency="BRL", value_cents=1112) obj = self.subscription.change_plan(plan_again_change.identifier) self.assertEqual(obj.plan_identifier, identifier) self.clean_invoices(obj.recent_invoices) plan_again_change.remove() def test_subscription_add_credits(self): subscription = subscriptions.SubscriptionCreditsBased() subscription = subscription.create(customer_id=self.customer.id, credits_cycle=2, price_cents=1100) obj = subscriptions.SubscriptionCreditsBased().add_credits(sid=subscription.id, quantity=20) self.assertEqual(obj.credits, 20) self.clean_invoices(subscription.recent_invoices) subscription.remove() def test_subscription_add_credits_by_instance(self): subscription = subscriptions.SubscriptionCreditsBased() subscription = subscription.create(customer_id=self.customer.id, credits_cycle=2, price_cents=1100) obj = subscription.add_credits(sid=subscription.id, quantity=20) self.assertEqual(obj.credits, 20) self.clean_invoices(subscription.recent_invoices) subscription.remove() def test_subscription_remove_credits(self): subscription = subscriptions.SubscriptionCreditsBased() subscription = subscription.create(customer_id=self.customer.id, credits_cycle=2, price_cents=1100) subscription.add_credits(quantity=20) obj = subscriptions.SubscriptionCreditsBased().\ remove_credits(sid=subscription.id, quantity=5) self.assertEqual(obj.credits, 15) self.clean_invoices(subscription.recent_invoices) subscription.remove() def 
test_subscription_remove_credits_by_instance(self): subscription = subscriptions.SubscriptionCreditsBased() subscription = subscription.create(customer_id=self.customer.id, credits_cycle=2, price_cents=1100) subscription.add_credits(quantity=20) sleep(2) obj = subscription.remove_credits(quantity=5) self.assertEqual(obj.credits, 15) self.clean_invoices(subscription.recent_invoices) subscription.remove() class TestTransfer(unittest.TestCase): # TODO: to create this tests pass if __name__ == '__main__': unittest.main()
horacioibrahim/iugu-python
lib/iugu/tests.py
Python
apache-2.0
80,119
""" Copyright (C) 2017 Open Source Robotics Foundation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import math import numpy as np import latlon import ecef class Enu(object): def __init__(self, e, n, u): self.e = e self.n = n self.u = u def __eq__(self, other): return self.e == other.e and self.n == other.n and self.u == other.u def __hash__(self): return hash((self.e, self.n, self.u)) def to_ecef(self, origin): # this doesn't work at the poles because longitude is not uniquely defined there sin_lon = origin._sin_lon() sin_lat = origin._sin_lat() cos_lon = origin._cos_lon() cos_lat = origin._cos_lat() global_to_ecef_matrix = np.array([[-sin_lon, -cos_lon * sin_lat, cos_lon * cos_lat], [cos_lon, - sin_lon * sin_lat, sin_lon * cos_lat], [0, cos_lat, sin_lat]]) enu_vector = np.array([[self.e], [self.n], [self.u]]) ecef_vector = np.dot(global_to_ecef_matrix, enu_vector) return ecef.Ecef(ecef_vector[0][0], ecef_vector[1][0], ecef_vector[2][0])
ekumenlabs/terminus
terminus/geometry/enu.py
Python
apache-2.0
1,657
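The Enu.to_ecef conversion above depends on an origin object from the latlon module that exposes private trig accessors. Below is a minimal runnable sketch of the same rotation, with a hypothetical StubOrigin standing in for latlon.LatLon; StubOrigin and enu_to_ecef_offset are illustrative names invented here, not part of the terminus codebase.

import math

import numpy as np


class StubOrigin(object):
    """Hypothetical stand-in for latlon.LatLon: only the trig accessors Enu.to_ecef needs."""

    def __init__(self, lat_deg, lon_deg):
        self._lat = math.radians(lat_deg)
        self._lon = math.radians(lon_deg)

    def _sin_lat(self):
        return math.sin(self._lat)

    def _cos_lat(self):
        return math.cos(self._lat)

    def _sin_lon(self):
        return math.sin(self._lon)

    def _cos_lon(self):
        return math.cos(self._lon)


def enu_to_ecef_offset(e, n, u, origin):
    """Apply the same ENU-to-ECEF rotation used by Enu.to_ecef."""
    sin_lon, cos_lon = origin._sin_lon(), origin._cos_lon()
    sin_lat, cos_lat = origin._sin_lat(), origin._cos_lat()
    rotation = np.array([[-sin_lon, -cos_lon * sin_lat, cos_lon * cos_lat],
                         [cos_lon, -sin_lon * sin_lat, sin_lon * cos_lat],
                         [0.0, cos_lat, sin_lat]])
    return np.dot(rotation, np.array([[e], [n], [u]])).flatten()


if __name__ == '__main__':
    origin = StubOrigin(lat_deg=0.0, lon_deg=0.0)
    # At latitude/longitude (0, 0) the local "up" direction is the ECEF x axis.
    print(enu_to_ecef_offset(0.0, 0.0, 1.0, origin))  # ~[1. 0. 0.]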
#
# Copyright 2011 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Package contains helpers for gdata library.
"""
adviti/melange
app/soc/views/helper/gdata_apis/__init__.py
Python
apache-2.0
638
"""Configure number in a device through MQTT topic.""" from __future__ import annotations import functools import logging import voluptuous as vol from homeassistant.components import number from homeassistant.components.number import ( DEFAULT_MAX_VALUE, DEFAULT_MIN_VALUE, DEFAULT_STEP, NumberEntity, ) from homeassistant.const import ( CONF_NAME, CONF_OPTIMISTIC, CONF_UNIT_OF_MEASUREMENT, CONF_VALUE_TEMPLATE, ) from homeassistant.core import HomeAssistant, callback from homeassistant.helpers import config_validation as cv from homeassistant.helpers.reload import async_setup_reload_service from homeassistant.helpers.restore_state import RestoreEntity from homeassistant.helpers.typing import ConfigType from . import PLATFORMS, MqttCommandTemplate, subscription from .. import mqtt from .const import CONF_COMMAND_TOPIC, CONF_QOS, CONF_RETAIN, CONF_STATE_TOPIC, DOMAIN from .debug_info import log_messages from .mixins import MQTT_ENTITY_COMMON_SCHEMA, MqttEntity, async_setup_entry_helper CONF_COMMAND_TEMPLATE = "command_template" _LOGGER = logging.getLogger(__name__) CONF_MIN = "min" CONF_MAX = "max" CONF_PAYLOAD_RESET = "payload_reset" CONF_STEP = "step" DEFAULT_NAME = "MQTT Number" DEFAULT_OPTIMISTIC = False DEFAULT_PAYLOAD_RESET = "None" MQTT_NUMBER_ATTRIBUTES_BLOCKED = frozenset( { number.ATTR_MAX, number.ATTR_MIN, number.ATTR_STEP, } ) def validate_config(config): """Validate that the configuration is valid, throws if it isn't.""" if config.get(CONF_MIN) >= config.get(CONF_MAX): raise vol.Invalid(f"'{CONF_MAX}' must be > '{CONF_MIN}'") return config _PLATFORM_SCHEMA_BASE = mqtt.MQTT_RW_PLATFORM_SCHEMA.extend( { vol.Optional(CONF_COMMAND_TEMPLATE): cv.template, vol.Optional(CONF_MAX, default=DEFAULT_MAX_VALUE): vol.Coerce(float), vol.Optional(CONF_MIN, default=DEFAULT_MIN_VALUE): vol.Coerce(float), vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, vol.Optional(CONF_OPTIMISTIC, default=DEFAULT_OPTIMISTIC): cv.boolean, vol.Optional(CONF_PAYLOAD_RESET, default=DEFAULT_PAYLOAD_RESET): cv.string, vol.Optional(CONF_STEP, default=DEFAULT_STEP): vol.All( vol.Coerce(float), vol.Range(min=1e-3) ), vol.Optional(CONF_UNIT_OF_MEASUREMENT): cv.string, vol.Optional(CONF_VALUE_TEMPLATE): cv.template, }, ).extend(MQTT_ENTITY_COMMON_SCHEMA.schema) PLATFORM_SCHEMA = vol.All( _PLATFORM_SCHEMA_BASE, validate_config, ) DISCOVERY_SCHEMA = vol.All( _PLATFORM_SCHEMA_BASE.extend({}, extra=vol.REMOVE_EXTRA), validate_config, ) async def async_setup_platform( hass: HomeAssistant, config: ConfigType, async_add_entities, discovery_info=None ): """Set up MQTT number through configuration.yaml.""" await async_setup_reload_service(hass, DOMAIN, PLATFORMS) await _async_setup_entity(hass, async_add_entities, config) async def async_setup_entry(hass, config_entry, async_add_entities): """Set up MQTT number dynamically through MQTT discovery.""" setup = functools.partial( _async_setup_entity, hass, async_add_entities, config_entry=config_entry ) await async_setup_entry_helper(hass, number.DOMAIN, setup, DISCOVERY_SCHEMA) async def _async_setup_entity( hass, async_add_entities, config, config_entry=None, discovery_data=None ): """Set up the MQTT number.""" async_add_entities([MqttNumber(hass, config, config_entry, discovery_data)]) class MqttNumber(MqttEntity, NumberEntity, RestoreEntity): """representation of an MQTT number.""" _entity_id_format = number.ENTITY_ID_FORMAT _attributes_extra_blocked = MQTT_NUMBER_ATTRIBUTES_BLOCKED def __init__(self, hass, config, config_entry, discovery_data): """Initialize the MQTT 
Number.""" self._config = config self._optimistic = False self._sub_state = None self._current_number = None NumberEntity.__init__(self) MqttEntity.__init__(self, hass, config, config_entry, discovery_data) @staticmethod def config_schema(): """Return the config schema.""" return DISCOVERY_SCHEMA def _setup_from_config(self, config): """(Re)Setup the entity.""" self._optimistic = config[CONF_OPTIMISTIC] self._templates = { CONF_COMMAND_TEMPLATE: MqttCommandTemplate( config.get(CONF_COMMAND_TEMPLATE), self.hass ).async_render, CONF_VALUE_TEMPLATE: config.get(CONF_VALUE_TEMPLATE), } value_template = self._templates[CONF_VALUE_TEMPLATE] if value_template is None: self._templates[CONF_VALUE_TEMPLATE] = lambda value: value else: value_template.hass = self.hass self._templates[ CONF_VALUE_TEMPLATE ] = value_template.async_render_with_possible_json_value async def _subscribe_topics(self): """(Re)Subscribe to topics.""" @callback @log_messages(self.hass, self.entity_id) def message_received(msg): """Handle new MQTT messages.""" payload = self._templates[CONF_VALUE_TEMPLATE](msg.payload) try: if payload == self._config[CONF_PAYLOAD_RESET]: num_value = None elif payload.isnumeric(): num_value = int(payload) else: num_value = float(payload) except ValueError: _LOGGER.warning("Payload '%s' is not a Number", msg.payload) return if num_value is not None and ( num_value < self.min_value or num_value > self.max_value ): _LOGGER.error( "Invalid value for %s: %s (range %s - %s)", self.entity_id, num_value, self.min_value, self.max_value, ) return self._current_number = num_value self.async_write_ha_state() if self._config.get(CONF_STATE_TOPIC) is None: # Force into optimistic mode. self._optimistic = True else: self._sub_state = await subscription.async_subscribe_topics( self.hass, self._sub_state, { "state_topic": { "topic": self._config.get(CONF_STATE_TOPIC), "msg_callback": message_received, "qos": self._config[CONF_QOS], } }, ) if self._optimistic and (last_state := await self.async_get_last_state()): self._current_number = last_state.state @property def min_value(self) -> float: """Return the minimum value.""" return self._config[CONF_MIN] @property def max_value(self) -> float: """Return the maximum value.""" return self._config[CONF_MAX] @property def step(self) -> float: """Return the increment/decrement step.""" return self._config[CONF_STEP] @property def unit_of_measurement(self) -> str | None: """Return the unit of measurement.""" return self._config.get(CONF_UNIT_OF_MEASUREMENT) @property def value(self): """Return the current value.""" return self._current_number async def async_set_value(self, value: float) -> None: """Update the current value.""" current_number = value if value.is_integer(): current_number = int(value) payload = self._templates[CONF_COMMAND_TEMPLATE](current_number) if self._optimistic: self._current_number = current_number self.async_write_ha_state() await mqtt.async_publish( self.hass, self._config[CONF_COMMAND_TOPIC], payload, self._config[CONF_QOS], self._config[CONF_RETAIN], ) @property def assumed_state(self): """Return true if we do optimistic updates.""" return self._optimistic
home-assistant/home-assistant
homeassistant/components/mqtt/number.py
Python
apache-2.0
8,276
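The message_received callback in the MQTT number component above maps the reset payload to None, integer-looking payloads to int, everything else to float, and rejects values outside the configured [min, max] range. The following standalone sketch mirrors that parsing logic detached from the Home Assistant/MQTT plumbing; parse_number_payload and its defaults are invented for illustration and are not part of the component.

def parse_number_payload(payload, payload_reset="None", min_value=1.0, max_value=100.0):
    """Simplified re-implementation of the payload handling in message_received."""
    if payload == payload_reset:
        return None  # reset payload clears the current value
    try:
        # Integer-looking payloads become int, everything else is tried as float.
        num_value = int(payload) if payload.isnumeric() else float(payload)
    except ValueError:
        raise ValueError("Payload %r is not a Number" % payload)
    if not min_value <= num_value <= max_value:
        raise ValueError("Value %s outside range %s - %s" % (num_value, min_value, max_value))
    return num_value


if __name__ == "__main__":
    print(parse_number_payload("42"))    # 42 (int)
    print(parse_number_payload("3.5"))   # 3.5 (float)
    print(parse_number_payload("None"))  # None (reset)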
# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import datetime import uuid import freezegun import pretend import pytest from pyramid.httpexceptions import HTTPMovedPermanently, HTTPSeeOther from warehouse.accounts import views from warehouse.accounts.interfaces import IUserService, TooManyFailedLogins from ...common.db.accounts import UserFactory class TestFailedLoginView: exc = TooManyFailedLogins(resets_in=datetime.timedelta(seconds=600)) request = pretend.stub() resp = views.failed_logins(exc, request) assert resp.status == "429 Too Many Failed Login Attempts" assert resp.detail == ( "There have been too many unsuccessful login attempts. Please try " "again later." ) assert dict(resp.headers).get("Retry-After") == "600" class TestUserProfile: def test_user_redirects_username(self, db_request): user = UserFactory.create() if user.username.upper() != user.username: username = user.username.upper() else: username = user.username.lower() db_request.current_route_path = pretend.call_recorder( lambda username: "/user/the-redirect/" ) db_request.matchdict = {"username": username} result = views.profile(user, db_request) assert isinstance(result, HTTPMovedPermanently) assert result.headers["Location"] == "/user/the-redirect/" assert db_request.current_route_path.calls == [ pretend.call(username=user.username), ] def test_returns_user(self, db_request): user = UserFactory.create() assert views.profile(user, db_request) == { "user": user, "projects": [], } class TestLogin: @pytest.mark.parametrize("next_url", [None, "/foo/bar/", "/wat/"]) def test_get_returns_form(self, pyramid_request, next_url): user_service = pretend.stub() pyramid_request.find_service = pretend.call_recorder( lambda iface, context: user_service ) form_obj = pretend.stub() form_class = pretend.call_recorder(lambda d, user_service: form_obj) if next_url is not None: pyramid_request.GET["next"] = next_url result = views.login(pyramid_request, _form_class=form_class) assert result == { "form": form_obj, "redirect": {"field": "next", "data": next_url}, } assert pyramid_request.find_service.calls == [ pretend.call(IUserService, context=None), ] assert form_class.calls == [ pretend.call(pyramid_request.POST, user_service=user_service), ] @pytest.mark.parametrize("next_url", [None, "/foo/bar/", "/wat/"]) def test_post_invalid_returns_form(self, pyramid_request, next_url): user_service = pretend.stub() pyramid_request.find_service = pretend.call_recorder( lambda iface, context: user_service ) pyramid_request.method = "POST" if next_url is not None: pyramid_request.POST["next"] = next_url form_obj = pretend.stub(validate=pretend.call_recorder(lambda: False)) form_class = pretend.call_recorder(lambda d, user_service: form_obj) result = views.login(pyramid_request, _form_class=form_class) assert result == { "form": form_obj, "redirect": {"field": "next", "data": next_url}, } assert pyramid_request.find_service.calls == [ pretend.call(IUserService, context=None), ] assert form_class.calls == [ pretend.call(pyramid_request.POST, 
user_service=user_service), ] assert form_obj.validate.calls == [pretend.call()] @pytest.mark.parametrize("with_user", [True, False]) def test_post_validate_redirects(self, monkeypatch, pyramid_request, with_user): remember = pretend.call_recorder( lambda request, user_id: [("foo", "bar")] ) monkeypatch.setattr(views, "remember", remember) new_session = {} user_id = uuid.uuid4() user_service = pretend.stub( find_userid=pretend.call_recorder(lambda username: user_id), update_user=pretend.call_recorder(lambda *a, **kw: None), ) pyramid_request.find_service = pretend.call_recorder( lambda iface, context: user_service ) pyramid_request.method = "POST" pyramid_request.session = pretend.stub( items=lambda: [("a", "b"), ("foo", "bar")], update=new_session.update, invalidate=pretend.call_recorder(lambda: None), new_csrf_token=pretend.call_recorder(lambda: None), ) pyramid_request.set_property( lambda r: str(uuid.uuid4()) if with_user else None, name="unauthenticated_userid", ) form_obj = pretend.stub( validate=pretend.call_recorder(lambda: True), username=pretend.stub(data="theuser"), ) form_class = pretend.call_recorder(lambda d, user_service: form_obj) now = datetime.datetime.utcnow() with freezegun.freeze_time(now): result = views.login(pyramid_request, _form_class=form_class) assert isinstance(result, HTTPSeeOther) assert result.headers["Location"] == "/" assert result.headers["foo"] == "bar" assert form_class.calls == [ pretend.call(pyramid_request.POST, user_service=user_service), ] assert form_obj.validate.calls == [pretend.call()] assert user_service.find_userid.calls == [pretend.call("theuser")] assert user_service.update_user.calls == [ pretend.call(user_id, last_login=now), ] if with_user: assert new_session == {} else: assert new_session == {"a": "b", "foo": "bar"} assert remember.calls == [pretend.call(pyramid_request, str(user_id))] assert pyramid_request.session.invalidate.calls == [pretend.call()] assert pyramid_request.find_service.calls == [ pretend.call(IUserService, context=None), pretend.call(IUserService, context=None), ] assert pyramid_request.session.new_csrf_token.calls == [pretend.call()] @pytest.mark.parametrize( # The set of all possible next URLs. Since this set is infinite, we # test only a finite set of reasonable URLs. 
("expected_next_url, observed_next_url"), [ ("/security/", "/security/"), ("http://example.com", "/"), ], ) def test_post_validate_no_redirects(self, pyramid_request, expected_next_url, observed_next_url): user_service = pretend.stub( find_userid=pretend.call_recorder(lambda username: 1), update_user=lambda *a, **k: None, ) pyramid_request.find_service = pretend.call_recorder( lambda iface, context: user_service ) pyramid_request.method = "POST" pyramid_request.POST["next"] = expected_next_url form_obj = pretend.stub( validate=pretend.call_recorder(lambda: True), username=pretend.stub(data="theuser"), ) form_class = pretend.call_recorder(lambda d, user_service: form_obj) result = views.login(pyramid_request, _form_class=form_class) assert isinstance(result, HTTPSeeOther) assert result.headers["Location"] == observed_next_url class TestLogout: @pytest.mark.parametrize("next_url", [None, "/foo/bar/", "/wat/"]) def test_get_returns_empty(self, pyramid_request, next_url): if next_url is not None: pyramid_request.GET["next"] = next_url assert views.logout(pyramid_request) == \ {"redirect": {"field": "next", "data": next_url}} def test_post_forgets_user(self, monkeypatch, pyramid_request): forget = pretend.call_recorder(lambda request: [("foo", "bar")]) monkeypatch.setattr(views, "forget", forget) pyramid_request.method = "POST" pyramid_request.session = pretend.stub( invalidate=pretend.call_recorder(lambda: None), ) result = views.logout(pyramid_request) assert isinstance(result, HTTPSeeOther) assert result.headers["Location"] == "/" assert result.headers["foo"] == "bar" assert forget.calls == [pretend.call(pyramid_request)] assert pyramid_request.session.invalidate.calls == [pretend.call()] @pytest.mark.parametrize( # The set of all possible next URLs. Since this set is infinite, we # test only a finite set of reasonable URLs. 
("expected_next_url, observed_next_url"), [ ("/security/", "/security/"), ("http://example.com", "/"), ], ) def test_post_redirects_user(self, pyramid_request, expected_next_url, observed_next_url): pyramid_request.method = "POST" pyramid_request.POST["next"] = expected_next_url result = views.logout(pyramid_request) assert isinstance(result, HTTPSeeOther) assert result.headers["Location"] == observed_next_url class TestRegister: def test_get(self, pyramid_request): form_inst = pretend.stub() form = pretend.call_recorder(lambda *args, **kwargs: form_inst) pyramid_request.find_service = pretend.call_recorder( lambda *args, **kwargs: pretend.stub( enabled=False, csp_policy=pretend.stub(), merge=lambda _: None, ) ) result = views.register(pyramid_request, _form_class=form) assert result["form"] is form_inst def test_redirect_authenticated_user(self): result = views.register(pretend.stub(authenticated_userid=1)) assert isinstance(result, HTTPSeeOther) assert result.headers["Location"] == "/" def test_register_redirect(self, pyramid_request): pyramid_request.method = "POST" pyramid_request.find_service = pretend.call_recorder( lambda *args, **kwargs: pretend.stub( csp_policy={}, merge=lambda _: {}, enabled=False, verify_response=pretend.call_recorder(lambda _: None), find_userid=pretend.call_recorder(lambda _: None), find_userid_by_email=pretend.call_recorder(lambda _: None), create_user=pretend.call_recorder( lambda *args, **kwargs: pretend.stub(id=1), ), update_user=lambda *args, **kwargs: None, ) ) pyramid_request.route_path = pretend.call_recorder(lambda name: "/") pyramid_request.POST.update({ "username": "username_value", "password": "MyStr0ng!shP455w0rd", "password_confirm": "MyStr0ng!shP455w0rd", "email": "[email protected]", "full_name": "full_name", }) result = views.register(pyramid_request) assert isinstance(result, HTTPSeeOther) assert result.headers["Location"] == "/" class TestClientSideIncludes: def test_edit_gravatar_csi_returns_user(self, db_request): user = UserFactory.create() assert views.edit_gravatar_csi(user, db_request) == { "user": user, } class TestProfileCallout: def test_profile_callout_returns_user(self): user = pretend.stub() request = pretend.stub() assert views.profile_callout(user, request) == {"user": user}
alex/warehouse
tests/unit/accounts/test_views.py
Python
apache-2.0
12,295
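The tests above lean heavily on the pretend library: call_recorder wraps a callable and records every invocation so it can be asserted against .calls, while stub builds throwaway objects from keyword arguments. Here is a small self-contained illustration of that pattern; notify and the mailer stub are invented for the example, only the pretend API itself is real, and the pretend package must be installed.

import pretend


def notify(mailer, address):
    # Code under test: sends a fixed greeting through whatever mailer it is given.
    mailer.send(address, "hello")


def test_notify_sends_mail():
    # Record every call made to send(), then assert on the recorded calls.
    send = pretend.call_recorder(lambda address, body: None)
    mailer = pretend.stub(send=send)

    notify(mailer, "user@example.com")

    assert send.calls == [pretend.call("user@example.com", "hello")]


if __name__ == "__main__":
    test_notify_sends_mail()
    print("ok")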
'''
Created on Nov 26, 2014

@author: Yury Zhauniarovich <y.zhalnerovich{at}gmail.com>
'''

import os, time

from interfaces.adb_interface import AdbInterface
from bboxcoverage import BBoxCoverage
from running_strategies import IntentInvocationStrategy

import smtplib
import email.utils
from email.mime.text import MIMEText


APK_DIR_SOURCES = ["", ""]

SMTP_SERVER = 'smtp.gmail.com'
SMTP_PORT = 587
SENDER = ""
PASSWORD = ""
TO_EMAIL = ""


def sendMessage(subj, email_message):
    msg = MIMEText(email_message)
    msg['To'] = email.utils.formataddr(('Recipient', TO_EMAIL))
    msg['From'] = email.utils.formataddr(('Author', SENDER))
    msg['Subject'] = subj

    server = smtplib.SMTP(SMTP_SERVER, SMTP_PORT)
    try:
        server.set_debuglevel(True)
        # identify ourselves, prompting server for supported features
        server.ehlo()
        # If we can encrypt this session, do it
        if server.has_extn('STARTTLS'):
            server.starttls()
            server.ehlo()  # re-identify ourselves over TLS connection
        server.login(SENDER, PASSWORD)
        server.sendmail(SENDER, [TO_EMAIL], msg.as_string())
    finally:
        server.quit()


def getExecutionDevice():
    '''
    This method allows a user to select a device that is used for further
    analysis.
    '''
    dev_list = AdbInterface.getDeviceSerialsList()
    devNum = len(dev_list)

    if devNum <= 0:
        print "No device has been detected! Connect your device and restart the application!"
        return

    if devNum == 1:
        return dev_list[0]

    choice = None
    if devNum > 1:
        print "Select the device to use for analysis:\n"
        for i in xrange(0, devNum):
            print "%d. %s\n" % ((i + 1), dev_list[i])

        while not choice:
            try:
                choice = int(raw_input())
                if choice not in range(1, devNum + 1):
                    choice = None
                    print 'Invalid choice! Choose right number!'
            except ValueError:
                print 'Invalid Number! Choose right number!'

    return dev_list[choice - 1]


def getSubdirs(rootDir):
    return [os.path.join(rootDir, name) for name in os.listdir(rootDir)
            if os.path.isdir(os.path.join(rootDir, name))]


def getInstrApkInFolder(folder):
    for f in os.listdir(folder):
        if f.endswith("_aligned.apk"):
            filepath = os.path.join(folder, f)
            return filepath
    return None


def runMainIntentsStrategy(adb, androidManifest, delay=10):
    automaticTriggeringStrategy = IntentInvocationStrategy(
        adbDevice=adb, pathToAndroidManifest=androidManifest)
    automaticTriggeringStrategy.run(delay=delay)


# main part
adb = AdbInterface()
device = getExecutionDevice()
if not device:
    exit(1)

adb.setTargetSerial(device)
bboxcoverage = BBoxCoverage()

for apk_dir_source in APK_DIR_SOURCES:
    print "\n\nStarting experiment for directory: [%s]" % apk_dir_source
    result_directories = getSubdirs(apk_dir_source)
    for directory in result_directories:
        apk_file = getInstrApkInFolder(directory)
        if apk_file:
            print "Starting experiment for apk: [%s]" % apk_file
            try:
                bboxcoverage.initAlreadyInstrApkEnv(pathToInstrApk=apk_file,
                                                    resultsDir=directory)
            except:
                print "Exception while initialization!"
                continue

            try:
                bboxcoverage.installApkOnDevice()
            except:
                print "Exception while installation apk on device!"
                bboxcoverage.uninstallPackage()
                try:
                    bboxcoverage.installApkOnDevice()
                except:
                    continue

            package_name = bboxcoverage.getPackageName()
            params = {}
            params["strategy"] = "main_intents"
            params["package_name"] = package_name
            params["main_activity"] = bboxcoverage.androidManifest.getMainActivity()

            try:
                bboxcoverage.startTesting()
            except:
                print "Exception while startTesting!"
                bboxcoverage.uninstallPackage()
                continue

            try:
                runMainIntentsStrategy(adb=adb,
                                       androidManifest=bboxcoverage.androidManifestFile,
                                       delay=10)
            except:
                print "Exception while running strategy!"
                bboxcoverage.uninstallPackage()
                continue

            try:
                bboxcoverage.stopTesting("main_intents", paramsToWrite=params)
            except:
                print "Exception while running strategy!"
                bboxcoverage.uninstallPackage()
                continue

            time.sleep(3)
            bboxcoverage.uninstallPackage()
            time.sleep(5)

    sendMessage("[BBoxTester]", "Experiments done for directory [%s]!" % apk_dir_source)
zyrikby/BBoxTester
BBoxTester/main_intents_strategy.py
Python
apache-2.0
5,189
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Implements vlans for vmwareapi."""

from nova import db
from nova import exception
from nova import flags
from nova import log as logging
from nova import utils
from nova.virt.vmwareapi_conn import VMWareAPISession
from nova.virt.vmwareapi import network_utils

LOG = logging.getLogger("nova.network.vmwareapi_net")

FLAGS = flags.FLAGS
flags.DEFINE_string('vlan_interface', 'vmnic0',
                    'Physical network adapter name in VMware ESX host for '
                    'vlan networking')


def ensure_vlan_bridge(vlan_num, bridge, net_attrs=None):
    """Create a vlan and bridge unless they already exist."""
    # Open vmwareapi session
    host_ip = FLAGS.vmwareapi_host_ip
    host_username = FLAGS.vmwareapi_host_username
    host_password = FLAGS.vmwareapi_host_password
    if not host_ip or host_username is None or host_password is None:
        raise Exception(_("Must specify vmwareapi_host_ip, "
                          "vmwareapi_host_username "
                          "and vmwareapi_host_password to use "
                          "connection_type=vmwareapi"))
    session = VMWareAPISession(host_ip, host_username, host_password,
                               FLAGS.vmwareapi_api_retry_count)
    vlan_interface = FLAGS.vlan_interface

    # Check if the vlan_interface physical network adapter exists on the host
    if not network_utils.check_if_vlan_interface_exists(session,
                                                        vlan_interface):
        raise exception.NetworkAdapterNotFound(adapter=vlan_interface)

    # Get the vSwitch associated with the Physical Adapter
    vswitch_associated = network_utils.get_vswitch_for_vlan_interface(
        session, vlan_interface)
    if vswitch_associated is None:
        raise exception.SwicthNotFoundForNetworkAdapter(
            adapter=vlan_interface)

    # Check whether the bridge already exists and retrieve the ref of the
    # network whose name_label is "bridge"
    network_ref = network_utils.get_network_with_the_name(session, bridge)
    if network_ref is None:
        # Create a port group on the vSwitch associated with the
        # vlan_interface's corresponding physical network adapter on the
        # ESX host
        network_utils.create_port_group(session, bridge, vswitch_associated,
                                        vlan_num)
    else:
        # Get the vlan id and vswitch corresponding to the port group
        pg_vlanid, pg_vswitch = \
            network_utils.get_vlanid_and_vswitch_for_portgroup(session,
                                                               bridge)

        # Check if the vswitch associated is proper
        if pg_vswitch != vswitch_associated:
            raise exception.InvalidVLANPortGroup(
                bridge=bridge,
                expected=vswitch_associated,
                actual=pg_vswitch)

        # Check if the vlan id is proper for the port group
        if pg_vlanid != vlan_num:
            raise exception.InvalidVLANTag(bridge=bridge, tag=vlan_num,
                                           pgroup=pg_vlanid)
termie/nova-migration-demo
nova/network/vmwareapi_net.py
Python
apache-2.0
3,772
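ensure_vlan_bridge above either creates a port group for the requested bridge or validates that an existing one sits on the expected vSwitch with the expected VLAN tag. The rough sketch below shows that decision flow with the vSphere calls replaced by an in-memory stand-in; FakeHost and its fields are invented for illustration and do not reflect the real network_utils API.

class FakeHost(object):
    """In-memory stand-in for the ESX host state queried by network_utils."""

    def __init__(self):
        self.vswitch_for_adapter = {'vmnic0': 'vSwitch0'}
        self.port_groups = {}  # bridge name -> (vlan_id, vswitch)


def ensure_vlan_bridge_sketch(host, vlan_num, bridge, vlan_interface='vmnic0'):
    # Adapter must exist on the host.
    if vlan_interface not in host.vswitch_for_adapter:
        raise LookupError('network adapter %s not found' % vlan_interface)
    vswitch = host.vswitch_for_adapter[vlan_interface]

    existing = host.port_groups.get(bridge)
    if existing is None:
        # No network with that name yet: create the port group.
        host.port_groups[bridge] = (vlan_num, vswitch)
        return

    # Otherwise validate that the existing port group matches expectations.
    pg_vlanid, pg_vswitch = existing
    if pg_vswitch != vswitch:
        raise ValueError('bridge %s is on %s, expected %s' % (bridge, pg_vswitch, vswitch))
    if pg_vlanid != vlan_num:
        raise ValueError('bridge %s carries vlan %s, expected %s' % (bridge, pg_vlanid, vlan_num))


if __name__ == '__main__':
    host = FakeHost()
    ensure_vlan_bridge_sketch(host, 100, 'br100')  # creates the port group
    ensure_vlan_bridge_sketch(host, 100, 'br100')  # already exists and matches: no-op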
# Copyright (c) 2016 Huawei Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections import json import math import re import six import uuid from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import units from cinder import context from cinder import exception from cinder.i18n import _, _LE, _LI, _LW from cinder import interface from cinder import utils from cinder.volume import driver from cinder.volume.drivers.huawei import constants from cinder.volume.drivers.huawei import fc_zone_helper from cinder.volume.drivers.huawei import huawei_conf from cinder.volume.drivers.huawei import huawei_utils from cinder.volume.drivers.huawei import hypermetro from cinder.volume.drivers.huawei import replication from cinder.volume.drivers.huawei import rest_client from cinder.volume.drivers.huawei import smartx from cinder.volume import utils as volume_utils from cinder.volume import volume_types from cinder.zonemanager import utils as fczm_utils LOG = logging.getLogger(__name__) huawei_opts = [ cfg.StrOpt('cinder_huawei_conf_file', default='/etc/cinder/cinder_huawei_conf.xml', help='The configuration file for the Cinder Huawei driver.'), cfg.StrOpt('hypermetro_devices', default=None, help='The remote device hypermetro will use.'), cfg.StrOpt('metro_san_user', default=None, help='The remote metro device san user.'), cfg.StrOpt('metro_san_password', default=None, help='The remote metro device san password.'), cfg.StrOpt('metro_domain_name', default=None, help='The remote metro device domain name.'), cfg.StrOpt('metro_san_address', default=None, help='The remote metro device request url.'), cfg.StrOpt('metro_storage_pools', default=None, help='The remote metro device pool names.'), ] CONF = cfg.CONF CONF.register_opts(huawei_opts) snap_attrs = ('id', 'volume_id', 'volume', 'provider_location') Snapshot = collections.namedtuple('Snapshot', snap_attrs) vol_attrs = ('id', 'lun_type', 'provider_location', 'metadata') Volume = collections.namedtuple('Volume', vol_attrs) class HuaweiBaseDriver(driver.VolumeDriver): def __init__(self, *args, **kwargs): super(HuaweiBaseDriver, self).__init__(*args, **kwargs) if not self.configuration: msg = _('Configuration is not found.') raise exception.InvalidInput(reason=msg) self.active_backend_id = kwargs.get('active_backend_id') self.configuration.append_config_values(huawei_opts) self.huawei_conf = huawei_conf.HuaweiConf(self.configuration) self.metro_flag = False self.replica = None def get_local_and_remote_dev_conf(self): self.loc_dev_conf = self.huawei_conf.get_local_device() # Now just support one replication_devices. 
replica_devs = self.huawei_conf.get_replication_devices() self.replica_dev_conf = replica_devs[0] if replica_devs else {} def get_local_and_remote_client_conf(self): if self.active_backend_id: return self.replica_dev_conf, self.loc_dev_conf else: return self.loc_dev_conf, self.replica_dev_conf def do_setup(self, context): """Instantiate common class and login storage system.""" # Set huawei private configuration into Configuration object. self.huawei_conf.update_config_value() self.get_local_and_remote_dev_conf() client_conf, replica_client_conf = ( self.get_local_and_remote_client_conf()) # init local client if not client_conf: msg = _('Get active client failed.') LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) self.client = rest_client.RestClient(self.configuration, **client_conf) self.client.login() # init remote client metro_san_address = self.configuration.safe_get("metro_san_address") metro_san_user = self.configuration.safe_get("metro_san_user") metro_san_password = self.configuration.safe_get("metro_san_password") if metro_san_address and metro_san_user and metro_san_password: metro_san_address = metro_san_address.split(";") self.rmt_client = rest_client.RestClient(self.configuration, metro_san_address, metro_san_user, metro_san_password) self.rmt_client.login() self.metro_flag = True else: self.metro_flag = False LOG.warning(_LW("Remote device not configured in cinder.conf")) # init replication manager if replica_client_conf: self.replica_client = rest_client.RestClient(self.configuration, **replica_client_conf) self.replica_client.try_login() self.replica = replication.ReplicaPairManager(self.client, self.replica_client, self.configuration) def check_for_setup_error(self): pass def get_volume_stats(self, refresh=False): """Get volume status and reload huawei config file.""" self.huawei_conf.update_config_value() stats = self.client.update_volume_stats() stats = self.update_hypermetro_capability(stats) if self.replica: stats = self.replica.update_replica_capability(stats) targets = [self.replica_dev_conf['backend_id']] stats['replication_targets'] = targets stats['replication_enabled'] = True return stats def update_hypermetro_capability(self, stats): if self.metro_flag: version = self.client.find_array_version() rmt_version = self.rmt_client.find_array_version() if (version >= constants.ARRAY_VERSION and rmt_version >= constants.ARRAY_VERSION): for pool in stats['pools']: pool['hypermetro'] = True pool['consistencygroup_support'] = True return stats def _get_volume_type(self, volume): volume_type = None type_id = volume.volume_type_id if type_id: ctxt = context.get_admin_context() volume_type = volume_types.get_volume_type(ctxt, type_id) return volume_type def _get_volume_params(self, volume_type): """Return the parameters for creating the volume.""" specs = {} if volume_type: specs = dict(volume_type).get('extra_specs') opts = self._get_volume_params_from_specs(specs) return opts def _get_consistencygroup_type(self, group): specs = {} opts = {} type_id = group.volume_type_id.split(",") if type_id[0] and len(type_id) == 2: ctxt = context.get_admin_context() volume_type = volume_types.get_volume_type(ctxt, type_id[0]) specs = dict(volume_type).get('extra_specs') opts = self._get_volume_params_from_specs(specs) return opts def _get_volume_params_from_specs(self, specs): """Return the volume parameters from extra specs.""" opts_capability = { 'smarttier': False, 'smartcache': False, 'smartpartition': False, 'thin_provisioning_support': False, 
'thick_provisioning_support': False, 'hypermetro': False, 'replication_enabled': False, 'replication_type': 'async', } opts_value = { 'policy': None, 'partitionname': None, 'cachename': None, } opts_associate = { 'smarttier': 'policy', 'smartcache': 'cachename', 'smartpartition': 'partitionname', } opts = self._get_opts_from_specs(opts_capability, opts_value, opts_associate, specs) opts = smartx.SmartX().get_smartx_specs_opts(opts) opts = replication.get_replication_opts(opts) LOG.debug('volume opts %(opts)s.', {'opts': opts}) return opts def _get_opts_from_specs(self, opts_capability, opts_value, opts_associate, specs): """Get the well defined extra specs.""" opts = {} opts.update(opts_capability) opts.update(opts_value) for key, value in specs.items(): # Get the scope, if is using scope format. scope = None key_split = key.split(':') if len(key_split) > 2 and key_split[0] != "capabilities": continue if len(key_split) == 1: key = key_split[0].lower() else: scope = key_split[0].lower() key = key_split[1].lower() if ((not scope or scope == 'capabilities') and key in opts_capability): words = value.split() if words and len(words) == 2 and words[0] in ('<is>', '<in>'): opts[key] = words[1].lower() elif key == 'replication_type': LOG.error(_LE("Extra specs must be specified as " "replication_type='<in> sync' or " "'<in> async'.")) else: LOG.error(_LE("Extra specs must be specified as " "capabilities:%s='<is> True'."), key) if ((scope in opts_capability) and (key in opts_value) and (scope in opts_associate) and (opts_associate[scope] == key)): opts[key] = value return opts def _get_lun_params(self, volume, opts): pool_name = volume_utils.extract_host(volume.host, level='pool') params = { 'TYPE': '11', 'NAME': huawei_utils.encode_name(volume.id), 'PARENTTYPE': '216', 'PARENTID': self.client.get_pool_id(pool_name), 'DESCRIPTION': volume.name, 'ALLOCTYPE': opts.get('LUNType', self.configuration.lun_type), 'CAPACITY': huawei_utils.get_volume_size(volume), 'WRITEPOLICY': self.configuration.lun_write_type, 'MIRRORPOLICY': self.configuration.lun_mirror_switch, 'PREFETCHPOLICY': self.configuration.lun_prefetch_type, 'PREFETCHVALUE': self.configuration.lun_prefetch_value, 'DATATRANSFERPOLICY': opts.get('policy', self.configuration.lun_policy), 'READCACHEPOLICY': self.configuration.lun_read_cache_policy, 'WRITECACHEPOLICY': self.configuration.lun_write_cache_policy, } LOG.info(_LI('volume: %(volume)s, lun params: %(params)s.'), {'volume': volume.id, 'params': params}) return params def _create_volume(self, volume, lun_params): # Create LUN on the array. model_update = {} lun_info = self.client.create_lun(lun_params) model_update['provider_location'] = lun_info['ID'] admin_metadata = huawei_utils.get_admin_metadata(volume) admin_metadata.update({'huawei_lun_wwn': lun_info['WWN']}) model_update['admin_metadata'] = admin_metadata metadata = huawei_utils.get_volume_metadata(volume) model_update['metadata'] = metadata return lun_info, model_update def _create_base_type_volume(self, opts, volume, volume_type): """Create volume and add some base type. Base type is the services won't conflict with the other service. 
""" lun_params = self._get_lun_params(volume, opts) lun_info, model_update = self._create_volume(volume, lun_params) lun_id = lun_info['ID'] try: qos = smartx.SmartQos.get_qos_by_volume_type(volume_type) if qos: smart_qos = smartx.SmartQos(self.client) smart_qos.add(qos, lun_id) smartpartition = smartx.SmartPartition(self.client) smartpartition.add(opts, lun_id) smartcache = smartx.SmartCache(self.client) smartcache.add(opts, lun_id) except Exception as err: self._delete_lun_with_check(lun_id) msg = _('Create volume error. Because %s.') % six.text_type(err) raise exception.VolumeBackendAPIException(data=msg) return lun_params, lun_info, model_update def _add_extend_type_to_volume(self, opts, lun_params, lun_info, model_update): """Add the extend type. Extend type is the services may conflict with LUNCopy. So add it after the those services. """ lun_id = lun_info['ID'] if opts.get('hypermetro') == 'true': metro = hypermetro.HuaweiHyperMetro(self.client, self.rmt_client, self.configuration) try: metro_info = metro.create_hypermetro(lun_id, lun_params) model_update['metadata'].update(metro_info) except exception.VolumeBackendAPIException as err: LOG.error(_LE('Create hypermetro error: %s.'), err) self._delete_lun_with_check(lun_id) raise if opts.get('replication_enabled') == 'true': replica_model = opts.get('replication_type') try: replica_info = self.replica.create_replica(lun_info, replica_model) model_update.update(replica_info) except Exception as err: LOG.exception(_LE('Create replication volume error.')) self._delete_lun_with_check(lun_id) raise return model_update def create_volume(self, volume): """Create a volume.""" volume_type = self._get_volume_type(volume) opts = self._get_volume_params(volume_type) if (opts.get('hypermetro') == 'true' and opts.get('replication_enabled') == 'true'): err_msg = _("Hypermetro and Replication can not be " "used in the same volume_type.") LOG.error(err_msg) raise exception.VolumeBackendAPIException(data=err_msg) lun_params, lun_info, model_update = ( self._create_base_type_volume(opts, volume, volume_type)) model_update = self._add_extend_type_to_volume(opts, lun_params, lun_info, model_update) return model_update def _delete_volume(self, volume): lun_id = volume.provider_location if not lun_id: return lun_group_ids = self.client.get_lungroupids_by_lunid(lun_id) if lun_group_ids and len(lun_group_ids) == 1: self.client.remove_lun_from_lungroup(lun_group_ids[0], lun_id) self.client.delete_lun(lun_id) def delete_volume(self, volume): """Delete a volume. Three steps: Firstly, remove associate from lungroup. Secondly, remove associate from QoS policy. Thirdly, remove the lun. """ lun_id = self._check_volume_exist_on_array( volume, constants.VOLUME_NOT_EXISTS_WARN) if not lun_id: return qos_id = self.client.get_qosid_by_lunid(lun_id) if qos_id: smart_qos = smartx.SmartQos(self.client) smart_qos.remove(qos_id, lun_id) metadata = huawei_utils.get_volume_metadata(volume) if 'hypermetro_id' in metadata: metro = hypermetro.HuaweiHyperMetro(self.client, self.rmt_client, self.configuration) try: metro.delete_hypermetro(volume) except exception.VolumeBackendAPIException as err: LOG.error(_LE('Delete hypermetro error: %s.'), err) # We have checked the LUN WWN above, # no need to check again here. 
self._delete_volume(volume) raise # Delete a replication volume replica_data = volume.replication_driver_data if replica_data: try: self.replica.delete_replica(volume) except exception.VolumeBackendAPIException as err: with excutils.save_and_reraise_exception(): LOG.exception(_LE("Delete replication error.")) self._delete_volume(volume) self._delete_volume(volume) def _delete_lun_with_check(self, lun_id, lun_wwn=None): if not lun_id: return if self.client.check_lun_exist(lun_id, lun_wwn): qos_id = self.client.get_qosid_by_lunid(lun_id) if qos_id: smart_qos = smartx.SmartQos(self.client) smart_qos.remove(qos_id, lun_id) self.client.delete_lun(lun_id) def _is_lun_migration_complete(self, src_id, dst_id): result = self.client.get_lun_migration_task() found_migration_task = False if 'data' not in result: return False for item in result['data']: if (src_id == item['PARENTID'] and dst_id == item['TARGETLUNID']): found_migration_task = True if constants.MIGRATION_COMPLETE == item['RUNNINGSTATUS']: return True if constants.MIGRATION_FAULT == item['RUNNINGSTATUS']: msg = _("Lun migration error.") LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) if not found_migration_task: err_msg = _("Cannot find migration task.") LOG.error(err_msg) raise exception.VolumeBackendAPIException(data=err_msg) return False def _is_lun_migration_exist(self, src_id, dst_id): try: result = self.client.get_lun_migration_task() except Exception: LOG.error(_LE("Get LUN migration error.")) return False if 'data' in result: for item in result['data']: if (src_id == item['PARENTID'] and dst_id == item['TARGETLUNID']): return True return False def _migrate_lun(self, src_id, dst_id): try: self.client.create_lun_migration(src_id, dst_id) def _is_lun_migration_complete(): return self._is_lun_migration_complete(src_id, dst_id) wait_interval = constants.MIGRATION_WAIT_INTERVAL huawei_utils.wait_for_condition(_is_lun_migration_complete, wait_interval, self.configuration.lun_timeout) # Clean up if migration failed. 
except Exception as ex: raise exception.VolumeBackendAPIException(data=ex) finally: if self._is_lun_migration_exist(src_id, dst_id): self.client.delete_lun_migration(src_id, dst_id) self._delete_lun_with_check(dst_id) LOG.debug("Migrate lun %s successfully.", src_id) return True def _wait_volume_ready(self, lun_id): wait_interval = self.configuration.lun_ready_wait_interval def _volume_ready(): result = self.client.get_lun_info(lun_id) if (result['HEALTHSTATUS'] == constants.STATUS_HEALTH and result['RUNNINGSTATUS'] == constants.STATUS_VOLUME_READY): return True return False huawei_utils.wait_for_condition(_volume_ready, wait_interval, wait_interval * 10) def _get_original_status(self, volume): return 'in-use' if volume.volume_attachment else 'available' def update_migrated_volume(self, ctxt, volume, new_volume, original_volume_status=None): original_name = huawei_utils.encode_name(volume.id) current_name = huawei_utils.encode_name(new_volume.id) lun_id = self.client.get_lun_id_by_name(current_name) try: self.client.rename_lun(lun_id, original_name) except exception.VolumeBackendAPIException: LOG.error(_LE('Unable to rename lun %s on array.'), current_name) return {'_name_id': new_volume.name_id} LOG.debug("Rename lun from %(current_name)s to %(original_name)s " "successfully.", {'current_name': current_name, 'original_name': original_name}) model_update = {'_name_id': None} return model_update def migrate_volume(self, ctxt, volume, host, new_type=None): """Migrate a volume within the same array.""" self._check_volume_exist_on_array(volume, constants.VOLUME_NOT_EXISTS_RAISE) # NOTE(jlc): Replication volume can't migrate. But retype # can remove replication relationship first then do migrate. # So don't add this judgement into _check_migration_valid(). volume_type = self._get_volume_type(volume) opts = self._get_volume_params(volume_type) if opts.get('replication_enabled') == 'true': return (False, None) return self._migrate_volume(volume, host, new_type) def _check_migration_valid(self, host, volume): if 'pool_name' not in host['capabilities']: return False target_device = host['capabilities']['location_info'] # Source and destination should be on same array. if target_device != self.client.device_id: return False # Same protocol should be used if volume is in-use. protocol = self.configuration.san_protocol if (host['capabilities']['storage_protocol'] != protocol and self._get_original_status(volume) == 'in-use'): return False pool_name = host['capabilities']['pool_name'] if len(pool_name) == 0: return False return True def _migrate_volume(self, volume, host, new_type=None): if not self._check_migration_valid(host, volume): return (False, None) type_id = volume.volume_type_id volume_type = None if type_id: volume_type = volume_types.get_volume_type(None, type_id) pool_name = host['capabilities']['pool_name'] pools = self.client.get_all_pools() pool_info = self.client.get_pool_info(pool_name, pools) src_volume_name = huawei_utils.encode_name(volume.id) dst_volume_name = six.text_type(hash(src_volume_name)) src_id = volume.provider_location opts = None qos = None if new_type: # If new type exists, use new type. 
new_specs = new_type['extra_specs'] opts = self._get_volume_params_from_specs(new_specs) if 'LUNType' not in opts: opts['LUNType'] = self.configuration.lun_type qos = smartx.SmartQos.get_qos_by_volume_type(new_type) elif volume_type: qos = smartx.SmartQos.get_qos_by_volume_type(volume_type) if not opts: opts = self._get_volume_params(volume_type) lun_info = self.client.get_lun_info(src_id) policy = lun_info['DATATRANSFERPOLICY'] if opts['policy']: policy = opts['policy'] lun_params = { 'NAME': dst_volume_name, 'PARENTID': pool_info['ID'], 'DESCRIPTION': lun_info['DESCRIPTION'], 'ALLOCTYPE': opts.get('LUNType', lun_info['ALLOCTYPE']), 'CAPACITY': lun_info['CAPACITY'], 'WRITEPOLICY': lun_info['WRITEPOLICY'], 'MIRRORPOLICY': lun_info['MIRRORPOLICY'], 'PREFETCHPOLICY': lun_info['PREFETCHPOLICY'], 'PREFETCHVALUE': lun_info['PREFETCHVALUE'], 'DATATRANSFERPOLICY': policy, 'READCACHEPOLICY': lun_info['READCACHEPOLICY'], 'WRITECACHEPOLICY': lun_info['WRITECACHEPOLICY'], 'OWNINGCONTROLLER': lun_info['OWNINGCONTROLLER'], } lun_info = self.client.create_lun(lun_params) lun_id = lun_info['ID'] if qos: LOG.info(_LI('QoS: %s.'), qos) SmartQos = smartx.SmartQos(self.client) SmartQos.add(qos, lun_id) if opts: smartpartition = smartx.SmartPartition(self.client) smartpartition.add(opts, lun_id) smartcache = smartx.SmartCache(self.client) smartcache.add(opts, lun_id) dst_id = lun_info['ID'] self._wait_volume_ready(dst_id) moved = self._migrate_lun(src_id, dst_id) return moved, {} def create_volume_from_snapshot(self, volume, snapshot): """Create a volume from a snapshot. We use LUNcopy to copy a new volume from snapshot. The time needed increases as volume size does. """ volume_type = self._get_volume_type(volume) opts = self._get_volume_params(volume_type) if (opts.get('hypermetro') == 'true' and opts.get('replication_enabled') == 'true'): err_msg = _("Hypermetro and Replication can not be " "used in the same volume_type.") LOG.error(err_msg) raise exception.VolumeBackendAPIException(data=err_msg) snapshotname = huawei_utils.encode_name(snapshot.id) snapshot_id = snapshot.provider_location if snapshot_id is None: snapshot_id = self.client.get_snapshot_id_by_name(snapshotname) if snapshot_id is None: err_msg = (_( 'create_volume_from_snapshot: Snapshot %(name)s ' 'does not exist.') % {'name': snapshotname}) LOG.error(err_msg) raise exception.VolumeBackendAPIException(data=err_msg) lun_params, lun_info, model_update = ( self._create_base_type_volume(opts, volume, volume_type)) tgt_lun_id = model_update['provider_location'] luncopy_name = huawei_utils.encode_name(volume.id) LOG.info(_LI( 'create_volume_from_snapshot: src_lun_id: %(src_lun_id)s, ' 'tgt_lun_id: %(tgt_lun_id)s, copy_name: %(copy_name)s.'), {'src_lun_id': snapshot_id, 'tgt_lun_id': tgt_lun_id, 'copy_name': luncopy_name}) wait_interval = self.configuration.lun_ready_wait_interval def _volume_ready(): result = self.client.get_lun_info(tgt_lun_id) if (result['HEALTHSTATUS'] == constants.STATUS_HEALTH and result['RUNNINGSTATUS'] == constants.STATUS_VOLUME_READY): return True return False huawei_utils.wait_for_condition(_volume_ready, wait_interval, wait_interval * 10) self._copy_volume(volume, luncopy_name, snapshot_id, tgt_lun_id) # NOTE(jlc): Actually, we just only support replication here right # now, not hypermetro. 
model_update = self._add_extend_type_to_volume(opts, lun_params, lun_info, model_update) return model_update def create_cloned_volume(self, volume, src_vref): """Clone a new volume from an existing volume.""" self._check_volume_exist_on_array(src_vref, constants.VOLUME_NOT_EXISTS_RAISE) # Form the snapshot structure. snapshot = Snapshot(id=uuid.uuid4().__str__(), volume_id=src_vref.id, volume=src_vref, provider_location=None) # Create snapshot. self.create_snapshot(snapshot) try: # Create volume from snapshot. model_update = self.create_volume_from_snapshot(volume, snapshot) finally: try: # Delete snapshot. self.delete_snapshot(snapshot) except exception.VolumeBackendAPIException: LOG.warning(_LW( 'Failure deleting the snapshot %(snapshot_id)s ' 'of volume %(volume_id)s.'), {'snapshot_id': snapshot.id, 'volume_id': src_vref.id},) return model_update def _check_volume_exist_on_array(self, volume, action): """Check whether the volume exists on the array. If the volume exists on the array, return the LUN ID. If not exists, raise or log warning. """ # Firstly, try to find LUN ID by volume.provider_location. lun_id = volume.provider_location # If LUN ID not recorded, find LUN ID by LUN NAME. if not lun_id: volume_name = huawei_utils.encode_name(volume.id) lun_id = self.client.get_lun_id_by_name(volume_name) if not lun_id: msg = (_("Volume %s does not exist on the array.") % volume.id) if action == constants.VOLUME_NOT_EXISTS_WARN: LOG.warning(msg) if action == constants.VOLUME_NOT_EXISTS_RAISE: raise exception.VolumeBackendAPIException(data=msg) return metadata = huawei_utils.get_admin_metadata(volume) lun_wwn = metadata.get('huawei_lun_wwn') if metadata else None if not lun_wwn: LOG.debug("No LUN WWN recorded for volume %s.", volume.id) if not self.client.check_lun_exist(lun_id, lun_wwn): msg = (_("Volume %s does not exist on the array.") % volume.id) if action == constants.VOLUME_NOT_EXISTS_WARN: LOG.warning(msg) if action == constants.VOLUME_NOT_EXISTS_RAISE: raise exception.VolumeBackendAPIException(data=msg) return return lun_id def extend_volume(self, volume, new_size): """Extend a volume.""" lun_id = self._check_volume_exist_on_array( volume, constants.VOLUME_NOT_EXISTS_RAISE) volume_type = self._get_volume_type(volume) opts = self._get_volume_params(volume_type) if opts.get('replication_enabled') == 'true': msg = (_("Can't extend replication volume, volume: %(id)s") % {"id": volume.id}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) lun_info = self.client.get_lun_info(lun_id) old_size = int(lun_info.get('CAPACITY')) new_size = int(new_size) * units.Gi / 512 if new_size == old_size: LOG.info(_LI("New size is equal to the real size from backend" " storage, no need to extend." " realsize: %(oldsize)s, newsize: %(newsize)s."), {'oldsize': old_size, 'newsize': new_size}) return if new_size < old_size: msg = (_("New size should be bigger than the real size from " "backend storage." 
" realsize: %(oldsize)s, newsize: %(newsize)s."), {'oldsize': old_size, 'newsize': new_size}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) volume_name = huawei_utils.encode_name(volume.id) LOG.info(_LI( 'Extend volume: %(volumename)s, ' 'oldsize: %(oldsize)s, newsize: %(newsize)s.'), {'volumename': volume_name, 'oldsize': old_size, 'newsize': new_size}) self.client.extend_lun(lun_id, new_size) def create_snapshot(self, snapshot): volume = snapshot.volume if not volume: msg = (_("Can't get volume id from snapshot, snapshot: %(id)s") % {"id": snapshot.id}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) volume_name = huawei_utils.encode_name(snapshot.volume_id) lun_id = self.client.get_lun_id(volume, volume_name) snapshot_name = huawei_utils.encode_name(snapshot.id) snapshot_description = snapshot.id snapshot_info = self.client.create_snapshot(lun_id, snapshot_name, snapshot_description) snapshot_id = snapshot_info['ID'] self.client.activate_snapshot(snapshot_id) return {'provider_location': snapshot_info['ID'], 'lun_info': snapshot_info} def delete_snapshot(self, snapshot): snapshotname = huawei_utils.encode_name(snapshot.id) volume_name = huawei_utils.encode_name(snapshot.volume_id) LOG.info(_LI( 'stop_snapshot: snapshot name: %(snapshot)s, ' 'volume name: %(volume)s.'), {'snapshot': snapshotname, 'volume': volume_name},) snapshot_id = snapshot.provider_location if snapshot_id is None: snapshot_id = self.client.get_snapshot_id_by_name(snapshotname) if snapshot_id and self.client.check_snapshot_exist(snapshot_id): self.client.stop_snapshot(snapshot_id) self.client.delete_snapshot(snapshot_id) else: LOG.warning(_LW("Can't find snapshot on the array.")) def retype(self, ctxt, volume, new_type, diff, host): """Convert the volume to be of the new type.""" LOG.debug("Enter retype: id=%(id)s, new_type=%(new_type)s, " "diff=%(diff)s, host=%(host)s.", {'id': volume.id, 'new_type': new_type, 'diff': diff, 'host': host}) self._check_volume_exist_on_array( volume, constants.VOLUME_NOT_EXISTS_RAISE) # Check what changes are needed migration, change_opts, lun_id = self.determine_changes_when_retype( volume, new_type, host) model_update = {} replica_enabled_change = change_opts.get('replication_enabled') replica_type_change = change_opts.get('replication_type') if replica_enabled_change and replica_enabled_change[0] == 'true': try: self.replica.delete_replica(volume) model_update.update({'replication_status': 'disabled', 'replication_driver_data': None}) except exception.VolumeBackendAPIException: LOG.exception(_LE('Retype volume error. ' 'Delete replication failed.')) return False try: if migration: LOG.debug("Begin to migrate LUN(id: %(lun_id)s) with " "change %(change_opts)s.", {"lun_id": lun_id, "change_opts": change_opts}) if not self._migrate_volume(volume, host, new_type): LOG.warning(_LW("Storage-assisted migration failed during " "retype.")) return False else: # Modify lun to change policy self.modify_lun(lun_id, change_opts) except exception.VolumeBackendAPIException: LOG.exception(_LE('Retype volume error.')) return False if replica_enabled_change and replica_enabled_change[1] == 'true': try: # If replica_enabled_change is not None, the # replica_type_change won't be None. See function # determine_changes_when_retype. lun_info = self.client.get_lun_info(lun_id) replica_info = self.replica.create_replica( lun_info, replica_type_change[1]) model_update.update(replica_info) except exception.VolumeBackendAPIException: LOG.exception(_LE('Retype volume error. 
' 'Create replication failed.')) return False return (True, model_update) def modify_lun(self, lun_id, change_opts): if change_opts.get('partitionid'): old, new = change_opts['partitionid'] old_id = old[0] old_name = old[1] new_id = new[0] new_name = new[1] if old_id: self.client.remove_lun_from_partition(lun_id, old_id) if new_id: self.client.add_lun_to_partition(lun_id, new_id) LOG.info(_LI("Retype LUN(id: %(lun_id)s) smartpartition from " "(name: %(old_name)s, id: %(old_id)s) to " "(name: %(new_name)s, id: %(new_id)s) success."), {"lun_id": lun_id, "old_id": old_id, "old_name": old_name, "new_id": new_id, "new_name": new_name}) if change_opts.get('cacheid'): old, new = change_opts['cacheid'] old_id = old[0] old_name = old[1] new_id = new[0] new_name = new[1] if old_id: self.client.remove_lun_from_cache(lun_id, old_id) if new_id: self.client.add_lun_to_cache(lun_id, new_id) LOG.info(_LI("Retype LUN(id: %(lun_id)s) smartcache from " "(name: %(old_name)s, id: %(old_id)s) to " "(name: %(new_name)s, id: %(new_id)s) successfully."), {'lun_id': lun_id, 'old_id': old_id, "old_name": old_name, 'new_id': new_id, "new_name": new_name}) if change_opts.get('policy'): old_policy, new_policy = change_opts['policy'] self.client.change_lun_smarttier(lun_id, new_policy) LOG.info(_LI("Retype LUN(id: %(lun_id)s) smarttier policy from " "%(old_policy)s to %(new_policy)s success."), {'lun_id': lun_id, 'old_policy': old_policy, 'new_policy': new_policy}) if change_opts.get('qos'): old_qos, new_qos = change_opts['qos'] old_qos_id = old_qos[0] old_qos_value = old_qos[1] if old_qos_id: smart_qos = smartx.SmartQos(self.client) smart_qos.remove(old_qos_id, lun_id) if new_qos: smart_qos = smartx.SmartQos(self.client) smart_qos.add(new_qos, lun_id) LOG.info(_LI("Retype LUN(id: %(lun_id)s) smartqos from " "%(old_qos_value)s to %(new_qos)s success."), {'lun_id': lun_id, 'old_qos_value': old_qos_value, 'new_qos': new_qos}) def get_lun_specs(self, lun_id): lun_opts = { 'policy': None, 'partitionid': None, 'cacheid': None, 'LUNType': None, } lun_info = self.client.get_lun_info(lun_id) lun_opts['LUNType'] = int(lun_info['ALLOCTYPE']) if lun_info.get('DATATRANSFERPOLICY'): lun_opts['policy'] = lun_info['DATATRANSFERPOLICY'] if lun_info.get('SMARTCACHEPARTITIONID'): lun_opts['cacheid'] = lun_info['SMARTCACHEPARTITIONID'] if lun_info.get('CACHEPARTITIONID'): lun_opts['partitionid'] = lun_info['CACHEPARTITIONID'] return lun_opts def _check_needed_changes(self, lun_id, old_opts, new_opts, change_opts, new_type): new_cache_id = None new_cache_name = new_opts['cachename'] if new_cache_name: new_cache_id = self.client.get_cache_id_by_name(new_cache_name) if new_cache_id is None: msg = (_( "Can't find cache name on the array, cache name is: " "%(name)s.") % {'name': new_cache_name}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) new_partition_id = None new_partition_name = new_opts['partitionname'] if new_partition_name: new_partition_id = self.client.get_partition_id_by_name( new_partition_name) if new_partition_id is None: msg = (_( "Can't find partition name on the array, partition name " "is: %(name)s.") % {'name': new_partition_name}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) # smarttier if old_opts['policy'] != new_opts['policy']: change_opts['policy'] = (old_opts['policy'], new_opts['policy']) # smartcache old_cache_id = old_opts['cacheid'] if old_cache_id != new_cache_id: old_cache_name = None if old_cache_id: cache_info = self.client.get_cache_info_by_id(old_cache_id) 
old_cache_name = cache_info['NAME'] change_opts['cacheid'] = ([old_cache_id, old_cache_name], [new_cache_id, new_cache_name]) # smartpartition old_partition_id = old_opts['partitionid'] if old_partition_id != new_partition_id: old_partition_name = None if old_partition_id: partition_info = self.client.get_partition_info_by_id( old_partition_id) old_partition_name = partition_info['NAME'] change_opts['partitionid'] = ([old_partition_id, old_partition_name], [new_partition_id, new_partition_name]) # smartqos new_qos = smartx.SmartQos.get_qos_by_volume_type(new_type) old_qos_id = self.client.get_qosid_by_lunid(lun_id) old_qos = self._get_qos_specs_from_array(old_qos_id) if old_qos != new_qos: change_opts['qos'] = ([old_qos_id, old_qos], new_qos) return change_opts def determine_changes_when_retype(self, volume, new_type, host): migration = False change_opts = { 'policy': None, 'partitionid': None, 'cacheid': None, 'qos': None, 'host': None, 'LUNType': None, 'replication_enabled': None, 'replication_type': None, } lun_id = volume.provider_location old_opts = self.get_lun_specs(lun_id) new_specs = new_type['extra_specs'] new_opts = self._get_volume_params_from_specs(new_specs) if 'LUNType' not in new_opts: new_opts['LUNType'] = self.configuration.lun_type if volume.host != host['host']: migration = True change_opts['host'] = (volume.host, host['host']) if old_opts['LUNType'] != new_opts['LUNType']: migration = True change_opts['LUNType'] = (old_opts['LUNType'], new_opts['LUNType']) volume_type = self._get_volume_type(volume) volume_opts = self._get_volume_params(volume_type) if (volume_opts['replication_enabled'] == 'true' or new_opts['replication_enabled'] == 'true'): # If replication_enabled changes, # then replication_type in change_opts will be set. change_opts['replication_enabled'] = ( volume_opts['replication_enabled'], new_opts['replication_enabled']) change_opts['replication_type'] = (volume_opts['replication_type'], new_opts['replication_type']) change_opts = self._check_needed_changes(lun_id, old_opts, new_opts, change_opts, new_type) LOG.debug("Determine changes when retype. Migration: " "%(migration)s, change_opts: %(change_opts)s.", {'migration': migration, 'change_opts': change_opts}) return migration, change_opts, lun_id def _get_qos_specs_from_array(self, qos_id): qos = {} qos_info = {} if qos_id: qos_info = self.client.get_qos_info(qos_id) for key, value in qos_info.items(): key = key.upper() if key in constants.QOS_KEYS: if key == 'LATENCY' and value == '0': continue else: qos[key] = value return qos def create_export(self, context, volume, connector): """Export a volume.""" pass def ensure_export(self, context, volume): """Synchronously recreate an export for a volume.""" pass def remove_export(self, context, volume): """Remove an export for a volume.""" pass def create_export_snapshot(self, context, snapshot, connector): """Export a snapshot.""" pass def remove_export_snapshot(self, context, snapshot): """Remove an export for a snapshot.""" pass def backup_use_temp_snapshot(self): # This config option has a default to be False, So just return it. 
return self.configuration.safe_get("backup_use_temp_snapshot") def _copy_volume(self, volume, copy_name, src_lun, tgt_lun): luncopy_id = self.client.create_luncopy(copy_name, src_lun, tgt_lun) wait_interval = self.configuration.lun_copy_wait_interval try: self.client.start_luncopy(luncopy_id) def _luncopy_complete(): luncopy_info = self.client.get_luncopy_info(luncopy_id) if luncopy_info['status'] == constants.STATUS_LUNCOPY_READY: # luncopy_info['status'] means for the running status of # the luncopy. If luncopy_info['status'] is equal to '40', # this luncopy is completely ready. return True elif luncopy_info['state'] != constants.STATUS_HEALTH: # luncopy_info['state'] means for the healthy status of the # luncopy. If luncopy_info['state'] is not equal to '1', # this means that an error occurred during the LUNcopy # operation and we should abort it. err_msg = (_( 'An error occurred during the LUNcopy operation. ' 'LUNcopy name: %(luncopyname)s. ' 'LUNcopy status: %(luncopystatus)s. ' 'LUNcopy state: %(luncopystate)s.') % {'luncopyname': luncopy_id, 'luncopystatus': luncopy_info['status'], 'luncopystate': luncopy_info['state']},) LOG.error(err_msg) raise exception.VolumeBackendAPIException(data=err_msg) huawei_utils.wait_for_condition(_luncopy_complete, wait_interval, self.configuration.lun_timeout) except Exception: with excutils.save_and_reraise_exception(): self.client.delete_luncopy(luncopy_id) self.delete_volume(volume) self.client.delete_luncopy(luncopy_id) def _check_lun_valid_for_manage(self, lun_info, external_ref): lun_id = lun_info.get('ID') # Check whether the LUN is already in LUN group. if lun_info.get('ISADD2LUNGROUP') == 'true': msg = (_("Can't import LUN %s to Cinder. Already exists in a LUN " "group.") % lun_id) raise exception.ManageExistingInvalidReference( existing_ref=external_ref, reason=msg) # Check whether the LUN is Normal. if lun_info.get('HEALTHSTATUS') != constants.STATUS_HEALTH: msg = _("Can't import LUN %s to Cinder. LUN status is not " "normal.") % lun_id raise exception.ManageExistingInvalidReference( existing_ref=external_ref, reason=msg) # Check whether the LUN exists in a HyperMetroPair. try: hypermetro_pairs = self.client.get_hypermetro_pairs() except exception.VolumeBackendAPIException: hypermetro_pairs = [] LOG.debug("Can't get hypermetro info, pass the check.") for pair in hypermetro_pairs: if pair.get('LOCALOBJID') == lun_id: msg = (_("Can't import LUN %s to Cinder. Already exists in a " "HyperMetroPair.") % lun_id) raise exception.ManageExistingInvalidReference( existing_ref=external_ref, reason=msg) # Check whether the LUN exists in a SplitMirror. try: split_mirrors = self.client.get_split_mirrors() except exception.VolumeBackendAPIException as ex: if re.search('License is unavailable', ex.msg): # Can't check whether the LUN has SplitMirror with it, # just pass the check and log it. split_mirrors = [] LOG.warning(_LW('No license for SplitMirror.')) else: msg = _("Failed to get SplitMirror.") raise exception.VolumeBackendAPIException(data=msg) for mirror in split_mirrors: try: target_luns = self.client.get_target_luns(mirror.get('ID')) except exception.VolumeBackendAPIException: msg = _("Failed to get target LUN of SplitMirror.") raise exception.VolumeBackendAPIException(data=msg) if (mirror.get('PRILUNID') == lun_id) or (lun_id in target_luns): msg = (_("Can't import LUN %s to Cinder. 
Already exists in a " "SplitMirror.") % lun_id) raise exception.ManageExistingInvalidReference( existing_ref=external_ref, reason=msg) # Check whether the LUN exists in a migration task. try: migration_tasks = self.client.get_migration_task() except exception.VolumeBackendAPIException as ex: if re.search('License is unavailable', ex.msg): # Can't check whether the LUN has migration task with it, # just pass the check and log it. migration_tasks = [] LOG.warning(_LW('No license for migration.')) else: msg = _("Failed to get migration task.") raise exception.VolumeBackendAPIException(data=msg) for migration in migration_tasks: if lun_id in (migration.get('PARENTID'), migration.get('TARGETLUNID')): msg = (_("Can't import LUN %s to Cinder. Already exists in a " "migration task.") % lun_id) raise exception.ManageExistingInvalidReference( existing_ref=external_ref, reason=msg) # Check whether the LUN exists in a LUN copy task. lun_copy = lun_info.get('LUNCOPYIDS') if lun_copy and lun_copy[1:-1]: msg = (_("Can't import LUN %s to Cinder. Already exists in " "a LUN copy task.") % lun_id) raise exception.ManageExistingInvalidReference( existing_ref=external_ref, reason=msg) # Check whether the LUN exists in a remote replication task. rmt_replication = lun_info.get('REMOTEREPLICATIONIDS') if rmt_replication and rmt_replication[1:-1]: msg = (_("Can't import LUN %s to Cinder. Already exists in " "a remote replication task.") % lun_id) raise exception.ManageExistingInvalidReference( existing_ref=external_ref, reason=msg) # Check whether the LUN exists in a LUN mirror. if self.client.is_lun_in_mirror(lun_id): msg = (_("Can't import LUN %s to Cinder. Already exists in " "a LUN mirror.") % lun_id) raise exception.ManageExistingInvalidReference( existing_ref=external_ref, reason=msg) def manage_existing(self, volume, external_ref): """Manage an existing volume on the backend storage.""" # Check whether the LUN is belonged to the specified pool. pool = volume_utils.extract_host(volume.host, 'pool') LOG.debug("Pool specified is: %s.", pool) lun_info = self._get_lun_info_by_ref(external_ref) lun_id = lun_info.get('ID') description = lun_info.get('DESCRIPTION', '') if len(description) <= ( constants.MAX_VOL_DESCRIPTION - len(volume.name) - 1): description = volume.name + ' ' + description lun_pool = lun_info.get('PARENTNAME') LOG.debug("Storage pool of existing LUN %(lun)s is %(pool)s.", {"lun": lun_id, "pool": lun_pool}) if pool != lun_pool: msg = (_("The specified LUN does not belong to the given " "pool: %s.") % pool) raise exception.ManageExistingInvalidReference( existing_ref=external_ref, reason=msg) # Check other stuffs to determine whether this LUN can be imported. self._check_lun_valid_for_manage(lun_info, external_ref) type_id = volume.volume_type_id new_opts = None if type_id: # Handle volume type if specified. old_opts = self.get_lun_specs(lun_id) volume_type = volume_types.get_volume_type(None, type_id) new_specs = volume_type.get('extra_specs') new_opts = self._get_volume_params_from_specs(new_specs) if ('LUNType' in new_opts and old_opts['LUNType'] != new_opts['LUNType']): msg = (_("Can't import LUN %(lun_id)s to Cinder. " "LUN type mismatched.") % lun_id) raise exception.ManageExistingVolumeTypeMismatch(reason=msg) if volume_type: change_opts = {'policy': None, 'partitionid': None, 'cacheid': None, 'qos': None} change_opts = self._check_needed_changes(lun_id, old_opts, new_opts, change_opts, volume_type) self.modify_lun(lun_id, change_opts) # Rename the LUN to make it manageable for Cinder. 
new_name = huawei_utils.encode_name(volume.id) LOG.debug("Rename LUN %(old_name)s to %(new_name)s.", {'old_name': lun_info.get('NAME'), 'new_name': new_name}) self.client.rename_lun(lun_id, new_name, description) metadata = huawei_utils.get_admin_metadata(volume) metadata.update({'huawei_lun_wwn': lun_info['WWN']}) model_update = {} model_update.update({'admin_metadata': metadata}) model_update.update({'provider_location': lun_id}) if new_opts and new_opts.get('replication_enabled'): LOG.debug("Manage volume need to create replication.") try: lun_info = self.client.get_lun_info(lun_id) replica_info = self.replica.create_replica( lun_info, new_opts.get('replication_type')) model_update.update(replica_info) except exception.VolumeBackendAPIException: with excutils.save_and_reraise_exception(): LOG.exception(_LE("Manage exist volume failed.")) return model_update def _get_lun_info_by_ref(self, external_ref): LOG.debug("Get external_ref: %s", external_ref) name = external_ref.get('source-name') id = external_ref.get('source-id') if not (name or id): msg = _('Must specify source-name or source-id.') raise exception.ManageExistingInvalidReference( existing_ref=external_ref, reason=msg) lun_id = id or self.client.get_lun_id_by_name(name) if not lun_id: msg = _("Can't find LUN on the array, please check the " "source-name or source-id.") raise exception.ManageExistingInvalidReference( existing_ref=external_ref, reason=msg) lun_info = self.client.get_lun_info(lun_id) return lun_info def unmanage(self, volume): """Export Huawei volume from Cinder.""" LOG.debug("Unmanage volume: %s.", volume.id) lun_id = self._check_volume_exist_on_array( volume, constants.VOLUME_NOT_EXISTS_WARN) if not lun_id: return lun_name = huawei_utils.encode_name(volume.id) new_name = 'unmged_' + lun_name LOG.debug("Rename LUN %(lun_name)s to %(new_name)s.", {'lun_name': lun_name, 'new_name': new_name}) try: self.client.rename_lun(lun_id, new_name) except Exception: LOG.warning(_LW("Rename lun %(lun_id)s fails when " "unmanaging volume %(volume)s."), {"lun_id": lun_id, "volume": volume.id}) def manage_existing_get_size(self, volume, external_ref): """Get the size of the existing volume.""" lun_info = self._get_lun_info_by_ref(external_ref) size = int(math.ceil(lun_info.get('CAPACITY') / constants.CAPACITY_UNIT)) return size def _check_snapshot_valid_for_manage(self, snapshot_info, external_ref): snapshot_id = snapshot_info.get('ID') # Check whether the snapshot is normal. if snapshot_info.get('HEALTHSTATUS') != constants.STATUS_HEALTH: msg = _("Can't import snapshot %s to Cinder. " "Snapshot status is not normal" " or running status is not online.") % snapshot_id raise exception.ManageExistingInvalidReference( existing_ref=external_ref, reason=msg) if snapshot_info.get('EXPOSEDTOINITIATOR') != 'false': msg = _("Can't import snapshot %s to Cinder. 
" "Snapshot is exposed to initiator.") % snapshot_id raise exception.ManageExistingInvalidReference( existing_ref=external_ref, reason=msg) def _get_snapshot_info_by_ref(self, external_ref): LOG.debug("Get snapshot external_ref: %s.", external_ref) name = external_ref.get('source-name') id = external_ref.get('source-id') if not (name or id): msg = _('Must specify snapshot source-name or source-id.') raise exception.ManageExistingInvalidReference( existing_ref=external_ref, reason=msg) snapshot_id = id or self.client.get_snapshot_id_by_name(name) if not snapshot_id: msg = _("Can't find snapshot on array, please check the " "source-name or source-id.") raise exception.ManageExistingInvalidReference( existing_ref=external_ref, reason=msg) snapshot_info = self.client.get_snapshot_info(snapshot_id) return snapshot_info def manage_existing_snapshot(self, snapshot, existing_ref): snapshot_info = self._get_snapshot_info_by_ref(existing_ref) snapshot_id = snapshot_info.get('ID') volume = snapshot.volume lun_id = volume.provider_location if lun_id != snapshot_info.get('PARENTID'): msg = (_("Can't import snapshot %s to Cinder. " "Snapshot doesn't belong to volume."), snapshot_id) raise exception.ManageExistingInvalidReference( existing_ref=existing_ref, reason=msg) # Check whether this snapshot can be imported. self._check_snapshot_valid_for_manage(snapshot_info, existing_ref) # Rename the snapshot to make it manageable for Cinder. description = snapshot.id snapshot_name = huawei_utils.encode_name(snapshot.id) self.client.rename_snapshot(snapshot_id, snapshot_name, description) if snapshot_info.get('RUNNINGSTATUS') != constants.STATUS_ACTIVE: self.client.activate_snapshot(snapshot_id) LOG.debug("Rename snapshot %(old_name)s to %(new_name)s.", {'old_name': snapshot_info.get('NAME'), 'new_name': snapshot_name}) return {'provider_location': snapshot_id} def manage_existing_snapshot_get_size(self, snapshot, existing_ref): """Get the size of the existing snapshot.""" snapshot_info = self._get_snapshot_info_by_ref(existing_ref) size = (float(snapshot_info.get('USERCAPACITY')) // constants.CAPACITY_UNIT) remainder = (float(snapshot_info.get('USERCAPACITY')) % constants.CAPACITY_UNIT) if int(remainder) > 0: msg = _("Snapshot size must be multiple of 1 GB.") raise exception.VolumeBackendAPIException(data=msg) return int(size) def unmanage_snapshot(self, snapshot): """Unmanage the specified snapshot from Cinder management.""" LOG.debug("Unmanage snapshot: %s.", snapshot.id) snapshot_name = huawei_utils.encode_name(snapshot.id) snapshot_id = self.client.get_snapshot_id_by_name(snapshot_name) if not snapshot_id: LOG.warning(_LW("Can't find snapshot on the array: %s."), snapshot_name) return new_name = 'unmged_' + snapshot_name LOG.debug("Rename snapshot %(snapshot_name)s to %(new_name)s.", {'snapshot_name': snapshot_name, 'new_name': new_name}) try: self.client.rename_snapshot(snapshot_id, new_name) except Exception: LOG.warning(_LW("Failed to rename snapshot %(snapshot_id)s, " "snapshot name on array is %(snapshot_name)s."), {'snapshot_id': snapshot.id, 'snapshot_name': snapshot_name}) def remove_host_with_check(self, host_id): wwns_in_host = ( self.client.get_host_fc_initiators(host_id)) iqns_in_host = ( self.client.get_host_iscsi_initiators(host_id)) if not (wwns_in_host or iqns_in_host or self.client.is_host_associated_to_hostgroup(host_id)): self.client.remove_host(host_id) def create_consistencygroup(self, context, group): """Creates a consistencygroup.""" model_update = {'status': 'available'} opts = 
self._get_consistencygroup_type(group) if (opts.get('hypermetro') == 'true'): metro = hypermetro.HuaweiHyperMetro(self.client, self.rmt_client, self.configuration) metro.create_consistencygroup(group) return model_update # Array will create CG at create_cgsnapshot time. Cinder will # maintain the CG and volumes relationship in the db. return model_update def delete_consistencygroup(self, context, group, volumes): opts = self._get_consistencygroup_type(group) if opts.get('hypermetro') == 'true': metro = hypermetro.HuaweiHyperMetro(self.client, self.rmt_client, self.configuration) return metro.delete_consistencygroup(context, group, volumes) model_update = {} volumes_model_update = [] model_update.update({'status': group.status}) for volume_ref in volumes: try: self.delete_volume(volume_ref) volumes_model_update.append( {'id': volume_ref.id, 'status': 'deleted'}) except Exception: volumes_model_update.append( {'id': volume_ref.id, 'status': 'error_deleting'}) return model_update, volumes_model_update def update_consistencygroup(self, context, group, add_volumes, remove_volumes): model_update = {'status': 'available'} opts = self._get_consistencygroup_type(group) if opts.get('hypermetro') == 'true': metro = hypermetro.HuaweiHyperMetro(self.client, self.rmt_client, self.configuration) metro.update_consistencygroup(context, group, add_volumes, remove_volumes) return model_update, None, None # Array will create CG at create_cgsnapshot time. Cinder will # maintain the CG and volumes relationship in the db. return model_update, None, None def create_cgsnapshot(self, context, cgsnapshot, snapshots): """Create cgsnapshot.""" LOG.info(_LI('Create cgsnapshot for consistency group' ': %(group_id)s'), {'group_id': cgsnapshot.consistencygroup_id}) model_update = {} snapshots_model_update = [] added_snapshots_info = [] try: for snapshot in snapshots: volume = snapshot.volume if not volume: msg = (_("Can't get volume id from snapshot, " "snapshot: %(id)s") % {"id": snapshot.id}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) volume_name = huawei_utils.encode_name(volume.id) lun_id = self.client.get_lun_id(volume, volume_name) snapshot_name = huawei_utils.encode_name(snapshot.id) snapshot_description = snapshot.id info = self.client.create_snapshot(lun_id, snapshot_name, snapshot_description) snapshot_model_update = {'id': snapshot.id, 'status': 'available', 'provider_location': info['ID']} snapshots_model_update.append(snapshot_model_update) added_snapshots_info.append(info) except Exception: with excutils.save_and_reraise_exception(): LOG.error(_LE("Create cgsnapshots failed. " "Cgsnapshot id: %s."), cgsnapshot.id) snapshot_ids = [added_snapshot['ID'] for added_snapshot in added_snapshots_info] try: self.client.activate_snapshot(snapshot_ids) except Exception: with excutils.save_and_reraise_exception(): LOG.error(_LE("Active cgsnapshots failed. 
" "Cgsnapshot id: %s."), cgsnapshot.id) model_update['status'] = 'available' return model_update, snapshots_model_update def delete_cgsnapshot(self, context, cgsnapshot, snapshots): """Delete consistency group snapshot.""" LOG.info(_LI('Delete cgsnapshot %(snap_id)s for consistency group: ' '%(group_id)s'), {'snap_id': cgsnapshot.id, 'group_id': cgsnapshot.consistencygroup_id}) model_update = {} snapshots_model_update = [] model_update['status'] = cgsnapshot.status for snapshot in snapshots: try: self.delete_snapshot(snapshot) snapshots_model_update.append({'id': snapshot.id, 'status': 'deleted'}) except Exception: with excutils.save_and_reraise_exception(): LOG.error(_LE("Delete cg snapshots failed. " "Cgsnapshot id: %s"), cgsnapshot.id) return model_update, snapshots_model_update def _classify_volume(self, volumes): normal_volumes = [] replica_volumes = [] for v in volumes: volume_type = self._get_volume_type(v) opts = self._get_volume_params(volume_type) if opts.get('replication_enabled') == 'true': replica_volumes.append(v) else: normal_volumes.append(v) return normal_volumes, replica_volumes def _failback_normal_volumes(self, volumes): volumes_update = [] for v in volumes: v_update = {} v_update['volume_id'] = v.id metadata = huawei_utils.get_volume_metadata(v) old_status = 'available' if 'old_status' in metadata: old_status = metadata['old_status'] del metadata['old_status'] v_update['updates'] = {'status': old_status, 'metadata': metadata} volumes_update.append(v_update) return volumes_update def _failback(self, volumes): if self.active_backend_id in ('', None): return 'default', [] normal_volumes, replica_volumes = self._classify_volume(volumes) volumes_update = [] replica_volumes_update = self.replica.failback(replica_volumes) volumes_update.extend(replica_volumes_update) normal_volumes_update = self._failback_normal_volumes(normal_volumes) volumes_update.extend(normal_volumes_update) self.active_backend_id = "" secondary_id = 'default' # Switch array connection. self.client, self.replica_client = self.replica_client, self.client self.replica = replication.ReplicaPairManager(self.client, self.replica_client, self.configuration) return secondary_id, volumes_update def _failover_normal_volumes(self, volumes): volumes_update = [] for v in volumes: v_update = {} v_update['volume_id'] = v.id metadata = huawei_utils.get_volume_metadata(v) metadata.update({'old_status': v.status}) v_update['updates'] = {'status': 'error', 'metadata': metadata} volumes_update.append(v_update) return volumes_update def _failover(self, volumes): if self.active_backend_id not in ('', None): return self.replica_dev_conf['backend_id'], [] normal_volumes, replica_volumes = self._classify_volume(volumes) volumes_update = [] replica_volumes_update = self.replica.failover(replica_volumes) volumes_update.extend(replica_volumes_update) normal_volumes_update = self._failover_normal_volumes(normal_volumes) volumes_update.extend(normal_volumes_update) self.active_backend_id = self.replica_dev_conf['backend_id'] secondary_id = self.active_backend_id # Switch array connection. 
self.client, self.replica_client = self.replica_client, self.client self.replica = replication.ReplicaPairManager(self.client, self.replica_client, self.configuration) return secondary_id, volumes_update def failover_host(self, context, volumes, secondary_id=None): """Failover all volumes to secondary.""" if secondary_id == 'default': secondary_id, volumes_update = self._failback(volumes) elif (secondary_id == self.replica_dev_conf['backend_id'] or secondary_id is None): secondary_id, volumes_update = self._failover(volumes) else: msg = _("Invalid secondary id %s.") % secondary_id LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) return secondary_id, volumes_update def initialize_connection_snapshot(self, snapshot, connector, **kwargs): """Map a snapshot to a host and return target iSCSI information.""" # From the volume structure. volume = Volume(id=snapshot.id, provider_location=snapshot.provider_location, lun_type=constants.SNAPSHOT_TYPE, metadata=None) return self.initialize_connection(volume, connector) def terminate_connection_snapshot(self, snapshot, connector, **kwargs): """Delete map between a snapshot and a host.""" # From the volume structure. volume = Volume(id=snapshot.id, provider_location=snapshot.provider_location, lun_type=constants.SNAPSHOT_TYPE, metadata=None) return self.terminate_connection(volume, connector) def get_lun_id_and_type(self, volume): if hasattr(volume, 'lun_type'): lun_id = volume.provider_location lun_type = constants.SNAPSHOT_TYPE else: lun_id = self._check_volume_exist_on_array( volume, constants.VOLUME_NOT_EXISTS_RAISE) lun_type = constants.LUN_TYPE return lun_id, lun_type @interface.volumedriver class HuaweiISCSIDriver(HuaweiBaseDriver, driver.ISCSIDriver): """ISCSI driver for Huawei storage arrays. Version history: .. 
code-block:: none 1.0.0 - Initial driver 1.1.0 - Provide Huawei OceanStor storage 18000 driver 1.1.1 - Code refactor CHAP support Multiple pools support ISCSI multipath support SmartX support Volume migration support Volume retype support 2.0.0 - Rename to HuaweiISCSIDriver 2.0.1 - Manage/unmanage volume support 2.0.2 - Refactor HuaweiISCSIDriver 2.0.3 - Manage/unmanage snapshot support 2.0.5 - Replication V2 support 2.0.6 - Support iSCSI configuration in Replication 2.0.7 - Hypermetro support Hypermetro consistency group support Consistency group support Cgsnapshot support 2.0.8 - Backup snapshot optimal path support 2.0.9 - Support reporting disk type of pool """ VERSION = "2.0.9" def __init__(self, *args, **kwargs): super(HuaweiISCSIDriver, self).__init__(*args, **kwargs) def get_volume_stats(self, refresh=False): """Get volume status.""" data = HuaweiBaseDriver.get_volume_stats(self, refresh=False) backend_name = self.configuration.safe_get('volume_backend_name') data['volume_backend_name'] = backend_name or self.__class__.__name__ data['storage_protocol'] = 'iSCSI' data['driver_version'] = self.VERSION data['vendor_name'] = 'Huawei' return data @utils.synchronized('huawei', external=True) def initialize_connection(self, volume, connector): """Map a volume to a host and return target iSCSI information.""" lun_id, lun_type = self.get_lun_id_and_type(volume) initiator_name = connector['initiator'] LOG.info(_LI( 'initiator name: %(initiator_name)s, ' 'LUN ID: %(lun_id)s.'), {'initiator_name': initiator_name, 'lun_id': lun_id}) (iscsi_iqns, target_ips, portgroup_id) = self.client.get_iscsi_params(connector) LOG.info(_LI('initialize_connection, iscsi_iqn: %(iscsi_iqn)s, ' 'target_ip: %(target_ip)s, ' 'portgroup_id: %(portgroup_id)s.'), {'iscsi_iqn': iscsi_iqns, 'target_ip': target_ips, 'portgroup_id': portgroup_id},) # Create hostgroup if not exist. original_host_name = connector['host'] host_name = huawei_utils.encode_host_name(original_host_name) host_id = self.client.add_host_with_check(host_name, original_host_name) # Add initiator to the host. self.client.ensure_initiator_added(initiator_name, host_id) hostgroup_id = self.client.add_host_to_hostgroup(host_id) # Mapping lungroup and hostgroup to view. self.client.do_mapping(lun_id, hostgroup_id, host_id, portgroup_id, lun_type) hostlun_id = self.client.get_host_lun_id(host_id, lun_id, lun_type) LOG.info(_LI("initialize_connection, host lun id is: %s."), hostlun_id) chapinfo = self.client.find_chap_info(self.client.iscsi_info, initiator_name) # Return iSCSI properties. properties = {} properties['target_discovered'] = False properties['volume_id'] = volume.id multipath = connector.get('multipath', False) hostlun_id = int(hostlun_id) if not multipath: properties['target_portal'] = ('%s:3260' % target_ips[0]) properties['target_iqn'] = iscsi_iqns[0] properties['target_lun'] = hostlun_id else: properties['target_iqns'] = [iqn for iqn in iscsi_iqns] properties['target_portals'] = [ '%s:3260' % ip for ip in target_ips] properties['target_luns'] = [hostlun_id] * len(target_ips) # If use CHAP, return CHAP info. if chapinfo: chap_username, chap_password = chapinfo.split(';') properties['auth_method'] = 'CHAP' properties['auth_username'] = chap_username properties['auth_password'] = chap_password LOG.info(_LI("initialize_connection success. 
Return data: %s."), properties) return {'driver_volume_type': 'iscsi', 'data': properties} @utils.synchronized('huawei', external=True) def terminate_connection(self, volume, connector, **kwargs): """Delete map between a volume and a host.""" lun_id, lun_type = self.get_lun_id_and_type(volume) initiator_name = connector['initiator'] host_name = connector['host'] lungroup_id = None LOG.info(_LI( 'terminate_connection: initiator name: %(ini)s, ' 'LUN ID: %(lunid)s.'), {'ini': initiator_name, 'lunid': lun_id},) portgroup = None portgroup_id = None view_id = None left_lunnum = -1 for ini in self.client.iscsi_info: if ini['Name'] == initiator_name: for key in ini: if key == 'TargetPortGroup': portgroup = ini['TargetPortGroup'] break if portgroup: portgroup_id = self.client.get_tgt_port_group(portgroup) host_name = huawei_utils.encode_host_name(host_name) host_id = self.client.get_host_id_by_name(host_name) if host_id: mapping_view_name = constants.MAPPING_VIEW_PREFIX + host_id view_id = self.client.find_mapping_view(mapping_view_name) if view_id: lungroup_id = self.client.find_lungroup_from_map(view_id) # Remove lun from lungroup. if lun_id and lungroup_id: lungroup_ids = self.client.get_lungroupids_by_lunid( lun_id, lun_type) if lungroup_id in lungroup_ids: self.client.remove_lun_from_lungroup(lungroup_id, lun_id, lun_type) else: LOG.warning(_LW("LUN is not in lungroup. " "LUN ID: %(lun_id)s. " "Lungroup id: %(lungroup_id)s."), {"lun_id": lun_id, "lungroup_id": lungroup_id}) # Remove portgroup from mapping view if no lun left in lungroup. if lungroup_id: left_lunnum = self.client.get_obj_count_from_lungroup(lungroup_id) if portgroup_id and view_id and (int(left_lunnum) <= 0): if self.client.is_portgroup_associated_to_view(view_id, portgroup_id): self.client.delete_portgroup_mapping_view(view_id, portgroup_id) if view_id and (int(left_lunnum) <= 0): self.client.remove_chap(initiator_name) if self.client.lungroup_associated(view_id, lungroup_id): self.client.delete_lungroup_mapping_view(view_id, lungroup_id) self.client.delete_lungroup(lungroup_id) if self.client.is_initiator_associated_to_host(initiator_name): self.client.remove_iscsi_from_host(initiator_name) hostgroup_name = constants.HOSTGROUP_PREFIX + host_id hostgroup_id = self.client.find_hostgroup(hostgroup_name) if hostgroup_id: if self.client.hostgroup_associated(view_id, hostgroup_id): self.client.delete_hostgoup_mapping_view(view_id, hostgroup_id) self.client.remove_host_from_hostgroup(hostgroup_id, host_id) self.client.delete_hostgroup(hostgroup_id) self.client.remove_host(host_id) self.client.delete_mapping_view(view_id) @interface.volumedriver class HuaweiFCDriver(HuaweiBaseDriver, driver.FibreChannelDriver): """FC driver for Huawei OceanStor storage arrays. Version history: .. 
code-block:: none 1.0.0 - Initial driver 1.1.0 - Provide Huawei OceanStor 18000 storage volume driver 1.1.1 - Code refactor Multiple pools support SmartX support Volume migration support Volume retype support FC zone enhancement Volume hypermetro support 2.0.0 - Rename to HuaweiFCDriver 2.0.1 - Manage/unmanage volume support 2.0.2 - Refactor HuaweiFCDriver 2.0.3 - Manage/unmanage snapshot support 2.0.4 - Balanced FC port selection 2.0.5 - Replication V2 support 2.0.7 - Hypermetro support Hypermetro consistency group support Consistency group support Cgsnapshot support 2.0.8 - Backup snapshot optimal path support 2.0.9 - Support reporting disk type of pool """ VERSION = "2.0.9" def __init__(self, *args, **kwargs): super(HuaweiFCDriver, self).__init__(*args, **kwargs) self.fcsan = None def get_volume_stats(self, refresh=False): """Get volume status.""" data = HuaweiBaseDriver.get_volume_stats(self, refresh=False) backend_name = self.configuration.safe_get('volume_backend_name') data['volume_backend_name'] = backend_name or self.__class__.__name__ data['storage_protocol'] = 'FC' data['driver_version'] = self.VERSION data['vendor_name'] = 'Huawei' return data @utils.synchronized('huawei', external=True) @fczm_utils.AddFCZone def initialize_connection(self, volume, connector): lun_id, lun_type = self.get_lun_id_and_type(volume) wwns = connector['wwpns'] LOG.info(_LI( 'initialize_connection, initiator: %(wwpns)s,' ' LUN ID: %(lun_id)s.'), {'wwpns': wwns, 'lun_id': lun_id},) portg_id = None original_host_name = connector['host'] host_name = huawei_utils.encode_host_name(original_host_name) host_id = self.client.add_host_with_check(host_name, original_host_name) if not self.fcsan: self.fcsan = fczm_utils.create_lookup_service() if self.fcsan: # Use FC switch. zone_helper = fc_zone_helper.FCZoneHelper(self.fcsan, self.client) try: (tgt_port_wwns, portg_id, init_targ_map) = ( zone_helper.build_ini_targ_map(wwns, host_id, lun_id, lun_type)) except Exception as err: self.remove_host_with_check(host_id) msg = _('build_ini_targ_map fails. %s') % err raise exception.VolumeBackendAPIException(data=msg) for ini in init_targ_map: self.client.ensure_fc_initiator_added(ini, host_id) else: # Not use FC switch. online_wwns_in_host = ( self.client.get_host_online_fc_initiators(host_id)) online_free_wwns = self.client.get_online_free_wwns() for wwn in wwns: if (wwn not in online_wwns_in_host and wwn not in online_free_wwns): wwns_in_host = ( self.client.get_host_fc_initiators(host_id)) iqns_in_host = ( self.client.get_host_iscsi_initiators(host_id)) if not (wwns_in_host or iqns_in_host or self.client.is_host_associated_to_hostgroup(host_id)): self.client.remove_host(host_id) msg = _('No FC initiator can be added to host.') LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) for wwn in wwns: if wwn in online_free_wwns: self.client.add_fc_port_to_host(host_id, wwn) (tgt_port_wwns, init_targ_map) = ( self.client.get_init_targ_map(wwns)) # Add host into hostgroup. hostgroup_id = self.client.add_host_to_hostgroup(host_id) map_info = self.client.do_mapping(lun_id, hostgroup_id, host_id, portg_id, lun_type) host_lun_id = self.client.get_host_lun_id(host_id, lun_id, lun_type) # Return FC properties. fc_info = {'driver_volume_type': 'fibre_channel', 'data': {'target_lun': int(host_lun_id), 'target_discovered': True, 'target_wwn': tgt_port_wwns, 'volume_id': volume.id, 'initiator_target_map': init_targ_map, 'map_info': map_info}, } # Deal with hypermetro connection. 
metadata = huawei_utils.get_volume_metadata(volume) LOG.info(_LI("initialize_connection, metadata is: %s."), metadata) if 'hypermetro_id' in metadata: loc_tgt_wwn = fc_info['data']['target_wwn'] local_ini_tgt_map = fc_info['data']['initiator_target_map'] hyperm = hypermetro.HuaweiHyperMetro(self.client, self.rmt_client, self.configuration) rmt_fc_info = hyperm.connect_volume_fc(volume, connector) rmt_tgt_wwn = rmt_fc_info['data']['target_wwn'] rmt_ini_tgt_map = rmt_fc_info['data']['initiator_target_map'] fc_info['data']['target_wwn'] = (loc_tgt_wwn + rmt_tgt_wwn) wwns = connector['wwpns'] for wwn in wwns: if (wwn in local_ini_tgt_map and wwn in rmt_ini_tgt_map): fc_info['data']['initiator_target_map'][wwn].extend( rmt_ini_tgt_map[wwn]) elif (wwn not in local_ini_tgt_map and wwn in rmt_ini_tgt_map): fc_info['data']['initiator_target_map'][wwn] = ( rmt_ini_tgt_map[wwn]) # else, do nothing loc_map_info = fc_info['data']['map_info'] rmt_map_info = rmt_fc_info['data']['map_info'] same_host_id = self._get_same_hostid(loc_map_info, rmt_map_info) self.client.change_hostlun_id(loc_map_info, same_host_id) hyperm.rmt_client.change_hostlun_id(rmt_map_info, same_host_id) fc_info['data']['target_lun'] = same_host_id hyperm.rmt_client.logout() LOG.info(_LI("Return FC info is: %s."), fc_info) return fc_info def _get_same_hostid(self, loc_fc_info, rmt_fc_info): loc_aval_luns = loc_fc_info['aval_luns'] loc_aval_luns = json.loads(loc_aval_luns) rmt_aval_luns = rmt_fc_info['aval_luns'] rmt_aval_luns = json.loads(rmt_aval_luns) same_host_id = None for i in range(1, 512): if i in rmt_aval_luns and i in loc_aval_luns: same_host_id = i break LOG.info(_LI("The same hostid is: %s."), same_host_id) if not same_host_id: msg = _("Can't find the same host id from arrays.") LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) return same_host_id @utils.synchronized('huawei', external=True) @fczm_utils.RemoveFCZone def terminate_connection(self, volume, connector, **kwargs): """Delete map between a volume and a host.""" lun_id, lun_type = self.get_lun_id_and_type(volume) wwns = connector['wwpns'] host_name = connector['host'] left_lunnum = -1 lungroup_id = None view_id = None LOG.info(_LI('terminate_connection: wwpns: %(wwns)s, ' 'LUN ID: %(lun_id)s.'), {'wwns': wwns, 'lun_id': lun_id}) host_name = huawei_utils.encode_host_name(host_name) host_id = self.client.get_host_id_by_name(host_name) if host_id: mapping_view_name = constants.MAPPING_VIEW_PREFIX + host_id view_id = self.client.find_mapping_view(mapping_view_name) if view_id: lungroup_id = self.client.find_lungroup_from_map(view_id) if lun_id and lungroup_id: lungroup_ids = self.client.get_lungroupids_by_lunid(lun_id, lun_type) if lungroup_id in lungroup_ids: self.client.remove_lun_from_lungroup(lungroup_id, lun_id, lun_type) else: LOG.warning(_LW("LUN is not in lungroup. " "LUN ID: %(lun_id)s. 
" "Lungroup id: %(lungroup_id)s."), {"lun_id": lun_id, "lungroup_id": lungroup_id}) else: LOG.warning(_LW("Can't find lun on the array.")) if lungroup_id: left_lunnum = self.client.get_obj_count_from_lungroup(lungroup_id) if int(left_lunnum) > 0: fc_info = {'driver_volume_type': 'fibre_channel', 'data': {}} else: fc_info, portg_id = self._delete_zone_and_remove_fc_initiators( wwns, host_id) if lungroup_id: if view_id and self.client.lungroup_associated( view_id, lungroup_id): self.client.delete_lungroup_mapping_view(view_id, lungroup_id) self.client.delete_lungroup(lungroup_id) if portg_id: if view_id and self.client.is_portgroup_associated_to_view( view_id, portg_id): self.client.delete_portgroup_mapping_view(view_id, portg_id) self.client.delete_portgroup(portg_id) if host_id: hostgroup_name = constants.HOSTGROUP_PREFIX + host_id hostgroup_id = self.client.find_hostgroup(hostgroup_name) if hostgroup_id: if view_id and self.client.hostgroup_associated( view_id, hostgroup_id): self.client.delete_hostgoup_mapping_view( view_id, hostgroup_id) self.client.remove_host_from_hostgroup( hostgroup_id, host_id) self.client.delete_hostgroup(hostgroup_id) if not self.client.check_fc_initiators_exist_in_host( host_id): self.client.remove_host(host_id) if view_id: self.client.delete_mapping_view(view_id) # Deal with hypermetro connection. metadata = huawei_utils.get_volume_metadata(volume) LOG.info(_LI("Detach Volume, metadata is: %s."), metadata) if 'hypermetro_id' in metadata: hyperm = hypermetro.HuaweiHyperMetro(self.client, self.rmt_client, self.configuration) hyperm.disconnect_volume_fc(volume, connector) LOG.info(_LI("terminate_connection, return data is: %s."), fc_info) return fc_info def _delete_zone_and_remove_fc_initiators(self, wwns, host_id): # Get tgt_port_wwns and init_targ_map to remove zone. portg_id = None if not self.fcsan: self.fcsan = fczm_utils.create_lookup_service() if self.fcsan: zone_helper = fc_zone_helper.FCZoneHelper(self.fcsan, self.client) (tgt_port_wwns, portg_id, init_targ_map) = ( zone_helper.get_init_targ_map(wwns, host_id)) else: (tgt_port_wwns, init_targ_map) = ( self.client.get_init_targ_map(wwns)) # Remove the initiators from host if need. if host_id: fc_initiators = self.client.get_host_fc_initiators(host_id) for wwn in wwns: if wwn in fc_initiators: self.client.remove_fc_from_host(wwn) info = {'driver_volume_type': 'fibre_channel', 'data': {'target_wwn': tgt_port_wwns, 'initiator_target_map': init_targ_map}} return info, portg_id
bswartz/cinder
cinder/volume/drivers/huawei/huawei_driver.py
Python
apache-2.0
97,319
# Copyright 2015-2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from StringIO import StringIO

import mock

from paasta_tools.cli.cmds.list import paasta_list


@mock.patch('sys.stdout', new_callable=StringIO)
@mock.patch('paasta_tools.cli.cmds.list.list_services', autospec=True)
def test_list_paasta_list(mock_list_services, mock_stdout):
    """ paasta_list prints each service returned by get_services """
    mock_services = ['service_1', 'service_2']
    mock_list_services.return_value = mock_services
    args = mock.MagicMock()
    args.print_instances = False
    paasta_list(args)
    output = mock_stdout.getvalue()
    assert output == 'service_1\nservice_2\n'


@mock.patch('sys.stdout', new_callable=StringIO)
@mock.patch('paasta_tools.cli.cmds.list.list_service_instances', autospec=True)
def test_list_paasta_list_instances(mock_list_service_instances, mock_stdout):
    """ paasta_list prints each service.instance """
    mock_services = ['service_1.main', 'service_2.canary']
    mock_list_service_instances.return_value = mock_services
    args = mock.MagicMock()
    args.print_instances = True
    paasta_list(args)
    output = mock_stdout.getvalue()
    assert output == 'service_1.main\nservice_2.canary\n'
gstarnberger/paasta
tests/cli/test_cmds_list.py
Python
apache-2.0
1,751
# -*- coding: utf-8 -*-
# vim:fenc=utf-8

'''
Insights Forex live source
--------------------------

:copyright (c) 2014 Xavier Bruhiere
:license: Apache 2.0, see LICENSE for more details.
'''

import time
import pandas as pd
import dna.logging
import intuition.data.forex as forex

log = dna.logging.logger(__name__)


class Forex(object):
    '''
    At each event datetime of the provided index, ForexLiveSource fetches
    live forex data from TrueFX.
    '''

    def __init__(self, pairs, properties):
        self._wait_retry = properties.get('retry', 10)
        self.forex = forex.TrueFX(pairs=pairs)
        self.forex.connect()

    def get_data(self, sids):
        while True:
            rates = self.forex.query_rates()
            if len(rates.keys()) >= len(sids):
                log.debug('Data available for {}'.format(rates.keys()))
                break
            log.debug('Incomplete data ({}/{}), retrying in {}s'.format(
                len(rates.keys()), len(sids), self._wait_retry))
            time.sleep(self._wait_retry)
            debug_feedback = self.forex.connect()
            log.info('New Truefx connection: {}'.format(debug_feedback))
        return rates

    @property
    def mapping(self):
        return {
            'dt': (lambda x: x, 'dt'),
            # TODO Here conversion (weird result for now)
            # Or: (lambda x: pd.tslib.i8_to_pydt(x + '000000'), 'trade_time'),
            'trade_time': (lambda x: pd.datetime.utcfromtimestamp(
                float(x[:-3])), 'timeStamp'),
            'sid': (lambda x: x, 'sid'),
            'price': (float, 'bid'),
            'ask': (float, 'ask'),
            'high': (float, 'high'),
            'low': (float, 'low'),
            'volume': (lambda x: 10000, 'bid')
        }
intuition-io/insights
insights/sources/live/currencies.py
Python
apache-2.0
1,793
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json
import os
import shutil
import tempfile
from unittest import TestCase

from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.file_utils import is_datasets_available, is_faiss_available, is_torch_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow


if is_torch_available() and is_datasets_available() and is_faiss_available():
    from transformers.models.rag.configuration_rag import RagConfig
    from transformers.models.rag.tokenization_rag import RagTokenizer


@require_faiss
@require_torch
class RagTokenizerTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8

        # DPR tok
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        # BART tok
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    @require_tokenizers
    def test_save_load_pretrained_with_saved_config(self):
        save_dir = os.path.join(self.tmpdirname, "rag_tokenizer")
        rag_config = RagConfig(question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict())
        rag_tokenizer = RagTokenizer(question_encoder=self.get_dpr_tokenizer(), generator=self.get_bart_tokenizer())
        rag_config.save_pretrained(save_dir)
        rag_tokenizer.save_pretrained(save_dir)
        new_rag_tokenizer = RagTokenizer.from_pretrained(save_dir, config=rag_config)
        self.assertIsInstance(new_rag_tokenizer.question_encoder, DPRQuestionEncoderTokenizerFast)
        self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab(), rag_tokenizer.question_encoder.get_vocab())
        self.assertIsInstance(new_rag_tokenizer.generator, BartTokenizerFast)
        self.assertEqual(new_rag_tokenizer.generator.get_vocab(), rag_tokenizer.generator.get_vocab())

    @slow
    def test_pretrained_token_nq_tokenizer(self):
        tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
        input_strings = [
            "who got the first nobel prize in physics",
            "when is the next deadpool movie being released",
            "which mode is used for short wave broadcast service",
            "who is the owner of reading football club",
            "when is the next scandal episode coming out",
            "when is the last time the philadelphia won the superbowl",
            "what is the most current adobe flash player version",
            "how many episodes are there in dragon ball z",
            "what is the first step in the evolution of the eye",
            "where is gall bladder situated in human body",
            "what is the main mineral in lithium batteries",
            "who is the president of usa right now",
            "where do the greasers live in the outsiders",
            "panda is a national animal of which country",
            "what is the name of manchester united stadium",
        ]
        input_dict = tokenizer(input_strings)
        self.assertIsNotNone(input_dict)

    @slow
    def test_pretrained_sequence_nq_tokenizer(self):
        tokenizer = RagTokenizer.from_pretrained("facebook/rag-sequence-nq")
        input_strings = [
            "who got the first nobel prize in physics",
            "when is the next deadpool movie being released",
            "which mode is used for short wave broadcast service",
            "who is the owner of reading football club",
            "when is the next scandal episode coming out",
            "when is the last time the philadelphia won the superbowl",
            "what is the most current adobe flash player version",
            "how many episodes are there in dragon ball z",
            "what is the first step in the evolution of the eye",
            "where is gall bladder situated in human body",
            "what is the main mineral in lithium batteries",
            "who is the president of usa right now",
            "where do the greasers live in the outsiders",
            "panda is a national animal of which country",
            "what is the name of manchester united stadium",
        ]
        input_dict = tokenizer(input_strings)
        self.assertIsNotNone(input_dict)
huggingface/transformers
tests/rag/test_tokenization_rag.py
Python
apache-2.0
7,364
# coding=utf-8 # Copyright 2018 the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import unittest import numpy as np from transformers.file_utils import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from torch import nn from torch.utils.data import IterableDataset from transformers.modeling_outputs import SequenceClassifierOutput from transformers.tokenization_utils_base import BatchEncoding from transformers.trainer_pt_utils import ( DistributedLengthGroupedSampler, DistributedSamplerWithLoop, DistributedTensorGatherer, IterableDatasetShard, LabelSmoother, LengthGroupedSampler, SequentialDistributedSampler, ShardSampler, get_parameter_names, ) class TstLayer(nn.Module): def __init__(self, hidden_size): super().__init__() self.linear1 = nn.Linear(hidden_size, hidden_size) self.ln1 = nn.LayerNorm(hidden_size) self.linear2 = nn.Linear(hidden_size, hidden_size) self.ln2 = nn.LayerNorm(hidden_size) self.bias = nn.Parameter(torch.zeros(hidden_size)) def forward(self, x): h = self.ln1(nn.functional.relu(self.linear1(x))) h = nn.functional.relu(self.linear2(x)) return self.ln2(x + h + self.bias) class RandomIterableDataset(IterableDataset): # For testing, an iterable dataset of random length def __init__(self, p_stop=0.01, max_length=1000): self.p_stop = p_stop self.max_length = max_length self.generator = torch.Generator() def __iter__(self): count = 0 stop = False while not stop and count < self.max_length: yield count count += 1 number = torch.rand(1, generator=self.generator).item() stop = number < self.p_stop @require_torch class TrainerUtilsTest(unittest.TestCase): def test_distributed_tensor_gatherer(self): # Simulate a result with a dataset of size 21, 4 processes and chunks of lengths 2, 3, 1 world_size = 4 num_samples = 21 input_indices = [ [0, 1, 6, 7, 12, 13, 18, 19], [2, 3, 4, 8, 9, 10, 14, 15, 16, 20, 0, 1], [5, 11, 17, 2], ] predictions = np.random.normal(size=(num_samples, 13)) gatherer = DistributedTensorGatherer(world_size=world_size, num_samples=num_samples) for indices in input_indices: gatherer.add_arrays(predictions[indices]) result = gatherer.finalize() self.assertTrue(np.array_equal(result, predictions)) # With nested tensors gatherer = DistributedTensorGatherer(world_size=world_size, num_samples=num_samples) for indices in input_indices: gatherer.add_arrays([predictions[indices], [predictions[indices], predictions[indices]]]) result = gatherer.finalize() self.assertTrue(isinstance(result, list)) self.assertTrue(len(result), 2) self.assertTrue(isinstance(result[1], list)) self.assertTrue(len(result[1]), 2) self.assertTrue(np.array_equal(result[0], predictions)) self.assertTrue(np.array_equal(result[1][0], predictions)) self.assertTrue(np.array_equal(result[1][1], predictions)) def test_distributed_tensor_gatherer_different_shapes(self): # Simulate a result with a dataset of size 21, 4 processes and chunks of lengths 2, 3, 1 world_size = 4 num_samples = 21 input_indices = [ [0, 1, 6, 7, 12, 
13, 18, 19], [2, 3, 4, 8, 9, 10, 14, 15, 16, 20, 0, 1], [5, 11, 17, 2], ] sequence_lengths = [8, 10, 13] predictions = np.random.normal(size=(num_samples, 13)) gatherer = DistributedTensorGatherer(world_size=world_size, num_samples=num_samples) for indices, seq_length in zip(input_indices, sequence_lengths): gatherer.add_arrays(predictions[indices, :seq_length]) result = gatherer.finalize() # Remove the extra samples added at the end for a round multiple of num processes. actual_indices = [input_indices[0], input_indices[1][:-2], input_indices[2][:-1]] for indices, seq_length in zip(actual_indices, sequence_lengths): self.assertTrue(np.array_equal(result[indices, :seq_length], predictions[indices, :seq_length])) # With nested tensors predictions = np.random.normal(size=(num_samples, 13)) gatherer = DistributedTensorGatherer(world_size=world_size, num_samples=num_samples) for indices, seq_length in zip(input_indices, sequence_lengths): gatherer.add_arrays([predictions[indices, :seq_length], predictions[indices]]) result = gatherer.finalize() for indices, seq_length in zip(actual_indices, sequence_lengths): self.assertTrue(np.array_equal(result[0][indices, :seq_length], predictions[indices, :seq_length])) self.assertTrue(np.array_equal(result[1], predictions)) # Check if works if varying seq_length is second gatherer = DistributedTensorGatherer(world_size=world_size, num_samples=num_samples) for indices, seq_length in zip(input_indices, sequence_lengths): gatherer.add_arrays([predictions[indices], predictions[indices, :seq_length]]) result = gatherer.finalize() self.assertTrue(np.array_equal(result[0], predictions)) for indices, seq_length in zip(actual_indices, sequence_lengths): self.assertTrue(np.array_equal(result[1][indices, :seq_length], predictions[indices, :seq_length])) def test_label_smoothing(self): epsilon = 0.1 num_labels = 12 random_logits = torch.randn(4, 5, num_labels) random_labels = torch.randint(0, num_labels, (4, 5)) loss = nn.functional.cross_entropy(random_logits.view(-1, num_labels), random_labels.view(-1)) model_output = SequenceClassifierOutput(logits=random_logits) label_smoothed_loss = LabelSmoother(0.1)(model_output, random_labels) log_probs = -nn.functional.log_softmax(random_logits, dim=-1) expected_loss = (1 - epsilon) * loss + epsilon * log_probs.mean() self.assertTrue(torch.allclose(label_smoothed_loss, expected_loss)) # With a few -100 labels random_labels[0, 1] = -100 random_labels[2, 1] = -100 random_labels[2, 3] = -100 loss = nn.functional.cross_entropy(random_logits.view(-1, num_labels), random_labels.view(-1)) model_output = SequenceClassifierOutput(logits=random_logits) label_smoothed_loss = LabelSmoother(0.1)(model_output, random_labels) log_probs = -nn.functional.log_softmax(random_logits, dim=-1) # Mask the log probs with the -100 labels log_probs[0, 1] = 0.0 log_probs[2, 1] = 0.0 log_probs[2, 3] = 0.0 expected_loss = (1 - epsilon) * loss + epsilon * log_probs.sum() / (num_labels * 17) self.assertTrue(torch.allclose(label_smoothed_loss, expected_loss)) def test_group_by_length(self): # Get some inputs of random lengths lengths = torch.randint(0, 25, (100,)).tolist() # Put one bigger than the others to check it ends up in first position lengths[32] = 50 indices = list(LengthGroupedSampler(4, lengths=lengths)) # The biggest element should be first self.assertEqual(lengths[indices[0]], 50) # The indices should be a permutation of range(100) self.assertEqual(list(sorted(indices)), list(range(100))) def test_group_by_length_with_dict(self): # Get some 
inputs of random lengths data = [] for _ in range(6): input_ids = torch.randint(0, 25, (100,)).tolist() data.append({"input_ids": input_ids}) # Put one bigger than the others to check it ends up in first position data[3]["input_ids"] = torch.randint(0, 25, (105,)).tolist() indices = list(LengthGroupedSampler(4, dataset=data)) # The biggest element should be first self.assertEqual(len(data[indices[0]]["input_ids"]), 105) # The indices should be a permutation of range(6) self.assertEqual(list(sorted(indices)), list(range(6))) def test_group_by_length_with_batch_encoding(self): # Get some inputs of random lengths data = [] for _ in range(6): input_ids = torch.randint(0, 25, (100,)).tolist() data.append(BatchEncoding({"input_ids": input_ids})) # Put one bigger than the others to check it ends up in first position data[3]["input_ids"] = torch.randint(0, 25, (105,)).tolist() indices = list(LengthGroupedSampler(4, dataset=data)) # The biggest element should be first self.assertEqual(len(data[indices[0]]["input_ids"]), 105) # The indices should be a permutation of range(6) self.assertEqual(list(sorted(indices)), list(range(6))) def test_distributed_length_grouped(self): # Get some inputs of random lengths lengths = torch.randint(0, 25, (100,)).tolist() # Put one bigger than the others to check it ends up in first position lengths[32] = 50 indices_process_0 = list(DistributedLengthGroupedSampler(4, num_replicas=2, rank=0, lengths=lengths)) indices_process_1 = list(DistributedLengthGroupedSampler(4, num_replicas=2, rank=1, lengths=lengths)) # The biggest element should be first self.assertEqual(lengths[indices_process_0[0]], 50) # The indices should be a permutation of range(100) self.assertEqual(list(sorted(indices_process_0 + indices_process_1)), list(range(100))) def test_get_parameter_names(self): model = nn.Sequential(TstLayer(128), nn.ModuleList([TstLayer(128), TstLayer(128)])) # fmt: off self.assertEqual( get_parameter_names(model, [nn.LayerNorm]), ['0.linear1.weight', '0.linear1.bias', '0.linear2.weight', '0.linear2.bias', '0.bias', '1.0.linear1.weight', '1.0.linear1.bias', '1.0.linear2.weight', '1.0.linear2.bias', '1.0.bias', '1.1.linear1.weight', '1.1.linear1.bias', '1.1.linear2.weight', '1.1.linear2.bias', '1.1.bias'] ) # fmt: on def test_distributed_sampler_with_loop(self): batch_size = 16 for length in [23, 64, 123]: dataset = list(range(length)) shard1 = DistributedSamplerWithLoop(dataset, batch_size, num_replicas=2, rank=0) shard2 = DistributedSamplerWithLoop(dataset, batch_size, num_replicas=2, rank=1) # Set seeds shard1.set_epoch(0) shard2.set_epoch(0) # Sample samples1 = list(shard1) samples2 = list(shard2) self.assertTrue(len(samples1) % batch_size == 0) self.assertTrue(len(samples2) % batch_size == 0) total = [] for sample1, sample2 in zip(samples1, samples2): total += [sample1, sample2] self.assertEqual(set(total[:length]), set(dataset)) self.assertEqual(set(total[length:]), set(total[: (len(total) - length)])) def test_sequential_distributed_sampler(self): batch_size = 16 for length in [23, 64, 123]: dataset = list(range(length)) shard1 = SequentialDistributedSampler(dataset, num_replicas=2, rank=0) shard2 = SequentialDistributedSampler(dataset, num_replicas=2, rank=1) # Sample samples1 = list(shard1) samples2 = list(shard2) total = samples1 + samples2 self.assertListEqual(total[:length], dataset) self.assertListEqual(total[length:], dataset[: (len(total) - length)]) # With a batch_size passed shard1 = SequentialDistributedSampler(dataset, num_replicas=2, rank=0, 
batch_size=batch_size) shard2 = SequentialDistributedSampler(dataset, num_replicas=2, rank=1, batch_size=batch_size) # Sample samples1 = list(shard1) samples2 = list(shard2) self.assertTrue(len(samples1) % batch_size == 0) self.assertTrue(len(samples2) % batch_size == 0) total = samples1 + samples2 self.assertListEqual(total[:length], dataset) self.assertListEqual(total[length:], dataset[: (len(total) - length)]) def check_iterable_dataset_shard(self, dataset, batch_size, drop_last, num_processes=2, epoch=0): # Set the seed for the base dataset to get the proper reference. dataset.generator.manual_seed(epoch) reference = list(dataset) shards = [ IterableDatasetShard( dataset, batch_size=batch_size, drop_last=drop_last, num_processes=num_processes, process_index=i ) for i in range(num_processes) ] for shard in shards: shard.set_epoch(epoch) shard_lists = [list(shard) for shard in shards] for shard in shard_lists: # All shards have a number of samples that is a round multiple of batch size self.assertTrue(len(shard) % batch_size == 0) # All shards have the same number of samples self.assertEqual(len(shard), len(shard_lists[0])) for shard in shards: # All shards know the total number of samples self.assertEqual(shard.num_examples, len(reference)) observed = [] for idx in range(0, len(shard_lists[0]), batch_size): for shard in shard_lists: observed += shard[idx : idx + batch_size] # If drop_last is False we loop through samples at the beginning to have a size that is a round multiple of # batch_size if not drop_last: while len(reference) < len(observed): reference += reference self.assertListEqual(observed, reference[: len(observed)]) # Check equivalence between IterableDataset and ShardSampler dataset.generator.manual_seed(epoch) reference = list(dataset) sampler_shards = [ ShardSampler( reference, batch_size=batch_size, drop_last=drop_last, num_processes=num_processes, process_index=i ) for i in range(num_processes) ] for shard, sampler_shard in zip(shard_lists, sampler_shards): self.assertListEqual(shard, list(sampler_shard)) def test_iterable_dataset_shard(self): dataset = RandomIterableDataset() self.check_iterable_dataset_shard(dataset, 4, drop_last=True, num_processes=2, epoch=0) self.check_iterable_dataset_shard(dataset, 4, drop_last=False, num_processes=2, epoch=0) self.check_iterable_dataset_shard(dataset, 4, drop_last=True, num_processes=3, epoch=42) self.check_iterable_dataset_shard(dataset, 4, drop_last=False, num_processes=3, epoch=42) def test_iterable_dataset_shard_with_length(self): sampler_shards = [ IterableDatasetShard(list(range(100)), batch_size=4, drop_last=True, num_processes=2, process_index=i) for i in range(2) ] # Build expected shards: each process will have batches of size 4 until there is not enough elements to # form two full batches (so we stop at 96 = (100 // (4 * 2)) * 4) expected_shards = [[], []] current_shard = 0 for i in range(0, 96, 4): expected_shards[current_shard].extend(list(range(i, i + 4))) current_shard = 1 - current_shard self.assertListEqual([list(shard) for shard in sampler_shards], expected_shards) self.assertListEqual([len(shard) for shard in sampler_shards], [len(shard) for shard in expected_shards]) sampler_shards = [ IterableDatasetShard(list(range(100)), batch_size=4, drop_last=False, num_processes=2, process_index=i) for i in range(2) ] # When drop_last=False, we get two last full batches by looping back to the beginning. 
expected_shards[0].extend(list(range(96, 100))) expected_shards[1].extend(list(range(0, 4))) self.assertListEqual([list(shard) for shard in sampler_shards], expected_shards) self.assertListEqual([len(shard) for shard in sampler_shards], [len(shard) for shard in expected_shards]) def check_shard_sampler(self, dataset, batch_size, drop_last, num_processes=2): shards = [ ShardSampler( dataset, batch_size=batch_size, drop_last=drop_last, num_processes=num_processes, process_index=i ) for i in range(num_processes) ] shard_lists = [list(shard) for shard in shards] for shard in shard_lists: # All shards have a number of samples that is a round multiple of batch size self.assertTrue(len(shard) % batch_size == 0) # All shards have the same number of samples self.assertEqual(len(shard), len(shard_lists[0])) observed = [] for idx in range(0, len(shard_lists[0]), batch_size): for shard in shard_lists: observed += shard[idx : idx + batch_size] # If drop_last is False we loop through samples at the beginning to have a size that is a round multiple of # batch_size reference = copy.copy(dataset) if not drop_last: while len(reference) < len(observed): reference += reference self.assertListEqual(observed, reference[: len(observed)]) def test_shard_sampler(self): for n_elements in [64, 123]: dataset = list(range(n_elements)) self.check_shard_sampler(dataset, 4, drop_last=True, num_processes=2) self.check_shard_sampler(dataset, 4, drop_last=False, num_processes=2) self.check_shard_sampler(dataset, 4, drop_last=True, num_processes=3) self.check_shard_sampler(dataset, 4, drop_last=False, num_processes=3)
huggingface/transformers
tests/trainer/test_trainer_utils.py
Python
apache-2.0
18,824
from models.RepPoints.builder import RepPoints as Detector from models.dcn.builder import DCNResNetFPN as Backbone from models.RepPoints.builder import RepPointsNeck as Neck from models.RepPoints.builder import RepPointsHead as Head from mxnext.complicate import normalizer_factory def get_config(is_train): class General: log_frequency = 10 name = __name__.rsplit("/")[-1].rsplit(".")[-1] batch_image = 2 if is_train else 1 fp16 = False class KvstoreParam: kvstore = "nccl" batch_image = General.batch_image gpus = [0, 1, 2, 3, 4, 5, 6, 7] fp16 = General.fp16 class NormalizeParam: # normalizer = normalizer_factory(type="syncbn", ndev=8, wd_mult=1.0) normalizer = normalizer_factory(type="gn") class BackboneParam: fp16 = General.fp16 # normalizer = NormalizeParam.normalizer normalizer = normalizer_factory(type="fixbn") depth = 101 num_c3_block = 0 num_c4_block = 3 class NeckParam: fp16 = General.fp16 normalizer = NormalizeParam.normalizer class HeadParam: num_class = 1 + 80 fp16 = General.fp16 normalizer = NormalizeParam.normalizer batch_image = General.batch_image class point_generate: num_points = 9 scale = 4 stride = (8, 16, 32, 64, 128) transform = "moment" class head: conv_channel = 256 point_conv_channel = 256 mean = None std = None class proposal: pre_nms_top_n = 1000 post_nms_top_n = None nms_thr = None min_bbox_side = None class point_target: target_scale = 4 num_pos = 1 class bbox_target: pos_iou_thr = 0.5 neg_iou_thr = 0.5 min_pos_iou = 0.0 class focal_loss: alpha = 0.25 gamma = 2.0 class BboxParam: fp16 = General.fp16 normalizer = NormalizeParam.normalizer num_class = None image_roi = None batch_image = None class regress_target: class_agnostic = None mean = None std = None class RoiParam: fp16 = General.fp16 normalizer = NormalizeParam.normalizer out_size = None stride = None class DatasetParam: if is_train: image_set = ("coco_train2017", ) else: image_set = ("coco_val2017", ) backbone = Backbone(BackboneParam) neck = Neck(NeckParam) head = Head(HeadParam) detector = Detector() if is_train: train_sym = detector.get_train_symbol(backbone, neck, head) test_sym = None else: train_sym = None test_sym = detector.get_test_symbol(backbone, neck, head) class ModelParam: train_symbol = train_sym test_symbol = test_sym from_scratch = False random = True memonger = False memonger_until = "stage3_unit21_plus" class pretrain: prefix = "pretrain_model/resnet%s_v1b" % BackboneParam.depth epoch = 0 fixed_param = ["conv0", "stage1", "gamma", "beta"] excluded_param = ["gn"] class OptimizeParam: class optimizer: type = "sgd" lr = 0.005 / 8 * len(KvstoreParam.gpus) * KvstoreParam.batch_image momentum = 0.9 wd = 0.0001 clip_gradient = 35 class schedule: begin_epoch = 0 end_epoch = 12 lr_iter = [120000 * 16 // (len(KvstoreParam.gpus) * KvstoreParam.batch_image), 160000 * 16 // (len(KvstoreParam.gpus) * KvstoreParam.batch_image)] class warmup: type = "gradual" lr = 0.005 / 8 * len(KvstoreParam.gpus) * KvstoreParam.batch_image / 3 iter = 2000 class TestScaleParam: short_ranges = [600, 800, 1000, 1200] long_ranges = [2000, 2000, 2000, 2000] @staticmethod def add_resize_info(roidb): ms_roidb = [] for r_ in roidb: for short, long in zip(TestScaleParam.short_ranges, TestScaleParam.long_ranges): r = r_.copy() r["resize_long"] = long r["resize_short"] = short ms_roidb.append(r) return ms_roidb class TestParam: min_det_score = 0.05 # filter appended boxes max_det_per_image = 100 process_roidb = TestScaleParam.add_resize_info def process_output(x, y): return x class model: prefix = 
"experiments/{}/checkpoint".format(General.name) epoch = OptimizeParam.schedule.end_epoch class nms: type = "nms" thr = 0.5 class coco: annotation = "data/coco/annotations/instances_minival2014.json" # data processing class NormParam: mean = tuple(i * 255 for i in (0.485, 0.456, 0.406)) # RGB order std = tuple(i * 255 for i in (0.229, 0.224, 0.225)) class RandResizeParam: short = None # generate on the fly long = None short_ranges = [600, 800, 1000, 1200] long_ranges = [2000, 2000, 2000, 2000] class RandCropParam: mode = "center" # random or center short = 800 long = 1333 class ResizeParam: short = 800 long = 1333 class PadParam: short = 800 long = 1333 max_num_gt = 100 class RandPadParam: short = 1200 long = 2000 max_num_gt = 100 class RenameParam: mapping = dict(image="data") from core.detection_input import ReadRoiRecord, \ RandResize2DImageBbox, RandCrop2DImageBbox, Resize2DImageByRoidb, \ ConvertImageFromHwcToChw, Flip2DImageBbox, Pad2DImageBbox, \ RenameRecord from models.retinanet.input import Norm2DImage if is_train: transform = [ ReadRoiRecord(None), Norm2DImage(NormParam), # Resize2DImageBbox(ResizeParam), RandResize2DImageBbox(RandResizeParam), RandCrop2DImageBbox(RandCropParam), Flip2DImageBbox(), Pad2DImageBbox(PadParam), ConvertImageFromHwcToChw(), RenameRecord(RenameParam.mapping) ] data_name = ["data"] label_name = ["gt_bbox"] else: transform = [ ReadRoiRecord(None), Norm2DImage(NormParam), # Resize2DImageBbox(ResizeParam), Resize2DImageByRoidb(), Pad2DImageBbox(RandPadParam), ConvertImageFromHwcToChw(), RenameRecord(RenameParam.mapping) ] data_name = ["data", "im_info", "im_id", "rec_id"] label_name = [] from models.retinanet import metric as cls_metric import core.detection_metric as box_metric cls_acc_metric = cls_metric.FGAccMetric( "FGAcc", ["cls_loss_output", "point_refine_labels_output"], [] ) box_init_l1_metric = box_metric.L1( "InitL1", ["pts_init_loss_output", "points_init_labels_output"], [] ) box_refine_l1_metric = box_metric.L1( "RefineL1", ["pts_refine_loss_output", "point_refine_labels_output"], [] ) metric_list = [cls_acc_metric, box_init_l1_metric, box_refine_l1_metric] return General, KvstoreParam, HeadParam, RoiParam, BboxParam, DatasetParam, \ ModelParam, OptimizeParam, TestParam, \ transform, data_name, label_name, metric_list
TuSimple/simpledet
config/RepPoints/reppoints_moment_dcn_r101v1b_fpn_multiscale_2x.py
Python
apache-2.0
7,766
from django.shortcuts import render
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.shortcuts import redirect

from main.models import Link
from main.models import Tag


# Create your views here.
def index(request):
    context = RequestContext(request)
    links = Link.objects.all()
    return render_to_response('main/index.html', {'links': links}, context)


def tags(request):
    context = RequestContext(request)
    tags = Tag.objects.all()
    return render_to_response('main/tags.html', {'tags': tags}, context)


def tag(request, tag_name):
    context = RequestContext(request)
    the_tag = Tag.objects.get(name=tag_name)
    links = the_tag.link_set.all()
    return render_to_response('main/index.html', {'links': links, 'tag_name': '#' + tag_name}, context)


def add_link(request):
    context = RequestContext(request)
    if request.method == 'POST':
        url = request.POST.get("url", "")
        tags = request.POST.get("tags", "")
        title = request.POST.get("title", "")
        tags = tags.split(',')
        l = Link.objects.get_or_create(title=title, url=url)[0]
        for x in tags:
            l.tags.add(Tag.objects.get_or_create(name=x)[0])
    return redirect(index)
cntnboys/410Lab6
bookmarks/main/views.py
Python
apache-2.0
1,325
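The main/views.py module above only becomes reachable once mapped in a URLconf; the routing file is not part of this record, so the following main/urls.py is a hypothetical sketch using the regex-based url() helper that matches the Django 1.x era implied by render_to_response/RequestContext. All route patterns and names below are assumptions, not taken from the repository.

# Hypothetical URLconf for the views above (patterns and names are illustrative only).
from django.conf.urls import url

from main import views

urlpatterns = [
    url(r'^$', views.index, name='index'),                       # list all links
    url(r'^tags/$', views.tags, name='tags'),                     # list all tags
    url(r'^tag/(?P<tag_name>[\w-]+)/$', views.tag, name='tag'),   # links for one tag
    url(r'^add/$', views.add_link, name='add_link'),              # create a link from POST data
]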
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import time import uuid import concurrent.futures from oslo_config import cfg import six.moves from testtools import matchers import oslo_messaging from oslo_messaging.tests.functional import utils class CallTestCase(utils.SkipIfNoTransportURL): def setUp(self): super(CallTestCase, self).setUp(conf=cfg.ConfigOpts()) if self.url.startswith("kafka://"): self.skipTest("kafka does not support RPC API") self.conf.prog = "test_prog" self.conf.project = "test_project" self.config(heartbeat_timeout_threshold=0, group='oslo_messaging_rabbit') def test_specific_server(self): group = self.useFixture(utils.RpcServerGroupFixture( self.conf, self.url) ) client = group.client(1) client.append(text='open') self.assertEqual('openstack', client.append(text='stack')) client.add(increment=2) self.assertEqual(12, client.add(increment=10)) self.assertEqual(9, client.subtract(increment=3)) self.assertEqual('openstack', group.servers[1].endpoint.sval) self.assertEqual(9, group.servers[1].endpoint.ival) for i in [0, 2]: self.assertEqual('', group.servers[i].endpoint.sval) self.assertEqual(0, group.servers[i].endpoint.ival) def test_server_in_group(self): group = self.useFixture( utils.RpcServerGroupFixture(self.conf, self.url) ) client = group.client() data = [c for c in 'abcdefghijklmn'] for i in data: client.append(text=i) for s in group.servers: self.assertThat(len(s.endpoint.sval), matchers.GreaterThan(0)) actual = [[c for c in s.endpoint.sval] for s in group.servers] self.assertThat(actual, utils.IsValidDistributionOf(data)) def test_different_exchanges(self): # If the different exchanges are not honoured, then the # teardown may hang unless we broadcast all control messages # to each server group1 = self.useFixture( utils.RpcServerGroupFixture(self.conf, self.url, use_fanout_ctrl=True)) group2 = self.useFixture( utils.RpcServerGroupFixture(self.conf, self.url, exchange="a", use_fanout_ctrl=True)) group3 = self.useFixture( utils.RpcServerGroupFixture(self.conf, self.url, exchange="b", use_fanout_ctrl=True)) client1 = group1.client(1) data1 = [c for c in 'abcdefghijklmn'] for i in data1: client1.append(text=i) client2 = group2.client() data2 = [c for c in 'opqrstuvwxyz'] for i in data2: client2.append(text=i) actual1 = [[c for c in s.endpoint.sval] for s in group1.servers] self.assertThat(actual1, utils.IsValidDistributionOf(data1)) actual1 = [c for c in group1.servers[1].endpoint.sval] self.assertThat([actual1], utils.IsValidDistributionOf(data1)) for s in group1.servers: expected = len(data1) if group1.servers.index(s) == 1 else 0 self.assertEqual(expected, len(s.endpoint.sval)) self.assertEqual(0, s.endpoint.ival) actual2 = [[c for c in s.endpoint.sval] for s in group2.servers] for s in group2.servers: self.assertThat(len(s.endpoint.sval), matchers.GreaterThan(0)) self.assertEqual(0, s.endpoint.ival) self.assertThat(actual2, utils.IsValidDistributionOf(data2)) for s in group3.servers: self.assertEqual(0, len(s.endpoint.sval)) self.assertEqual(0, s.endpoint.ival) def 
test_timeout(self): transport = self.useFixture( utils.TransportFixture(self.conf, self.url) ) target = oslo_messaging.Target(topic="no_such_topic") c = utils.ClientStub(transport.transport, target, timeout=1) self.assertThat(c.ping, matchers.raises(oslo_messaging.MessagingTimeout)) def test_exception(self): group = self.useFixture( utils.RpcServerGroupFixture(self.conf, self.url) ) client = group.client(1) client.add(increment=2) self.assertRaises(ValueError, client.subtract, increment=3) def test_timeout_with_concurrently_queues(self): transport = self.useFixture( utils.TransportFixture(self.conf, self.url) ) target = oslo_messaging.Target(topic="topic_" + str(uuid.uuid4()), server="server_" + str(uuid.uuid4())) server = self.useFixture( utils.RpcServerFixture(self.conf, self.url, target, executor="threading")) client = utils.ClientStub(transport.transport, target, cast=False, timeout=5) def short_periodical_tasks(): for i in range(10): client.add(increment=1) time.sleep(1) with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor: future = executor.submit(client.long_running_task, seconds=10) executor.submit(short_periodical_tasks) self.assertRaises(oslo_messaging.MessagingTimeout, future.result) self.assertEqual(10, server.endpoint.ival) class CastTestCase(utils.SkipIfNoTransportURL): # Note: casts return immediately, so these tests utilise a special # internal sync() cast to ensure prior casts are complete before # making the necessary assertions. def setUp(self): super(CastTestCase, self).setUp() if self.url.startswith("kafka://"): self.skipTest("kafka does not support RPC API") def test_specific_server(self): group = self.useFixture( utils.RpcServerGroupFixture(self.conf, self.url) ) client = group.client(1, cast=True) client.append(text='open') client.append(text='stack') client.add(increment=2) client.add(increment=10) time.sleep(0.3) client.sync() group.sync(1) self.assertIn(group.servers[1].endpoint.sval, ["openstack", "stackopen"]) self.assertEqual(12, group.servers[1].endpoint.ival) for i in [0, 2]: self.assertEqual('', group.servers[i].endpoint.sval) self.assertEqual(0, group.servers[i].endpoint.ival) def test_server_in_group(self): if self.url.startswith("amqp:"): self.skipTest("QPID-6307") group = self.useFixture( utils.RpcServerGroupFixture(self.conf, self.url) ) client = group.client(cast=True) for i in range(20): client.add(increment=1) for i in range(len(group.servers)): # expect each server to get a sync client.sync() group.sync(server="all") total = 0 for s in group.servers: ival = s.endpoint.ival self.assertThat(ival, matchers.GreaterThan(0)) self.assertThat(ival, matchers.LessThan(20)) total += ival self.assertEqual(20, total) def test_fanout(self): group = self.useFixture( utils.RpcServerGroupFixture(self.conf, self.url) ) client = group.client('all', cast=True) client.append(text='open') client.append(text='stack') client.add(increment=2) client.add(increment=10) time.sleep(0.3) client.sync() group.sync(server='all') for s in group.servers: self.assertIn(s.endpoint.sval, ["openstack", "stackopen"]) self.assertEqual(12, s.endpoint.ival) class NotifyTestCase(utils.SkipIfNoTransportURL): # NOTE(sileht): Each test must not use the same topics # to be run in parallel def test_simple(self): listener = self.useFixture( utils.NotificationFixture(self.conf, self.url, ['test_simple'])) notifier = listener.notifier('abc') notifier.info({}, 'test', 'Hello World!') event = listener.events.get(timeout=1) self.assertEqual('info', event[0]) self.assertEqual('test', 
event[1]) self.assertEqual('Hello World!', event[2]) self.assertEqual('abc', event[3]) def test_multiple_topics(self): listener = self.useFixture( utils.NotificationFixture(self.conf, self.url, ['a', 'b'])) a = listener.notifier('pub-a', topic='a') b = listener.notifier('pub-b', topic='b') sent = { 'pub-a': [a, 'test-a', 'payload-a'], 'pub-b': [b, 'test-b', 'payload-b'] } for e in sent.values(): e[0].info({}, e[1], e[2]) received = {} while len(received) < len(sent): e = listener.events.get(timeout=1) received[e[3]] = e for key in received: actual = received[key] expected = sent[key] self.assertEqual('info', actual[0]) self.assertEqual(expected[1], actual[1]) self.assertEqual(expected[2], actual[2]) def test_multiple_servers(self): if self.url.startswith("amqp:"): self.skipTest("QPID-6307") if self.url.startswith("zmq"): self.skipTest("ZeroMQ-PUB-SUB") if self.url.startswith("kafka"): self.skipTest("Kafka: Need to be fixed") listener_a = self.useFixture( utils.NotificationFixture(self.conf, self.url, ['test-topic'])) listener_b = self.useFixture( utils.NotificationFixture(self.conf, self.url, ['test-topic'])) n = listener_a.notifier('pub') events_out = [('test-%s' % c, 'payload-%s' % c) for c in 'abcdefgh'] for event_type, payload in events_out: n.info({}, event_type, payload) events_in = [[(e[1], e[2]) for e in listener_a.get_events()], [(e[1], e[2]) for e in listener_b.get_events()]] self.assertThat(events_in, utils.IsValidDistributionOf(events_out)) for stream in events_in: self.assertThat(len(stream), matchers.GreaterThan(0)) def test_independent_topics(self): listener_a = self.useFixture( utils.NotificationFixture(self.conf, self.url, ['1'])) listener_b = self.useFixture( utils.NotificationFixture(self.conf, self.url, ['2'])) a = listener_a.notifier('pub-1', topic='1') b = listener_b.notifier('pub-2', topic='2') a_out = [('test-1-%s' % c, 'payload-1-%s' % c) for c in 'abcdefgh'] for event_type, payload in a_out: a.info({}, event_type, payload) b_out = [('test-2-%s' % c, 'payload-2-%s' % c) for c in 'ijklmnop'] for event_type, payload in b_out: b.info({}, event_type, payload) def check_received(listener, publisher, messages): actuals = sorted([listener.events.get(timeout=0.5) for __ in range(len(a_out))]) expected = sorted([['info', m[0], m[1], publisher] for m in messages]) self.assertEqual(expected, actuals) check_received(listener_a, "pub-1", a_out) check_received(listener_b, "pub-2", b_out) def test_all_categories(self): listener = self.useFixture(utils.NotificationFixture( self.conf, self.url, ['test_all_categories'])) n = listener.notifier('abc') cats = ['debug', 'audit', 'info', 'warn', 'error', 'critical'] events = [(getattr(n, c), c, 'type-' + c, c + '-data') for c in cats] for e in events: e[0]({}, e[2], e[3]) # order between events with different categories is not guaranteed received = {} for expected in events: e = listener.events.get(timeout=1) received[e[0]] = e for expected in events: actual = received[expected[1]] self.assertEqual(expected[1], actual[0]) self.assertEqual(expected[2], actual[1]) self.assertEqual(expected[3], actual[2]) def test_simple_batch(self): if self.url.startswith("amqp:"): backend = os.environ.get("AMQP1_BACKEND") if backend == "qdrouterd": # end-to-end acknowledgement with router intermediary # sender pends until batch_size or timeout reached self.skipTest("qdrouterd backend") listener = self.useFixture( utils.BatchNotificationFixture(self.conf, self.url, ['test_simple_batch'], batch_size=100, batch_timeout=2)) notifier = listener.notifier('abc') 
for i in six.moves.range(0, 205): notifier.info({}, 'test%s' % i, 'Hello World!') events = listener.get_events(timeout=3) self.assertEqual(3, len(events)) self.assertEqual(100, len(events[0][1])) self.assertEqual(100, len(events[1][1])) self.assertEqual(5, len(events[2][1]))
ozamiatin/oslo.messaging
oslo_messaging/tests/functional/test_functional.py
Python
apache-2.0
13,879
# # This source file is part of the EdgeDB open source project. # # Copyright 2008-present MagicStack Inc. and the EdgeDB authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from __future__ import annotations from typing import * from edb.edgeql import ast as qlast from edb.edgeql import qltypes from edb import errors from . import abc as s_abc from . import constraints from . import delta as sd from . import indexes from . import inheriting from . import properties from . import name as sn from . import objects as so from . import pointers from . import referencing from . import sources from . import utils if TYPE_CHECKING: from . import objtypes as s_objtypes from . import types as s_types from . import schema as s_schema LinkTargetDeleteAction = qltypes.LinkTargetDeleteAction def merge_actions( target: so.InheritingObject, sources: List[so.Object], field_name: str, *, ignore_local: bool = False, schema: s_schema.Schema, ) -> Any: if not ignore_local: ours = target.get_explicit_local_field_value(schema, field_name, None) else: ours = None if ours is None: current = None current_from = None for source in sources: theirs = source.get_explicit_field_value(schema, field_name, None) if theirs is not None: if current is None: current = theirs current_from = source elif current != theirs: target_source = target.get_source(schema) current_from_source = current_from.get_source(schema) source_source = source.get_source(schema) tgt_repr = ( f'{target_source.get_displayname(schema)}.' f'{target.get_displayname(schema)}' ) cf_repr = ( f'{current_from_source.get_displayname(schema)}.' f'{current_from.get_displayname(schema)}' ) other_repr = ( f'{source_source.get_displayname(schema)}.' f'{source.get_displayname(schema)}' ) raise errors.SchemaError( f'cannot implicitly resolve the ' f'`on target delete` action for ' f'{tgt_repr!r}: it is defined as {current} in ' f'{cf_repr!r} and as {theirs} in {other_repr!r}; ' f'to resolve, declare `on target delete` ' f'explicitly on {tgt_repr!r}' ) return current else: return ours class Link( sources.Source, pointers.Pointer, s_abc.Link, qlkind=qltypes.SchemaObjectClass.LINK, data_safe=False, ): on_target_delete = so.SchemaField( LinkTargetDeleteAction, default=LinkTargetDeleteAction.Restrict, coerce=True, compcoef=0.9, merge_fn=merge_actions) def get_target(self, schema: s_schema.Schema) -> s_objtypes.ObjectType: return self.get_field_value( # type: ignore[no-any-return] schema, 'target') def is_link_property(self, schema: s_schema.Schema) -> bool: return False def is_property(self, schema: s_schema.Schema) -> bool: return False def scalar(self) -> bool: return False def has_user_defined_properties(self, schema: s_schema.Schema) -> bool: return bool([p for p in self.get_pointers(schema).objects(schema) if not p.is_special_pointer(schema)]) def get_source_type( self, schema: s_schema.Schema ) -> s_types.Type: from . 
import types as s_types source = self.get_source(schema) assert isinstance(source, s_types.Type) return source def compare( self, other: so.Object, *, our_schema: s_schema.Schema, their_schema: s_schema.Schema, context: so.ComparisonContext, ) -> float: if not isinstance(other, Link): if isinstance(other, pointers.Pointer): return 0.0 else: raise NotImplementedError() return super().compare( other, our_schema=our_schema, their_schema=their_schema, context=context) def set_target( self, schema: s_schema.Schema, target: s_types.Type, ) -> s_schema.Schema: schema = super().set_target(schema, target) tgt_prop = self.getptr(schema, sn.UnqualName('target')) schema = tgt_prop.set_target(schema, target) return schema @classmethod def get_root_classes(cls) -> Tuple[sn.QualName, ...]: return ( sn.QualName(module='std', name='link'), sn.QualName(module='schema', name='__type__'), ) @classmethod def get_default_base_name(self) -> sn.QualName: return sn.QualName('std', 'link') class LinkSourceCommandContext(sources.SourceCommandContext): pass class LinkSourceCommand(inheriting.InheritingObjectCommand[sources.Source_T]): pass class LinkCommandContext(pointers.PointerCommandContext[Link], constraints.ConsistencySubjectCommandContext, properties.PropertySourceContext, indexes.IndexSourceCommandContext): pass class LinkCommand( properties.PropertySourceCommand[Link], pointers.PointerCommand[Link], context_class=LinkCommandContext, referrer_context_class=LinkSourceCommandContext, ): def _append_subcmd_ast( self, schema: s_schema.Schema, node: qlast.DDLOperation, subcmd: sd.Command, context: sd.CommandContext, ) -> None: if ( isinstance(subcmd, pointers.PointerCommand) and subcmd.classname != self.classname ): pname = sn.shortname_from_fullname(subcmd.classname) if pname.name in {'source', 'target'}: return super()._append_subcmd_ast(schema, node, subcmd, context) def validate_object( self, schema: s_schema.Schema, context: sd.CommandContext, ) -> None: """Check that link definition is sound.""" super().validate_object(schema, context) scls = self.scls assert isinstance(scls, Link) if not scls.get_owned(schema): return target = scls.get_target(schema) assert target is not None if not target.is_object_type(): srcctx = self.get_attribute_source_context('target') raise errors.InvalidLinkTargetError( f'invalid link target type, expected object type, got ' f'{target.get_verbosename(schema)}', context=srcctx, ) if target.is_free_object_type(schema): srcctx = self.get_attribute_source_context('target') raise errors.InvalidLinkTargetError( f'{target.get_verbosename(schema)} is not a valid link target', context=srcctx, ) if ( not scls.is_pure_computable(schema) and not scls.get_from_alias(schema) and target.is_view(schema) ): srcctx = self.get_attribute_source_context('target') raise errors.InvalidLinkTargetError( f'invalid link type: {target.get_displayname(schema)!r}' f' is an expression alias, not a proper object type', context=srcctx, ) def _get_ast( self, schema: s_schema.Schema, context: sd.CommandContext, *, parent_node: Optional[qlast.DDLOperation] = None, ) -> Optional[qlast.DDLOperation]: node = super()._get_ast(schema, context, parent_node=parent_node) # __type__ link is special, and while it exists on every object # it does not have a defined default in the schema (and therefore # it isn't marked as required.) We intervene here to mark all # __type__ links required when rendering for SDL/TEXT. 
if context.declarative and node is not None: assert isinstance(node, (qlast.CreateConcreteLink, qlast.CreateLink)) if node.name.name == '__type__': assert isinstance(node, qlast.CreateConcretePointer) node.is_required = True return node def _reinherit_classref_dict( self, schema: s_schema.Schema, context: sd.CommandContext, refdict: so.RefDict, ) -> Tuple[s_schema.Schema, Dict[sn.Name, Type[sd.ObjectCommand[so.Object]]]]: if self.scls.get_computable(schema) and refdict.attr != 'pointers': # If the link is a computable, the inheritance would only # happen in the case of aliasing, and in that case we only # need to inherit the link properties and nothing else. return schema, {} return super()._reinherit_classref_dict(schema, context, refdict) class CreateLink( pointers.CreatePointer[Link], LinkCommand, ): astnode = [qlast.CreateConcreteLink, qlast.CreateLink] referenced_astnode = qlast.CreateConcreteLink @classmethod def _cmd_tree_from_ast( cls, schema: s_schema.Schema, astnode: qlast.DDLOperation, context: sd.CommandContext, ) -> sd.Command: cmd = super()._cmd_tree_from_ast(schema, astnode, context) if isinstance(astnode, qlast.CreateConcreteLink): assert isinstance(cmd, pointers.PointerCommand) cmd._process_create_or_alter_ast(schema, astnode, context) else: # this is an abstract property then if cmd.get_attribute_value('default') is not None: raise errors.SchemaDefinitionError( f"'default' is not a valid field for an abstract link", context=astnode.context) assert isinstance(cmd, sd.Command) return cmd def get_ast_attr_for_field( self, field: str, astnode: Type[qlast.DDLOperation], ) -> Optional[str]: if ( field == 'required' and issubclass(astnode, qlast.CreateConcreteLink) ): return 'is_required' elif ( field == 'cardinality' and issubclass(astnode, qlast.CreateConcreteLink) ): return 'cardinality' else: return super().get_ast_attr_for_field(field, astnode) def _apply_field_ast( self, schema: s_schema.Schema, context: sd.CommandContext, node: qlast.DDLOperation, op: sd.AlterObjectProperty, ) -> None: objtype = self.get_referrer_context(context) if op.property == 'target' and objtype: # Due to how SDL is processed the underlying AST may be an # AlterConcreteLink, which requires different handling. if isinstance(node, qlast.CreateConcreteLink): if not node.target: expr = self.get_attribute_value('expr') if expr is not None: node.target = expr.qlast else: t = op.new_value assert isinstance(t, (so.Object, so.ObjectShell)) node.target = utils.typeref_to_ast(schema, t) else: old_type = pointers.merge_target( self.scls, list(self.scls.get_bases(schema).objects(schema)), 'target', ignore_local=True, schema=schema, ) assert isinstance(op.new_value, (so.Object, so.ObjectShell)) new_type = ( op.new_value.resolve(schema) if isinstance(op.new_value, so.ObjectShell) else op.new_value) new_type_ast = utils.typeref_to_ast(schema, op.new_value) cast_expr = None # If the type isn't assignment castable, generate a # USING with a nonsense cast. It shouldn't matter, # since there should be no data to cast, but the DDL side # of things doesn't know that since the command is split up. 
if old_type and not old_type.assignment_castable_to( new_type, schema): cast_expr = qlast.TypeCast( type=new_type_ast, expr=qlast.Set(elements=[]), ) node.commands.append( qlast.SetPointerType( value=new_type_ast, cast_expr=cast_expr, ) ) elif op.property == 'on_target_delete': node.commands.append(qlast.OnTargetDelete(cascade=op.new_value)) else: super()._apply_field_ast(schema, context, node, op) def inherit_classref_dict( self, schema: s_schema.Schema, context: sd.CommandContext, refdict: so.RefDict, ) -> sd.CommandGroup: if self.scls.get_computable(schema) and refdict.attr != 'pointers': # If the link is a computable, the inheritance would only # happen in the case of aliasing, and in that case we only # need to inherit the link properties and nothing else. return sd.CommandGroup() cmd = super().inherit_classref_dict(schema, context, refdict) if refdict.attr != 'pointers': return cmd parent_ctx = self.get_referrer_context(context) if parent_ctx is None: return cmd base_prop_name = sn.QualName('std', 'source') s_name = sn.get_specialized_name( sn.QualName('__', 'source'), str(self.classname)) src_prop_name = sn.QualName( name=s_name, module=self.classname.module) src_prop = properties.CreateProperty( classname=src_prop_name, is_strong_ref=True, ) src_prop.set_attribute_value('name', src_prop_name) src_prop.set_attribute_value( 'bases', so.ObjectList.create(schema, [schema.get(base_prop_name)]), ) src_prop.set_attribute_value( 'source', self.scls, ) src_prop.set_attribute_value( 'target', parent_ctx.op.scls, ) src_prop.set_attribute_value('required', True) src_prop.set_attribute_value('readonly', True) src_prop.set_attribute_value('owned', True) src_prop.set_attribute_value('from_alias', self.scls.get_from_alias(schema)) src_prop.set_attribute_value('cardinality', qltypes.SchemaCardinality.One) cmd.prepend(src_prop) base_prop_name = sn.QualName('std', 'target') s_name = sn.get_specialized_name( sn.QualName('__', 'target'), str(self.classname)) tgt_prop_name = sn.QualName( name=s_name, module=self.classname.module) tgt_prop = properties.CreateProperty( classname=tgt_prop_name, is_strong_ref=True, ) tgt_prop.set_attribute_value('name', tgt_prop_name) tgt_prop.set_attribute_value( 'bases', so.ObjectList.create(schema, [schema.get(base_prop_name)]), ) tgt_prop.set_attribute_value( 'source', self.scls, ) tgt_prop.set_attribute_value( 'target', self.get_attribute_value('target'), ) tgt_prop.set_attribute_value('required', False) tgt_prop.set_attribute_value('readonly', True) tgt_prop.set_attribute_value('owned', True) tgt_prop.set_attribute_value('from_alias', self.scls.get_from_alias(schema)) tgt_prop.set_attribute_value('cardinality', qltypes.SchemaCardinality.One) cmd.prepend(tgt_prop) return cmd class RenameLink( LinkCommand, referencing.RenameReferencedInheritingObject[Link], ): pass class RebaseLink( LinkCommand, referencing.RebaseReferencedInheritingObject[Link], ): pass class SetLinkType( pointers.SetPointerType[Link], referrer_context_class=LinkSourceCommandContext, field='target', ): def _alter_begin( self, schema: s_schema.Schema, context: sd.CommandContext, ) -> s_schema.Schema: schema = super()._alter_begin(schema, context) scls = self.scls new_target = scls.get_target(schema) if not context.canonical: # We need to update the target link prop as well tgt_prop = scls.getptr(schema, sn.UnqualName('target')) tgt_prop_alter = tgt_prop.init_delta_command( schema, sd.AlterObject) tgt_prop_alter.set_attribute_value('target', new_target) self.add(tgt_prop_alter) return schema class 
AlterLinkUpperCardinality( pointers.AlterPointerUpperCardinality[Link], referrer_context_class=LinkSourceCommandContext, field='cardinality', ): pass class AlterLinkLowerCardinality( pointers.AlterPointerLowerCardinality[Link], referrer_context_class=LinkSourceCommandContext, field='required', ): pass class AlterLinkOwned( referencing.AlterOwned[Link], pointers.PointerCommandOrFragment[Link], referrer_context_class=LinkSourceCommandContext, field='owned', ): pass class SetTargetDeletePolicy(sd.Command): astnode = qlast.OnTargetDelete @classmethod def _cmd_from_ast( cls, schema: s_schema.Schema, astnode: qlast.DDLOperation, context: sd.CommandContext, ) -> sd.AlterObjectProperty: return sd.AlterObjectProperty( property='on_target_delete' ) @classmethod def _cmd_tree_from_ast( cls, schema: s_schema.Schema, astnode: qlast.DDLOperation, context: sd.CommandContext, ) -> sd.Command: assert isinstance(astnode, qlast.OnTargetDelete) cmd = super()._cmd_tree_from_ast(schema, astnode, context) assert isinstance(cmd, sd.AlterObjectProperty) cmd.new_value = astnode.cascade return cmd class AlterLink( LinkCommand, pointers.AlterPointer[Link], ): astnode = [qlast.AlterConcreteLink, qlast.AlterLink] referenced_astnode = qlast.AlterConcreteLink @classmethod def _cmd_tree_from_ast( cls, schema: s_schema.Schema, astnode: qlast.DDLOperation, context: sd.CommandContext, ) -> AlterLink: cmd = super()._cmd_tree_from_ast(schema, astnode, context) assert isinstance(cmd, AlterLink) if isinstance(astnode, qlast.CreateConcreteLink): cmd._process_create_or_alter_ast(schema, astnode, context) else: cmd._process_alter_ast(schema, astnode, context) return cmd def _apply_field_ast( self, schema: s_schema.Schema, context: sd.CommandContext, node: qlast.DDLOperation, op: sd.AlterObjectProperty, ) -> None: if op.property == 'target': if op.new_value: assert isinstance(op.new_value, so.ObjectShell) node.commands.append( qlast.SetPointerType( value=utils.typeref_to_ast(schema, op.new_value), ), ) elif op.property == 'computable': if not op.new_value: node.commands.append( qlast.SetField( name='expr', value=None, special_syntax=True, ), ) elif op.property == 'on_target_delete': node.commands.append(qlast.OnTargetDelete(cascade=op.new_value)) else: super()._apply_field_ast(schema, context, node, op) class DeleteLink( LinkCommand, pointers.DeletePointer[Link], ): astnode = [qlast.DropConcreteLink, qlast.DropLink] referenced_astnode = qlast.DropConcreteLink # NB: target type cleanup (e.g. target compound type) is done by # the DeleteProperty handler for the @target property. def _get_ast( self, schema: s_schema.Schema, context: sd.CommandContext, *, parent_node: Optional[qlast.DDLOperation] = None, ) -> Optional[qlast.DDLOperation]: if self.get_orig_attribute_value('from_alias'): # This is an alias type, appropriate DDL would be generated # from the corresponding Alter/DeleteAlias node. return None else: return super()._get_ast(schema, context, parent_node=parent_node)
edgedb/edgedb
edb/schema/links.py
Python
apache-2.0
21,887
import bisect
import string
from abc import ABC, abstractmethod
from typing import Optional

from django.conf import settings


class AbstractGrid(ABC):
    enabled = False

    @abstractmethod
    def get_square_for_point(self, x, y) -> Optional[str]:
        pass

    @abstractmethod
    def get_squares_for_bounds(self, bounds) -> Optional[str]:
        pass


class Grid(AbstractGrid):
    enabled = True

    def __init__(self, rows, cols):
        rows = tuple(float(y) for y in rows)
        cols = tuple(float(x) for x in cols)

        self.rows = tuple(sorted(rows))
        self.cols = tuple(sorted(cols))

        if self.rows == rows:
            self.invert_y = False
        elif self.rows == tuple(reversed(rows)):
            self.invert_y = True
        else:
            raise ValueError('row coordinates are not ordered')

        if self.cols == cols:
            self.invert_x = False
        elif self.cols == tuple(reversed(cols)):
            self.invert_x = True
        else:
            raise ValueError('column coordinates are not ordered')

    def get_square_for_point(self, x, y):
        x = bisect.bisect(self.cols, x)
        if x <= 0 or x >= len(self.cols):
            return None

        y = bisect.bisect(self.rows, y)
        if y <= 0 or y >= len(self.rows):
            return None

        if self.invert_x:
            x = len(self.cols) - x
        if self.invert_y:
            y = len(self.rows) - y

        return '%s%d' % (string.ascii_uppercase[x-1], y)

    def get_squares_for_bounds(self, bounds):
        minx, miny, maxx, maxy = bounds
        if self.invert_x:
            minx, maxx = maxx, minx
        if self.invert_y:
            miny, maxy = maxy, miny

        min_square = self.get_square_for_point(minx, miny)
        max_square = self.get_square_for_point(maxx, maxy)

        if not min_square or not max_square:
            return None

        if min_square == max_square:
            return min_square

        return '%s-%s' % (min_square, max_square)


class DummyGrid(AbstractGrid):
    def get_square_for_point(self, x, y):
        return None

    def get_squares_for_bounds(self, bounds):
        return None


if settings.GRID_COLS and settings.GRID_ROWS:
    grid = Grid(settings.GRID_ROWS.split(','), settings.GRID_COLS.split(','))
else:
    grid = DummyGrid()
c3nav/c3nav
src/c3nav/mapdata/grid.py
Python
apache-2.0
2,343
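A minimal usage sketch of the Grid class above, with made-up boundary coordinates; in the real project the boundaries come from settings.GRID_ROWS and settings.GRID_COLS, so the values below are assumptions.

# Assumes the Grid class defined above is in scope; in the project itself that
# would be `from c3nav.mapdata.grid import Grid` with Django settings configured.
rows = ['0', '100', '200', '300']   # hypothetical row boundaries -> rows 1..3
cols = ['0', '50', '100', '150']    # hypothetical column boundaries -> columns A..C

g = Grid(rows, cols)
print(g.get_square_for_point(60, 150))                # 'B2' (second column, second row)
print(g.get_squares_for_bounds((10, 10, 120, 250)))   # 'A1-C3'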
from model.contact import Contact
from random import randrange


def test_edit_contact(app, db, check_ui):
    if len(db.get_contact_list()) == 0:
        app.contact.create(Contact(first_name="Sabina", last_name="test", company="Pewex",
                                   address="osiedle", phone_home="123456789",
                                   e_mail="[email protected]", year="2016",))
    old_contact = db.get_contact_list()
    index = randrange(len(old_contact))
    contact = Contact(first_name='Kasia', last_name='Bober')
    contact.id = old_contact[index].id
    app.contact.edit_contact_by_index(index, contact)
    assert len(old_contact) == app.contact.count()
    new_contact = db.get_contact_list()
    old_contact[index] = contact
    assert old_contact == new_contact
    if check_ui:
        assert sorted(new_contact, key=Contact.id_or_max) == sorted(
            app.group.get_contact_list(), key=Contact.id_or_max
        )
sabinaczopik/python_training
test/test_edit_contact.py
Python
apache-2.0
949
# Copyright 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os
import socket
from unittest import mock

from oslotest import base as test_base

from oslo_service import systemd


class SystemdTestCase(test_base.BaseTestCase):
    """Test case for Systemd service readiness."""

    def test__abstractify(self):
        sock_name = '@fake_socket'
        res = systemd._abstractify(sock_name)
        self.assertEqual('\0{0}'.format(sock_name[1:]), res)

    @mock.patch.object(os, 'getenv', return_value='@fake_socket')
    def _test__sd_notify(self, getenv_mock, unset_env=False):
        self.ready = False
        self.closed = False

        class FakeSocket(object):
            def __init__(self, family, type):
                pass

            def connect(fs, socket):
                pass

            def close(fs):
                self.closed = True

            def sendall(fs, data):
                if data == b'READY=1':
                    self.ready = True

        with mock.patch.object(socket, 'socket', new=FakeSocket):
            if unset_env:
                systemd.notify_once()
            else:
                systemd.notify()

            self.assertTrue(self.ready)
            self.assertTrue(self.closed)

    def test_notify(self):
        self._test__sd_notify()

    def test_notify_once(self):
        os.environ['NOTIFY_SOCKET'] = '@fake_socket'
        self._test__sd_notify(unset_env=True)

        self.assertRaises(KeyError, os.environ.__getitem__, 'NOTIFY_SOCKET')

    @mock.patch("socket.socket")
    def test_onready(self, sock_mock):
        recv_results = [b'READY=1', '', socket.timeout]
        expected_results = [0, 1, 2]
        for recv, expected in zip(recv_results, expected_results):
            if recv == socket.timeout:
                sock_mock.return_value.recv.side_effect = recv
            else:
                sock_mock.return_value.recv.return_value = recv
            actual = systemd.onready('@fake_socket', 1)
            self.assertEqual(expected, actual)
openstack/oslo.service
oslo_service/tests/test_systemd.py
Python
apache-2.0
2,580
class Solution:
    def dailyTemperatures(self, T):
        ans = []
        m = [None] * 101
        for i in range(len(T) - 1, -1, -1):
            x = T[i]
            m[x] = i
            ans.append(min([x for x in m[x+1:] if x is not None], default=i) - i)
        ans.reverse()
        return ans


print(Solution().dailyTemperatures([73, 74, 75, 71, 69, 72, 76, 73]))
zuun77/givemegoogletshirts
leetcode/python/739_daily-temperatures.py
Python
apache-2.0
367
#!/usr/bin/python2.7

"""
Print the number of bases in a nib file.

usage: %prog nib_file
"""

from bx.seq import nib as seq_nib
import sys

nib = seq_nib.NibFile( file( sys.argv[1] ) )

print nib.length
poojavade/Genomics_Docker
Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/bx_python-0.7.1-py2.7-linux-x86_64.egg/EGG-INFO/scripts/nib_length.py
Python
apache-2.0
203
''' Created on Feb 3, 2013 @author: bpurgaso ''' from twisted.words.protocols import irc from twisted.internet import protocol from twisted.internet import reactor from twisted.internet import threads from ConfigManager import ConfigManager from Authenticator import Authenticator from subprocess import PIPE, STDOUT, Popen class bot(irc.IRCClient): """ irc bots, yay """ def _get_nickname(self): return self.factory.nickname nickname = property(_get_nickname) def reloadConfig(self): self.config = self.configManager.getConfig() def signedOn(self): #Initial Setup self.configManager = self.factory.configManager self.configManager.registerListener(self) self.config = self.configManager.getConfig() self.auth = self.factory.auth print "Signed on as %s." % (self.nickname) for i in self.config['channels'].keys(): if self.config['channels'][i]['autojoin']: irc.IRCClient.join(self, i, self.config['channels'][i]['key']) def joined(self, channel): print "Joined %s." % (channel) def irc_INVITE(self, prefix, params): """ called by twisted, if the bot was invited """ channel = params[-1].lower().replace('#', '') if channel not in self.config['channels'].keys(): self.auth.createChannelEntry(channel) self.join(channel, self.config['channels'][channel]['key']) def privmsg(self, user, channel, msg): ''' Called whenever an inbound message arrives ''' print user, channel, msg user = user.rsplit('!', 1)[0] # Check to see if they're sending me a private message if channel == self.nickname: channel = user index = 0 else: index = 1 # See if the message directed at me if msg.startswith(self.nickname + ":") or index == 0: ''' embedded commands go here ''' command = msg.rsplit()[index].lower() #REGISTER if command == 'register': if self.auth.isUserAuthorized('register', user): self.msg(channel, self.auth.registerUser(user, 'default')) else: self.msg(channel, "You aren't authorized for register.") #PROMOTE elif command == 'promote': if self.auth.isUserAuthorized('promote', user): try: target_uname = msg.rsplit()[index + 1].lower() target_group = msg.rsplit()[index + 2].lower() if self.auth.getPowerOfUser(user) <=\ self.auth.getPowerOfGroup(target_group): self.postToIRC((channel, [self.auth.registerUser(\ target_uname, target_group)])) else: self.postToIRC((channel, ['%s, your power level'\ ' is'\ ' insufficient.' 
% user])) except: self.postToIRC((channel, ['Check your formatting and'\ ' try again.'])) else: self.msg(channel, "You aren't authorized for register.") #WHOAMI elif command == 'whoami': if self.auth.isUserAuthorized('whoami', user): self.postToIRC((channel, [self.auth.whoami(user)])) else: self.msg(channel, "You aren't authorized for register.") #OPME elif command == 'opme': if self.auth.isUserAuthorized('opme', user): self.mode(channel, set, 'o', None, user) else: self.msg(channel, "You aren't authorized for opme.") #AUTOOP elif command == 'autoop': if self.auth.isUserAuthorized('autoop', user): if msg.rsplit()[2].lower() == 'on': self.postToIRC((channel, self.auth.toggleAutoOp(\ user, channel, True))) else: self.postToIRC((channel, self.auth.toggleAutoOp(\ user, channel, False))) else: self.msg(channel, "You aren't authorized for autoop.") #HELP elif command == 'help': if self.auth.isUserAuthorized('help', user): for i in self.auth.getAvailableCommandsForUser(user): self.msg(user, '%s: %s' %\ (i, self.auth.getHelpForCommand(i))) self.msg(channel, 'I\'ve sent you a pm.') else: self.msg(channel, "You aren't authorized for help.") #RELOAD elif command == 'reload': if self.auth.isUserAuthorized('reload', user): self.configManager.reload() self.msg(channel, "Configuration Reloaded") if not self.auth.sanityCheck(False): self.msg(channel, "Configuration Sanity is suspect, "\ "rolling back.") else: self.msg(channel, "You aren't authorized for reload.") #KICK elif command == 'kick': if self.auth.isUserAuthorized('kick', user): if self.nickname not in msg.rsplit()[index + 1:]: for i in msg.rsplit()[index + 1:]: self.kick(channel, i, 'Later broseph.') else: self.msg(channel, "Nope, not happening.") else: self.kick(channel, user, 'Sorry bro, nothing personal.') else: ''' External script execution goes here ''' if self.auth.isUserAuthorized(msg.rsplit()[index].lower(),\ user): #kick off the async call #channel, command, params self.invokeCommand(channel,\ command,\ (" ".join(msg.rsplit()[index + 1:]))) else: self.msg(channel, "You aren't authorized for %s." 
%\ (command)) else: ''' filter processing go here ''' pass def invokeCommand(self, channel, command, params): tmp = threads.deferToThread(self.__shellCall, channel, command, params) tmp.addCallback(self.postToIRC) def __shellCall(self, channel, command, params): command = self.sanitize(command) params = self.sanitize(params) command = "exec python ./bin/%s.py %s 2> /dev/null" % (command, params) self.p = Popen( command, stderr=STDOUT, stdout=PIPE, close_fds=True, shell=True) out, err = self.p.communicate() # @UnusedVariable return (channel, out.splitlines()) def sanitize(self, s): for i in self.config['sanitize']: s = s.replace(i, '') return s def postToIRC(self, tpl): for i in tpl[1]: self.msg(tpl[0], i) def userJoined(self, user, channel): channel_dict = channel.replace('#', '') if self.config['channels'][channel_dict]['enable_autoop'] and\ user in self.config['channels'][channel_dict]['autoop']: self.mode(channel, set, 'o', None, user) if self.config['channels'][channel_dict]['enable_greeting']: self.msg(channel, "%s: %s" % (user,\ self.config['channels'][channel_dict]['greeting'])) def kickedFrom(self, channel, kicker, message): """ called by twisted, if the bot was kicked """ channel = channel.replace('#', '') if channel in self.config['channels'].keys() and\ self.config['channels'][channel]['autojoin']: self.join(channel, self.config['channels'][channel]['key']) self.msg(kicker, "Why would you do that to me brah?") class botFactory(protocol.ClientFactory): """ Factory for producing "bot" """ protocol = bot def __init__(self, channel, configManager, auth): self.startChannel = channel self.configManager = configManager self.config = self.configManager.getConfig() self.auth = auth #required self.nickname = self.config['nick'] def clientConnectionLost(self, connector, reason): print "Lost connection (%s), reconnecting." % (reason) connector.connect() def clientConnectionFailed(self, connector, reason): print "Could not connect: %s" % (reason) class Hydra(object): ''' The big bad scary bot ''' def __init__(self): self.startChannel = '#hydra' self.configManager = ConfigManager() self.config = self.configManager.getConfig() self.configManager.registerListener(self) self.auth = Authenticator(self.configManager) n = self.config['network'] p = self.config['port'] b = botFactory(self.startChannel, self.configManager, self.auth) reactor.connectTCP(n, p, b) # @UndefinedVariable reactor.run() # @UndefinedVariable def reloadConfig(self): self.config = self.configManager.getConfig() ### dummy code below h = Hydra()
bpurgaso/hydra-ircbot
hydra/Hydra.py
Python
apache-2.0
9,993
"""Support for Tibber.""" import asyncio import logging import aiohttp import tibber from homeassistant.const import ( CONF_ACCESS_TOKEN, CONF_NAME, EVENT_HOMEASSISTANT_STOP, Platform, ) from homeassistant.exceptions import ConfigEntryNotReady from homeassistant.helpers import discovery from homeassistant.helpers.aiohttp_client import async_get_clientsession import homeassistant.helpers.config_validation as cv from homeassistant.util import dt as dt_util from .const import DATA_HASS_CONFIG, DOMAIN PLATFORMS = [Platform.SENSOR] CONFIG_SCHEMA = cv.removed(DOMAIN, raise_if_present=False) _LOGGER = logging.getLogger(__name__) async def async_setup(hass, config): """Set up the Tibber component.""" hass.data[DATA_HASS_CONFIG] = config return True async def async_setup_entry(hass, entry): """Set up a config entry.""" tibber_connection = tibber.Tibber( access_token=entry.data[CONF_ACCESS_TOKEN], websession=async_get_clientsession(hass), time_zone=dt_util.DEFAULT_TIME_ZONE, ) hass.data[DOMAIN] = tibber_connection async def _close(event): await tibber_connection.rt_disconnect() entry.async_on_unload(hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, _close)) try: await tibber_connection.update_info() except asyncio.TimeoutError as err: raise ConfigEntryNotReady from err except aiohttp.ClientError as err: _LOGGER.error("Error connecting to Tibber: %s ", err) return False except tibber.InvalidLogin as exp: _LOGGER.error("Failed to login. %s", exp) return False hass.config_entries.async_setup_platforms(entry, PLATFORMS) # set up notify platform, no entry support for notify component yet, # have to use discovery to load platform. hass.async_create_task( discovery.async_load_platform( hass, "notify", DOMAIN, {CONF_NAME: DOMAIN}, hass.data[DATA_HASS_CONFIG] ) ) return True async def async_unload_entry(hass, config_entry): """Unload a config entry.""" unload_ok = await hass.config_entries.async_unload_platforms( config_entry, PLATFORMS ) if unload_ok: tibber_connection = hass.data.get(DOMAIN) await tibber_connection.rt_disconnect() return unload_ok
home-assistant/home-assistant
homeassistant/components/tibber/__init__.py
Python
apache-2.0
2,333
import pytest from collections import OrderedDict from insights.parsers import (calc_offset, keyword_search, optlist_to_dict, parse_delimited_table, parse_fixed_table, split_kv_pairs, unsplit_lines, ParseException, SkipException) SPLIT_TEST_1 = """ # Comment line keyword1 = value1 # Inline comments # Comment indented keyword3 # Key with no separator keyword2 = value2a=True, value2b=100M """.strip() SPLIT_TEST_1_OD = OrderedDict([ ('keyword1', 'value1'), ('keyword3', ''), ('keyword2', 'value2a=True, value2b=100M') ]) SPLIT_TEST_2 = """ @ Comment line keyword1: value1 @ Inline comments keyword2 : value2a=True, value2b=100M @ Comment indented keyword3 @ Key with no separator """.strip() OFFSET_CONTENT_1 = """ data 1 line data 2 line """.strip() OFFSET_CONTENT_2 = """ # Warning line Error line data 1 line data 2 line Trailing line Blank line above Another trailing line Yet another trailing line Yet yet another trailing line """.strip() def test_split_kv_pairs(): kv_pairs = split_kv_pairs(SPLIT_TEST_1.splitlines()) assert len(kv_pairs) == 2 assert kv_pairs == { 'keyword1': 'value1', 'keyword2': 'value2a=True, value2b=100M' } kv_pairs = split_kv_pairs(SPLIT_TEST_1.splitlines(), filter_string='value2') assert len(kv_pairs) == 1 assert kv_pairs == { 'keyword2': 'value2a=True, value2b=100M' } kv_pairs = split_kv_pairs(SPLIT_TEST_1.splitlines(), use_partition=True) assert len(kv_pairs) == 3 assert kv_pairs == { 'keyword1': 'value1', 'keyword2': 'value2a=True, value2b=100M', 'keyword3': '' } kv_pairs = split_kv_pairs(SPLIT_TEST_1.splitlines(), use_partition=True, ordered=True) assert len(kv_pairs) == 3 assert kv_pairs == SPLIT_TEST_1_OD kv_pairs = split_kv_pairs(SPLIT_TEST_2.splitlines(), comment_char='@', split_on=':') assert len(kv_pairs) == 2 assert kv_pairs == { 'keyword1': 'value1', 'keyword2': 'value2a=True, value2b=100M' } kv_pairs = split_kv_pairs(SPLIT_TEST_2.splitlines(), comment_char='@', split_on=':', filter_string='value2') assert len(kv_pairs) == 1 assert kv_pairs == { 'keyword2': 'value2a=True, value2b=100M' } kv_pairs = split_kv_pairs(SPLIT_TEST_2.splitlines(), comment_char='@', split_on=':', use_partition=True) assert len(kv_pairs) == 3 assert kv_pairs == { 'keyword1': 'value1', 'keyword2': 'value2a=True, value2b=100M', 'keyword3': '' } SPLIT_LINES = """ Line one Line two part 1 \\ line two part 2\\ line two part 3 Line three """.strip() SPLIT_LINES_2 = """ Line one Line two part 1 ^ line two part 2^ line two part 3 Line three^ """.strip() SPLIT_LINES_3 = """ web.default_taskmaster_tasks = RHN::Task::SessionCleanup, RHN::Task::ErrataQueue, RHN::Task::ErrataEngine, RHN::Task::DailySummary, RHN::Task::SummaryPopulation, RHN::Task::RHNProc, RHN::Task::PackageCleanup db_host =""" def test_unsplit_lines(): lines = list(unsplit_lines(SPLIT_LINES.splitlines())) assert len(lines) == 3 assert lines[0] == 'Line one' assert lines[1] == 'Line two part 1 line two part 2 line two part 3' assert lines[2] == 'Line three' lines = list(unsplit_lines(SPLIT_LINES_2.splitlines(), cont_char='^')) assert len(lines) == 3 assert lines[0] == 'Line one' assert lines[1] == 'Line two part 1 line two part 2 line two part 3' assert lines[2] == 'Line three' # test continuation on last line # Test keeping continuation character on line lines = list(unsplit_lines( SPLIT_LINES_3.splitlines(), cont_char=',', keep_cont_char=True )) assert len(lines) == 4 assert lines[0] == '' assert lines[1] == 'web.default_taskmaster_tasks = RHN::Task::SessionCleanup, RHN::Task::ErrataQueue, RHN::Task::ErrataEngine, 
RHN::Task::DailySummary, RHN::Task::SummaryPopulation, RHN::Task::RHNProc, RHN::Task::PackageCleanup' assert lines[2] == '' assert lines[3] == 'db_host =' def test_calc_offset(): assert calc_offset(OFFSET_CONTENT_1.splitlines(), target=[]) == 0 assert calc_offset(OFFSET_CONTENT_1.splitlines(), target=[None]) == 0 assert calc_offset(OFFSET_CONTENT_1.splitlines(), target=['data ']) == 0 with pytest.raises(ValueError): calc_offset(OFFSET_CONTENT_1.splitlines(), target=['xdata ']) with pytest.raises(ValueError): calc_offset(OFFSET_CONTENT_1.splitlines(), target=['data '], invert_search=True) assert calc_offset(OFFSET_CONTENT_1.splitlines(), target=['Trailing', 'Blank', 'Another '], invert_search=True) == 0 assert calc_offset(OFFSET_CONTENT_2.splitlines(), target=[]) == 0 assert calc_offset(OFFSET_CONTENT_2.splitlines(), target=['data ']) == 3 assert calc_offset(reversed(OFFSET_CONTENT_2.splitlines()), target=['Trailing', 'Blank', 'Another ', 'Yet'], invert_search=True) == 6 assert calc_offset(OFFSET_CONTENT_2.splitlines(), target=['data', '2']) == 3 assert calc_offset(OFFSET_CONTENT_2.splitlines(), target=['data', '2'], require_all=True) == 4 assert calc_offset( reversed(OFFSET_CONTENT_2.splitlines()), target=['Trailing', 'Blank', 'Another ', 'Yet'], invert_search=True) == 6 assert calc_offset( reversed(OFFSET_CONTENT_2.splitlines()), target=['Trailing', 'Blank', 'Another ', 'Yet'], invert_search=True, require_all=True) == 6 FIXED_CONTENT_1 = """ Column1 Column2 Column3 data1 data 2 data 3 data4 data5 data6 data 7 data 9 """.strip() FIXED_CONTENT_1A = """ WARNING Column1 Column2 Column3 data1 data 2 data 3 data4 data5 data6 data 7 data 9 """.strip() FIXED_CONTENT_1B = """ Column1 Column2 Column3 data1 data 2 data4 data5 data6 data 7 data 9 """.strip() FIXED_CONTENT_2 = """ WARNING WARNING WARNING Some message Another message Column1 Column2 Column3 data1 data 2 data 3 data4 data5 data6 data 7 data 9 """.strip() FIXED_CONTENT_3 = """ WARNING WARNING WARNING Some message Another message Column1 Column2 Column3 data1 data 2 data 3 data4 data5 data6 data 7 data 9 Trailing non-data line Another trailing non-data line """.strip() FIXED_CONTENT_4 = """ WARNING WARNING WARNING Some message Another message Column1 Column 2 Column 3 data1 data 2 data 3 data4 data5 data6 data 7 data 9 data10 Trailing non-data line Another trailing non-data line """.strip() FIXED_CONTENT_5 = """ Column1 Column 2 Column 3 data1 data 2 data 3 data 7 data 9 data10 """.strip() FIXED_CONTENT_DUP_HEADER_PREFIXES = """ NAMESPACE NAME LABELS default foo app=superawesome """.strip() def test_parse_fixed_table(): data = parse_fixed_table(FIXED_CONTENT_1.splitlines()) assert len(data) == 3 assert data[0] == {'Column1': 'data1', 'Column2': 'data 2', 'Column3': 'data 3'} assert data[1] == {'Column1': 'data4', 'Column2': 'data5', 'Column3': 'data6'} assert data[2] == {'Column1': 'data 7', 'Column2': '', 'Column3': 'data 9'} data = parse_fixed_table(FIXED_CONTENT_1A.splitlines(), heading_ignore=['Column1 ']) assert len(data) == 3 assert data[0] == {'Column1': 'data1', 'Column2': 'data 2', 'Column3': 'data 3'} assert data[1] == {'Column1': 'data4', 'Column2': 'data5', 'Column3': 'data6'} assert data[2] == {'Column1': 'data 7', 'Column2': '', 'Column3': 'data 9'} data = parse_fixed_table(FIXED_CONTENT_1B.splitlines()) assert len(data) == 3 assert data[0] == {'Column1': 'data1', 'Column2': 'data 2', 'Column3': ''} assert data[1] == {'Column1': 'data4', 'Column2': 'data5', 'Column3': 'data6'} assert data[2] == {'Column1': 'data 7', 
'Column2': '', 'Column3': 'data 9'} data = parse_fixed_table(FIXED_CONTENT_2.splitlines(), heading_ignore=['Column1 ']) assert len(data) == 3 assert data[0] == {'Column1': 'data1', 'Column2': 'data 2', 'Column3': 'data 3'} assert data[1] == {'Column1': 'data4', 'Column2': 'data5', 'Column3': 'data6'} assert data[2] == {'Column1': 'data 7', 'Column2': '', 'Column3': 'data 9'} data = parse_fixed_table(FIXED_CONTENT_3.splitlines(), heading_ignore=['Column1 '], trailing_ignore=['Trailing', 'Another']) assert len(data) == 3 assert data[0] == {'Column1': 'data1', 'Column2': 'data 2', 'Column3': 'data 3'} assert data[1] == {'Column1': 'data4', 'Column2': 'data5', 'Column3': 'data6'} assert data[2] == {'Column1': 'data 7', 'Column2': '', 'Column3': 'data 9'} data = parse_fixed_table(FIXED_CONTENT_4.splitlines(), heading_ignore=['Column1 '], header_substitute=[('Column 2', 'Column_2'), ('Column 3', 'Column_3')], trailing_ignore=['Trailing', 'Another']) assert len(data) == 4 assert data[0] == {'Column1': 'data1', 'Column_2': 'data 2', 'Column_3': 'data 3'} assert data[1] == {'Column1': 'data4', 'Column_2': 'data5', 'Column_3': 'data6'} assert data[2] == {'Column1': 'data 7', 'Column_2': '', 'Column_3': 'data 9'} assert data[3] == {'Column1': 'data10', 'Column_2': '', 'Column_3': ''} # Test that if we search for trailing data that is always found, then we # should get the whole thing parsed as a table from the header line data = parse_fixed_table( ['foo' + line for line in FIXED_CONTENT_4.splitlines()], heading_ignore=['fooColumn1 '], header_substitute=[('fooColumn1', 'Column1'), ('Column 2', 'Column_2'), ('Column 3', 'Column_3')], trailing_ignore=['foo'] ) assert len(data) == 6 assert data[4] == {'Column1': 'fooTrailing', 'Column_2': 'non-data li', 'Column_3': 'ne'} assert data[5] == {'Column1': 'foo Another', 'Column_2': 'trailing no', 'Column_3': 'n-data line'} data = parse_fixed_table(FIXED_CONTENT_DUP_HEADER_PREFIXES.splitlines()) assert data[0] == {'NAMESPACE': 'default', 'NAME': 'foo', 'LABELS': 'app=superawesome'} data = parse_fixed_table(FIXED_CONTENT_5.splitlines()) assert len(data) == 3 def test_parse_fixed_table_empty_exception(): with pytest.raises(ParseException) as pe: parse_fixed_table(FIXED_CONTENT_1B.splitlines(), empty_exception=True) assert "Incorrect line:" in str(pe.value) def test_optlist_standard(): d = optlist_to_dict('key1,key2=val2,key1=val1,key3') assert sorted(d.keys()) == sorted(['key1', 'key2', 'key3']) assert d['key1'] == 'val1' assert d['key2'] == 'val2' assert d['key3'] is True def test_optlist_no_vals(): d = optlist_to_dict('key1,key2=val2,key1=val1,key3', kv_sep=None) assert sorted(d.keys()) == sorted(['key1', 'key1=val1', 'key2=val2', 'key3']) assert d['key1'] is True assert d['key1=val1'] is True assert d['key2=val2'] is True assert d['key3'] is True def test_optlist_strip_quotes(): d = optlist_to_dict( '''key1="foo",key2='bar',key3="mismatched quotes',key4="inner'quotes"''', strip_quotes=True ) assert sorted(d.keys()) == sorted(['key1', 'key2', 'key3', 'key4']) assert d['key1'] == 'foo' assert d['key2'] == 'bar' assert d['key3'] == '"mismatched quotes\'' assert d['key4'] == "inner'quotes" def test_optlist_with_spaces(): d = optlist_to_dict( '''key1=foo, key2=bar''' ) assert 'key1' in d assert 'key2' in d PS_AUX_TEST = """ USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND root 1 0.0 0.0 19356 1544 ? Ss May31 0:01 /sbin/init root 1821 0.0 0.0 0 0 ? S May31 0:25 [kondemand/0] root 1864 0.0 0.0 18244 668 ? 
Ss May31 0:05 irqbalance --pid=/var/run/irqbalance.pid user1 20160 0.0 0.0 108472 1896 pts/3 Ss 10:09 0:00 bash root 20357 0.0 0.0 9120 760 ? Ss 10:09 0:00 /sbin/dhclient -1 -q -lf /var/lib/dhclient/dhclient-extbr0.leases -pf /var/run/dhclient-extbr0.pid extbr0 qemu 22673 0.8 10.2 1618556 805636 ? Sl 11:38 1:07 /usr/libexec/qemu-kvm -name rhel7 -S -M rhel6.5.0 -enable-kvm -m 1024 -smp 2,sockets=2,cores=1,threads=1 -uuid 13798ffc-bc1e-d437-4f3f-2e0fa6c923ad """ MISSING_DATA_TEST = """ WARNING: Locking disabled. Be careful! This could corrupt your metadata. LVM2_PV_FMT|LVM2_PV_UUID|LVM2_DEV_SIZE|LVM2_PV_NAME|LVM2_PV_MAJOR|LVM2_PV_MINOR|LVM2_PV_MDA_FREE|LVM2_PV_MDA_SIZE|LVM2_PV_EXT_VSN|LVM2_PE_START|LVM2_PV_SIZE|LVM2_PV_FREE|LVM2_PV_USED|LVM2_PV_ATTR|LVM2_PV_ALLOCATABLE|LVM2_PV_EXPORTED|LVM2_PV_MISSING|LVM2_PV_PE_COUNT|LVM2_PV_PE_ALLOC_COUNT|LVM2_PV_TAGS|LVM2_PV_MDA_COUNT|LVM2_PV_MDA_USED_COUNT|LVM2_PV_BA_START|LVM2_PV_BA_SIZE|LVM2_PV_IN_USE|LVM2_PV_DUPLICATE|LVM2_VG_NAME WARNING: Locking disabled. Be careful! This could corrupt your metadata. """ SUBSTITUTE_HEADERS_TEST = """ address,port,state,read-only 0.0.0.0,3000,LISTEN,N 10.76.19.184,37500,ESTAB,Y """.strip() POSTGRESQL_LOG = """ schema | table | rows public | rhnsnapshotpackage | 47428950 public | rhnpackagefile | 32174333 public | rhnpackagecapability | 12934215 public | rhnpackagechangelogrec | 11269933 public | rhnchecksum | 10129746 public | rhnactionconfigrevision | 2894957 public | rhnpackageprovides | 2712442 public | rhnpackagerequires | 2532861 public | rhn_command_target | 1009152 public | rhnconfigfilename | 0 public | rhnxccdfidentsystem | 0 public | rhndistchannelmap | 0 public | rhnactionvirtshutdown | 0 public | rhnpublicchannelfamily | 0 (402 rows) """.strip() # Normally has a --- separator line, which is ignored using get_active_lines TABLE1 = """ THIS IS A HEADER this is some content_with_blank_prefix This is more content """.strip() TABLE2 = [ "SID Nr Instance SAPLOCALHOST Version DIR_EXECUTABLE", "HA2| 16| D16| lu0417|749, patch 10, changelist 1698137| /usr/sap/HA2/D16/exe", "HA2| 22| D22| lu0417|749, patch 10, changelist 1698137| /usr/sap/HA2/D22/exe" ] TABLE3 = """ THIS | IS | A | HEADER this ^ is ^ some ^ content This ^ is ^ more ^ content """.strip() def test_parse_delimited_table(): # No content? No table. 
assert parse_delimited_table([]) == [] # Test maximum splits and header 'ignore', which should actually be # called 'header_startswith' tbl = parse_delimited_table( PS_AUX_TEST.splitlines(), max_splits=10, heading_ignore=['USER'] ) assert tbl assert isinstance(tbl, list) assert len(tbl) == 6 assert isinstance(tbl[0], dict) assert tbl[0] == { '%MEM': '0.0', 'TTY': '?', 'VSZ': '19356', 'PID': '1', '%CPU': '0.0', 'START': 'May31', 'COMMAND': '/sbin/init', 'USER': 'root', 'STAT': 'Ss', 'TIME': '0:01', 'RSS': '1544' } assert tbl[5]['COMMAND'] == \ '/usr/libexec/qemu-kvm -name rhel7 -S -M rhel6.5.0 -enable-kvm -m 1024 -smp 2,sockets=2,cores=1,threads=1 -uuid 13798ffc-bc1e-d437-4f3f-2e0fa6c923ad' # Test trailing ignore not found tbl = parse_delimited_table( MISSING_DATA_TEST.splitlines(), delim='|', heading_ignore=['LVM2_PV_FMT'], trailing_ignore=['WARNING', 'ERROR', 'Cannot get lock'] ) assert isinstance(tbl, list) assert len(tbl) == 0 # Header substitution tbl = parse_delimited_table( SUBSTITUTE_HEADERS_TEST.splitlines(), delim=',', strip=False, header_substitute=[('read-only', 'read_only')] ) assert tbl assert isinstance(tbl, list) assert len(tbl) == 2 assert isinstance(tbl[1], dict) assert tbl[1] == { 'address': '10.76.19.184', 'port': '37500', 'state': 'ESTAB', 'read_only': 'Y' } # Test change of delimiter and trailing_ignore tbl = parse_delimited_table(POSTGRESQL_LOG.splitlines(), delim='|', trailing_ignore=['(']) assert isinstance(tbl, list) assert len(tbl) == 14 assert isinstance(tbl[0], dict) assert tbl[0] == { 'schema': 'public', 'table': 'rhnsnapshotpackage', 'rows': '47428950' } # Test using different header delimiter result = parse_delimited_table(TABLE3.splitlines(), delim="^", header_delim="|") assert isinstance(result, list) assert len(result) == 2 assert isinstance(result[0], dict) expected = [{"THIS": "this", "IS": "is", "A": "some", "HEADER": "content"}, {"THIS": "This", "IS": "is", "A": "more", "HEADER": "content"}] assert expected == result # Test explicit None as header delimiter, different from content delimiter result = parse_delimited_table(TABLE2, delim='|', header_delim=None) assert isinstance(result, list) assert len(result) == 2 assert isinstance(result[0], dict) expected = [{"SID": "HA2", "Nr": "16", "Instance": "D16", "SAPLOCALHOST": "lu0417", "Version": "749, patch 10, changelist 1698137", "DIR_EXECUTABLE": "/usr/sap/HA2/D16/exe"}, {"SID": "HA2", "Nr": "22", "Instance": "D22", "SAPLOCALHOST": "lu0417", "Version": "749, patch 10, changelist 1698137", "DIR_EXECUTABLE": "/usr/sap/HA2/D22/exe"}] assert expected == result # Test raw_line_key TABLE1_SP = TABLE1.splitlines() result = parse_delimited_table(TABLE1_SP, raw_line_key='raw_line') assert isinstance(result, list) assert len(result) == 2 assert isinstance(result[0], dict) # Get the RAW line assert result[0]['raw_line'] == TABLE1_SP[1] DATA_LIST = [ {'name': 'test 1', 'role': 'server', 'memory_gb': 16, 'ssd': True}, {'name': 'test 2', 'role': 'server', 'memory_gb': 256, 'ssd': False}, {'name': 'test 3', 'role': 'server', 'memory_gb': 16, 'ssd': False}, {'name': 'test 4', 'role': 'embedded', 'memory_gb': 1, 'ssd': False}, {'name': 'test 5', 'role': 'workstation', 'memory_gb': 16, 'ssd': True}, ] CERT_LIST = [ { 'status': 'MONITORING', 'stuck': 'no', 'key pair storage': "type=NSSDB,location='/etc/dirsrv/slapd-LDAP-EXAMPLE-COM',nickname='Server-Cert',token='NSS Certificate DB',pinfile='/etc/dirsrv/slapd-LDAP-EXAMPLE-COM/pwdfile.txt'", 'certificate': { 'type': 'NSSDB', 'location': '/etc/dirsrv/slapd-LDAP-EXAMPLE-COM', 
'nickname': 'Server-Cert', 'token': 'NSS Certificate DB', }, 'CA': 'IPA', 'issuer': 'CN=Certificate Authority,O=LDAP.EXAMPLE.COM', 'subject': 'CN=master.LDAP.EXAMPLE.COM,O=LDAP.EXAMPLE.COM', 'expires': '2017-06-28 12:52:12 UTC', 'eku': 'id-kp-serverAuth,id-kp-clientAuth', 'pre-save command': '', 'post-save command': '/usr/lib64/ipa/certmonger/restart_dirsrv LDAP-EXAMPLE-COM', 'track': 'yes', 'auto-renew': 'yes', }, { 'status': 'MONITORING', 'stuck': 'no', 'key pair storage': "type=NSSDB,location='/etc/dirsrv/slapd-PKI-IPA',nickname='Server-Cert',token='NSS Certificate DB',pinfile='/etc/dirsrv/slapd-PKI-IPA/pwdfile.txt'", 'certificate': { 'type': 'NSSDB', 'location': '/etc/dirsrv/slapd-PKI-IPA', 'nickname': 'Server-Cert', 'token': 'NSS Certificate DB', }, 'CA': 'IPA', 'issuer': 'CN=Certificate Authority,O=EXAMPLE.COM', 'subject': 'CN=ldap.EXAMPLE.COM,O=EXAMPLE.COM', 'expires': '2017-06-28 12:52:13 UTC', 'eku': 'id-kp-serverAuth,id-kp-clientAuth', 'pre-save command': '', 'post-save command': '/usr/lib64/ipa/certmonger/restart_dirsrv PKI-IPA', 'track': 'yes', 'auto-renew': 'yes', 'dash- space': 'tested', } ] def test_keyword_search(): # No keywords, no result assert len(keyword_search(DATA_LIST)) == 0 # Search on absent keywords produces empty list assert keyword_search(DATA_LIST, cpu_count=4) == [] # Search on present but non-matching keyword produces empty list assert keyword_search(DATA_LIST, memory_gb=8) == [] # Single result - search on string results = keyword_search(DATA_LIST, role='embedded') assert len(results) == 1 assert results[0] == DATA_LIST[3] # Multiple results, name has underscore - search on integer results = keyword_search(DATA_LIST, memory_gb=16) assert len(results) == 3 assert results == [DATA_LIST[i] for i in (0, 2, 4)] # Search on boolean results = keyword_search(DATA_LIST, ssd=False) assert len(results) == 3 assert results == [DATA_LIST[i] for i in (1, 2, 3)] # No data, no results. 
assert len(keyword_search([], role='server')) == 0 # Search with contains results = keyword_search(DATA_LIST, role__contains='e') assert len(results) == 4 assert results == [DATA_LIST[i] for i in (0, 1, 2, 3)] # Search with startswith results = keyword_search(DATA_LIST, role__startswith='e') assert len(results) == 1 assert results[0] == DATA_LIST[3] # Search for multiple keys, with spaces and dashes, and search operators results = keyword_search( CERT_LIST, pre_save_command='', key_pair_storage__startswith="type=NSSDB,location='/etc/dirsrv/slapd-PKI-IPA'" ) assert len(results) == 1 assert results[0] == CERT_LIST[1] # Make sure contains can also apply to keys with dashes and spaces results = keyword_search( CERT_LIST, post_save_command__contains='PKI-IPA', ) assert len(results) == 1 assert results[0] == CERT_LIST[1] # Lower case value matching results = keyword_search( CERT_LIST, status__lower_value='Monitoring', ) assert len(results) == 2 assert results == CERT_LIST # Check that searches for keys with two underscores that aren't matcher # suffixes still work results = keyword_search( CERT_LIST, dash__space='tested', ) assert len(results) == 1 assert results[0] == CERT_LIST[1] # Check that we can use contains to check the contents of a dictionary # in a value results = keyword_search( CERT_LIST, certificate__contains='type' ) assert len(results) == 2 assert results == CERT_LIST assert keyword_search( CERT_LIST, certificate__contains='encryption' ) == [] PS_LIST = [ {'PID': '692', 'PPID': '2', 'COMMAND': 'kdmflush', '_line': ' 692 2 kdmflush'}, {'PID': '701', 'PPID': '2', 'COMMAND': 'kdmflush', '_line': ' 701 2 kdmflush'}, {'PID': '725', 'PPID': '2', 'COMMAND': 'xfsalloc', '_line': ' 725 2 xfsalloc'}, {'PID': '726', 'PPID': '2', 'COMMAND': None, '_line': ' 726 2 grep -F xx'}, ] def test_keyword_search_None(): # Normal search assert keyword_search(PS_LIST, COMMAND__default=None)[0]['PID'] == '726' assert keyword_search(PS_LIST, _line__contains='alloc')[0]['PID'] == '725' assert keyword_search(PS_LIST, COMMAND__startswith='xfs')[0]['PID'] == '725' assert len(keyword_search(PS_LIST, COMMAND__lower_value='KDMFLUSH')) == 2 # Check that searches for non-existing keys assert keyword_search(PS_LIST, NONE__default=None) == [] assert keyword_search(PS_LIST, NONE__startswith='xfs') == [] def test_parse_exception(): with pytest.raises(ParseException) as e_info: raise ParseException('This is a parse exception') assert 'This is a parse exception' == str(e_info.value) def test_skip_exception(): with pytest.raises(SkipException) as e_info: raise SkipException('This is a skip exception') assert 'This is a skip exception' == str(e_info.value)
RedHatInsights/insights-core
insights/parsers/tests/test_parsers_module.py
Python
apache-2.0
24,552
""" @date 2014-11-16 @author Hong-She Liang <[email protected]> """ from selenium.common.exceptions import *
starofrainnight/rabird.selenium
rabird/selenium/exceptions.py
Python
apache-2.0
118
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Runs either `.fit()` or `.test()` on a single node across multiple gpus.
"""
import os
from argparse import ArgumentParser

import torch

from pytorch_lightning import seed_everything, Trainer
from tests.helpers.datamodules import ClassifDataModule
from tests.helpers.simple_models import ClassificationModel


def main():
    seed_everything(4321)

    parser = ArgumentParser(add_help=False)
    parser = Trainer.add_argparse_args(parser)
    parser.add_argument("--trainer_method", default="fit")
    parser.add_argument("--tmpdir")
    parser.add_argument("--workdir")
    parser.set_defaults(gpus=2)
    parser.set_defaults(accelerator="ddp")
    args = parser.parse_args()

    dm = ClassifDataModule()
    model = ClassificationModel()
    trainer = Trainer.from_argparse_args(args)

    if args.trainer_method == "fit":
        trainer.fit(model, datamodule=dm)
        result = None
    elif args.trainer_method == "test":
        result = trainer.test(model, datamodule=dm)
    elif args.trainer_method == "fit_test":
        trainer.fit(model, datamodule=dm)
        result = trainer.test(model, datamodule=dm)
    else:
        raise ValueError(f"Unsupported: {args.trainer_method}")

    result_ext = {"status": "complete", "method": args.trainer_method, "result": result}
    file_path = os.path.join(args.tmpdir, "ddp.result")
    torch.save(result_ext, file_path)


if __name__ == "__main__":
    main()
williamFalcon/pytorch-lightning
tests/accelerators/ddp_model.py
Python
apache-2.0
2,010
import csv
import os

import color


def _GetDataDirPath():
  return os.path.join(os.path.dirname(__file__), 'data')


def _GetCsvPath():
  return os.path.join(_GetDataDirPath(), 'dmccolors.csv')


def _GetCsvString():
  with open(_GetCsvPath()) as f:
    return f.read().strip()


def _CreateDmcColorFromRow(row):
  number = int(row[0])
  name = row[1]
  hex_color = row[5]
  rgb_color = color.RGBColorFromHexString(hex_color)
  return DMCColor(number, name, rgb_color)


# DMC Colors singleton
_dmc_colors = None


def _CreateDMCColors():
  global _dmc_colors
  csv_data = _GetCsvString()
  lines = csv_data.splitlines()

  # Skip first line
  lines = lines[1:]

  reader = csv.reader(lines, delimiter='\t')
  dmc_colors = set()
  for row in reader:
    dmc_colors.add(_CreateDmcColorFromRow(row))
  return dmc_colors


def GetDMCColors():
  global _dmc_colors
  if not _dmc_colors:
    _dmc_colors = frozenset(_CreateDMCColors())
  return _dmc_colors


def GetClosestDMCColorsPairs(rgb_color):
  pairs = list()
  for dcolor in GetDMCColors():
    pairs.append((dcolor, color.RGBColor.distance(rgb_color, dcolor.color)))
  return sorted(pairs, key=lambda pair: pair[1])


def GetClosestDMCColors(rgb_color):
  return [pair[0] for pair in GetClosestDMCColorsPairs(rgb_color)]


class DMCColor(object):
  def __init__(self, number, name, color):
    self.number = number
    self.name = name
    self.color = color

  def __str__(self):
    return super(DMCColor, self).__str__() + str((self.number, self.name, self.color))


def GetStringForDMCColor(dmc_color):
  return "%s %s %s" % (dmc_color.number, dmc_color.name, dmc_color.color)


# Simple executable functionality for debugging.
def main():
  for color in GetDMCColors():
    print color


if __name__ == '__main__':
  main()
nanaze/pystitch
pystitch/dmc_colors.py
Python
apache-2.0
1,790
"""A simple memcache-like server. The basic data structure maintained is a single in-memory dictionary mapping string keys to string values, with operations get, set and delete. (Both keys and values may contain Unicode.) This is a TCP server listening on port 54321. There is no authentication. Requests provide an operation and return a response. A connection may be used for multiple requests. The connection is closed when a client sends a bad request. If a client is idle for over 5 seconds (i.e., it does not send another request, or fails to read the whole response, within this time), it is disconnected. Framing of requests and responses within a connection uses a line-based protocol. The first line of a request is the frame header and contains three whitespace-delimited token followed by LF or CRLF: - the keyword 'request' - a decimal request ID; the first request is '1', the second '2', etc. - a decimal byte count giving the size of the rest of the request Note that the requests ID *must* be consecutive and start at '1' for each connection. Response frames look the same except the keyword is 'response'. The response ID matches the request ID. There should be exactly one response to each request and responses should be seen in the same order as the requests. After the frame, individual requests and responses are JSON encoded. If the frame header or the JSON request body cannot be parsed, an unframed error message (always starting with 'error') is written back and the connection is closed. JSON-encoded requests can be: - {"type": "get", "key": <string>} - {"type": "set", "key": <string>, "value": <string>} - {"type": "delete", "key": <string>} Responses are also JSON-encoded: - {"status": "ok", "value": <string>} # Successful get request - {"status": "ok"} # Successful set or delete request - {"status": "notfound"} # Key not found for get or delete request If the request is valid JSON but cannot be handled (e.g., the type or key field is absent or invalid), an error response of the following form is returned, but the connection is not closed: - {"error": <string>} """ import argparse import asyncio import json import logging import os import random ARGS = argparse.ArgumentParser(description='Cache server example.') ARGS.add_argument( '--tls', action='store_true', dest='tls', default=False, help='Use TLS') ARGS.add_argument( '--iocp', action='store_true', dest='iocp', default=False, help='Use IOCP event loop (Windows only)') ARGS.add_argument( '--host', action='store', dest='host', default='localhost', help='Host name') ARGS.add_argument( '--port', action='store', dest='port', default=54321, type=int, help='Port number') ARGS.add_argument( '--timeout', action='store', dest='timeout', default=5, type=float, help='Timeout') ARGS.add_argument( '--random_failure_percent', action='store', dest='fail_percent', default=0, type=float, help='Fail randomly N percent of the time') ARGS.add_argument( '--random_failure_sleep', action='store', dest='fail_sleep', default=0, type=float, help='Sleep time when randomly failing') ARGS.add_argument( '--random_response_sleep', action='store', dest='resp_sleep', default=0, type=float, help='Sleep time before responding') args = ARGS.parse_args() class Cache: def __init__(self, loop): self.loop = loop self.table = {} @asyncio.coroutine def handle_client(self, reader, writer): # Wrapper to log stuff and close writer (i.e., transport). 
peer = writer.get_extra_info('socket').getpeername() logging.info('got a connection from %s', peer) try: yield from self.frame_parser(reader, writer) except Exception as exc: logging.error('error %r from %s', exc, peer) else: logging.info('end connection from %s', peer) finally: writer.close() @asyncio.coroutine def frame_parser(self, reader, writer): # This takes care of the framing. last_request_id = 0 while True: # Read the frame header, parse it, read the data. # NOTE: The readline() and readexactly() calls will hang # if the client doesn't send enough data but doesn't # disconnect either. We add a timeout to each. (But the # timeout should really be implemented by StreamReader.) framing_b = yield from asyncio.wait_for( reader.readline(), timeout=args.timeout, loop=self.loop) if random.random()*100 < args.fail_percent: logging.warn('Inserting random failure') yield from asyncio.sleep(args.fail_sleep*random.random(), loop=self.loop) writer.write(b'error random failure\r\n') break logging.debug('framing_b = %r', framing_b) if not framing_b: break # Clean close. try: frame_keyword, request_id_b, byte_count_b = framing_b.split() except ValueError: writer.write(b'error unparseable frame\r\n') break if frame_keyword != b'request': writer.write(b'error frame does not start with request\r\n') break try: request_id, byte_count = int(request_id_b), int(byte_count_b) except ValueError: writer.write(b'error unparsable frame parameters\r\n') break if request_id != last_request_id + 1 or byte_count < 2: writer.write(b'error invalid frame parameters\r\n') break last_request_id = request_id request_b = yield from asyncio.wait_for( reader.readexactly(byte_count), timeout=args.timeout, loop=self.loop) try: request = json.loads(request_b.decode('utf8')) except ValueError: writer.write(b'error unparsable json\r\n') break response = self.handle_request(request) # Not a coroutine. if response is None: writer.write(b'error unhandlable request\r\n') break response_b = json.dumps(response).encode('utf8') + b'\r\n' byte_count = len(response_b) framing_s = 'response {} {}\r\n'.format(request_id, byte_count) writer.write(framing_s.encode('ascii')) yield from asyncio.sleep(args.resp_sleep*random.random(), loop=self.loop) writer.write(response_b) def handle_request(self, request): # This parses one request and farms it out to a specific handler. # Return None for all errors. if not isinstance(request, dict): return {'error': 'request is not a dict'} request_type = request.get('type') if request_type is None: return {'error': 'no type in request'} if request_type not in {'get', 'set', 'delete'}: return {'error': 'unknown request type'} key = request.get('key') if not isinstance(key, str): return {'error': 'key is not a string'} if request_type == 'get': return self.handle_get(key) if request_type == 'set': value = request.get('value') if not isinstance(value, str): return {'error': 'value is not a string'} return self.handle_set(key, value) if request_type == 'delete': return self.handle_delete(key) assert False, 'bad request type' # Should have been caught above. 
def handle_get(self, key): value = self.table.get(key) if value is None: return {'status': 'notfound'} else: return {'status': 'ok', 'value': value} def handle_set(self, key, value): self.table[key] = value return {'status': 'ok'} def handle_delete(self, key): if key not in self.table: return {'status': 'notfound'} else: del self.table[key] return {'status': 'ok'} def main(): asyncio.set_event_loop(None) if args.iocp: from asyncio.windows_events import ProactorEventLoop loop = ProactorEventLoop() else: loop = asyncio.new_event_loop() sslctx = None if args.tls: import ssl # TODO: take cert/key from args as well. here = os.path.join(os.path.dirname(__file__), '..', 'tests') sslctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23) sslctx.options |= ssl.OP_NO_SSLv2 sslctx.load_cert_chain( certfile=os.path.join(here, 'ssl_cert.pem'), keyfile=os.path.join(here, 'ssl_key.pem')) cache = Cache(loop) task = asyncio.streams.start_server(cache.handle_client, args.host, args.port, ssl=sslctx, loop=loop) svr = loop.run_until_complete(task) for sock in svr.sockets: logging.info('socket %s', sock.getsockname()) loop.run_forever() if __name__ == '__main__': logging.basicConfig(level=logging.INFO) main()
leetreveil/tulip
examples/cachesvr.py
Python
apache-2.0
9,357
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import operator from pyspark import since, keyword_only from pyspark.ml import Estimator, Model from pyspark.ml.param.shared import * from pyspark.ml.regression import DecisionTreeModel, DecisionTreeRegressionModel, \ RandomForestParams, TreeEnsembleModel, TreeEnsembleParams from pyspark.ml.util import * from pyspark.ml.wrapper import JavaEstimator, JavaModel, JavaParams from pyspark.ml.wrapper import JavaWrapper from pyspark.ml.common import inherit_doc from pyspark.sql import DataFrame from pyspark.sql.functions import udf, when from pyspark.sql.types import ArrayType, DoubleType from pyspark.storagelevel import StorageLevel __all__ = ['LinearSVC', 'LinearSVCModel', 'LogisticRegression', 'LogisticRegressionModel', 'LogisticRegressionSummary', 'LogisticRegressionTrainingSummary', 'BinaryLogisticRegressionSummary', 'BinaryLogisticRegressionTrainingSummary', 'DecisionTreeClassifier', 'DecisionTreeClassificationModel', 'GBTClassifier', 'GBTClassificationModel', 'RandomForestClassifier', 'RandomForestClassificationModel', 'NaiveBayes', 'NaiveBayesModel', 'MultilayerPerceptronClassifier', 'MultilayerPerceptronClassificationModel', 'OneVsRest', 'OneVsRestModel'] @inherit_doc class JavaClassificationModel(JavaPredictionModel): """ (Private) Java Model produced by a ``Classifier``. Classes are indexed {0, 1, ..., numClasses - 1}. To be mixed in with class:`pyspark.ml.JavaModel` """ @property @since("2.1.0") def numClasses(self): """ Number of classes (values which the label can take). """ return self._call_java("numClasses") @inherit_doc class LinearSVC(JavaEstimator, HasFeaturesCol, HasLabelCol, HasPredictionCol, HasMaxIter, HasRegParam, HasTol, HasRawPredictionCol, HasFitIntercept, HasStandardization, HasWeightCol, HasAggregationDepth, JavaMLWritable, JavaMLReadable): """ .. note:: Experimental `Linear SVM Classifier <https://en.wikipedia.org/wiki/Support_vector_machine#Linear_SVM>`_ This binary classifier optimizes the Hinge Loss using the OWLQN optimizer. Only supports L2 regularization currently. >>> from pyspark.sql import Row >>> from pyspark.ml.linalg import Vectors >>> df = sc.parallelize([ ... Row(label=1.0, features=Vectors.dense(1.0, 1.0, 1.0)), ... 
Row(label=0.0, features=Vectors.dense(1.0, 2.0, 3.0))]).toDF() >>> svm = LinearSVC(maxIter=5, regParam=0.01) >>> model = svm.fit(df) >>> model.coefficients DenseVector([0.0, -0.2792, -0.1833]) >>> model.intercept 1.0206118982229047 >>> model.numClasses 2 >>> model.numFeatures 3 >>> test0 = sc.parallelize([Row(features=Vectors.dense(-1.0, -1.0, -1.0))]).toDF() >>> result = model.transform(test0).head() >>> result.prediction 1.0 >>> result.rawPrediction DenseVector([-1.4831, 1.4831]) >>> svm_path = temp_path + "/svm" >>> svm.save(svm_path) >>> svm2 = LinearSVC.load(svm_path) >>> svm2.getMaxIter() 5 >>> model_path = temp_path + "/svm_model" >>> model.save(model_path) >>> model2 = LinearSVCModel.load(model_path) >>> model.coefficients[0] == model2.coefficients[0] True >>> model.intercept == model2.intercept True .. versionadded:: 2.2.0 """ threshold = Param(Params._dummy(), "threshold", "The threshold in binary classification applied to the linear model" " prediction. This threshold can be any real number, where Inf will make" " all predictions 0.0 and -Inf will make all predictions 1.0.", typeConverter=TypeConverters.toFloat) @keyword_only def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", maxIter=100, regParam=0.0, tol=1e-6, rawPredictionCol="rawPrediction", fitIntercept=True, standardization=True, threshold=0.0, weightCol=None, aggregationDepth=2): """ __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", \ maxIter=100, regParam=0.0, tol=1e-6, rawPredictionCol="rawPrediction", \ fitIntercept=True, standardization=True, threshold=0.0, weightCol=None, \ aggregationDepth=2): """ super(LinearSVC, self).__init__() self._java_obj = self._new_java_obj( "org.apache.spark.ml.classification.LinearSVC", self.uid) self._setDefault(maxIter=100, regParam=0.0, tol=1e-6, fitIntercept=True, standardization=True, threshold=0.0, aggregationDepth=2) kwargs = self._input_kwargs self.setParams(**kwargs) @keyword_only @since("2.2.0") def setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", maxIter=100, regParam=0.0, tol=1e-6, rawPredictionCol="rawPrediction", fitIntercept=True, standardization=True, threshold=0.0, weightCol=None, aggregationDepth=2): """ setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", \ maxIter=100, regParam=0.0, tol=1e-6, rawPredictionCol="rawPrediction", \ fitIntercept=True, standardization=True, threshold=0.0, weightCol=None, \ aggregationDepth=2): Sets params for Linear SVM Classifier. """ kwargs = self._input_kwargs return self._set(**kwargs) def _create_model(self, java_model): return LinearSVCModel(java_model) def setThreshold(self, value): """ Sets the value of :py:attr:`threshold`. """ return self._set(threshold=value) def getThreshold(self): """ Gets the value of threshold or its default value. """ return self.getOrDefault(self.threshold) class LinearSVCModel(JavaModel, JavaClassificationModel, JavaMLWritable, JavaMLReadable): """ .. note:: Experimental Model fitted by LinearSVC. .. versionadded:: 2.2.0 """ @property @since("2.2.0") def coefficients(self): """ Model coefficients of Linear SVM Classifier. """ return self._call_java("coefficients") @property @since("2.2.0") def intercept(self): """ Model intercept of Linear SVM Classifier. 
""" return self._call_java("intercept") @inherit_doc class LogisticRegression(JavaEstimator, HasFeaturesCol, HasLabelCol, HasPredictionCol, HasMaxIter, HasRegParam, HasTol, HasProbabilityCol, HasRawPredictionCol, HasElasticNetParam, HasFitIntercept, HasStandardization, HasThresholds, HasWeightCol, HasAggregationDepth, JavaMLWritable, JavaMLReadable): """ Logistic regression. This class supports multinomial logistic (softmax) and binomial logistic regression. >>> from pyspark.sql import Row >>> from pyspark.ml.linalg import Vectors >>> bdf = sc.parallelize([ ... Row(label=1.0, weight=1.0, features=Vectors.dense(0.0, 5.0)), ... Row(label=0.0, weight=2.0, features=Vectors.dense(1.0, 2.0)), ... Row(label=1.0, weight=3.0, features=Vectors.dense(2.0, 1.0)), ... Row(label=0.0, weight=4.0, features=Vectors.dense(3.0, 3.0))]).toDF() >>> blor = LogisticRegression(regParam=0.01, weightCol="weight") >>> blorModel = blor.fit(bdf) >>> blorModel.coefficients DenseVector([-1.080..., -0.646...]) >>> blorModel.intercept 3.112... >>> data_path = "data/mllib/sample_multiclass_classification_data.txt" >>> mdf = spark.read.format("libsvm").load(data_path) >>> mlor = LogisticRegression(regParam=0.1, elasticNetParam=1.0, family="multinomial") >>> mlorModel = mlor.fit(mdf) >>> mlorModel.coefficientMatrix SparseMatrix(3, 4, [0, 1, 2, 3], [3, 2, 1], [1.87..., -2.75..., -0.50...], 1) >>> mlorModel.interceptVector DenseVector([0.04..., -0.42..., 0.37...]) >>> test0 = sc.parallelize([Row(features=Vectors.dense(-1.0, 1.0))]).toDF() >>> result = blorModel.transform(test0).head() >>> result.prediction 1.0 >>> result.probability DenseVector([0.02..., 0.97...]) >>> result.rawPrediction DenseVector([-3.54..., 3.54...]) >>> test1 = sc.parallelize([Row(features=Vectors.sparse(2, [0], [1.0]))]).toDF() >>> blorModel.transform(test1).head().prediction 1.0 >>> blor.setParams("vector") Traceback (most recent call last): ... TypeError: Method setParams forces keyword arguments. >>> lr_path = temp_path + "/lr" >>> blor.save(lr_path) >>> lr2 = LogisticRegression.load(lr_path) >>> lr2.getRegParam() 0.01 >>> model_path = temp_path + "/lr_model" >>> blorModel.save(model_path) >>> model2 = LogisticRegressionModel.load(model_path) >>> blorModel.coefficients[0] == model2.coefficients[0] True >>> blorModel.intercept == model2.intercept True .. versionadded:: 1.3.0 """ threshold = Param(Params._dummy(), "threshold", "Threshold in binary classification prediction, in range [0, 1]." + " If threshold and thresholds are both set, they must match." + "e.g. if threshold is p, then thresholds must be equal to [1-p, p].", typeConverter=TypeConverters.toFloat) family = Param(Params._dummy(), "family", "The name of family which is a description of the label distribution to " + "be used in the model. 
Supported options: auto, binomial, multinomial", typeConverter=TypeConverters.toString) @keyword_only def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-6, fitIntercept=True, threshold=0.5, thresholds=None, probabilityCol="probability", rawPredictionCol="rawPrediction", standardization=True, weightCol=None, aggregationDepth=2, family="auto"): """ __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", \ maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-6, fitIntercept=True, \ threshold=0.5, thresholds=None, probabilityCol="probability", \ rawPredictionCol="rawPrediction", standardization=True, weightCol=None, \ aggregationDepth=2, family="auto") If the threshold and thresholds Params are both set, they must be equivalent. """ super(LogisticRegression, self).__init__() self._java_obj = self._new_java_obj( "org.apache.spark.ml.classification.LogisticRegression", self.uid) self._setDefault(maxIter=100, regParam=0.0, tol=1E-6, threshold=0.5, family="auto") kwargs = self._input_kwargs self.setParams(**kwargs) self._checkThresholdConsistency() @keyword_only @since("1.3.0") def setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-6, fitIntercept=True, threshold=0.5, thresholds=None, probabilityCol="probability", rawPredictionCol="rawPrediction", standardization=True, weightCol=None, aggregationDepth=2, family="auto"): """ setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", \ maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-6, fitIntercept=True, \ threshold=0.5, thresholds=None, probabilityCol="probability", \ rawPredictionCol="rawPrediction", standardization=True, weightCol=None, \ aggregationDepth=2, family="auto") Sets params for logistic regression. If the threshold and thresholds Params are both set, they must be equivalent. """ kwargs = self._input_kwargs self._set(**kwargs) self._checkThresholdConsistency() return self def _create_model(self, java_model): return LogisticRegressionModel(java_model) @since("1.4.0") def setThreshold(self, value): """ Sets the value of :py:attr:`threshold`. Clears value of :py:attr:`thresholds` if it has been set. """ self._set(threshold=value) self._clear(self.thresholds) return self @since("1.4.0") def getThreshold(self): """ Get threshold for binary classification. If :py:attr:`thresholds` is set with length 2 (i.e., binary classification), this returns the equivalent threshold: :math:`\\frac{1}{1 + \\frac{thresholds(0)}{thresholds(1)}}`. Otherwise, returns :py:attr:`threshold` if set or its default value if unset. """ self._checkThresholdConsistency() if self.isSet(self.thresholds): ts = self.getOrDefault(self.thresholds) if len(ts) != 2: raise ValueError("Logistic Regression getThreshold only applies to" + " binary classification, but thresholds has length != 2." + " thresholds: " + ",".join(ts)) return 1.0/(1.0 + ts[0]/ts[1]) else: return self.getOrDefault(self.threshold) @since("1.5.0") def setThresholds(self, value): """ Sets the value of :py:attr:`thresholds`. Clears value of :py:attr:`threshold` if it has been set. """ self._set(thresholds=value) self._clear(self.threshold) return self @since("1.5.0") def getThresholds(self): """ If :py:attr:`thresholds` is set, return its value. 
Otherwise, if :py:attr:`threshold` is set, return the equivalent thresholds for binary classification: (1-threshold, threshold). If neither are set, throw an error. """ self._checkThresholdConsistency() if not self.isSet(self.thresholds) and self.isSet(self.threshold): t = self.getOrDefault(self.threshold) return [1.0-t, t] else: return self.getOrDefault(self.thresholds) def _checkThresholdConsistency(self): if self.isSet(self.threshold) and self.isSet(self.thresholds): ts = self.getOrDefault(self.thresholds) if len(ts) != 2: raise ValueError("Logistic Regression getThreshold only applies to" + " binary classification, but thresholds has length != 2." + " thresholds: {0}".format(str(ts))) t = 1.0/(1.0 + ts[0]/ts[1]) t2 = self.getOrDefault(self.threshold) if abs(t2 - t) >= 1E-5: raise ValueError("Logistic Regression getThreshold found inconsistent values for" + " threshold (%g) and thresholds (equivalent to %g)" % (t2, t)) @since("2.1.0") def setFamily(self, value): """ Sets the value of :py:attr:`family`. """ return self._set(family=value) @since("2.1.0") def getFamily(self): """ Gets the value of :py:attr:`family` or its default value. """ return self.getOrDefault(self.family) class LogisticRegressionModel(JavaModel, JavaClassificationModel, JavaMLWritable, JavaMLReadable): """ Model fitted by LogisticRegression. .. versionadded:: 1.3.0 """ @property @since("2.0.0") def coefficients(self): """ Model coefficients of binomial logistic regression. An exception is thrown in the case of multinomial logistic regression. """ return self._call_java("coefficients") @property @since("1.4.0") def intercept(self): """ Model intercept of binomial logistic regression. An exception is thrown in the case of multinomial logistic regression. """ return self._call_java("intercept") @property @since("2.1.0") def coefficientMatrix(self): """ Model coefficients. """ return self._call_java("coefficientMatrix") @property @since("2.1.0") def interceptVector(self): """ Model intercept. """ return self._call_java("interceptVector") @property @since("2.0.0") def summary(self): """ Gets summary (e.g. accuracy/precision/recall, objective history, total iterations) of model trained on the training set. An exception is thrown if `trainingSummary is None`. """ if self.hasSummary: java_blrt_summary = self._call_java("summary") # Note: Once multiclass is added, update this to return correct summary return BinaryLogisticRegressionTrainingSummary(java_blrt_summary) else: raise RuntimeError("No training summary available for this %s" % self.__class__.__name__) @property @since("2.0.0") def hasSummary(self): """ Indicates whether a training summary exists for this model instance. """ return self._call_java("hasSummary") @since("2.0.0") def evaluate(self, dataset): """ Evaluates the model on a test dataset. :param dataset: Test dataset to evaluate model on, where dataset is an instance of :py:class:`pyspark.sql.DataFrame` """ if not isinstance(dataset, DataFrame): raise ValueError("dataset must be a DataFrame but got %s." % type(dataset)) java_blr_summary = self._call_java("evaluate", dataset) return BinaryLogisticRegressionSummary(java_blr_summary) class LogisticRegressionSummary(JavaWrapper): """ .. note:: Experimental Abstraction for Logistic Regression Results for a given model. .. versionadded:: 2.0.0 """ @property @since("2.0.0") def predictions(self): """ Dataframe outputted by the model's `transform` method. 
""" return self._call_java("predictions") @property @since("2.0.0") def probabilityCol(self): """ Field in "predictions" which gives the probability of each class as a vector. """ return self._call_java("probabilityCol") @property @since("2.0.0") def labelCol(self): """ Field in "predictions" which gives the true label of each instance. """ return self._call_java("labelCol") @property @since("2.0.0") def featuresCol(self): """ Field in "predictions" which gives the features of each instance as a vector. """ return self._call_java("featuresCol") @inherit_doc class LogisticRegressionTrainingSummary(LogisticRegressionSummary): """ .. note:: Experimental Abstraction for multinomial Logistic Regression Training results. Currently, the training summary ignores the training weights except for the objective trace. .. versionadded:: 2.0.0 """ @property @since("2.0.0") def objectiveHistory(self): """ Objective function (scaled loss + regularization) at each iteration. """ return self._call_java("objectiveHistory") @property @since("2.0.0") def totalIterations(self): """ Number of training iterations until termination. """ return self._call_java("totalIterations") @inherit_doc class BinaryLogisticRegressionSummary(LogisticRegressionSummary): """ .. note:: Experimental Binary Logistic regression results for a given model. .. versionadded:: 2.0.0 """ @property @since("2.0.0") def roc(self): """ Returns the receiver operating characteristic (ROC) curve, which is a Dataframe having two fields (FPR, TPR) with (0.0, 0.0) prepended and (1.0, 1.0) appended to it. .. seealso:: `Wikipedia reference \ <http://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_ .. note:: This ignores instance weights (setting all to 1.0) from `LogisticRegression.weightCol`. This will change in later Spark versions. """ return self._call_java("roc") @property @since("2.0.0") def areaUnderROC(self): """ Computes the area under the receiver operating characteristic (ROC) curve. .. note:: This ignores instance weights (setting all to 1.0) from `LogisticRegression.weightCol`. This will change in later Spark versions. """ return self._call_java("areaUnderROC") @property @since("2.0.0") def pr(self): """ Returns the precision-recall curve, which is a Dataframe containing two fields recall, precision with (0.0, 1.0) prepended to it. .. note:: This ignores instance weights (setting all to 1.0) from `LogisticRegression.weightCol`. This will change in later Spark versions. """ return self._call_java("pr") @property @since("2.0.0") def fMeasureByThreshold(self): """ Returns a dataframe with two fields (threshold, F-Measure) curve with beta = 1.0. .. note:: This ignores instance weights (setting all to 1.0) from `LogisticRegression.weightCol`. This will change in later Spark versions. """ return self._call_java("fMeasureByThreshold") @property @since("2.0.0") def precisionByThreshold(self): """ Returns a dataframe with two fields (threshold, precision) curve. Every possible probability obtained in transforming the dataset are used as thresholds used in calculating the precision. .. note:: This ignores instance weights (setting all to 1.0) from `LogisticRegression.weightCol`. This will change in later Spark versions. """ return self._call_java("precisionByThreshold") @property @since("2.0.0") def recallByThreshold(self): """ Returns a dataframe with two fields (threshold, recall) curve. Every possible probability obtained in transforming the dataset are used as thresholds used in calculating the recall. .. 
note:: This ignores instance weights (setting all to 1.0) from `LogisticRegression.weightCol`. This will change in later Spark versions. """ return self._call_java("recallByThreshold") @inherit_doc class BinaryLogisticRegressionTrainingSummary(BinaryLogisticRegressionSummary, LogisticRegressionTrainingSummary): """ .. note:: Experimental Binary Logistic regression training results for a given model. .. versionadded:: 2.0.0 """ pass class TreeClassifierParams(object): """ Private class to track supported impurity measures. .. versionadded:: 1.4.0 """ supportedImpurities = ["entropy", "gini"] impurity = Param(Params._dummy(), "impurity", "Criterion used for information gain calculation (case-insensitive). " + "Supported options: " + ", ".join(supportedImpurities), typeConverter=TypeConverters.toString) def __init__(self): super(TreeClassifierParams, self).__init__() @since("1.6.0") def setImpurity(self, value): """ Sets the value of :py:attr:`impurity`. """ return self._set(impurity=value) @since("1.6.0") def getImpurity(self): """ Gets the value of impurity or its default value. """ return self.getOrDefault(self.impurity) class GBTParams(TreeEnsembleParams): """ Private class to track supported GBT params. .. versionadded:: 1.4.0 """ supportedLossTypes = ["logistic"] @inherit_doc class DecisionTreeClassifier(JavaEstimator, HasFeaturesCol, HasLabelCol, HasPredictionCol, HasProbabilityCol, HasRawPredictionCol, DecisionTreeParams, TreeClassifierParams, HasCheckpointInterval, HasSeed, JavaMLWritable, JavaMLReadable): """ `Decision tree <http://en.wikipedia.org/wiki/Decision_tree_learning>`_ learning algorithm for classification. It supports both binary and multiclass labels, as well as both continuous and categorical features. >>> from pyspark.ml.linalg import Vectors >>> from pyspark.ml.feature import StringIndexer >>> df = spark.createDataFrame([ ... (1.0, Vectors.dense(1.0)), ... (0.0, Vectors.sparse(1, [], []))], ["label", "features"]) >>> stringIndexer = StringIndexer(inputCol="label", outputCol="indexed") >>> si_model = stringIndexer.fit(df) >>> td = si_model.transform(df) >>> dt = DecisionTreeClassifier(maxDepth=2, labelCol="indexed") >>> model = dt.fit(td) >>> model.numNodes 3 >>> model.depth 1 >>> model.featureImportances SparseVector(1, {0: 1.0}) >>> model.numFeatures 1 >>> model.numClasses 2 >>> print(model.toDebugString) DecisionTreeClassificationModel (uid=...) of depth 1 with 3 nodes... >>> test0 = spark.createDataFrame([(Vectors.dense(-1.0),)], ["features"]) >>> result = model.transform(test0).head() >>> result.prediction 0.0 >>> result.probability DenseVector([1.0, 0.0]) >>> result.rawPrediction DenseVector([1.0, 0.0]) >>> test1 = spark.createDataFrame([(Vectors.sparse(1, [0], [1.0]),)], ["features"]) >>> model.transform(test1).head().prediction 1.0 >>> dtc_path = temp_path + "/dtc" >>> dt.save(dtc_path) >>> dt2 = DecisionTreeClassifier.load(dtc_path) >>> dt2.getMaxDepth() 2 >>> model_path = temp_path + "/dtc_model" >>> model.save(model_path) >>> model2 = DecisionTreeClassificationModel.load(model_path) >>> model.featureImportances == model2.featureImportances True .. 
versionadded:: 1.4.0 """ @keyword_only def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", probabilityCol="probability", rawPredictionCol="rawPrediction", maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, impurity="gini", seed=None): """ __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", \ probabilityCol="probability", rawPredictionCol="rawPrediction", \ maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \ maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, impurity="gini", \ seed=None) """ super(DecisionTreeClassifier, self).__init__() self._java_obj = self._new_java_obj( "org.apache.spark.ml.classification.DecisionTreeClassifier", self.uid) self._setDefault(maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, impurity="gini") kwargs = self._input_kwargs self.setParams(**kwargs) @keyword_only @since("1.4.0") def setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", probabilityCol="probability", rawPredictionCol="rawPrediction", maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, impurity="gini", seed=None): """ setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", \ probabilityCol="probability", rawPredictionCol="rawPrediction", \ maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \ maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, impurity="gini", \ seed=None) Sets params for the DecisionTreeClassifier. """ kwargs = self._input_kwargs return self._set(**kwargs) def _create_model(self, java_model): return DecisionTreeClassificationModel(java_model) @inherit_doc class DecisionTreeClassificationModel(DecisionTreeModel, JavaClassificationModel, JavaMLWritable, JavaMLReadable): """ Model fitted by DecisionTreeClassifier. .. versionadded:: 1.4.0 """ @property @since("2.0.0") def featureImportances(self): """ Estimate of the importance of each feature. This generalizes the idea of "Gini" importance to other losses, following the explanation of Gini importance from "Random Forests" documentation by Leo Breiman and Adele Cutler, and following the implementation from scikit-learn. This feature importance is calculated as follows: - importance(feature j) = sum (over nodes which split on feature j) of the gain, where gain is scaled by the number of instances passing through node - Normalize importances for tree to sum to 1. .. note:: Feature importance for single decision trees can have high variance due to correlated predictor variables. Consider using a :py:class:`RandomForestClassifier` to determine feature importance instead. """ return self._call_java("featureImportances") @inherit_doc class RandomForestClassifier(JavaEstimator, HasFeaturesCol, HasLabelCol, HasPredictionCol, HasSeed, HasRawPredictionCol, HasProbabilityCol, RandomForestParams, TreeClassifierParams, HasCheckpointInterval, JavaMLWritable, JavaMLReadable): """ `Random Forest <http://en.wikipedia.org/wiki/Random_forest>`_ learning algorithm for classification. It supports both binary and multiclass labels, as well as both continuous and categorical features. >>> import numpy >>> from numpy import allclose >>> from pyspark.ml.linalg import Vectors >>> from pyspark.ml.feature import StringIndexer >>> df = spark.createDataFrame([ ... 
(1.0, Vectors.dense(1.0)), ... (0.0, Vectors.sparse(1, [], []))], ["label", "features"]) >>> stringIndexer = StringIndexer(inputCol="label", outputCol="indexed") >>> si_model = stringIndexer.fit(df) >>> td = si_model.transform(df) >>> rf = RandomForestClassifier(numTrees=3, maxDepth=2, labelCol="indexed", seed=42) >>> model = rf.fit(td) >>> model.featureImportances SparseVector(1, {0: 1.0}) >>> allclose(model.treeWeights, [1.0, 1.0, 1.0]) True >>> test0 = spark.createDataFrame([(Vectors.dense(-1.0),)], ["features"]) >>> result = model.transform(test0).head() >>> result.prediction 0.0 >>> numpy.argmax(result.probability) 0 >>> numpy.argmax(result.rawPrediction) 0 >>> test1 = spark.createDataFrame([(Vectors.sparse(1, [0], [1.0]),)], ["features"]) >>> model.transform(test1).head().prediction 1.0 >>> model.trees [DecisionTreeClassificationModel (uid=...) of depth..., DecisionTreeClassificationModel...] >>> rfc_path = temp_path + "/rfc" >>> rf.save(rfc_path) >>> rf2 = RandomForestClassifier.load(rfc_path) >>> rf2.getNumTrees() 3 >>> model_path = temp_path + "/rfc_model" >>> model.save(model_path) >>> model2 = RandomForestClassificationModel.load(model_path) >>> model.featureImportances == model2.featureImportances True .. versionadded:: 1.4.0 """ @keyword_only def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", probabilityCol="probability", rawPredictionCol="rawPrediction", maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, impurity="gini", numTrees=20, featureSubsetStrategy="auto", seed=None, subsamplingRate=1.0): """ __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", \ probabilityCol="probability", rawPredictionCol="rawPrediction", \ maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \ maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, impurity="gini", \ numTrees=20, featureSubsetStrategy="auto", seed=None, subsamplingRate=1.0) """ super(RandomForestClassifier, self).__init__() self._java_obj = self._new_java_obj( "org.apache.spark.ml.classification.RandomForestClassifier", self.uid) self._setDefault(maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, impurity="gini", numTrees=20, featureSubsetStrategy="auto", subsamplingRate=1.0) kwargs = self._input_kwargs self.setParams(**kwargs) @keyword_only @since("1.4.0") def setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", probabilityCol="probability", rawPredictionCol="rawPrediction", maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, seed=None, impurity="gini", numTrees=20, featureSubsetStrategy="auto", subsamplingRate=1.0): """ setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", \ probabilityCol="probability", rawPredictionCol="rawPrediction", \ maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \ maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, seed=None, \ impurity="gini", numTrees=20, featureSubsetStrategy="auto", subsamplingRate=1.0) Sets params for linear classification. 
""" kwargs = self._input_kwargs return self._set(**kwargs) def _create_model(self, java_model): return RandomForestClassificationModel(java_model) class RandomForestClassificationModel(TreeEnsembleModel, JavaClassificationModel, JavaMLWritable, JavaMLReadable): """ Model fitted by RandomForestClassifier. .. versionadded:: 1.4.0 """ @property @since("2.0.0") def featureImportances(self): """ Estimate of the importance of each feature. Each feature's importance is the average of its importance across all trees in the ensemble The importance vector is normalized to sum to 1. This method is suggested by Hastie et al. (Hastie, Tibshirani, Friedman. "The Elements of Statistical Learning, 2nd Edition." 2001.) and follows the implementation from scikit-learn. .. seealso:: :py:attr:`DecisionTreeClassificationModel.featureImportances` """ return self._call_java("featureImportances") @property @since("2.0.0") def trees(self): """Trees in this ensemble. Warning: These have null parent Estimators.""" return [DecisionTreeClassificationModel(m) for m in list(self._call_java("trees"))] @inherit_doc class GBTClassifier(JavaEstimator, HasFeaturesCol, HasLabelCol, HasPredictionCol, HasMaxIter, GBTParams, HasCheckpointInterval, HasStepSize, HasSeed, JavaMLWritable, JavaMLReadable): """ `Gradient-Boosted Trees (GBTs) <http://en.wikipedia.org/wiki/Gradient_boosting>`_ learning algorithm for classification. It supports binary labels, as well as both continuous and categorical features. The implementation is based upon: J.H. Friedman. "Stochastic Gradient Boosting." 1999. Notes on Gradient Boosting vs. TreeBoost: - This implementation is for Stochastic Gradient Boosting, not for TreeBoost. - Both algorithms learn tree ensembles by minimizing loss functions. - TreeBoost (Friedman, 1999) additionally modifies the outputs at tree leaf nodes based on the loss function, whereas the original gradient boosting method does not. - We expect to implement TreeBoost in the future: `SPARK-4240 <https://issues.apache.org/jira/browse/SPARK-4240>`_ .. note:: Multiclass labels are not currently supported. >>> from numpy import allclose >>> from pyspark.ml.linalg import Vectors >>> from pyspark.ml.feature import StringIndexer >>> df = spark.createDataFrame([ ... (1.0, Vectors.dense(1.0)), ... (0.0, Vectors.sparse(1, [], []))], ["label", "features"]) >>> stringIndexer = StringIndexer(inputCol="label", outputCol="indexed") >>> si_model = stringIndexer.fit(df) >>> td = si_model.transform(df) >>> gbt = GBTClassifier(maxIter=5, maxDepth=2, labelCol="indexed", seed=42) >>> model = gbt.fit(td) >>> model.featureImportances SparseVector(1, {0: 1.0}) >>> allclose(model.treeWeights, [1.0, 0.1, 0.1, 0.1, 0.1]) True >>> test0 = spark.createDataFrame([(Vectors.dense(-1.0),)], ["features"]) >>> model.transform(test0).head().prediction 0.0 >>> test1 = spark.createDataFrame([(Vectors.sparse(1, [0], [1.0]),)], ["features"]) >>> model.transform(test1).head().prediction 1.0 >>> model.totalNumNodes 15 >>> print(model.toDebugString) GBTClassificationModel (uid=...)...with 5 trees... >>> gbtc_path = temp_path + "gbtc" >>> gbt.save(gbtc_path) >>> gbt2 = GBTClassifier.load(gbtc_path) >>> gbt2.getMaxDepth() 2 >>> model_path = temp_path + "gbtc_model" >>> model.save(model_path) >>> model2 = GBTClassificationModel.load(model_path) >>> model.featureImportances == model2.featureImportances True >>> model.treeWeights == model2.treeWeights True >>> model.trees [DecisionTreeRegressionModel (uid=...) of depth..., DecisionTreeRegressionModel...] .. 
versionadded:: 1.4.0 """ lossType = Param(Params._dummy(), "lossType", "Loss function which GBT tries to minimize (case-insensitive). " + "Supported options: " + ", ".join(GBTParams.supportedLossTypes), typeConverter=TypeConverters.toString) @keyword_only def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, lossType="logistic", maxIter=20, stepSize=0.1, seed=None, subsamplingRate=1.0): """ __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", \ maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \ maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, \ lossType="logistic", maxIter=20, stepSize=0.1, seed=None, subsamplingRate=1.0) """ super(GBTClassifier, self).__init__() self._java_obj = self._new_java_obj( "org.apache.spark.ml.classification.GBTClassifier", self.uid) self._setDefault(maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, lossType="logistic", maxIter=20, stepSize=0.1, subsamplingRate=1.0) kwargs = self._input_kwargs self.setParams(**kwargs) @keyword_only @since("1.4.0") def setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, lossType="logistic", maxIter=20, stepSize=0.1, seed=None, subsamplingRate=1.0): """ setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", \ maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \ maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, \ lossType="logistic", maxIter=20, stepSize=0.1, seed=None, subsamplingRate=1.0) Sets params for Gradient Boosted Tree Classification. """ kwargs = self._input_kwargs return self._set(**kwargs) def _create_model(self, java_model): return GBTClassificationModel(java_model) @since("1.4.0") def setLossType(self, value): """ Sets the value of :py:attr:`lossType`. """ return self._set(lossType=value) @since("1.4.0") def getLossType(self): """ Gets the value of lossType or its default value. """ return self.getOrDefault(self.lossType) class GBTClassificationModel(TreeEnsembleModel, JavaPredictionModel, JavaMLWritable, JavaMLReadable): """ Model fitted by GBTClassifier. .. versionadded:: 1.4.0 """ @property @since("2.0.0") def featureImportances(self): """ Estimate of the importance of each feature. Each feature's importance is the average of its importance across all trees in the ensemble The importance vector is normalized to sum to 1. This method is suggested by Hastie et al. (Hastie, Tibshirani, Friedman. "The Elements of Statistical Learning, 2nd Edition." 2001.) and follows the implementation from scikit-learn. .. seealso:: :py:attr:`DecisionTreeClassificationModel.featureImportances` """ return self._call_java("featureImportances") @property @since("2.0.0") def trees(self): """Trees in this ensemble. Warning: These have null parent Estimators.""" return [DecisionTreeRegressionModel(m) for m in list(self._call_java("trees"))] @inherit_doc class NaiveBayes(JavaEstimator, HasFeaturesCol, HasLabelCol, HasPredictionCol, HasProbabilityCol, HasRawPredictionCol, HasThresholds, HasWeightCol, JavaMLWritable, JavaMLReadable): """ Naive Bayes Classifiers. It supports both Multinomial and Bernoulli NB. 
`Multinomial NB <http://nlp.stanford.edu/IR-book/html/htmledition/naive-bayes-text-classification-1.html>`_ can handle finitely supported discrete data. For example, by converting documents into TF-IDF vectors, it can be used for document classification. By making every vector a binary (0/1) data, it can also be used as `Bernoulli NB <http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html>`_. The input feature values must be nonnegative. >>> from pyspark.sql import Row >>> from pyspark.ml.linalg import Vectors >>> df = spark.createDataFrame([ ... Row(label=0.0, weight=0.1, features=Vectors.dense([0.0, 0.0])), ... Row(label=0.0, weight=0.5, features=Vectors.dense([0.0, 1.0])), ... Row(label=1.0, weight=1.0, features=Vectors.dense([1.0, 0.0]))]) >>> nb = NaiveBayes(smoothing=1.0, modelType="multinomial", weightCol="weight") >>> model = nb.fit(df) >>> model.pi DenseVector([-0.81..., -0.58...]) >>> model.theta DenseMatrix(2, 2, [-0.91..., -0.51..., -0.40..., -1.09...], 1) >>> test0 = sc.parallelize([Row(features=Vectors.dense([1.0, 0.0]))]).toDF() >>> result = model.transform(test0).head() >>> result.prediction 1.0 >>> result.probability DenseVector([0.32..., 0.67...]) >>> result.rawPrediction DenseVector([-1.72..., -0.99...]) >>> test1 = sc.parallelize([Row(features=Vectors.sparse(2, [0], [1.0]))]).toDF() >>> model.transform(test1).head().prediction 1.0 >>> nb_path = temp_path + "/nb" >>> nb.save(nb_path) >>> nb2 = NaiveBayes.load(nb_path) >>> nb2.getSmoothing() 1.0 >>> model_path = temp_path + "/nb_model" >>> model.save(model_path) >>> model2 = NaiveBayesModel.load(model_path) >>> model.pi == model2.pi True >>> model.theta == model2.theta True >>> nb = nb.setThresholds([0.01, 10.00]) >>> model3 = nb.fit(df) >>> result = model3.transform(test0).head() >>> result.prediction 0.0 .. versionadded:: 1.5.0 """ smoothing = Param(Params._dummy(), "smoothing", "The smoothing parameter, should be >= 0, " + "default is 1.0", typeConverter=TypeConverters.toFloat) modelType = Param(Params._dummy(), "modelType", "The model type which is a string " + "(case-sensitive). Supported options: multinomial (default) and bernoulli.", typeConverter=TypeConverters.toString) @keyword_only def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", probabilityCol="probability", rawPredictionCol="rawPrediction", smoothing=1.0, modelType="multinomial", thresholds=None, weightCol=None): """ __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", \ probabilityCol="probability", rawPredictionCol="rawPrediction", smoothing=1.0, \ modelType="multinomial", thresholds=None, weightCol=None) """ super(NaiveBayes, self).__init__() self._java_obj = self._new_java_obj( "org.apache.spark.ml.classification.NaiveBayes", self.uid) self._setDefault(smoothing=1.0, modelType="multinomial") kwargs = self._input_kwargs self.setParams(**kwargs) @keyword_only @since("1.5.0") def setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", probabilityCol="probability", rawPredictionCol="rawPrediction", smoothing=1.0, modelType="multinomial", thresholds=None, weightCol=None): """ setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", \ probabilityCol="probability", rawPredictionCol="rawPrediction", smoothing=1.0, \ modelType="multinomial", thresholds=None, weightCol=None) Sets params for Naive Bayes. 
""" kwargs = self._input_kwargs return self._set(**kwargs) def _create_model(self, java_model): return NaiveBayesModel(java_model) @since("1.5.0") def setSmoothing(self, value): """ Sets the value of :py:attr:`smoothing`. """ return self._set(smoothing=value) @since("1.5.0") def getSmoothing(self): """ Gets the value of smoothing or its default value. """ return self.getOrDefault(self.smoothing) @since("1.5.0") def setModelType(self, value): """ Sets the value of :py:attr:`modelType`. """ return self._set(modelType=value) @since("1.5.0") def getModelType(self): """ Gets the value of modelType or its default value. """ return self.getOrDefault(self.modelType) class NaiveBayesModel(JavaModel, JavaClassificationModel, JavaMLWritable, JavaMLReadable): """ Model fitted by NaiveBayes. .. versionadded:: 1.5.0 """ @property @since("2.0.0") def pi(self): """ log of class priors. """ return self._call_java("pi") @property @since("2.0.0") def theta(self): """ log of class conditional probabilities. """ return self._call_java("theta") @inherit_doc class MultilayerPerceptronClassifier(JavaEstimator, HasFeaturesCol, HasLabelCol, HasPredictionCol, HasMaxIter, HasTol, HasSeed, HasStepSize, JavaMLWritable, JavaMLReadable): """ Classifier trainer based on the Multilayer Perceptron. Each layer has sigmoid activation function, output layer has softmax. Number of inputs has to be equal to the size of feature vectors. Number of outputs has to be equal to the total number of labels. >>> from pyspark.ml.linalg import Vectors >>> df = spark.createDataFrame([ ... (0.0, Vectors.dense([0.0, 0.0])), ... (1.0, Vectors.dense([0.0, 1.0])), ... (1.0, Vectors.dense([1.0, 0.0])), ... (0.0, Vectors.dense([1.0, 1.0]))], ["label", "features"]) >>> mlp = MultilayerPerceptronClassifier(maxIter=100, layers=[2, 2, 2], blockSize=1, seed=123) >>> model = mlp.fit(df) >>> model.layers [2, 2, 2] >>> model.weights.size 12 >>> testDF = spark.createDataFrame([ ... (Vectors.dense([1.0, 0.0]),), ... (Vectors.dense([0.0, 0.0]),)], ["features"]) >>> model.transform(testDF).show() +---------+----------+ | features|prediction| +---------+----------+ |[1.0,0.0]| 1.0| |[0.0,0.0]| 0.0| +---------+----------+ ... >>> mlp_path = temp_path + "/mlp" >>> mlp.save(mlp_path) >>> mlp2 = MultilayerPerceptronClassifier.load(mlp_path) >>> mlp2.getBlockSize() 1 >>> model_path = temp_path + "/mlp_model" >>> model.save(model_path) >>> model2 = MultilayerPerceptronClassificationModel.load(model_path) >>> model.layers == model2.layers True >>> model.weights == model2.weights True >>> mlp2 = mlp2.setInitialWeights(list(range(0, 12))) >>> model3 = mlp2.fit(df) >>> model3.weights != model2.weights True >>> model3.layers == model.layers True .. versionadded:: 1.6.0 """ layers = Param(Params._dummy(), "layers", "Sizes of layers from input layer to output layer " + "E.g., Array(780, 100, 10) means 780 inputs, one hidden layer with 100 " + "neurons and output layer of 10 neurons.", typeConverter=TypeConverters.toListInt) blockSize = Param(Params._dummy(), "blockSize", "Block size for stacking input data in " + "matrices. Data is stacked within partitions. If block size is more than " + "remaining data in a partition then it is adjusted to the size of this " + "data. Recommended size is between 10 and 1000, default is 128.", typeConverter=TypeConverters.toInt) solver = Param(Params._dummy(), "solver", "The solver algorithm for optimization. 
Supported " + "options: l-bfgs, gd.", typeConverter=TypeConverters.toString) initialWeights = Param(Params._dummy(), "initialWeights", "The initial weights of the model.", typeConverter=TypeConverters.toVector) @keyword_only def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", maxIter=100, tol=1e-6, seed=None, layers=None, blockSize=128, stepSize=0.03, solver="l-bfgs", initialWeights=None): """ __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", \ maxIter=100, tol=1e-6, seed=None, layers=None, blockSize=128, stepSize=0.03, \ solver="l-bfgs", initialWeights=None) """ super(MultilayerPerceptronClassifier, self).__init__() self._java_obj = self._new_java_obj( "org.apache.spark.ml.classification.MultilayerPerceptronClassifier", self.uid) self._setDefault(maxIter=100, tol=1E-4, blockSize=128, stepSize=0.03, solver="l-bfgs") kwargs = self._input_kwargs self.setParams(**kwargs) @keyword_only @since("1.6.0") def setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", maxIter=100, tol=1e-6, seed=None, layers=None, blockSize=128, stepSize=0.03, solver="l-bfgs", initialWeights=None): """ setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", \ maxIter=100, tol=1e-6, seed=None, layers=None, blockSize=128, stepSize=0.03, \ solver="l-bfgs", initialWeights=None) Sets params for MultilayerPerceptronClassifier. """ kwargs = self._input_kwargs return self._set(**kwargs) def _create_model(self, java_model): return MultilayerPerceptronClassificationModel(java_model) @since("1.6.0") def setLayers(self, value): """ Sets the value of :py:attr:`layers`. """ return self._set(layers=value) @since("1.6.0") def getLayers(self): """ Gets the value of layers or its default value. """ return self.getOrDefault(self.layers) @since("1.6.0") def setBlockSize(self, value): """ Sets the value of :py:attr:`blockSize`. """ return self._set(blockSize=value) @since("1.6.0") def getBlockSize(self): """ Gets the value of blockSize or its default value. """ return self.getOrDefault(self.blockSize) @since("2.0.0") def setStepSize(self, value): """ Sets the value of :py:attr:`stepSize`. """ return self._set(stepSize=value) @since("2.0.0") def getStepSize(self): """ Gets the value of stepSize or its default value. """ return self.getOrDefault(self.stepSize) @since("2.0.0") def setSolver(self, value): """ Sets the value of :py:attr:`solver`. """ return self._set(solver=value) @since("2.0.0") def getSolver(self): """ Gets the value of solver or its default value. """ return self.getOrDefault(self.solver) @since("2.0.0") def setInitialWeights(self, value): """ Sets the value of :py:attr:`initialWeights`. """ return self._set(initialWeights=value) @since("2.0.0") def getInitialWeights(self): """ Gets the value of initialWeights or its default value. """ return self.getOrDefault(self.initialWeights) class MultilayerPerceptronClassificationModel(JavaModel, JavaPredictionModel, JavaMLWritable, JavaMLReadable): """ Model fitted by MultilayerPerceptronClassifier. .. versionadded:: 1.6.0 """ @property @since("1.6.0") def layers(self): """ array of layer sizes including input and output layers. """ return self._call_java("javaLayers") @property @since("2.0.0") def weights(self): """ the weights of layers. """ return self._call_java("weights") class OneVsRestParams(HasFeaturesCol, HasLabelCol, HasWeightCol, HasPredictionCol): """ Parameters for OneVsRest and OneVsRestModel. 
""" classifier = Param(Params._dummy(), "classifier", "base binary classifier") @since("2.0.0") def setClassifier(self, value): """ Sets the value of :py:attr:`classifier`. .. note:: Only LogisticRegression and NaiveBayes are supported now. """ return self._set(classifier=value) @since("2.0.0") def getClassifier(self): """ Gets the value of classifier or its default value. """ return self.getOrDefault(self.classifier) @inherit_doc class OneVsRest(Estimator, OneVsRestParams, MLReadable, MLWritable): """ .. note:: Experimental Reduction of Multiclass Classification to Binary Classification. Performs reduction using one against all strategy. For a multiclass classification with k classes, train k models (one per class). Each example is scored against all k models and the model with highest score is picked to label the example. >>> from pyspark.sql import Row >>> from pyspark.ml.linalg import Vectors >>> data_path = "data/mllib/sample_multiclass_classification_data.txt" >>> df = spark.read.format("libsvm").load(data_path) >>> lr = LogisticRegression(regParam=0.01) >>> ovr = OneVsRest(classifier=lr) >>> model = ovr.fit(df) >>> model.models[0].coefficients DenseVector([0.5..., -1.0..., 3.4..., 4.2...]) >>> model.models[1].coefficients DenseVector([-2.1..., 3.1..., -2.6..., -2.3...]) >>> model.models[2].coefficients DenseVector([0.3..., -3.4..., 1.0..., -1.1...]) >>> [x.intercept for x in model.models] [-2.7..., -2.5..., -1.3...] >>> test0 = sc.parallelize([Row(features=Vectors.dense(-1.0, 0.0, 1.0, 1.0))]).toDF() >>> model.transform(test0).head().prediction 0.0 >>> test1 = sc.parallelize([Row(features=Vectors.sparse(4, [0], [1.0]))]).toDF() >>> model.transform(test1).head().prediction 2.0 >>> test2 = sc.parallelize([Row(features=Vectors.dense(0.5, 0.4, 0.3, 0.2))]).toDF() >>> model.transform(test2).head().prediction 0.0 >>> model_path = temp_path + "/ovr_model" >>> model.save(model_path) >>> model2 = OneVsRestModel.load(model_path) >>> model2.transform(test0).head().prediction 0.0 .. versionadded:: 2.0.0 """ @keyword_only def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", classifier=None, weightCol=None): """ __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", \ classifier=None, weightCol=None) """ super(OneVsRest, self).__init__() kwargs = self._input_kwargs self._set(**kwargs) @keyword_only @since("2.0.0") def setParams(self, featuresCol=None, labelCol=None, predictionCol=None, classifier=None, weightCol=None): """ setParams(self, featuresCol=None, labelCol=None, predictionCol=None, \ classifier=None, weightCol=None): Sets params for OneVsRest. """ kwargs = self._input_kwargs return self._set(**kwargs) def _fit(self, dataset): labelCol = self.getLabelCol() featuresCol = self.getFeaturesCol() predictionCol = self.getPredictionCol() classifier = self.getClassifier() assert isinstance(classifier, HasRawPredictionCol),\ "Classifier %s doesn't extend from HasRawPredictionCol." % type(classifier) numClasses = int(dataset.agg({labelCol: "max"}).head()["max("+labelCol+")"]) + 1 weightCol = None if (self.isDefined(self.weightCol) and self.getWeightCol()): if isinstance(classifier, HasWeightCol): weightCol = self.getWeightCol() else: warnings.warn("weightCol is ignored, " "as it is not supported by {} now.".format(classifier)) if weightCol: multiclassLabeled = dataset.select(labelCol, featuresCol, weightCol) else: multiclassLabeled = dataset.select(labelCol, featuresCol) # persist if underlying dataset is not persistent. 
handlePersistence = \ dataset.rdd.getStorageLevel() == StorageLevel(False, False, False, False) if handlePersistence: multiclassLabeled.persist(StorageLevel.MEMORY_AND_DISK) def trainSingleClass(index): binaryLabelCol = "mc2b$" + str(index) trainingDataset = multiclassLabeled.withColumn( binaryLabelCol, when(multiclassLabeled[labelCol] == float(index), 1.0).otherwise(0.0)) paramMap = dict([(classifier.labelCol, binaryLabelCol), (classifier.featuresCol, featuresCol), (classifier.predictionCol, predictionCol)]) if weightCol: paramMap[classifier.weightCol] = weightCol return classifier.fit(trainingDataset, paramMap) # TODO: Parallel training for all classes. models = [trainSingleClass(i) for i in range(numClasses)] if handlePersistence: multiclassLabeled.unpersist() return self._copyValues(OneVsRestModel(models=models)) @since("2.0.0") def copy(self, extra=None): """ Creates a copy of this instance with a randomly generated uid and some extra params. This creates a deep copy of the embedded paramMap, and copies the embedded and extra parameters over. :param extra: Extra parameters to copy to the new instance :return: Copy of this instance """ if extra is None: extra = dict() newOvr = Params.copy(self, extra) if self.isSet(self.classifier): newOvr.setClassifier(self.getClassifier().copy(extra)) return newOvr @since("2.0.0") def write(self): """Returns an MLWriter instance for this ML instance.""" return JavaMLWriter(self) @since("2.0.0") def save(self, path): """Save this ML instance to the given path, a shortcut of `write().save(path)`.""" self.write().save(path) @classmethod @since("2.0.0") def read(cls): """Returns an MLReader instance for this class.""" return JavaMLReader(cls) @classmethod def _from_java(cls, java_stage): """ Given a Java OneVsRest, create and return a Python wrapper of it. Used for ML persistence. """ featuresCol = java_stage.getFeaturesCol() labelCol = java_stage.getLabelCol() predictionCol = java_stage.getPredictionCol() classifier = JavaParams._from_java(java_stage.getClassifier()) py_stage = cls(featuresCol=featuresCol, labelCol=labelCol, predictionCol=predictionCol, classifier=classifier) py_stage._resetUid(java_stage.uid()) return py_stage def _to_java(self): """ Transfer this instance to a Java OneVsRest. Used for ML persistence. :return: Java object equivalent to this instance. """ _java_obj = JavaParams._new_java_obj("org.apache.spark.ml.classification.OneVsRest", self.uid) _java_obj.setClassifier(self.getClassifier()._to_java()) _java_obj.setFeaturesCol(self.getFeaturesCol()) _java_obj.setLabelCol(self.getLabelCol()) _java_obj.setPredictionCol(self.getPredictionCol()) return _java_obj class OneVsRestModel(Model, OneVsRestParams, MLReadable, MLWritable): """ .. note:: Experimental Model fitted by OneVsRest. This stores the models resulting from training k binary classifiers: one for each class. Each example is scored against all k models, and the model with the highest score is picked to label the example. .. versionadded:: 2.0.0 """ def __init__(self, models): super(OneVsRestModel, self).__init__() self.models = models def _transform(self, dataset): # determine the input columns: these need to be passed through origCols = dataset.columns # add an accumulator column to store predictions of all the models accColName = "mbc$acc" + str(uuid.uuid4()) initUDF = udf(lambda _: [], ArrayType(DoubleType())) newDataset = dataset.withColumn(accColName, initUDF(dataset[origCols[0]])) # persist if underlying dataset is not persistent. 
handlePersistence = \ dataset.rdd.getStorageLevel() == StorageLevel(False, False, False, False) if handlePersistence: newDataset.persist(StorageLevel.MEMORY_AND_DISK) # update the accumulator column with the result of prediction of models aggregatedDataset = newDataset for index, model in enumerate(self.models): rawPredictionCol = model._call_java("getRawPredictionCol") columns = origCols + [rawPredictionCol, accColName] # add temporary column to store intermediate scores and update tmpColName = "mbc$tmp" + str(uuid.uuid4()) updateUDF = udf( lambda predictions, prediction: predictions + [prediction.tolist()[1]], ArrayType(DoubleType())) transformedDataset = model.transform(aggregatedDataset).select(*columns) updatedDataset = transformedDataset.withColumn( tmpColName, updateUDF(transformedDataset[accColName], transformedDataset[rawPredictionCol])) newColumns = origCols + [tmpColName] # switch out the intermediate column with the accumulator column aggregatedDataset = updatedDataset\ .select(*newColumns).withColumnRenamed(tmpColName, accColName) if handlePersistence: newDataset.unpersist() # output the index of the classifier with highest confidence as prediction labelUDF = udf( lambda predictions: float(max(enumerate(predictions), key=operator.itemgetter(1))[0]), DoubleType()) # output label and label metadata as prediction return aggregatedDataset.withColumn( self.getPredictionCol(), labelUDF(aggregatedDataset[accColName])).drop(accColName) @since("2.0.0") def copy(self, extra=None): """ Creates a copy of this instance with a randomly generated uid and some extra params. This creates a deep copy of the embedded paramMap, and copies the embedded and extra parameters over. :param extra: Extra parameters to copy to the new instance :return: Copy of this instance """ if extra is None: extra = dict() newModel = Params.copy(self, extra) newModel.models = [model.copy(extra) for model in self.models] return newModel @since("2.0.0") def write(self): """Returns an MLWriter instance for this ML instance.""" return JavaMLWriter(self) @since("2.0.0") def save(self, path): """Save this ML instance to the given path, a shortcut of `write().save(path)`.""" self.write().save(path) @classmethod @since("2.0.0") def read(cls): """Returns an MLReader instance for this class.""" return JavaMLReader(cls) @classmethod def _from_java(cls, java_stage): """ Given a Java OneVsRestModel, create and return a Python wrapper of it. Used for ML persistence. """ featuresCol = java_stage.getFeaturesCol() labelCol = java_stage.getLabelCol() predictionCol = java_stage.getPredictionCol() classifier = JavaParams._from_java(java_stage.getClassifier()) models = [JavaParams._from_java(model) for model in java_stage.models()] py_stage = cls(models=models).setPredictionCol(predictionCol).setLabelCol(labelCol)\ .setFeaturesCol(featuresCol).setClassifier(classifier) py_stage._resetUid(java_stage.uid()) return py_stage def _to_java(self): """ Transfer this instance to a Java OneVsRestModel. Used for ML persistence. :return: Java object equivalent to this instance. 
""" sc = SparkContext._active_spark_context java_models = [model._to_java() for model in self.models] java_models_array = JavaWrapper._new_java_array( java_models, sc._gateway.jvm.org.apache.spark.ml.classification.ClassificationModel) metadata = JavaParams._new_java_obj("org.apache.spark.sql.types.Metadata") _java_obj = JavaParams._new_java_obj("org.apache.spark.ml.classification.OneVsRestModel", self.uid, metadata.empty(), java_models_array) _java_obj.set("classifier", self.getClassifier()._to_java()) _java_obj.set("featuresCol", self.getFeaturesCol()) _java_obj.set("labelCol", self.getLabelCol()) _java_obj.set("predictionCol", self.getPredictionCol()) return _java_obj if __name__ == "__main__": import doctest import pyspark.ml.classification from pyspark.sql import SparkSession globs = pyspark.ml.classification.__dict__.copy() # The small batch size here ensures that we see multiple batches, # even in these small test examples: spark = SparkSession.builder\ .master("local[2]")\ .appName("ml.classification tests")\ .getOrCreate() sc = spark.sparkContext globs['sc'] = sc globs['spark'] = spark import tempfile temp_path = tempfile.mkdtemp() globs['temp_path'] = temp_path try: (failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS) spark.stop() finally: from shutil import rmtree try: rmtree(temp_path) except OSError: pass if failure_count: exit(-1)
jlopezmalla/spark
python/pyspark/ml/classification.py
Python
apache-2.0
70,656
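A minimal end-to-end sketch of how the estimators defined in the classification.py file above are typically used: the threshold/thresholds equivalence described in getThreshold()/getThresholds(), plus LogisticRegression wrapped in OneVsRest. This is not part of the file itself; the toy data, column names, and app name are made up for illustration, and it assumes a local Spark installation is available.

# Hedged usage sketch for pyspark.ml.classification (not part of the source file above).
from pyspark.sql import SparkSession
from pyspark.ml.linalg import Vectors
from pyspark.ml.classification import LogisticRegression, OneVsRest

spark = SparkSession.builder.master("local[2]").appName("classification-sketch").getOrCreate()

# threshold <-> thresholds equivalence from getThreshold()/getThresholds():
# for thresholds [t0, t1], the binary threshold is 1 / (1 + t0/t1).
lr = LogisticRegression(maxIter=10, regParam=0.01)
lr.setThresholds([0.3, 0.7])
assert abs(lr.getThreshold() - 0.7) < 1e-9   # 1 / (1 + 0.3/0.7) == 0.7

# One-vs-rest reduction over a toy 3-class DataFrame (data and columns are made up):
df = spark.createDataFrame([
    (0.0, Vectors.dense(0.0, 1.0)),
    (1.0, Vectors.dense(1.0, 0.0)),
    (2.0, Vectors.dense(1.0, 1.0))], ["label", "features"])
ovr = OneVsRest(classifier=LogisticRegression(maxIter=10, regParam=0.01))
model = ovr.fit(df)                                   # trains one binary model per class
model.transform(df).select("label", "prediction").show()
spark.stop()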
""" Test lldb breakpoint command add/list/delete. """ from __future__ import print_function import lldb from lldbsuite.test.decorators import * from lldbsuite.test.lldbtest import * from lldbsuite.test import lldbutil import side_effect class BreakpointCommandTestCase(TestBase): NO_DEBUG_INFO_TESTCASE = True mydir = TestBase.compute_mydir(__file__) @expectedFailureAll(oslist=["windows"], bugnumber="llvm.org/pr24528") def test_breakpoint_command_sequence(self): """Test a sequence of breakpoint command add, list, and delete.""" self.build() self.breakpoint_command_sequence() def test_script_parameters(self): """Test a sequence of breakpoint command add, list, and delete.""" self.build() self.breakpoint_command_script_parameters() def test_commands_on_creation(self): self.build() self.breakpoint_commands_on_creation() def setUp(self): # Call super's setUp(). TestBase.setUp(self) # Find the line number to break inside main(). self.line = line_number('main.c', '// Set break point at this line.') # disable "There is a running process, kill it and restart?" prompt self.runCmd("settings set auto-confirm true") self.addTearDownHook( lambda: self.runCmd("settings clear auto-confirm")) def test_delete_all_breakpoints(self): """Test that deleting all breakpoints works.""" self.build() exe = self.getBuildArtifact("a.out") self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET) lldbutil.run_break_set_by_symbol(self, "main") lldbutil.run_break_set_by_file_and_line( self, "main.c", self.line, num_expected_locations=1, loc_exact=True) self.runCmd("run", RUN_SUCCEEDED) self.runCmd("breakpoint delete") self.runCmd("process continue") self.expect("process status", PROCESS_STOPPED, patterns=['Process .* exited with status = 0']) def breakpoint_command_sequence(self): """Test a sequence of breakpoint command add, list, and delete.""" exe = self.getBuildArtifact("a.out") self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET) # Add three breakpoints on the same line. The first time we don't specify the file, # since the default file is the one containing main: lldbutil.run_break_set_by_file_and_line( self, None, self.line, num_expected_locations=1, loc_exact=True) lldbutil.run_break_set_by_file_and_line( self, "main.c", self.line, num_expected_locations=1, loc_exact=True) lldbutil.run_break_set_by_file_and_line( self, "main.c", self.line, num_expected_locations=1, loc_exact=True) # Breakpoint 4 - set at the same location as breakpoint 1 to test # setting breakpoint commands on two breakpoints at a time lldbutil.run_break_set_by_file_and_line( self, None, self.line, num_expected_locations=1, loc_exact=True) # Make sure relative path source breakpoints work as expected. We test # with partial paths with and without "./" prefixes. 
lldbutil.run_break_set_by_file_and_line( self, "./main.c", self.line, num_expected_locations=1, loc_exact=True) lldbutil.run_break_set_by_file_and_line( self, "basic/main.c", self.line, num_expected_locations=1, loc_exact=True) lldbutil.run_break_set_by_file_and_line( self, "./basic/main.c", self.line, num_expected_locations=1, loc_exact=True) lldbutil.run_break_set_by_file_and_line( self, "breakpoint/basic/main.c", self.line, num_expected_locations=1, loc_exact=True) lldbutil.run_break_set_by_file_and_line( self, "./breakpoint/basic/main.c", self.line, num_expected_locations=1, loc_exact=True) # Test relative breakpoints with incorrect paths and make sure we get # no breakpoint locations lldbutil.run_break_set_by_file_and_line( self, "invalid/main.c", self.line, num_expected_locations=0, loc_exact=True) lldbutil.run_break_set_by_file_and_line( self, "./invalid/main.c", self.line, num_expected_locations=0, loc_exact=True) # Now add callbacks for the breakpoints just created. self.runCmd( "breakpoint command add -s command -o 'frame variable --show-types --scope' 1 4") self.runCmd( "breakpoint command add -s python -o 'import side_effect; side_effect.one_liner = \"one liner was here\"' 2") self.runCmd( "breakpoint command add --python-function bktptcmd.function 3") # Check that the breakpoint commands are correctly set. # The breakpoint list now only contains breakpoint 1. self.expect( "breakpoint list", "Breakpoints 1 & 2 created", substrs=[ "2: file = 'main.c', line = %d, exact_match = 0, locations = 1" % self.line], patterns=[ "1: file = '.*main.c', line = %d, exact_match = 0, locations = 1" % self.line]) self.expect( "breakpoint list -f", "Breakpoints 1 & 2 created", substrs=[ "2: file = 'main.c', line = %d, exact_match = 0, locations = 1" % self.line], patterns=[ "1: file = '.*main.c', line = %d, exact_match = 0, locations = 1" % self.line, "1.1: .+at main.c:%d:?[0-9]*, .+unresolved, hit count = 0" % self.line, "2.1: .+at main.c:%d:?[0-9]*, .+unresolved, hit count = 0" % self.line]) self.expect("breakpoint command list 1", "Breakpoint 1 command ok", substrs=["Breakpoint commands:", "frame variable --show-types --scope"]) self.expect("breakpoint command list 2", "Breakpoint 2 command ok", substrs=["Breakpoint commands (Python):", "import side_effect", "side_effect.one_liner"]) self.expect("breakpoint command list 3", "Breakpoint 3 command ok", substrs=["Breakpoint commands (Python):", "bktptcmd.function(frame, bp_loc, internal_dict)"]) self.expect("breakpoint command list 4", "Breakpoint 4 command ok", substrs=["Breakpoint commands:", "frame variable --show-types --scope"]) self.runCmd("breakpoint delete 4") self.runCmd("command script import --allow-reload ./bktptcmd.py") # Next lets try some other breakpoint kinds. First break with a regular expression # and then specify only one file. 
The first time we should get two locations, # the second time only one: lldbutil.run_break_set_by_regexp( self, r"._MyFunction", num_expected_locations=2) lldbutil.run_break_set_by_regexp( self, r"._MyFunction", extra_options="-f a.c", num_expected_locations=1) lldbutil.run_break_set_by_regexp( self, r"._MyFunction", extra_options="-f a.c -f b.c", num_expected_locations=2) # Now try a source regex breakpoint: lldbutil.run_break_set_by_source_regexp( self, r"is about to return [12]0", extra_options="-f a.c -f b.c", num_expected_locations=2) lldbutil.run_break_set_by_source_regexp( self, r"is about to return [12]0", extra_options="-f a.c", num_expected_locations=1) # Reset our canary variables and run the program. side_effect.one_liner = None side_effect.bktptcmd = None self.runCmd("run", RUN_SUCCEEDED) # Check the value of canary variables. self.assertEquals("one liner was here", side_effect.one_liner) self.assertEquals("function was here", side_effect.bktptcmd) # Finish the program. self.runCmd("process continue") # Remove the breakpoint command associated with breakpoint 1. self.runCmd("breakpoint command delete 1") # Remove breakpoint 2. self.runCmd("breakpoint delete 2") self.expect( "breakpoint command list 1", startstr="Breakpoint 1 does not have an associated command.") self.expect( "breakpoint command list 2", error=True, startstr="error: '2' is not a currently valid breakpoint ID.") # The breakpoint list now only contains breakpoint 1. self.expect( "breakpoint list -f", "Breakpoint 1 exists", patterns=[ "1: file = '.*main.c', line = %d, exact_match = 0, locations = 1, resolved = 1" % self.line, "hit count = 1"]) # Not breakpoint 2. self.expect( "breakpoint list -f", "No more breakpoint 2", matching=False, substrs=[ "2: file = 'main.c', line = %d, exact_match = 0, locations = 1, resolved = 1" % self.line]) # Run the program again, with breakpoint 1 remaining. self.runCmd("run", RUN_SUCCEEDED) # We should be stopped again due to breakpoint 1. # The stop reason of the thread should be breakpoint. self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT, substrs=['stopped', 'stop reason = breakpoint']) # The breakpoint should have a hit count of 2. self.expect("breakpoint list -f", BREAKPOINT_HIT_TWICE, substrs=['resolved, hit count = 2']) def breakpoint_command_script_parameters(self): """Test that the frame and breakpoint location are being properly passed to the script breakpoint command function.""" exe = self.getBuildArtifact("a.out") self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET) # Add a breakpoint. lldbutil.run_break_set_by_file_and_line( self, "main.c", self.line, num_expected_locations=1, loc_exact=True) # Now add callbacks for the breakpoints just created. self.runCmd("breakpoint command add -s python -o 'import side_effect; side_effect.frame = str(frame); side_effect.bp_loc = str(bp_loc)' 1") # Reset canary variables and run. side_effect.frame = None side_effect.bp_loc = None self.runCmd("run", RUN_SUCCEEDED) self.expect(side_effect.frame, exe=False, startstr="frame #0:") self.expect(side_effect.bp_loc, exe=False, patterns=["1.* where = .*main .* resolved, hit count = 1"]) def breakpoint_commands_on_creation(self): """Test that setting breakpoint commands when creating the breakpoint works""" exe = self.getBuildArtifact("a.out") target = self.dbg.CreateTarget(exe) self.assertTrue(target.IsValid(), "Created an invalid target.") # Add a breakpoint. 
lldbutil.run_break_set_by_file_and_line( self, "main.c", self.line, num_expected_locations=1, loc_exact=True, extra_options='-C bt -C "thread list" -C continue') bkpt = target.FindBreakpointByID(1) self.assertTrue(bkpt.IsValid(), "Couldn't find breakpoint 1") com_list = lldb.SBStringList() bkpt.GetCommandLineCommands(com_list) self.assertEqual(com_list.GetSize(), 3, "Got the wrong number of commands") self.assertEqual(com_list.GetStringAtIndex(0), "bt", "First bt") self.assertEqual(com_list.GetStringAtIndex(1), "thread list", "Next thread list") self.assertEqual(com_list.GetStringAtIndex(2), "continue", "Last continue")
apple/swift-lldb
packages/Python/lldbsuite/test/commands/breakpoint/basic/TestBreakpointCommand.py
Python
apache-2.0
12,042
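The breakpoint test above drives everything through text commands (runCmd with "breakpoint command add"). Below is a hedged sketch of the same idea expressed through lldb's SB API instead; the binary name and source location are hypothetical, and it only runs under an lldb-provided Python where `import lldb` resolves.

# Hedged SB-API sketch of attaching a Python callback to a breakpoint
# (mirrors in spirit what the test does via "breakpoint command add -s python").
import lldb

lldb.SBDebugger.Initialize()
debugger = lldb.SBDebugger.Create()
debugger.SetAsync(False)

target = debugger.CreateTarget("a.out")                  # hypothetical binary
bp = target.BreakpointCreateByLocation("main.c", 12)     # hypothetical location

# The callback body is run with `frame` and `bp_loc` in scope when the breakpoint hits.
bp.SetScriptCallbackBody(
    "print('hit breakpoint in %s' % frame.GetFunctionName())\n"
    "return False  # returning False asks lldb not to stop here (auto-continue)"
)

process = target.LaunchSimple(None, None, ".")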
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Functional tests for Stack and ParallelStack Ops.""" import numpy as np from tensorflow.python import tf2 from tensorflow.python.eager import context from tensorflow.python.eager import def_function from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import gen_array_ops from tensorflow.python.ops import gradient_checker_v2 from tensorflow.python.platform import test def np_split_squeeze(array, axis): axis_len = array.shape[axis] return [ np.squeeze( arr, axis=(axis,)) for arr in np.split( array, axis_len, axis=axis) ] class StackOpTest(test.TestCase): def randn(self, shape, dtype): data = np.random.randn(*shape) if dtype == np.bool_: return data < 0 # Naive casting yields True with P(1)! else: return data.astype(dtype) def testSimple(self): np.random.seed(7) for shape in (2,), (3,), (2, 3), (3, 2), (8, 2, 10): rank = len(shape) for axis in range(-rank, rank): for dtype in [np.bool_, np.float32, np.int32, np.int64]: data = self.randn(shape, dtype) xs = np_split_squeeze(data, axis) # Stack back into a single tensorflow tensor with self.subTest(shape=shape, axis=axis, dtype=dtype): c = array_ops.stack(xs, axis=axis) self.assertAllEqual(c, data) def testSimpleParallelCPU(self): # tf.parallel_stack is only supported in graph mode. with ops.Graph().as_default(): np.random.seed(7) with test_util.device(use_gpu=False): for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2), (100, 24, 24, 3): with self.subTest(shape=shape): data = self.randn(shape, np.float32) xs = list(map(constant_op.constant, data)) c = array_ops.parallel_stack(xs) self.assertAllEqual(c, data) def testParallelConcatShapeZero(self): if not tf2.enabled(): self.skipTest("only fails in TF2") @def_function.function def f(): y = gen_array_ops.parallel_concat(values=[["tf"]], shape=0) return y with self.assertRaisesRegex(errors.InvalidArgumentError, r"0th dimension of value .* is less than"): f() def testSimpleParallelGPU(self): # tf.parallel_stack is only supported in graph mode. 
with ops.Graph().as_default(): with test_util.device(use_gpu=True): for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2), (100, 24, 24, 3): with self.subTest(shape=shape): data = self.randn(shape, np.float32) xs = list(map(constant_op.constant, data)) c = array_ops.parallel_stack(xs) self.assertAllEqual(c, data) def testConst(self): np.random.seed(7) with test_util.use_gpu(): # Verify that shape induction works with shapes produced via const stack a = constant_op.constant([1, 2, 3, 4, 5, 6]) b = array_ops.reshape(a, array_ops.stack([2, 3])) self.assertAllEqual(b.get_shape(), [2, 3]) # Check on a variety of shapes and types for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2), (8, 2, 10): for dtype in [np.bool_, np.float32, np.int16, np.int32, np.int64]: with self.subTest(shape=shape, dtype=dtype): data = self.randn(shape, dtype) # Stack back into a single tensorflow tensor directly using np array c = array_ops.stack(data) if not context.executing_eagerly(): # This is implemented via a Const: self.assertEqual(c.op.type, "Const") self.assertAllEqual(c, data) # Python lists also work for 1-D case: if len(shape) == 1: data_list = list(data) cl = array_ops.stack(data_list) if not context.executing_eagerly(): self.assertEqual(cl.op.type, "Const") self.assertAllEqual(cl, data) def testConstParallelCPU(self): # tf.parallel_stack is only supported in graph mode. with ops.Graph().as_default(): np.random.seed(7) with test_util.device(use_gpu=False): for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2), (8, 2, 10): with self.subTest(shape=shape): data = self.randn(shape, np.float32) if len(shape) == 1: data_list = list(data) cl = array_ops.parallel_stack(data_list) self.assertAllEqual(cl, data) data = self.randn(shape, np.float32) c = array_ops.parallel_stack(data) self.assertAllEqual(c, data) def testConstParallelGPU(self): # tf.parallel_stack is only supported in graph mode. with ops.Graph().as_default(): np.random.seed(7) with test_util.device(use_gpu=True): for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2): with self.subTest(shape=shape): data = self.randn(shape, np.float32) if len(shape) == 1: data_list = list(data) cl = array_ops.parallel_stack(data_list) self.assertAllEqual(cl, data) data = self.randn(shape, np.float32) c = array_ops.parallel_stack(data) self.assertAllEqual(c, data) def testGradientsAxis0(self): np.random.seed(7) for shape in (2,), (3,), (2, 3), (3, 2), (8, 2, 10): data = np.random.randn(*shape) with self.subTest(shape=shape): with self.cached_session(): def func(*xs): return array_ops.stack(xs) # TODO(irving): Remove list() once we handle maps correctly xs = list(map(constant_op.constant, data)) theoretical, numerical = gradient_checker_v2.compute_gradient( func, xs) self.assertAllClose(theoretical, numerical) def testGradientsAxis1(self): np.random.seed(7) for shape in (2, 3), (3, 2), (8, 2, 10): data = np.random.randn(*shape) out_shape = list(shape[1:]) out_shape.insert(1, shape[0]) with self.subTest(shape=shape): with self.cached_session(): def func(*inp): return array_ops.stack(inp, axis=1) # TODO(irving): Remove list() once we handle maps correctly xs = list(map(constant_op.constant, data)) theoretical, numerical = gradient_checker_v2.compute_gradient( func, xs) self.assertAllClose(theoretical, numerical) def testZeroSizeCPU(self): # tf.parallel_stack is only supported in graph mode. 
with ops.Graph().as_default(): # Verify that stack doesn't crash for zero size inputs with test_util.device(use_gpu=False): for shape in (0,), (3, 0), (0, 3): with self.subTest(shape=shape): x = np.zeros((2,) + shape).astype(np.int32) p = self.evaluate(array_ops.stack(list(x))) self.assertAllEqual(p, x) p = self.evaluate(array_ops.parallel_stack(list(x))) self.assertAllEqual(p, x) def testZeroSizeGPU(self): # tf.parallel_stack is only supported in graph mode. with ops.Graph().as_default(): # Verify that stack doesn't crash for zero size inputs with test_util.device(use_gpu=True): for shape in (0,), (3, 0), (0, 3): with self.subTest(shape=shape): x = np.zeros((2,) + shape).astype(np.int32) p = self.evaluate(array_ops.stack(list(x))) self.assertAllEqual(p, x) p = self.evaluate(array_ops.parallel_stack(list(x))) self.assertAllEqual(p, x) def testAxis0DefaultCPU(self): # tf.parallel_stack is only supported in graph mode. with ops.Graph().as_default(): with test_util.device(use_gpu=False): t = [constant_op.constant([1, 2, 3]), constant_op.constant([4, 5, 6])] stacked = self.evaluate(array_ops.stack(t)) parallel_stacked = self.evaluate(array_ops.parallel_stack(t)) expected = np.array([[1, 2, 3], [4, 5, 6]]) self.assertAllEqual(stacked, expected) self.assertAllEqual(parallel_stacked, expected) def testAxis0DefaultGPU(self): # tf.parallel_stack is only supported in graph mode. with ops.Graph().as_default(): with test_util.device(use_gpu=True): t = [constant_op.constant([1, 2, 3]), constant_op.constant([4, 5, 6])] stacked = self.evaluate(array_ops.stack(t)) parallel_stacked = self.evaluate(array_ops.parallel_stack(t)) expected = np.array([[1, 2, 3], [4, 5, 6]]) self.assertAllEqual(stacked, expected) self.assertAllEqual(parallel_stacked, expected) def testAgainstNumpy(self): # For 1 to 5 dimensions. for shape in (3,), (2, 2, 3), (4, 1, 2, 2), (8, 2, 10): rank = len(shape) expected = self.randn(shape, np.float32) for dtype in [np.bool_, np.float32, np.int32, np.int64]: # For all the possible axis to split it, including negative indices. 
for axis in range(-rank, rank): test_arrays = np_split_squeeze(expected, axis) with self.cached_session(): with self.subTest(shape=shape, dtype=dtype, axis=axis): actual_pack = array_ops.stack(test_arrays, axis=axis) self.assertEqual(expected.shape, actual_pack.get_shape()) actual_pack = self.evaluate(actual_pack) actual_stack = array_ops.stack(test_arrays, axis=axis) self.assertEqual(expected.shape, actual_stack.get_shape()) actual_stack = self.evaluate(actual_stack) self.assertNDArrayNear(expected, actual_stack, 1e-6) def testDimOutOfRange(self): t = [constant_op.constant([1, 2, 3]), constant_op.constant([4, 5, 6])] with self.assertRaisesRegex(ValueError, r"Argument `axis` = 2 not in range \[-2, 2\)"): array_ops.stack(t, axis=2) def testDimOutOfNegativeRange(self): t = [constant_op.constant([1, 2, 3]), constant_op.constant([4, 5, 6])] with self.assertRaisesRegex(ValueError, r"Argument `axis` = -3 not in range \[-2, 2\)"): array_ops.stack(t, axis=-3) def testComplex(self): np.random.seed(7) with self.session(): for shape in (2,), (3,), (2, 3), (3, 2), (8, 2, 10): for dtype in [np.complex64, np.complex128]: with self.subTest(shape=shape, dtype=dtype): data = self.randn(shape, dtype) xs = list(map(constant_op.constant, data)) c = array_ops.stack(xs) self.assertAllEqual(self.evaluate(c), data) class AutomaticStackingTest(test.TestCase): def testSimple(self): self.assertAllEqual([1, 0, 2], ops.convert_to_tensor([1, constant_op.constant(0), 2])) self.assertAllEqual([[0, 0, 0], [0, 1, 0], [0, 0, 0]], ops.convert_to_tensor([[0, 0, 0], [0, constant_op.constant(1), 0], [0, 0, 0]])) self.assertAllEqual([[0, 0, 0], [0, 1, 0], [0, 0, 0]], ops.convert_to_tensor([[0, 0, 0], constant_op.constant([0, 1, 0]), [0, 0, 0]])) self.assertAllEqual([[0, 0, 0], [0, 1, 0], [0, 0, 0]], ops.convert_to_tensor([ constant_op.constant([0, 0, 0]), constant_op.constant([0, 1, 0]), constant_op.constant([0, 0, 0]) ])) def testWithNDArray(self): with self.session(): result = ops.convert_to_tensor([[[0., 0.], constant_op.constant([1., 1.])], np.array( [[2., 2.], [3., 3.]], dtype=np.float32)]) self.assertAllEqual([[[0., 0.], [1., 1.]], [[2., 2.], [3., 3.]]], self.evaluate(result)) def testDtype(self): t_0 = ops.convert_to_tensor([[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]) self.assertEqual(dtypes.float32, t_0.dtype) t_1 = ops.convert_to_tensor([[0., 0., 0.], constant_op.constant( [0., 0., 0.], dtype=dtypes.float64), [0., 0., 0.]]) self.assertEqual(dtypes.float64, t_1.dtype) t_2 = ops.convert_to_tensor( [[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]], dtype=dtypes.float64) self.assertEqual(dtypes.float64, t_2.dtype) t_3 = ops.convert_to_tensor( [[0., 0., 0.], constant_op.constant([0., 0., 0.], dtype=dtypes.float64), [0., 0., 0.] ], dtype=dtypes.float32) self.assertEqual(dtypes.float32, t_3.dtype) t_4 = ops.convert_to_tensor( [constant_op.constant([0., 0., 0.], dtype=dtypes.float64)], dtype=dtypes.float32) self.assertEqual(dtypes.float32, t_4.dtype) with self.assertRaises(TypeError): ops.convert_to_tensor([ constant_op.constant( [0., 0., 0.], dtype=dtypes.float32), constant_op.constant( [0., 0., 0.], dtype=dtypes.float64), [0., 0., 0.] ]) def testDtypeConversionWhenTensorDtypeMismatch(self): t_0 = ops.convert_to_tensor([0., 0., 0.]) self.assertEqual(dtypes.float32, t_0.dtype) t_1 = ops.convert_to_tensor([0, 0, 0]) self.assertEqual(dtypes.int32, t_1.dtype) t_2 = ops.convert_to_tensor([t_0, t_0, t_1], dtype=dtypes.float64) self.assertEqual(dtypes.float64, t_2.dtype) if __name__ == "__main__": test.main()
Intel-Corporation/tensorflow
tensorflow/python/kernel_tests/array_ops/stack_op_test.py
Python
apache-2.0
14,594
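A brief usage sketch of the two ops the tests above exercise, assuming a current TensorFlow install; tf.stack works eagerly on any axis, while tf.parallel_stack (array_ops.parallel_stack in the file) stacks only along axis 0 and, per the test comments, is graph-mode only. Shapes and values are illustrative.

import numpy as np
import tensorflow as tf

data = np.arange(6, dtype=np.float32).reshape(2, 3)
xs = [tf.constant(row) for row in data]        # two tensors of shape (3,)

print(tf.stack(xs, axis=0).shape)              # (2, 3): same as np.stack(data)
print(tf.stack(xs, axis=1).shape)              # (3, 2): new axis inserted at position 1

# parallel_stack has no axis argument and, per the tests above, is graph-mode
# only, so build it inside an explicit graph and evaluate with a v1 session.
with tf.Graph().as_default():
    parallel = tf.parallel_stack([tf.constant(row) for row in data])
    with tf.compat.v1.Session() as sess:
        print(sess.run(parallel))              # [[0. 1. 2.] [3. 4. 5.]]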
# (c) Copyright 2017-2019 SUSE LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from flask import abort from flask import Blueprint from flask import jsonify from flask import request from keystoneauth1 import exceptions as exc from keystoneauth1 import session as ks_session from keystoneclient.auth.identity import v3 from keystoneclient.v3 import client as ks_client import logging import os from oslo_config import cfg import pbr.version import pwd import threading import time from .util import ping from . import config from . import policy bp = Blueprint('admin', __name__) CONF = cfg.CONF LOG = logging.getLogger(__name__) USER_AGENT = 'Installer UI' @bp.route("/api/v2/version") def version(): """Returns the version of the service .. :quickref: Admin; Returns the version of the service **Example valid response**: .. sourcecode:: http HTTP/1.1 200 OK 0.0.1.dev16 """ version_info = pbr.version.VersionInfo('ardana-service') return version_info.version_string_with_vcs() @bp.route("/api/v2/heartbeat") def heartbeat(): """Returns the epoch time Simple API to verify that the service is up and responding. Returns the number of seconds since 1970-01-01 00:00:00 GMT. .. :quickref: Admin; Returns the epoch time **Example valid response**: .. sourcecode:: http HTTP/1.1 200 OK 1502745650 """ return jsonify(int(time.time())) @bp.route("/api/v2/user") @policy.enforce('lifecycle:get_user') def user(): """Returns the username the service is running under .. :quickref: Admin; Returns the username the service is running under **Example valid response**: .. sourcecode:: http HTTP/1.1 200 OK {"username": "myusername"} """ user_dict = {'username': pwd.getpwuid(os.getuid()).pw_name} return jsonify(user_dict) def update_trigger_file(): trigger_file = os.path.join(CONF.paths.log_dir, 'trigger.txt') with open(trigger_file, 'w') as f: f.write("Triggered restart at %s\n" % time.asctime()) @bp.route("/api/v2/restart", methods=['POST']) @policy.enforce('lifecycle:restart') def restart(): """Requests the service to restart after a specified delay, in seconds .. :quickref: Admin; Requests a service restart after a delay **Example Request**: .. sourcecode:: http POST /api/v2/user HTTP/1.1 Content-Type: application/json { "delay": 60 } """ info = request.get_json() or {} delay_secs = int(info.get('delay', 0)) t = threading.Timer(delay_secs, update_trigger_file) t.start() return jsonify('Success') @bp.route("/api/v2/login", methods=['POST']) def login(): """Authenticates with keystone and returns a token .. :quickref: Admin; Authenticates with keystone **Example Request**: .. sourcecode:: http POST /api/v2/login HTTP/1.1 Content-Type: application/json { "username": "admin", "password": "secret" } **Example Response**: .. 
sourcecode:: http HTTP/1.1 200 OK Content-Type: application/json { "token": "gAAAAABbEaruZDQGIH5KmKWHlDZIw7CLq", "expires": "2018-06-01T21:22:06+00:00" } :status 200: successful authentication :status 401: invalid credentials :status 403: authentication not permitted, or user not authorized for any projects """ if not config.requires_auth(): abort(403, "authentication not permitted since service is in insecure mode") info = request.get_json() or {} username = info.get('username') password = info.get('password') user_domain_name = info.get('user_domain_name', 'Default') token = _authenticate(CONF.keystone_authtoken.auth_url, username, password, user_domain_name) return jsonify(token) def _authenticate(auth_url, username=None, password=None, user_domain_name='Default'): """Authenticate with keystone Creates an unscoped token using the given credentials (which validates them), and then uses that token to get a project-scoped token. """ unscoped_auth = v3.Password(auth_url, username=username, password=password, user_domain_name=user_domain_name, unscoped=True) session = ks_session.Session(user_agent=USER_AGENT, verify=not CONF.keystone_authtoken.insecure) try: # Trigger keystone to verify the credentials unscoped_auth_ref = unscoped_auth.get_access(session) except exc.connection.ConnectFailure as e: abort(503, str(e)) except exc.http.HttpError as e: abort(e.http_status, e.message) except exc.ClientException as e: abort(401, str(e)) except Exception as e: LOG.exception(e) abort(500, "Unable to authenticate") client = ks_client.Client(session=session, auth=unscoped_auth, user_agent=USER_AGENT) auth_url = unscoped_auth.auth_url projects = client.projects.list(user=unscoped_auth_ref.user_id) # Filter out disabled projects projects = [project for project in projects if project.enabled] # Prioritize the admin project by putting it at the beginning of the list for pos, project in enumerate(projects): if project.name == 'admin': projects.pop(pos) projects.insert(0, project) break # Return the first project token that we have the admin role on, otherwise # return the first project token we have any role on. fallback_auth_ref = None for project in projects: auth = v3.Token(auth_url=auth_url, token=unscoped_auth_ref.auth_token, project_id=project.id, reauthenticate=False) try: auth_ref = auth.get_access(session) if 'admin' in auth_ref.role_names: return {'token': auth_ref.auth_token, 'expires': auth_ref.expires.isoformat()} elif not fallback_auth_ref: fallback_auth_ref = auth_ref except Exception as e: pass if fallback_auth_ref: return {'token': fallback_auth_ref.auth_token, 'expires': fallback_auth_ref.expires.isoformat()} # TODO(gary): Consider as a secondary fallback to return a domain-scoped # token abort(403, "Not authorized for any project") @bp.route("/api/v2/is_secured") def get_secured(): """Returns whether authentication is required Returns a json object indicating whether the service is configured to enforce authentication .. :quickref: Model; Returns whether authentication is required **Example Response**: .. sourcecode:: http HTTP/1.1 200 OK Content-Type: application/json { "isSecured": false } :status 200: success """ return jsonify({'isSecured': config.requires_auth()}) @bp.route("/api/v2/connection_test", methods=['POST']) def connection_test(): body = request.get_json() or {} host = body['host'] try: ping(host, 22) return jsonify('Success') except Exception as e: return jsonify(error=str(e)), 404
ArdanaCLM/ardana-service
ardana_service/admin.py
Python
apache-2.0
8,071
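A hypothetical client-side sketch for the endpoints defined in admin.py above; the base URL and port, the credentials, and running over plain HTTP are all assumptions made for illustration.

import requests

BASE_URL = "http://localhost:9085"   # hypothetical host and port

# Unauthenticated endpoints
print(requests.get(BASE_URL + "/api/v2/heartbeat").json())    # epoch seconds
print(requests.get(BASE_URL + "/api/v2/is_secured").json())   # {"isSecured": ...}

# Obtain a keystone-backed token (only meaningful when isSecured is true)
resp = requests.post(
    BASE_URL + "/api/v2/login",
    json={"username": "admin", "password": "secret"},
)
if resp.ok:
    token = resp.json()["token"]      # send on later requests as an auth header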
import experiment
from ..util import dirs
from ..util import file_handling as fh
from optparse import OptionParser
import sys


def main():
    usage = "%prog project logfile "
    parser = OptionParser(usage=usage)
    parser.add_option('-n', dest='new_name', default=None,
                      help='New name for experiment: default= old name + _rerun')
    #parser.add_option('--boolarg', action="store_true", dest="boolarg", default=False,
    #                  help='Keyword argument: default=%default')

    (options, args) = parser.parse_args()

    project = args[0]
    log_filename = args[1]
    new_name = options.new_name

    log = fh.read_json(log_filename)
    if new_name is None:
        new_name = log['name'] + '_rerun'
    log['name'] = new_name

    # coerce numeric hyperparameters stored in the log back to floats
    float_vars = ['best_alpha', 'alpha_exp_base', 'max_alpha_exp', 'min_alpha_exp', 'orig_T', 'tau']
    for v in float_vars:
        if v in log:
            if log[v] is not None:
                log[v] = float(log[v])
            else:
                log[v] = None

    #if log['reuse'] == 'False':
    #    log['reuse'] = False
    #else:
    #    log['reuse'] = True

    # convert list string to list
    #list_vars = ['feature_list', 'additional_label_files', 'additional_label_weights']
    #for v in list_vars:
    #    if v in log:
    #        print v
    #        print log[v]
    #        quoted_strings = [p.strip() for p in log[v][1:-1].split(',')]
    #        print quoted_strings
    #        log[v] = [p[1:-1] for p in quoted_strings]
    #        print log[v]
    #        print '\n'

    #print log
    #if 'additional_label_weights' in log:
    #    log['additional_label_weights'] = [float(w) for w in log['additional_label_weights']]

    dirs.make_base_dir(project)
    print(log)
    result = experiment.run_experiment(**log)
    print(result)


if __name__ == '__main__':
    main()

dallascard/guac
core/experiment/rerun.py
Python
apache-2.0
1,879
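A hedged sketch of the kind of log file this rerun script consumes; the keys below are only the ones the script itself touches, since the full keyword set expected by experiment.run_experiment is not shown here, and real logs are written by the experiment code itself.

# Hypothetical minimal log file for rerun.py; real logs carry every keyword
# that run_experiment expects.
import json

log = {
    "name": "sample_experiment",   # becomes "sample_experiment_rerun" unless -n is passed
    "best_alpha": "0.1",           # values in float_vars are coerced back to float
    "tau": None,                   # None values are left as None
}
with open("experiment_log.json", "w") as f:
    json.dump(log, f)

# invoked roughly as: python rerun.py <project> experiment_log.json -n new_name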
# Copyright (c) 2010-2012 OpenStack, LLC. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import with_statement import functools import errno import os import resource import signal import time import subprocess import re from swift.common.utils import search_tree, remove_file, write_file SWIFT_DIR = '/etc/swift' RUN_DIR = '/var/run/swift' # auth-server has been removed from ALL_SERVERS, start it explicitly ALL_SERVERS = ['account-auditor', 'account-server', 'container-auditor', 'container-replicator', 'container-server', 'container-sync', 'container-updater', 'object-auditor', 'object-server', 'object-expirer', 'object-replicator', 'object-updater', 'proxy-server', 'account-replicator', 'account-reaper'] MAIN_SERVERS = ['proxy-server', 'account-server', 'container-server', 'object-server'] REST_SERVERS = [s for s in ALL_SERVERS if s not in MAIN_SERVERS] GRACEFUL_SHUTDOWN_SERVERS = MAIN_SERVERS + ['auth-server'] START_ONCE_SERVERS = REST_SERVERS # These are servers that match a type (account-*, container-*, object-*) but # don't use that type-server.conf file and instead use their own. STANDALONE_SERVERS = ['object-expirer'] KILL_WAIT = 15 # seconds to wait for servers to die (by default) WARNING_WAIT = 3 # seconds to wait after message that may just be a warning MAX_DESCRIPTORS = 32768 MAX_MEMORY = (1024 * 1024 * 1024) * 2 # 2 GB def setup_env(): """Try to increase resource limits of the OS. Move PYTHON_EGG_CACHE to /tmp """ try: resource.setrlimit(resource.RLIMIT_NOFILE, (MAX_DESCRIPTORS, MAX_DESCRIPTORS)) resource.setrlimit(resource.RLIMIT_DATA, (MAX_MEMORY, MAX_MEMORY)) except ValueError: print _("WARNING: Unable to increase file descriptor limit. " "Running as non-root?") os.environ['PYTHON_EGG_CACHE'] = '/tmp' def command(func): """ Decorator to declare which methods are accessible as commands, commands always return 1 or 0, where 0 should indicate success. :param func: function to make public """ func.publicly_accessible = True @functools.wraps(func) def wrapped(*a, **kw): rv = func(*a, **kw) return 1 if rv else 0 return wrapped def watch_server_pids(server_pids, interval=1, **kwargs): """Monitor a collection of server pids yeilding back those pids that aren't responding to signals. :param server_pids: a dict, lists of pids [int,...] keyed on Server objects """ status = {} start = time.time() end = start + interval server_pids = dict(server_pids) # make a copy while True: for server, pids in server_pids.items(): for pid in pids: try: # let pid stop if it wants to os.waitpid(pid, os.WNOHANG) except OSError, e: if e.errno not in (errno.ECHILD, errno.ESRCH): raise # else no such child/process # check running pids for server status[server] = server.get_running_pids(**kwargs) for pid in pids: # original pids no longer in running pids! 
if pid not in status[server]: yield server, pid # update active pids list using running_pids server_pids[server] = status[server] if not [p for server, pids in status.items() for p in pids]: # no more running pids break if time.time() > end: break else: time.sleep(0.1) class UnknownCommandError(Exception): pass class Manager(): """Main class for performing commands on groups of servers. :param servers: list of server names as strings """ def __init__(self, servers, run_dir=RUN_DIR): server_names = set() for server in servers: if server == 'all': server_names.update(ALL_SERVERS) elif server == 'main': server_names.update(MAIN_SERVERS) elif server == 'rest': server_names.update(REST_SERVERS) elif '*' in server: # convert glob to regex server_names.update([s for s in ALL_SERVERS if re.match(server.replace('*', '.*'), s)]) else: server_names.add(server) self.servers = set() for name in server_names: self.servers.add(Server(name, run_dir)) @command def status(self, **kwargs): """display status of tracked pids for server """ status = 0 for server in self.servers: status += server.status(**kwargs) return status @command def start(self, **kwargs): """starts a server """ setup_env() status = 0 for server in self.servers: server.launch(**kwargs) if not kwargs.get('daemon', True): for server in self.servers: try: status += server.interact(**kwargs) except KeyboardInterrupt: print _('\nuser quit') self.stop(**kwargs) break elif kwargs.get('wait', True): for server in self.servers: status += server.wait(**kwargs) return status @command def no_wait(self, **kwargs): """spawn server and return immediately """ kwargs['wait'] = False return self.start(**kwargs) @command def no_daemon(self, **kwargs): """start a server interactively """ kwargs['daemon'] = False return self.start(**kwargs) @command def once(self, **kwargs): """start server and run one pass on supporting daemons """ kwargs['once'] = True return self.start(**kwargs) @command def stop(self, **kwargs): """stops a server """ server_pids = {} for server in self.servers: signaled_pids = server.stop(**kwargs) if not signaled_pids: print _('No %s running') % server else: server_pids[server] = signaled_pids # all signaled_pids, i.e. 
list(itertools.chain(*server_pids.values())) signaled_pids = [p for server, pids in server_pids.items() for p in pids] # keep track of the pids yeiled back as killed for all servers killed_pids = set() kill_wait = kwargs.get('kill_wait', KILL_WAIT) for server, killed_pid in watch_server_pids(server_pids, interval=kill_wait, **kwargs): print _("%s (%s) appears to have stopped") % (server, killed_pid) killed_pids.add(killed_pid) if not killed_pids.symmetric_difference(signaled_pids): # all proccesses have been stopped return 0 # reached interval n watch_pids w/o killing all servers for server, pids in server_pids.items(): if not killed_pids.issuperset(pids): # some pids of this server were not killed print _('Waited %s seconds for %s to die; giving up') % ( kill_wait, server) return 1 @command def shutdown(self, **kwargs): """allow current requests to finish on supporting servers """ kwargs['graceful'] = True status = 0 status += self.stop(**kwargs) return status @command def restart(self, **kwargs): """stops then restarts server """ status = 0 status += self.stop(**kwargs) status += self.start(**kwargs) return status @command def reload(self, **kwargs): """graceful shutdown then restart on supporting servers """ kwargs['graceful'] = True status = 0 for server in self.servers: m = Manager([server.server]) status += m.stop(**kwargs) status += m.start(**kwargs) return status @command def force_reload(self, **kwargs): """alias for reload """ return self.reload(**kwargs) def get_command(self, cmd): """Find and return the decorated method named like cmd :param cmd: the command to get, a string, if not found raises UnknownCommandError """ cmd = cmd.lower().replace('-', '_') try: f = getattr(self, cmd) except AttributeError: raise UnknownCommandError(cmd) if not hasattr(f, 'publicly_accessible'): raise UnknownCommandError(cmd) return f @classmethod def list_commands(cls): """Get all publicly accessible commands :returns: a list of string tuples (cmd, help), the method names who are decorated as commands """ get_method = lambda cmd: getattr(cls, cmd) return sorted([(x.replace('_', '-'), get_method(x).__doc__.strip()) for x in dir(cls) if getattr(get_method(x), 'publicly_accessible', False)]) def run_command(self, cmd, **kwargs): """Find the named command and run it :param cmd: the command name to run """ f = self.get_command(cmd) return f(**kwargs) class Server(): """Manage operations on a server or group of servers of similar type :param server: name of server """ def __init__(self, server, run_dir=RUN_DIR): if '-' not in server: server = '%s-server' % server self.server = server.lower() self.type = server.rsplit('-', 1)[0] self.cmd = 'swift-%s' % server self.procs = [] self.run_dir = run_dir def __str__(self): return self.server def __repr__(self): return "%s(%s)" % (self.__class__.__name__, repr(str(self))) def __hash__(self): return hash(str(self)) def __eq__(self, other): try: return self.server == other.server except AttributeError: return False def get_pid_file_name(self, conf_file): """Translate conf_file to a corresponding pid_file :param conf_file: an conf_file for this server, a string :returns: the pid_file for this conf_file """ return conf_file.replace( os.path.normpath(SWIFT_DIR), self.run_dir, 1).replace( '%s-server' % self.type, self.server, 1).rsplit( '.conf', 1)[0] + '.pid' def get_conf_file_name(self, pid_file): """Translate pid_file to a corresponding conf_file :param pid_file: a pid_file for this server, a string :returns: the conf_file for this pid_file """ if self.server in 
STANDALONE_SERVERS: return pid_file.replace( os.path.normpath(self.run_dir), SWIFT_DIR, 1)\ .rsplit('.pid', 1)[0] + '.conf' else: return pid_file.replace( os.path.normpath(self.run_dir), SWIFT_DIR, 1).replace( self.server, '%s-server' % self.type, 1).rsplit( '.pid', 1)[0] + '.conf' def conf_files(self, **kwargs): """Get conf files for this server :param: number, if supplied will only lookup the nth server :returns: list of conf files """ if self.server in STANDALONE_SERVERS: found_conf_files = search_tree(SWIFT_DIR, self.server + '*', '.conf') else: found_conf_files = search_tree(SWIFT_DIR, '%s-server*' % self.type, '.conf') number = kwargs.get('number') if number: try: conf_files = [found_conf_files[number - 1]] except IndexError: conf_files = [] else: conf_files = found_conf_files if not conf_files: # maybe there's a config file(s) out there, but I couldn't find it! if not kwargs.get('quiet'): print _('Unable to locate config %sfor %s') % ( ('number %s ' % number if number else ''), self.server) if kwargs.get('verbose') and not kwargs.get('quiet'): if found_conf_files: print _('Found configs:') for i, conf_file in enumerate(found_conf_files): print ' %d) %s' % (i + 1, conf_file) return conf_files def pid_files(self, **kwargs): """Get pid files for this server :param: number, if supplied will only lookup the nth server :returns: list of pid files """ pid_files = search_tree(self.run_dir, '%s*' % self.server, '.pid') if kwargs.get('number', 0): conf_files = self.conf_files(**kwargs) # filter pid_files to match the index of numbered conf_file pid_files = [pid_file for pid_file in pid_files if self.get_conf_file_name(pid_file) in conf_files] return pid_files def iter_pid_files(self, **kwargs): """Generator, yields (pid_file, pids) """ for pid_file in self.pid_files(**kwargs): yield pid_file, int(open(pid_file).read().strip()) def signal_pids(self, sig, **kwargs): """Send a signal to pids for this server :param sig: signal to send :returns: a dict mapping pids (ints) to pid_files (paths) """ pids = {} for pid_file, pid in self.iter_pid_files(**kwargs): try: if sig != signal.SIG_DFL: print _('Signal %s pid: %s signal: %s') % (self.server, pid, sig) os.kill(pid, sig) except OSError, e: if e.errno == errno.ESRCH: # pid does not exist if kwargs.get('verbose'): print _("Removing stale pid file %s") % pid_file remove_file(pid_file) elif e.errno == errno.EPERM: print _("No permission to signal PID %d") % pid else: # process exists pids[pid] = pid_file return pids def get_running_pids(self, **kwargs): """Get running pids :returns: a dict mapping pids (ints) to pid_files (paths) """ return self.signal_pids(signal.SIG_DFL, **kwargs) # send noop def kill_running_pids(self, **kwargs): """Kill running pids :param graceful: if True, attempt SIGHUP on supporting servers :returns: a dict mapping pids (ints) to pid_files (paths) """ graceful = kwargs.get('graceful') if graceful and self.server in GRACEFUL_SHUTDOWN_SERVERS: sig = signal.SIGHUP else: sig = signal.SIGTERM return self.signal_pids(sig, **kwargs) def status(self, pids=None, **kwargs): """Display status of server :param: pids, if not supplied pids will be populated automatically :param: number, if supplied will only lookup the nth server :returns: 1 if server is not running, 0 otherwise """ if pids is None: pids = self.get_running_pids(**kwargs) if not pids: number = kwargs.get('number', 0) if number: kwargs['quiet'] = True conf_files = self.conf_files(**kwargs) if conf_files: print _("%s #%d not running (%s)") % (self.server, number, conf_files[0]) 
else: print _("No %s running") % self.server return 1 for pid, pid_file in pids.items(): conf_file = self.get_conf_file_name(pid_file) print _("%s running (%s - %s)") % (self.server, pid, conf_file) return 0 def spawn(self, conf_file, once=False, wait=True, daemon=True, **kwargs): """Launch a subprocess for this server. :param conf_file: path to conf_file to use as first arg :param once: boolean, add once argument to command :param wait: boolean, if true capture stdout with a pipe :param daemon: boolean, if true ask server to log to console :returns : the pid of the spawned process """ args = [self.cmd, conf_file] if once: args.append('once') if not daemon: # ask the server to log to console args.append('verbose') # figure out what we're going to do with stdio if not daemon: # do nothing, this process is open until the spawns close anyway re_out = None re_err = None else: re_err = subprocess.STDOUT if wait: # we're going to need to block on this... re_out = subprocess.PIPE else: re_out = open(os.devnull, 'w+b') proc = subprocess.Popen(args, stdout=re_out, stderr=re_err) pid_file = self.get_pid_file_name(conf_file) write_file(pid_file, proc.pid) self.procs.append(proc) return proc.pid def wait(self, **kwargs): """ wait on spawned procs to start """ status = 0 for proc in self.procs: # wait for process to close its stdout output = proc.stdout.read() if output: print output start = time.time() # wait for process to die (output may just be a warning) while time.time() - start < WARNING_WAIT: time.sleep(0.1) if proc.poll() is not None: status += proc.returncode break return status def interact(self, **kwargs): """ wait on spawned procs to terminate """ status = 0 for proc in self.procs: # wait for process to terminate proc.communicate() if proc.returncode: status += 1 return status def launch(self, **kwargs): """ Collect conf files and attempt to spawn the processes for this server """ conf_files = self.conf_files(**kwargs) if not conf_files: return [] pids = self.get_running_pids(**kwargs) already_started = False for pid, pid_file in pids.items(): conf_file = self.get_conf_file_name(pid_file) # for legacy compat you can't start other servers if one server is # already running (unless -n specifies which one you want), this # restriction could potentially be lifted, and launch could start # any unstarted instances if conf_file in conf_files: already_started = True print _("%s running (%s - %s)") % (self.server, pid, conf_file) elif not kwargs.get('number', 0): already_started = True print _("%s running (%s - %s)") % (self.server, pid, pid_file) if already_started: print _("%s already started...") % self.server return [] if self.server not in START_ONCE_SERVERS: kwargs['once'] = False pids = {} for conf_file in conf_files: if kwargs.get('once'): msg = _('Running %s once') % self.server else: msg = _('Starting %s') % self.server print '%s...(%s)' % (msg, conf_file) try: pid = self.spawn(conf_file, **kwargs) except OSError, e: if e.errno == errno.ENOENT: # TODO: should I check if self.cmd exists earlier? print _("%s does not exist") % self.cmd break pids[pid] = conf_file return pids def stop(self, **kwargs): """Send stop signals to pids for this server :returns: a dict mapping pids (ints) to pid_files (paths) """ return self.kill_running_pids(**kwargs)
garvenshen/zeda-swift
swift/common/manager.py
Python
apache-2.0
21,270
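An illustrative sketch of driving the Manager class above programmatically, assuming a node with Swift installed, conf files under /etc/swift, and pid files under /var/run/swift; the swift-init CLI is the usual front end for these calls.

from swift.common.manager import Manager

# 'main' expands to proxy-server, account-server, container-server, object-server
manager = Manager(['main'])

# Commands return 0 on success, non-zero otherwise (see the @command decorator)
if manager.run_command('status') != 0:
    manager.run_command('restart')

# Every publicly accessible command with its docstring summary
for name, help_text in Manager.list_commands():
    print("%s - %s" % (name, help_text))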
"""App name""" from django.apps import AppConfig class CertificateEngineConfig(AppConfig): name = "certificate_engine"
repleo/bounca
certificate_engine/apps.py
Python
apache-2.0
126
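A hypothetical settings.py fragment showing how the AppConfig above would be registered; the other INSTALLED_APPS entries are placeholders.

INSTALLED_APPS = [
    "django.contrib.auth",
    "django.contrib.contenttypes",
    "certificate_engine.apps.CertificateEngineConfig",  # the app defined above
]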
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Base classes and functions for dynamic decoding.""" import abc import tensorflow as tf from tensorflow_addons.utils.types import TensorLike from typeguard import typechecked from typing import Any, Optional, Tuple, Union # TODO: Find public API alternatives to these from tensorflow.python.ops import control_flow_util class Decoder(metaclass=abc.ABCMeta): """An RNN Decoder abstract interface object. Concepts used by this interface: - `inputs`: (structure of) tensors and TensorArrays that is passed as input to the RNN cell composing the decoder, at each time step. - `state`: (structure of) tensors and TensorArrays that is passed to the RNN cell instance as the state. - `finished`: boolean tensor telling whether each sequence in the batch is finished. - `training`: boolean whether it should behave in training mode or in inference mode. - `outputs`: instance of `tfa.seq2seq.BasicDecoderOutput`. Result of the decoding, at each time step. """ @property def batch_size(self): """The batch size of input values.""" raise NotImplementedError @property def output_size(self): """A (possibly nested tuple of...) integer[s] or `TensorShape` object[s].""" raise NotImplementedError @property def output_dtype(self): """A (possibly nested tuple of...) dtype[s].""" raise NotImplementedError @abc.abstractmethod def initialize(self, name=None): """Called before any decoding iterations. This methods must compute initial input values and initial state. Args: name: Name scope for any created operations. Returns: `(finished, initial_inputs, initial_state)`: initial values of 'finished' flags, inputs and state. """ raise NotImplementedError @abc.abstractmethod def step(self, time, inputs, state, training=None, name=None): """Called per step of decoding (but only once for dynamic decoding). Args: time: Scalar `int32` tensor. Current step number. inputs: RNN cell input (possibly nested tuple of) tensor[s] for this time step. state: RNN cell state (possibly nested tuple of) tensor[s] from previous time step. training: Python boolean. Indicates whether the layer should behave in training mode or in inference mode. Only relevant when `dropout` or `recurrent_dropout` is used. name: Name scope for any created operations. Returns: `(outputs, next_state, next_inputs, finished)`: `outputs` is an object containing the decoder output, `next_state` is a (structure of) state tensors and TensorArrays, `next_inputs` is the tensor that should be used as input for the next step, `finished` is a boolean tensor telling whether the sequence is complete, for each sequence in the batch. """ raise NotImplementedError def finalize(self, outputs, final_state, sequence_lengths): raise NotImplementedError @property def tracks_own_finished(self): """Describes whether the Decoder keeps track of finished states. 
Most decoders will emit a true/false `finished` value independently at each time step. In this case, the `tfa.seq2seq.dynamic_decode` function keeps track of which batch entries are already finished, and performs a logical OR to insert new batches to the finished set. Some decoders, however, shuffle batches / beams between time steps and `tfa.seq2seq.dynamic_decode` will mix up the finished state across these entries because it does not track the reshuffle across time steps. In this case, it is up to the decoder to declare that it will keep track of its own finished state by setting this property to `True`. Returns: Python bool. """ return False class BaseDecoder(tf.keras.layers.Layer): """An RNN Decoder that is based on a Keras layer. Concepts used by this interface: - `inputs`: (structure of) Tensors and TensorArrays that is passed as input to the RNN cell composing the decoder, at each time step. - `state`: (structure of) Tensors and TensorArrays that is passed to the RNN cell instance as the state. - `memory`: tensor that is usually the full output of the encoder, which will be used for the attention wrapper for the RNN cell. - `finished`: boolean tensor telling whether each sequence in the batch is finished. - `training`: boolean whether it should behave in training mode or in inference mode. - `outputs`: instance of `tfa.seq2seq.BasicDecoderOutput`. Result of the decoding, at each time step. """ @typechecked def __init__( self, output_time_major: bool = False, impute_finished: bool = False, maximum_iterations: Optional[TensorLike] = None, parallel_iterations: int = 32, swap_memory: bool = False, **kwargs, ): self.output_time_major = output_time_major self.impute_finished = impute_finished self.maximum_iterations = maximum_iterations self.parallel_iterations = parallel_iterations self.swap_memory = swap_memory super().__init__(**kwargs) def call(self, inputs, initial_state=None, training=None, **kwargs): init_kwargs = kwargs init_kwargs["initial_state"] = initial_state return dynamic_decode( self, output_time_major=self.output_time_major, impute_finished=self.impute_finished, maximum_iterations=self.maximum_iterations, parallel_iterations=self.parallel_iterations, swap_memory=self.swap_memory, training=training, decoder_init_input=inputs, decoder_init_kwargs=init_kwargs, ) @property def batch_size(self): """The batch size of input values.""" raise NotImplementedError @property def output_size(self): """A (possibly nested tuple of...) integer[s] or `TensorShape` object[s].""" raise NotImplementedError @property def output_dtype(self): """A (possibly nested tuple of...) dtype[s].""" raise NotImplementedError def initialize(self, inputs, initial_state=None, **kwargs): """Called before any decoding iterations. This methods must compute initial input values and initial state. Args: inputs: (structure of) tensors that contains the input for the decoder. In the normal case, it's a tensor with shape [batch, timestep, embedding]. initial_state: (structure of) tensors that contains the initial state for the RNN cell. **kwargs: Other arguments that are passed in from layer.call() method. It could contains item like input `sequence_length`, or masking for input. Returns: `(finished, initial_inputs, initial_state)`: initial values of 'finished' flags, inputs and state. """ raise NotImplementedError def step(self, time, inputs, state, training): """Called per step of decoding (but only once for dynamic decoding). Args: time: Scalar `int32` tensor. Current step number. 
inputs: RNN cell input (possibly nested tuple of) tensor[s] for this time step. state: RNN cell state (possibly nested tuple of) tensor[s] from previous time step. training: Python boolean. Indicates whether the layer should behave in training mode or in inference mode. Returns: `(outputs, next_state, next_inputs, finished)`: `outputs` is an object containing the decoder output, `next_state` is a (structure of) state tensors and TensorArrays, `next_inputs` is the tensor that should be used as input for the next step, `finished` is a boolean tensor telling whether the sequence is complete, for each sequence in the batch. """ raise NotImplementedError def finalize(self, outputs, final_state, sequence_lengths): raise NotImplementedError @property def tracks_own_finished(self): """Describes whether the Decoder keeps track of finished states. Most decoders will emit a true/false `finished` value independently at each time step. In this case, the `tfa.seq2seq.dynamic_decode` function keeps track of which batch entries are already finished, and performs a logical OR to insert new batches to the finished set. Some decoders, however, shuffle batches / beams between time steps and `tfa.seq2seq.dynamic_decode` will mix up the finished state across these entries because it does not track the reshuffle across time steps. In this case, it is up to the decoder to declare that it will keep track of its own finished state by setting this property to `True`. Returns: Python bool. """ return False # TODO(scottzhu): Add build/get_config/from_config and other layer methods. @typechecked def dynamic_decode( decoder: Union[Decoder, BaseDecoder], output_time_major: bool = False, impute_finished: bool = False, maximum_iterations: Optional[TensorLike] = None, parallel_iterations: int = 32, swap_memory: bool = False, training: Optional[bool] = None, scope: Optional[str] = None, enable_tflite_convertible: bool = False, **kwargs, ) -> Tuple[Any, Any, Any]: """Runs dynamic decoding with a decoder. Calls `initialize()` once and `step()` repeatedly on the decoder object. Args: decoder: A `tfa.seq2seq.Decoder` or `tfa.seq2seq.BaseDecoder` instance. output_time_major: Python boolean. Default: `False` (batch major). If `True`, outputs are returned as time major tensors (this mode is faster). Otherwise, outputs are returned as batch major tensors (this adds extra time to the computation). impute_finished: Python boolean. If `True`, then states for batch entries which are marked as finished get copied through and the corresponding outputs get zeroed out. This causes some slowdown at each time step, but ensures that the final state and outputs have the correct values and that backprop ignores time steps that were marked as finished. maximum_iterations: A strictly positive `int32` scalar, the maximum allowed number of decoding steps. Default is `None` (decode until the decoder is fully done). parallel_iterations: Argument passed to `tf.while_loop`. swap_memory: Argument passed to `tf.while_loop`. training: Python boolean. Indicates whether the layer should behave in training mode or in inference mode. Only relevant when `dropout` or `recurrent_dropout` is used. scope: Optional name scope to use. enable_tflite_convertible: Python boolean. If `True`, then the variables of `TensorArray` become of 1-D static shape. Also zero pads in the output tensor will be discarded. Default: `False`. **kwargs: dict, other keyword arguments for dynamic_decode. 
It might contain arguments for `BaseDecoder` to initialize, which takes all tensor inputs during call(). Returns: `(final_outputs, final_state, final_sequence_lengths)`. Raises: ValueError: if `maximum_iterations` is provided but is not a scalar. """ with tf.name_scope(scope or "decoder"): is_xla = ( not tf.executing_eagerly() and control_flow_util.GraphOrParentsInXlaContext( tf.compat.v1.get_default_graph() ) ) if maximum_iterations is not None: maximum_iterations = tf.convert_to_tensor( maximum_iterations, dtype=tf.int32, name="maximum_iterations" ) if maximum_iterations.shape.ndims != 0: raise ValueError("maximum_iterations must be a scalar") tf.debugging.assert_greater( maximum_iterations, 0, message="maximum_iterations should be greater than 0", ) elif is_xla: raise ValueError("maximum_iterations is required for XLA compilation.") if isinstance(decoder, Decoder): initial_finished, initial_inputs, initial_state = decoder.initialize() else: # For BaseDecoder that takes tensor inputs during call. decoder_init_input = kwargs.pop("decoder_init_input", None) decoder_init_kwargs = kwargs.pop("decoder_init_kwargs", {}) initial_finished, initial_inputs, initial_state = decoder.initialize( decoder_init_input, **decoder_init_kwargs ) if enable_tflite_convertible: # Assume the batch_size = 1 for inference. # So we can change 2-D TensorArray into 1-D by reshaping it. tf.debugging.assert_equal( decoder.batch_size, 1, message="TFLite conversion requires a batch size of 1", ) zero_outputs = tf.nest.map_structure( lambda shape, dtype: tf.reshape( tf.zeros(_prepend_batch(decoder.batch_size, shape), dtype=dtype), [-1], ), decoder.output_size, decoder.output_dtype, ) else: zero_outputs = tf.nest.map_structure( lambda shape, dtype: tf.zeros( _prepend_batch(decoder.batch_size, shape), dtype=dtype ), decoder.output_size, decoder.output_dtype, ) if maximum_iterations is not None: initial_finished = tf.logical_or(initial_finished, 0 >= maximum_iterations) initial_sequence_lengths = tf.zeros_like(initial_finished, dtype=tf.int32) initial_time = tf.constant(0, dtype=tf.int32) def _shape(batch_size, from_shape): if not isinstance(from_shape, tf.TensorShape) or from_shape.ndims == 0: return None else: batch_size = tf.get_static_value( tf.convert_to_tensor(batch_size, name="batch_size") ) return tf.TensorShape([batch_size]).concatenate(from_shape) dynamic_size = maximum_iterations is None or not is_xla # The dynamic shape `TensorArray` is not allowed in TFLite yet. dynamic_size = dynamic_size and (not enable_tflite_convertible) def _create_ta(s, d): if enable_tflite_convertible: # TFLite requires 1D element_shape. if isinstance(s, tf.TensorShape) and s.ndims == 0: s = (1,) element_shape = s else: element_shape = _shape(decoder.batch_size, s) return tf.TensorArray( dtype=d, size=0 if dynamic_size else maximum_iterations, dynamic_size=dynamic_size, element_shape=element_shape, ) initial_outputs_ta = tf.nest.map_structure( _create_ta, decoder.output_size, decoder.output_dtype ) def condition( unused_time, unused_outputs_ta, unused_state, unused_inputs, finished, unused_sequence_lengths, ): return tf.logical_not(tf.reduce_all(finished)) def body(time, outputs_ta, state, inputs, finished, sequence_lengths): """Internal while_loop body. Args: time: scalar int32 tensor. outputs_ta: structure of TensorArray. state: (structure of) state tensors and TensorArrays. inputs: (structure of) input tensors. finished: bool tensor (keeping track of what's finished). sequence_lengths: int32 tensor (keeping track of time of finish). 
Returns: `(time + 1, outputs_ta, next_state, next_inputs, next_finished, next_sequence_lengths)`. ``` """ (next_outputs, decoder_state, next_inputs, decoder_finished) = decoder.step( time, inputs, state, training ) decoder_state_sequence_lengths = False if decoder.tracks_own_finished: next_finished = decoder_finished lengths = getattr(decoder_state, "lengths", None) if lengths is not None: # sequence lengths are provided by decoder_state.lengths; # overwrite our sequence lengths. decoder_state_sequence_lengths = True sequence_lengths = tf.cast(lengths, tf.int32) else: next_finished = tf.logical_or(decoder_finished, finished) if decoder_state_sequence_lengths: # Just pass something through the loop; at the next iteration # we'll pull the sequence lengths from the decoder_state again. next_sequence_lengths = sequence_lengths else: next_sequence_lengths = tf.where( tf.logical_not(finished), tf.fill(tf.shape(sequence_lengths), time + 1), sequence_lengths, ) tf.nest.assert_same_structure(state, decoder_state) tf.nest.assert_same_structure(outputs_ta, next_outputs) tf.nest.assert_same_structure(inputs, next_inputs) # Zero out output values past finish if impute_finished: def zero_out_finished(out, zero): if finished.shape.rank < zero.shape.rank: broadcast_finished = tf.broadcast_to( tf.expand_dims(finished, axis=-1), zero.shape ) return tf.where(broadcast_finished, zero, out) else: return tf.where(finished, zero, out) emit = tf.nest.map_structure( zero_out_finished, next_outputs, zero_outputs ) else: emit = next_outputs # Copy through states past finish def _maybe_copy_state(new, cur): # TensorArrays and scalar states get passed through. if isinstance(cur, tf.TensorArray): pass_through = True else: new.set_shape(cur.shape) pass_through = new.shape.ndims == 0 if not pass_through: broadcast_finished = tf.broadcast_to( tf.expand_dims(finished, axis=-1), new.shape ) return tf.where(broadcast_finished, cur, new) else: return new if impute_finished: next_state = tf.nest.map_structure( _maybe_copy_state, decoder_state, state ) else: next_state = decoder_state if enable_tflite_convertible: # Reshape to 1-D. emit = tf.nest.map_structure(lambda x: tf.reshape(x, [-1]), emit) outputs_ta = tf.nest.map_structure( lambda ta, out: ta.write(time, out), outputs_ta, emit ) return ( time + 1, outputs_ta, next_state, next_inputs, next_finished, next_sequence_lengths, ) res = tf.while_loop( condition, body, loop_vars=( initial_time, initial_outputs_ta, initial_state, initial_inputs, initial_finished, initial_sequence_lengths, ), parallel_iterations=parallel_iterations, maximum_iterations=maximum_iterations, swap_memory=swap_memory, ) final_outputs_ta = res[1] final_state = res[2] final_sequence_lengths = res[5] final_outputs = tf.nest.map_structure(lambda ta: ta.stack(), final_outputs_ta) try: final_outputs, final_state = decoder.finalize( final_outputs, final_state, final_sequence_lengths ) except NotImplementedError: pass if not output_time_major: if enable_tflite_convertible: # Reshape the output to the original shape. def _restore_batch(x): return tf.expand_dims(x, [1]) final_outputs = tf.nest.map_structure(_restore_batch, final_outputs) final_outputs = tf.nest.map_structure(_transpose_batch_time, final_outputs) return final_outputs, final_state, final_sequence_lengths def _prepend_batch(batch_size, shape): """Prepends the batch dimension to the shape. If the batch_size value is known statically, this function returns a TensorShape, otherwise a Tensor. 
""" if isinstance(batch_size, tf.Tensor): static_batch_size = tf.get_static_value(batch_size) else: static_batch_size = batch_size if static_batch_size is None: return tf.concat(([batch_size], shape), axis=0) return [static_batch_size] + shape def _transpose_batch_time(tensor): """Transposes the batch and time dimension of tensor if its rank is at least 2.""" shape = tensor.shape if shape.rank is not None and shape.rank < 2: return tensor perm = tf.concat(([1, 0], tf.range(2, tf.rank(tensor))), axis=0) return tf.transpose(tensor, perm)
tensorflow/addons
tensorflow_addons/seq2seq/decoder.py
Python
apache-2.0
23,035
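A minimal sketch of calling tfa.seq2seq.dynamic_decode from the module above with a BasicDecoder; the vocabulary size, dimensions, sampler choice, and start/end token ids are illustrative assumptions, and TensorFlow Addons is assumed to be installed.

import tensorflow as tf
import tensorflow_addons as tfa

batch_size, vocab_size, embed_dim, units = 4, 100, 16, 32
embeddings = tf.random.uniform([vocab_size, embed_dim])   # assumed embedding table

cell = tf.keras.layers.LSTMCell(units)
sampler = tfa.seq2seq.GreedyEmbeddingSampler()
decoder = tfa.seq2seq.BasicDecoder(
    cell, sampler, output_layer=tf.keras.layers.Dense(vocab_size))

# BasicDecoder is a BaseDecoder, so its initialize() arguments are forwarded
# through decoder_init_input / decoder_init_kwargs, as in the code above.
outputs, final_state, lengths = tfa.seq2seq.dynamic_decode(
    decoder,
    maximum_iterations=10,
    decoder_init_input=embeddings,
    decoder_init_kwargs={
        "initial_state": cell.get_initial_state(
            batch_size=batch_size, dtype=tf.float32),
        "start_tokens": tf.fill([batch_size], 1),  # assumed start-token id
        "end_token": 2,                            # assumed end-token id
    },
)
print(outputs.sample_id.shape)   # (batch_size, <= maximum_iterations)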
# -*- coding: utf-8 -*- # Copyright 2022 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from collections import OrderedDict import functools import re from typing import ( Dict, Optional, AsyncIterable, Awaitable, AsyncIterator, Sequence, Tuple, Type, Union, ) import pkg_resources from google.api_core.client_options import ClientOptions from google.api_core import exceptions as core_exceptions from google.api_core import gapic_v1 from google.api_core import retry as retries from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore try: OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] except AttributeError: # pragma: NO COVER OptionalRetry = Union[retries.Retry, object] # type: ignore from google.cloud.bigquery_storage_v1beta2.types import storage from google.cloud.bigquery_storage_v1beta2.types import stream from google.cloud.bigquery_storage_v1beta2.types import table from google.protobuf import timestamp_pb2 # type: ignore from google.rpc import status_pb2 # type: ignore from .transports.base import BigQueryWriteTransport, DEFAULT_CLIENT_INFO from .transports.grpc_asyncio import BigQueryWriteGrpcAsyncIOTransport from .client import BigQueryWriteClient class BigQueryWriteAsyncClient: """BigQuery Write API. The Write API can be used to write data to BigQuery. """ _client: BigQueryWriteClient DEFAULT_ENDPOINT = BigQueryWriteClient.DEFAULT_ENDPOINT DEFAULT_MTLS_ENDPOINT = BigQueryWriteClient.DEFAULT_MTLS_ENDPOINT table_path = staticmethod(BigQueryWriteClient.table_path) parse_table_path = staticmethod(BigQueryWriteClient.parse_table_path) write_stream_path = staticmethod(BigQueryWriteClient.write_stream_path) parse_write_stream_path = staticmethod(BigQueryWriteClient.parse_write_stream_path) common_billing_account_path = staticmethod( BigQueryWriteClient.common_billing_account_path ) parse_common_billing_account_path = staticmethod( BigQueryWriteClient.parse_common_billing_account_path ) common_folder_path = staticmethod(BigQueryWriteClient.common_folder_path) parse_common_folder_path = staticmethod( BigQueryWriteClient.parse_common_folder_path ) common_organization_path = staticmethod( BigQueryWriteClient.common_organization_path ) parse_common_organization_path = staticmethod( BigQueryWriteClient.parse_common_organization_path ) common_project_path = staticmethod(BigQueryWriteClient.common_project_path) parse_common_project_path = staticmethod( BigQueryWriteClient.parse_common_project_path ) common_location_path = staticmethod(BigQueryWriteClient.common_location_path) parse_common_location_path = staticmethod( BigQueryWriteClient.parse_common_location_path ) @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): """Creates an instance of this client using the provided credentials info. Args: info (dict): The service account private key info. args: Additional arguments to pass to the constructor. kwargs: Additional arguments to pass to the constructor. 
Returns: BigQueryWriteAsyncClient: The constructed client. """ return BigQueryWriteClient.from_service_account_info.__func__(BigQueryWriteAsyncClient, info, *args, **kwargs) # type: ignore @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials file. Args: filename (str): The path to the service account private key json file. args: Additional arguments to pass to the constructor. kwargs: Additional arguments to pass to the constructor. Returns: BigQueryWriteAsyncClient: The constructed client. """ return BigQueryWriteClient.from_service_account_file.__func__(BigQueryWriteAsyncClient, filename, *args, **kwargs) # type: ignore from_service_account_json = from_service_account_file @classmethod def get_mtls_endpoint_and_cert_source( cls, client_options: Optional[ClientOptions] = None ): """Return the API endpoint and client cert source for mutual TLS. The client cert source is determined in the following order: (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the client cert source is None. (2) if `client_options.client_cert_source` is provided, use the provided one; if the default client cert source exists, use the default one; otherwise the client cert source is None. The API endpoint is determined in the following order: (1) if `client_options.api_endpoint` if provided, use the provided one. (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the default mTLS endpoint; if the environment variabel is "never", use the default API endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise use the default API endpoint. More details can be found at https://google.aip.dev/auth/4114. Args: client_options (google.api_core.client_options.ClientOptions): Custom options for the client. Only the `api_endpoint` and `client_cert_source` properties may be used in this method. Returns: Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the client cert source to use. Raises: google.auth.exceptions.MutualTLSChannelError: If any errors happen. """ return BigQueryWriteClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore @property def transport(self) -> BigQueryWriteTransport: """Returns the transport used by the client instance. Returns: BigQueryWriteTransport: The transport used by the client instance. """ return self._client.transport get_transport_class = functools.partial( type(BigQueryWriteClient).get_transport_class, type(BigQueryWriteClient) ) def __init__( self, *, credentials: ga_credentials.Credentials = None, transport: Union[str, BigQueryWriteTransport] = "grpc_asyncio", client_options: ClientOptions = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: """Instantiates the big query write client. Args: credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. transport (Union[str, ~.BigQueryWriteTransport]): The transport to use. If set to None, a transport is chosen automatically. client_options (ClientOptions): Custom options for the client. It won't take effect if a ``transport`` instance is provided. (1) The ``api_endpoint`` property can be used to override the default endpoint provided by the client. 
GOOGLE_API_USE_MTLS_ENDPOINT environment variable can also be used to override the endpoint: "always" (always use the default mTLS endpoint), "never" (always use the default regular endpoint) and "auto" (auto switch to the default mTLS endpoint if client certificate is present, this is the default value). However, the ``api_endpoint`` property takes precedence if provided. (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable is "true", then the ``client_cert_source`` property can be used to provide client certificate for mutual TLS transport. If not provided, the default SSL client certificate will be used if present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not set, no client certificate will be used. Raises: google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport creation failed for any reason. """ self._client = BigQueryWriteClient( credentials=credentials, transport=transport, client_options=client_options, client_info=client_info, ) async def create_write_stream( self, request: Union[storage.CreateWriteStreamRequest, dict] = None, *, parent: str = None, write_stream: stream.WriteStream = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> stream.WriteStream: r"""Creates a write stream to the given table. Additionally, every table has a special COMMITTED stream named '_default' to which data can be written. This stream doesn't need to be created using CreateWriteStream. It is a stream that can be used simultaneously by any number of clients. Data written to this stream is considered committed as soon as an acknowledgement is received. .. code-block:: python from google.cloud import bigquery_storage_v1beta2 def sample_create_write_stream(): # Create a client client = bigquery_storage_v1beta2.BigQueryWriteClient() # Initialize request argument(s) request = bigquery_storage_v1beta2.CreateWriteStreamRequest( parent="parent_value", ) # Make the request response = client.create_write_stream(request=request) # Handle the response print(response) Args: request (Union[google.cloud.bigquery_storage_v1beta2.types.CreateWriteStreamRequest, dict]): The request object. Request message for `CreateWriteStream`. parent (:class:`str`): Required. Reference to the table to which the stream belongs, in the format of ``projects/{project}/datasets/{dataset}/tables/{table}``. This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. write_stream (:class:`google.cloud.bigquery_storage_v1beta2.types.WriteStream`): Required. Stream to be created. This corresponds to the ``write_stream`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.bigquery_storage_v1beta2.types.WriteStream: Information about a single stream that gets data inside the storage system. """ # Create or coerce a protobuf request object. # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, write_stream]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." 
) request = storage.CreateWriteStreamRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent if write_stream is not None: request.write_stream = write_stream # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.create_write_stream, default_retry=retries.Retry( initial=0.1, maximum=60.0, multiplier=1.3, predicate=retries.if_exception_type( core_exceptions.DeadlineExceeded, core_exceptions.ResourceExhausted, core_exceptions.ServiceUnavailable, ), deadline=600.0, ), default_timeout=600.0, client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Done; return the response. return response def append_rows( self, requests: AsyncIterator[storage.AppendRowsRequest] = None, *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> Awaitable[AsyncIterable[storage.AppendRowsResponse]]: r"""Appends data to the given stream. If ``offset`` is specified, the ``offset`` is checked against the end of stream. The server returns ``OUT_OF_RANGE`` in ``AppendRowsResponse`` if an attempt is made to append to an offset beyond the current end of the stream or ``ALREADY_EXISTS`` if user provids an ``offset`` that has already been written to. User can retry with adjusted offset within the same RPC stream. If ``offset`` is not specified, append happens at the end of the stream. The response contains the offset at which the append happened. Responses are received in the same order in which requests are sent. There will be one response for each successful request. If the ``offset`` is not set in response, it means append didn't happen due to some errors. If one request fails, all the subsequent requests will also fail until a success request is made again. If the stream is of ``PENDING`` type, data will only be available for read operations after the stream is committed. .. code-block:: python from google.cloud import bigquery_storage_v1beta2 def sample_append_rows(): # Create a client client = bigquery_storage_v1beta2.BigQueryWriteClient() # Initialize request argument(s) request = bigquery_storage_v1beta2.AppendRowsRequest( write_stream="write_stream_value", ) # This method expects an iterator which contains # 'bigquery_storage_v1beta2.AppendRowsRequest' objects # Here we create a generator that yields a single `request` for # demonstrative purposes. requests = [request] def request_generator(): for request in requests: yield request # Make the request stream = client.append_rows(requests=request_generator()) # Handle the response for response in stream: print(response) Args: requests (AsyncIterator[`google.cloud.bigquery_storage_v1beta2.types.AppendRowsRequest`]): The request object AsyncIterator. Request message for `AppendRows`. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: AsyncIterable[google.cloud.bigquery_storage_v1beta2.types.AppendRowsResponse]: Response message for AppendRows. 
""" # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.append_rows, default_retry=retries.Retry( initial=0.1, maximum=60.0, multiplier=1.3, predicate=retries.if_exception_type( core_exceptions.ResourceExhausted, core_exceptions.ServiceUnavailable, ), deadline=86400.0, ), default_timeout=86400.0, client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + (gapic_v1.routing_header.to_grpc_metadata(()),) # Send the request. response = rpc(requests, retry=retry, timeout=timeout, metadata=metadata,) # Done; return the response. return response async def get_write_stream( self, request: Union[storage.GetWriteStreamRequest, dict] = None, *, name: str = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> stream.WriteStream: r"""Gets a write stream. .. code-block:: python from google.cloud import bigquery_storage_v1beta2 def sample_get_write_stream(): # Create a client client = bigquery_storage_v1beta2.BigQueryWriteClient() # Initialize request argument(s) request = bigquery_storage_v1beta2.GetWriteStreamRequest( name="name_value", ) # Make the request response = client.get_write_stream(request=request) # Handle the response print(response) Args: request (Union[google.cloud.bigquery_storage_v1beta2.types.GetWriteStreamRequest, dict]): The request object. Request message for `GetWriteStreamRequest`. name (:class:`str`): Required. Name of the stream to get, in the form of ``projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}``. This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.bigquery_storage_v1beta2.types.WriteStream: Information about a single stream that gets data inside the storage system. """ # Create or coerce a protobuf request object. # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) request = storage.GetWriteStreamRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if name is not None: request.name = name # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.get_write_stream, default_retry=retries.Retry( initial=0.1, maximum=60.0, multiplier=1.3, predicate=retries.if_exception_type( core_exceptions.DeadlineExceeded, core_exceptions.ServiceUnavailable, ), deadline=600.0, ), default_timeout=600.0, client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) # Send the request. response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Done; return the response. 
return response async def finalize_write_stream( self, request: Union[storage.FinalizeWriteStreamRequest, dict] = None, *, name: str = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> storage.FinalizeWriteStreamResponse: r"""Finalize a write stream so that no new data can be appended to the stream. Finalize is not supported on the '_default' stream. .. code-block:: python from google.cloud import bigquery_storage_v1beta2 def sample_finalize_write_stream(): # Create a client client = bigquery_storage_v1beta2.BigQueryWriteClient() # Initialize request argument(s) request = bigquery_storage_v1beta2.FinalizeWriteStreamRequest( name="name_value", ) # Make the request response = client.finalize_write_stream(request=request) # Handle the response print(response) Args: request (Union[google.cloud.bigquery_storage_v1beta2.types.FinalizeWriteStreamRequest, dict]): The request object. Request message for invoking `FinalizeWriteStream`. name (:class:`str`): Required. Name of the stream to finalize, in the form of ``projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}``. This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.bigquery_storage_v1beta2.types.FinalizeWriteStreamResponse: Response message for FinalizeWriteStream. """ # Create or coerce a protobuf request object. # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) request = storage.FinalizeWriteStreamRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if name is not None: request.name = name # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.finalize_write_stream, default_retry=retries.Retry( initial=0.1, maximum=60.0, multiplier=1.3, predicate=retries.if_exception_type( core_exceptions.DeadlineExceeded, core_exceptions.ServiceUnavailable, ), deadline=600.0, ), default_timeout=600.0, client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) # Send the request. response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Done; return the response. return response async def batch_commit_write_streams( self, request: Union[storage.BatchCommitWriteStreamsRequest, dict] = None, *, parent: str = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> storage.BatchCommitWriteStreamsResponse: r"""Atomically commits a group of ``PENDING`` streams that belong to the same ``parent`` table. Streams must be finalized before commit and cannot be committed multiple times. Once a stream is committed, data in the stream becomes available for read operations. .. 
code-block:: python from google.cloud import bigquery_storage_v1beta2 def sample_batch_commit_write_streams(): # Create a client client = bigquery_storage_v1beta2.BigQueryWriteClient() # Initialize request argument(s) request = bigquery_storage_v1beta2.BatchCommitWriteStreamsRequest( parent="parent_value", write_streams=['write_streams_value_1', 'write_streams_value_2'], ) # Make the request response = client.batch_commit_write_streams(request=request) # Handle the response print(response) Args: request (Union[google.cloud.bigquery_storage_v1beta2.types.BatchCommitWriteStreamsRequest, dict]): The request object. Request message for `BatchCommitWriteStreams`. parent (:class:`str`): Required. Parent table that all the streams should belong to, in the form of ``projects/{project}/datasets/{dataset}/tables/{table}``. This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.bigquery_storage_v1beta2.types.BatchCommitWriteStreamsResponse: Response message for BatchCommitWriteStreams. """ # Create or coerce a protobuf request object. # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) request = storage.BatchCommitWriteStreamsRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.batch_commit_write_streams, default_retry=retries.Retry( initial=0.1, maximum=60.0, multiplier=1.3, predicate=retries.if_exception_type( core_exceptions.DeadlineExceeded, core_exceptions.ServiceUnavailable, ), deadline=600.0, ), default_timeout=600.0, client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Done; return the response. return response async def flush_rows( self, request: Union[storage.FlushRowsRequest, dict] = None, *, write_stream: str = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> storage.FlushRowsResponse: r"""Flushes rows to a BUFFERED stream. If users are appending rows to BUFFERED stream, flush operation is required in order for the rows to become available for reading. A Flush operation flushes up to any previously flushed offset in a BUFFERED stream, to the offset specified in the request. Flush is not supported on the \_default stream, since it is not BUFFERED. .. 
code-block:: python from google.cloud import bigquery_storage_v1beta2 def sample_flush_rows(): # Create a client client = bigquery_storage_v1beta2.BigQueryWriteClient() # Initialize request argument(s) request = bigquery_storage_v1beta2.FlushRowsRequest( write_stream="write_stream_value", ) # Make the request response = client.flush_rows(request=request) # Handle the response print(response) Args: request (Union[google.cloud.bigquery_storage_v1beta2.types.FlushRowsRequest, dict]): The request object. Request message for `FlushRows`. write_stream (:class:`str`): Required. The stream that is the target of the flush operation. This corresponds to the ``write_stream`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.bigquery_storage_v1beta2.types.FlushRowsResponse: Respond message for FlushRows. """ # Create or coerce a protobuf request object. # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([write_stream]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) request = storage.FlushRowsRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if write_stream is not None: request.write_stream = write_stream # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.flush_rows, default_retry=retries.Retry( initial=0.1, maximum=60.0, multiplier=1.3, predicate=retries.if_exception_type( core_exceptions.DeadlineExceeded, core_exceptions.ServiceUnavailable, ), deadline=600.0, ), default_timeout=600.0, client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata( (("write_stream", request.write_stream),) ), ) # Send the request. response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Done; return the response. return response async def __aenter__(self): return self async def __aexit__(self, exc_type, exc, tb): await self.transport.close() try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( gapic_version=pkg_resources.get_distribution( "google-cloud-bigquery-storage", ).version, ) except pkg_resources.DistributionNotFound: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() __all__ = ("BigQueryWriteAsyncClient",)
googleapis/python-bigquery-storage
google/cloud/bigquery_storage_v1beta2/services/big_query_write/async_client.py
Python
apache-2.0
35099
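The docstring samples in this module all use the synchronous BigQueryWriteClient; the BigQueryWriteAsyncClient defined here exposes the same surface as coroutines. A minimal sketch of the async variant of create_write_stream, assuming the import path follows the generated layout recorded above and reusing the placeholder parent value from the embedded samples:

import asyncio

from google.cloud import bigquery_storage_v1beta2
from google.cloud.bigquery_storage_v1beta2.services.big_query_write import (
    BigQueryWriteAsyncClient,
)


async def sample_create_write_stream_async():
    # Credentials are resolved from the environment, as in the sync samples.
    client = BigQueryWriteAsyncClient()

    # "parent_value" is a placeholder for
    # projects/{project}/datasets/{dataset}/tables/{table}.
    request = bigquery_storage_v1beta2.CreateWriteStreamRequest(
        parent="parent_value",
    )

    # The async client returns a coroutine, so the call must be awaited.
    response = await client.create_write_stream(request=request)
    print(response)


if __name__ == "__main__":
    asyncio.run(sample_create_write_stream_async())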
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Testing. See the [Testing](https://tensorflow.org/api_docs/python/tf/test) guide. Note: `tf.compat.v1.test.mock` is an alias to the python `mock` or `unittest.mock` depending on the python version. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function # pylint: disable=g-bad-import-order from tensorflow.python.framework import test_util as _test_util from tensorflow.python.platform import googletest as _googletest # pylint: disable=unused-import from tensorflow.python.framework.test_util import assert_equal_graph_def from tensorflow.python.framework.test_util import create_local_cluster from tensorflow.python.framework.test_util import TensorFlowTestCase as TestCase from tensorflow.python.framework.test_util import gpu_device_name from tensorflow.python.framework.test_util import is_gpu_available from tensorflow.python.ops.gradient_checker import compute_gradient_error from tensorflow.python.ops.gradient_checker import compute_gradient # pylint: enable=unused-import,g-bad-import-order import sys from tensorflow.python.util.tf_export import tf_export if sys.version_info.major == 2: import mock # pylint: disable=g-import-not-at-top,unused-import else: from unittest import mock # pylint: disable=g-import-not-at-top,g-importing-member tf_export(v1=['test.mock'])(mock) # Import Benchmark class Benchmark = _googletest.Benchmark # pylint: disable=invalid-name # Import StubOutForTesting class StubOutForTesting = _googletest.StubOutForTesting # pylint: disable=invalid-name @tf_export('test.main') def main(argv=None): """Runs all unit tests.""" _test_util.InstallStackTraceHandler() return _googletest.main(argv) @tf_export(v1=['test.get_temp_dir']) def get_temp_dir(): """Returns a temporary directory for use during tests. There is no need to delete the directory after the test. Returns: The temporary directory. """ return _googletest.GetTempDir() @tf_export(v1=['test.test_src_dir_path']) def test_src_dir_path(relative_path): """Creates an absolute test srcdir path given a relative path. Args: relative_path: a path relative to tensorflow root. e.g. "core/platform". Returns: An absolute path to the linked in runfiles. """ return _googletest.test_src_dir_path(relative_path) @tf_export('test.is_built_with_cuda') def is_built_with_cuda(): """Returns whether TensorFlow was built with CUDA (GPU) support.""" return _test_util.IsGoogleCudaEnabled() @tf_export('test.is_built_with_rocm') def is_built_with_rocm(): """Returns whether TensorFlow was built with ROCm (GPU) support.""" return _test_util.IsBuiltWithROCm() @tf_export('test.is_built_with_gpu_support') def is_built_with_gpu_support(): """Returns whether TensorFlow was built with GPU (i.e. 
CUDA or ROCm) support.""" return is_built_with_cuda() or is_built_with_rocm() @tf_export('test.is_built_with_xla') def is_built_with_xla(): """Returns whether TensorFlow was built with XLA support.""" return _test_util.IsBuiltWithXLA()
jhseu/tensorflow
tensorflow/python/platform/test.py
Python
apache-2.0
3771
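The helpers above back the public tf.test module. A minimal sketch of how they are typically combined in a test file; the SquareTest class and its assertions are illustrative and not part of the module above:

import tensorflow as tf


class SquareTest(tf.test.TestCase):

  def test_square(self):
    # assertAllEqual is provided by tf.test.TestCase (TensorFlowTestCase) and
    # evaluates tensors before comparing.
    self.assertAllEqual(tf.square([2, 3]), [4, 9])

  def test_requires_gpu_build(self):
    # is_built_with_gpu_support() is True when built with either CUDA or ROCm.
    if not tf.test.is_built_with_gpu_support():
      self.skipTest("TensorFlow was built without GPU support")


if __name__ == "__main__":
  tf.test.main()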
#!/usr/bin/env python import click from copy import copy from netCDF4 import Dataset import numpy as np import numpy.ma as ma import os import rasterio import rasterio.warp as rwarp import time import osr from .. import geotools from .. import utils def earth_radius(): srs = osr.SpatialReference() srs.ImportFromEPSG(4326) return srs.GetSemiMajor() def init_nc(dst_ds, transform, lats, lons, years, variables): # Set attributes dst_ds.setncattr('Conventions', u'CF-1.5') dst_ds.setncattr('GDAL', u'GDAL 1.11.3, released 2015/09/16') # Create dimensions dst_ds.createDimension('time', None) dst_ds.createDimension('lat', len(lats)) dst_ds.createDimension('lon', len(lons)) # Create variables times = dst_ds.createVariable("time", "f8", ("time"), zlib=True, least_significant_digit=3) latitudes = dst_ds.createVariable("lat", "f4", ("lat"), zlib=True, least_significant_digit = 3) longitudes = dst_ds.createVariable("lon", "f4", ("lon"), zlib=True, least_significant_digit=3) crs = dst_ds.createVariable('crs', "S1", ()) # Add metadata dst_ds.history = "Created at " + time.ctime(time.time()) dst_ds.source = "gen-sps.py" latitudes.units = "degrees_north" latitudes.long_name = 'latitude' longitudes.units = "degrees_east" longitudes.long_name = "longitude" times.units = "years since 2010-01-01 00:00:00.0" times.calendar = "gregorian" times.standard_name = "time" times.axis = 'T' # Assign data to variables latitudes[:] = lats longitudes[:] = lons times[:] = years srs = osr.SpatialReference() srs.ImportFromWkt(geotools.WGS84_WKT) crs.grid_mapping_name = 'latitude_longitude' crs.spatial_ref = srs.ExportToWkt() crs.GeoTransform = ' '.join(map(str, transform)) crs.longitude_of_prime_meridian = srs.GetPrimeMeridian() crs.semi_major_axis = srs.GetSemiMajor() crs.inverse_flattening = srs.GetInvFlattening() out = {} for name, dtype, units, fill in variables: dst_data = dst_ds.createVariable(name, dtype, ("time", "lat","lon"), zlib = True, least_significant_digit = 4, fill_value = fill) dst_data.units = units dst_data.grid_mapping = 'crs' out[name] = dst_data return out def get_transform(r1, r2): # Get the geo transform using r1 resolution but r2 bounds dst = rasterio.open(r1) src = rasterio.open(r2) #src_bounds = np.around(src.bounds, decimals=3) affine, width, height = rwarp.calculate_default_transform(src.crs, dst.crs, src.width, src.height, *src.bounds, resolution=dst.res) ul = affine * (0.5, 0.5) lr = affine * (width - 0.5, height - 0.5) lats = np.linspace(ul[1], lr[1], height) lons = np.linspace(ul[0], lr[0], width) cratio = np.prod(dst.res) / np.prod(src.res) #cratio = 1.0 static = rasterio.open(utils.luh2_static('carea')) carea = static.read(1, window=static.window(*src.bounds)) rcs = (np.sin(np.radians(lats + dst.res[0] / 2.0)) - np.sin(np.radians(lats - dst.res[0] / 2.0))) * \ (dst.res[0] * np.pi/180) * earth_radius() ** 2 / 1e6 #carea *= rcs.reshape(carea.shape[0], 1) return affine, lats, lons, dst.res, cratio# / carea def mixing(year): if year % 10 == 0: return [year] y0 = year - (year % 10) return (y0, y0 + 10) def resample(ds, bidx, resolution, resampling, out): arr = ds.read(bidx, masked=True) nodata = ds.nodatavals[bidx - 1] if nodata is None: #"'nodata' must be set!" 
nodata = -9999 if ds.crs.data == {}: crs = ds.crs.from_string(u'epsg:4326') else: crs = ds.crs newaff, width, height = rwarp.calculate_default_transform(crs, crs, ds.width, ds.height, *ds.bounds, resolution=resolution) out.mask.fill(False) rwarp.reproject(arr, out, src_transform = ds.affine, dst_transform = newaff, width = width, height = height, src_nodata = nodata, dst_nodata = nodata, src_crs = crs, resampling = resampling) out.mask = np.where(out == nodata, 1, 0) def main(): years = range(2010, 2101) ssps = ['ssp%d' % i for i in range(1, 6)] variables = [(ssp, 'f4', 'ppl/km^2', -9999) for ssp in ssps] fname = '%s/luh2/un_codes-full.tif' % utils.outdir() affine, lats, lons, res, cfudge = get_transform(fname, utils.sps(ssps[0], 2010)) arr = (ma.empty((len(lats), len(lons)), fill_value=-9999), ma.empty((len(lats), len(lons)), fill_value=-9999)) oname = '%s/luh2/sps.nc' % utils.outdir() with Dataset(oname, 'w') as out: data = init_nc(out, affine.to_gdal(), lats, lons, years, variables) for ssp in ssps: print(ssp) with click.progressbar(enumerate(years), length=len(years)) as bar: for idx, year in bar: yy = mixing(year) files = map(lambda y: utils.sps(ssp, y), yy) rasters = map(rasterio.open, files) if len(rasters) == 1: resample(rasters[0], 1, res, rwarp.Resampling.average, arr[0]) data[ssp][idx, :, :] = np.clip(arr[0], 0, None) * cfudge else: f0 = (year % 10) / 10.0 resample(rasters[0], 1, res, rwarp.Resampling.average, arr[0]) resample(rasters[1], 1, res, rwarp.Resampling.average, arr[1]) data[ssp][idx, :, :] = ((1 - f0) * np.clip(arr[0], 0, None) + f0 * np.clip(arr[1], 0, None)) * cfudge if __name__ == '__main__': main()
ricardog/raster-project
projections/scripts/gen_sps.py
Python
apache-2.0
6241
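The interpolation at the heart of the script above (mixing() plus the f0 blend in main()) maps an arbitrary year onto its two bracketing decadal SSP rasters. A standalone sketch of that logic, with made-up arrays standing in for the resampled rasters:

import numpy as np


def mixing(year):
    # Same rule as in the script: exact decadal years need one raster,
    # every other year needs the two surrounding decades.
    if year % 10 == 0:
        return [year]
    y0 = year - (year % 10)
    return (y0, y0 + 10)


def blend(year, density_by_year):
    # density_by_year maps a decadal year to a (clipped) density array.
    years = mixing(year)
    if len(years) == 1:
        return np.clip(density_by_year[years[0]], 0, None)
    f0 = (year % 10) / 10.0
    lo = np.clip(density_by_year[years[0]], 0, None)
    hi = np.clip(density_by_year[years[1]], 0, None)
    return (1 - f0) * lo + f0 * hi


# Made-up values: 2013 is 30% of the way from the 2010 raster to the 2020 one.
density_by_year = {2010: np.array([10.0, 20.0]), 2020: np.array([30.0, 40.0])}
print(blend(2013, density_by_year))  # -> [16. 26.]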
#!/usr/bin/env python # -*- encoding: utf-8 -*- # vim: set et sw=4 ts=4 sts=4 ff=unix fenc=utf8: # Author: Binux<[email protected]> # http://binux.me # Created on 2014-02-07 17:05:11 import itertools import json import logging import os import time from collections import deque from six import iteritems, itervalues from six.moves import queue as Queue from pyspider.libs import counter, utils from pyspider.libs.base_handler import BaseHandler from .task_queue import TaskQueue logger = logging.getLogger('scheduler') class Project(object): ''' project for scheduler ''' def __init__(self, scheduler, project_info): ''' ''' self.scheduler = scheduler self.active_tasks = deque(maxlen=scheduler.ACTIVE_TASKS) self.task_queue = TaskQueue() self.task_loaded = False self._selected_tasks = False # selected tasks after recent pause self._send_finished_event_wait = 0 # wait for scheduler.FAIL_PAUSE_NUM loop steps before sending the event self.md5sum = None self._send_on_get_info = False self.waiting_get_info = True self._paused = False self._paused_time = 0 self._unpause_last_seen = None self.update(project_info) @property def paused(self): # unpaused --(last FAIL_PAUSE_NUM task failed)--> paused --(PAUSE_TIME)--> unpause_checking # unpaused <--(last UNPAUSE_CHECK_NUM task have success)--| # paused <--(last UNPAUSE_CHECK_NUM task no success)--| if not self._paused: fail_cnt = 0 for _, task in self.active_tasks: # ignore select task if task.get('type') == self.scheduler.TASK_PACK: continue if 'process' not in task['track']: logger.error('process not in task, %r', task) if task['track']['process']['ok']: break else: fail_cnt += 1 if fail_cnt >= self.scheduler.FAIL_PAUSE_NUM: break if fail_cnt >= self.scheduler.FAIL_PAUSE_NUM: self._paused = True self._paused_time = time.time() elif self._paused is True and (self._paused_time + self.scheduler.PAUSE_TIME < time.time()): self._paused = 'checking' self._unpause_last_seen = self.active_tasks[0][1] if len(self.active_tasks) else None elif self._paused == 'checking': cnt = 0 fail_cnt = 0 for _, task in self.active_tasks: if task is self._unpause_last_seen: break # ignore select task if task.get('type') == self.scheduler.TASK_PACK: continue cnt += 1 if task['track']['process']['ok']: # break with enough check cnt cnt = max(cnt, self.scheduler.UNPAUSE_CHECK_NUM) break else: fail_cnt += 1 if cnt >= self.scheduler.UNPAUSE_CHECK_NUM: if fail_cnt == cnt: self._paused = True self._paused_time = time.time() else: self._paused = False return self._paused is True def update(self, project_info): self.project_info = project_info self.name = project_info['name'] self.group = project_info['group'] self.db_status = project_info['status'] self.updatetime = project_info['updatetime'] md5sum = utils.md5string(project_info['script']) if (self.md5sum != md5sum or self.waiting_get_info) and self.active: self._send_on_get_info = True self.waiting_get_info = True self.md5sum = md5sum if self.active: self.task_queue.rate = project_info['rate'] self.task_queue.burst = project_info['burst'] else: self.task_queue.rate = 0 self.task_queue.burst = 0 logger.info('project %s updated, status:%s, paused:%s, %d tasks', self.name, self.db_status, self.paused, len(self.task_queue)) def on_get_info(self, info): self.waiting_get_info = False self.min_tick = info.get('min_tick', 0) self.retry_delay = info.get('retry_delay', {}) self.crawl_config = info.get('crawl_config', {}) @property def active(self): return self.db_status in ('RUNNING', 'DEBUG') class Scheduler(object): UPDATE_PROJECT_INTERVAL = 5 
* 60 default_schedule = { 'priority': 0, 'retries': 3, 'exetime': 0, 'age': -1, 'itag': None, } LOOP_LIMIT = 1000 LOOP_INTERVAL = 0.1 ACTIVE_TASKS = 100 INQUEUE_LIMIT = 0 EXCEPTION_LIMIT = 3 DELETE_TIME = 24 * 60 * 60 DEFAULT_RETRY_DELAY = { 0: 30, 1: 1*60*60, 2: 6*60*60, 3: 12*60*60, '': 24*60*60 } FAIL_PAUSE_NUM = 10 PAUSE_TIME = 5*60 UNPAUSE_CHECK_NUM = 3 TASK_PACK = 1 STATUS_PACK = 2 # current not used REQUEST_PACK = 3 # current not used def __init__(self, taskdb, projectdb, newtask_queue, status_queue, out_queue, data_path='./data', resultdb=None): self.taskdb = taskdb self.projectdb = projectdb self.resultdb = resultdb self.newtask_queue = newtask_queue self.status_queue = status_queue self.out_queue = out_queue self.data_path = data_path self._send_buffer = deque() self._quit = False self._exceptions = 0 self.projects = dict() self._force_update_project = False self._last_update_project = 0 self._last_tick = int(time.time()) self._postpone_request = [] self._cnt = { "5m_time": counter.CounterManager( lambda: counter.TimebaseAverageEventCounter(30, 10)), "5m": counter.CounterManager( lambda: counter.TimebaseAverageWindowCounter(30, 10)), "1h": counter.CounterManager( lambda: counter.TimebaseAverageWindowCounter(60, 60)), "1d": counter.CounterManager( lambda: counter.TimebaseAverageWindowCounter(10 * 60, 24 * 6)), "all": counter.CounterManager( lambda: counter.TotalCounter()), } self._cnt['1h'].load(os.path.join(self.data_path, 'scheduler.1h')) self._cnt['1d'].load(os.path.join(self.data_path, 'scheduler.1d')) self._cnt['all'].load(os.path.join(self.data_path, 'scheduler.all')) self._last_dump_cnt = 0 def _update_projects(self): '''Check project update''' now = time.time() if ( not self._force_update_project and self._last_update_project + self.UPDATE_PROJECT_INTERVAL > now ): return for project in self.projectdb.check_update(self._last_update_project): self._update_project(project) logger.debug("project: %s updated.", project['name']) self._force_update_project = False self._last_update_project = now get_info_attributes = ['min_tick', 'retry_delay', 'crawl_config'] def _update_project(self, project): '''update one project''' if project['name'] not in self.projects: self.projects[project['name']] = Project(self, project) else: self.projects[project['name']].update(project) project = self.projects[project['name']] if project._send_on_get_info: # update project runtime info from processor by sending a _on_get_info # request, result is in status_page.track.save project._send_on_get_info = False self.on_select_task({ 'taskid': '_on_get_info', 'project': project.name, 'url': 'data:,_on_get_info', 'status': self.taskdb.SUCCESS, 'fetch': { 'save': self.get_info_attributes, }, 'process': { 'callback': '_on_get_info', }, }) # load task queue when project is running and delete task_queue when project is stoped if project.active: if not project.task_loaded: self._load_tasks(project) project.task_loaded = True else: if project.task_loaded: project.task_queue = TaskQueue() project.task_loaded = False if project not in self._cnt['all']: self._update_project_cnt(project.name) scheduler_task_fields = ['taskid', 'project', 'schedule', ] def _load_tasks(self, project): '''load tasks from database''' task_queue = project.task_queue for task in self.taskdb.load_tasks( self.taskdb.ACTIVE, project.name, self.scheduler_task_fields ): taskid = task['taskid'] _schedule = task.get('schedule', self.default_schedule) priority = _schedule.get('priority', self.default_schedule['priority']) exetime = 
_schedule.get('exetime', self.default_schedule['exetime']) task_queue.put(taskid, priority, exetime) project.task_loaded = True logger.debug('project: %s loaded %d tasks.', project.name, len(task_queue)) if project not in self._cnt['all']: self._update_project_cnt(project) self._cnt['all'].value((project.name, 'pending'), len(project.task_queue)) def _update_project_cnt(self, project_name): status_count = self.taskdb.status_count(project_name) self._cnt['all'].value( (project_name, 'success'), status_count.get(self.taskdb.SUCCESS, 0) ) self._cnt['all'].value( (project_name, 'failed'), status_count.get(self.taskdb.FAILED, 0) + status_count.get(self.taskdb.BAD, 0) ) self._cnt['all'].value( (project_name, 'pending'), status_count.get(self.taskdb.ACTIVE, 0) ) def task_verify(self, task): ''' return False if any of 'taskid', 'project', 'url' is not in task dict or project in not in task_queue ''' for each in ('taskid', 'project', 'url', ): if each not in task or not task[each]: logger.error('%s not in task: %.200r', each, task) return False if task['project'] not in self.projects: logger.error('unknown project: %s', task['project']) return False project = self.projects[task['project']] if not project.active: logger.error('project %s not started, please set status to RUNNING or DEBUG', task['project']) return False return True def insert_task(self, task): '''insert task into database''' return self.taskdb.insert(task['project'], task['taskid'], task) def update_task(self, task): '''update task in database''' return self.taskdb.update(task['project'], task['taskid'], task) def put_task(self, task): '''put task to task queue''' _schedule = task.get('schedule', self.default_schedule) self.projects[task['project']].task_queue.put( task['taskid'], priority=_schedule.get('priority', self.default_schedule['priority']), exetime=_schedule.get('exetime', self.default_schedule['exetime']) ) def send_task(self, task, force=True): ''' dispatch task to fetcher out queue may have size limit to prevent block, a send_buffer is used ''' try: self.out_queue.put_nowait(task) except Queue.Full: if force: self._send_buffer.appendleft(task) else: raise def _check_task_done(self): '''Check status queue''' cnt = 0 try: while True: task = self.status_queue.get_nowait() # check _on_get_info result here if task.get('taskid') == '_on_get_info' and 'project' in task and 'track' in task: if task['project'] not in self.projects: continue project = self.projects[task['project']] project.on_get_info(task['track'].get('save') or {}) logger.info( '%s on_get_info %r', task['project'], task['track'].get('save', {}) ) continue elif not self.task_verify(task): continue self.on_task_status(task) cnt += 1 except Queue.Empty: pass return cnt merge_task_fields = ['taskid', 'project', 'url', 'status', 'schedule', 'lastcrawltime'] def _check_request(self): '''Check new task queue''' # check _postpone_request first todo = [] for task in self._postpone_request: if task['project'] not in self.projects: continue if self.projects[task['project']].task_queue.is_processing(task['taskid']): todo.append(task) else: self.on_request(task) self._postpone_request = todo tasks = {} while len(tasks) < self.LOOP_LIMIT: try: task = self.newtask_queue.get_nowait() except Queue.Empty: break if isinstance(task, list): _tasks = task else: _tasks = (task, ) for task in _tasks: if not self.task_verify(task): continue if task['taskid'] in self.projects[task['project']].task_queue: if not task.get('schedule', {}).get('force_update', False): logger.debug('ignore 
newtask %(project)s:%(taskid)s %(url)s', task) continue if task['taskid'] in tasks: if not task.get('schedule', {}).get('force_update', False): continue tasks[task['taskid']] = task for task in itervalues(tasks): self.on_request(task) return len(tasks) def _check_cronjob(self): """Check projects cronjob tick, return True when a new tick is sended""" now = time.time() self._last_tick = int(self._last_tick) if now - self._last_tick < 1: return False self._last_tick += 1 for project in itervalues(self.projects): if not project.active: continue if project.waiting_get_info: continue if project.min_tick == 0: continue if self._last_tick % int(project.min_tick) != 0: continue self.on_select_task({ 'taskid': '_on_cronjob', 'project': project.name, 'url': 'data:,_on_cronjob', 'status': self.taskdb.SUCCESS, 'fetch': { 'save': { 'tick': self._last_tick, }, }, 'process': { 'callback': '_on_cronjob', }, }) return True request_task_fields = [ 'taskid', 'project', 'url', 'status', 'schedule', 'fetch', 'process', 'track', 'lastcrawltime' ] def _check_select(self): '''Select task to fetch & process''' while self._send_buffer: _task = self._send_buffer.pop() try: # use force=False here to prevent automatic send_buffer append and get exception self.send_task(_task, False) except Queue.Full: self._send_buffer.append(_task) break if self.out_queue.full(): return {} taskids = [] cnt = 0 cnt_dict = dict() limit = self.LOOP_LIMIT for project in itervalues(self.projects): if not project.active: continue # only check project pause when select new tasks, cronjob and new request still working if project.paused: continue if project.waiting_get_info: continue if cnt >= limit: break # task queue task_queue = project.task_queue task_queue.check_update() project_cnt = 0 # check send_buffer here. when not empty, out_queue may blocked. 
Not sending tasks while cnt < limit and project_cnt < limit / 10: taskid = task_queue.get() if not taskid: break taskids.append((project.name, taskid)) if taskid != 'on_finished': project_cnt += 1 cnt += 1 cnt_dict[project.name] = project_cnt if project_cnt: project._selected_tasks = True project._send_finished_event_wait = 0 # check and send finished event to project if not project_cnt and len(task_queue) == 0 and project._selected_tasks: # wait for self.FAIL_PAUSE_NUM steps to make sure all tasks in queue have been processed if project._send_finished_event_wait < self.FAIL_PAUSE_NUM: project._send_finished_event_wait += 1 else: project._selected_tasks = False project._send_finished_event_wait = 0 self.newtask_queue.put({ 'project': project.name, 'taskid': 'on_finished', 'url': 'data:,on_finished', 'process': { 'callback': 'on_finished', }, "schedule": { "age": 0, "priority": 9, "force_update": True, }, }) for project, taskid in taskids: self._load_put_task(project, taskid) return cnt_dict def _load_put_task(self, project, taskid): try: task = self.taskdb.get_task(project, taskid, fields=self.request_task_fields) except ValueError: logger.error('bad task pack %s:%s', project, taskid) return if not task: return task = self.on_select_task(task) def _print_counter_log(self): # print top 5 active counters keywords = ('pending', 'success', 'retry', 'failed') total_cnt = {} project_actives = [] project_fails = [] for key in keywords: total_cnt[key] = 0 for project, subcounter in iteritems(self._cnt['5m']): actives = 0 for key in keywords: cnt = subcounter.get(key, None) if cnt: cnt = cnt.sum total_cnt[key] += cnt actives += cnt project_actives.append((actives, project)) fails = subcounter.get('failed', None) if fails: project_fails.append((fails.sum, project)) top_2_fails = sorted(project_fails, reverse=True)[:2] top_3_actives = sorted([x for x in project_actives if x[1] not in top_2_fails], reverse=True)[:5 - len(top_2_fails)] log_str = ("in 5m: new:%(pending)d,success:%(success)d," "retry:%(retry)d,failed:%(failed)d" % total_cnt) for _, project in itertools.chain(top_3_actives, top_2_fails): subcounter = self._cnt['5m'][project].to_dict(get_value='sum') log_str += " %s:%d,%d,%d,%d" % (project, subcounter.get('pending', 0), subcounter.get('success', 0), subcounter.get('retry', 0), subcounter.get('failed', 0)) logger.info(log_str) def _dump_cnt(self): '''Dump counters to file''' self._cnt['1h'].dump(os.path.join(self.data_path, 'scheduler.1h')) self._cnt['1d'].dump(os.path.join(self.data_path, 'scheduler.1d')) self._cnt['all'].dump(os.path.join(self.data_path, 'scheduler.all')) def _try_dump_cnt(self): '''Dump counters every 60 seconds''' now = time.time() if now - self._last_dump_cnt > 60: self._last_dump_cnt = now self._dump_cnt() self._print_counter_log() def _check_delete(self): '''Check project delete''' now = time.time() for project in list(itervalues(self.projects)): if project.db_status != 'STOP': continue if now - project.updatetime < self.DELETE_TIME: continue if 'delete' not in self.projectdb.split_group(project.group): continue logger.warning("deleting project: %s!", project.name) del self.projects[project.name] self.taskdb.drop(project.name) self.projectdb.drop(project.name) if self.resultdb: self.resultdb.drop(project.name) for each in self._cnt.values(): del each[project.name] def __len__(self): return sum(len(x.task_queue) for x in itervalues(self.projects)) def quit(self): '''Set quit signal''' self._quit = True # stop xmlrpc server if hasattr(self, 'xmlrpc_server'): 
self.xmlrpc_ioloop.add_callback(self.xmlrpc_server.stop) self.xmlrpc_ioloop.add_callback(self.xmlrpc_ioloop.stop) def run_once(self): '''comsume queues and feed tasks to fetcher, once''' self._update_projects() self._check_task_done() self._check_request() while self._check_cronjob(): pass self._check_select() self._check_delete() self._try_dump_cnt() def run(self): '''Start scheduler loop''' logger.info("scheduler starting...") while not self._quit: try: time.sleep(self.LOOP_INTERVAL) self.run_once() self._exceptions = 0 except KeyboardInterrupt: break except Exception as e: logger.exception(e) self._exceptions += 1 if self._exceptions > self.EXCEPTION_LIMIT: break continue logger.info("scheduler exiting...") self._dump_cnt() def trigger_on_start(self, project): '''trigger an on_start callback of project''' self.newtask_queue.put({ "project": project, "taskid": "on_start", "url": "data:,on_start", "process": { "callback": "on_start", }, }) def xmlrpc_run(self, port=23333, bind='127.0.0.1', logRequests=False): '''Start xmlrpc interface''' from pyspider.libs.wsgi_xmlrpc import WSGIXMLRPCApplication application = WSGIXMLRPCApplication() application.register_function(self.quit, '_quit') application.register_function(self.__len__, 'size') def dump_counter(_time, _type): try: return self._cnt[_time].to_dict(_type) except: logger.exception('') application.register_function(dump_counter, 'counter') def new_task(task): if self.task_verify(task): self.newtask_queue.put(task) return True return False application.register_function(new_task, 'newtask') def send_task(task): '''dispatch task to fetcher''' self.send_task(task) return True application.register_function(send_task, 'send_task') def update_project(): self._force_update_project = True application.register_function(update_project, 'update_project') def get_active_tasks(project=None, limit=100): allowed_keys = set(( 'type', 'taskid', 'project', 'status', 'url', 'lastcrawltime', 'updatetime', 'track', )) track_allowed_keys = set(( 'ok', 'time', 'follows', 'status_code', )) iters = [iter(x.active_tasks) for k, x in iteritems(self.projects) if x and (k == project if project else True)] tasks = [next(x, None) for x in iters] result = [] while len(result) < limit and tasks and not all(x is None for x in tasks): updatetime, task = t = max(t for t in tasks if t) i = tasks.index(t) tasks[i] = next(iters[i], None) for key in list(task): if key == 'track': for k in list(task[key].get('fetch', [])): if k not in track_allowed_keys: del task[key]['fetch'][k] for k in list(task[key].get('process', [])): if k not in track_allowed_keys: del task[key]['process'][k] if key in allowed_keys: continue del task[key] result.append(t) # fix for "<type 'exceptions.TypeError'>:dictionary key must be string" # have no idea why return json.loads(json.dumps(result)) application.register_function(get_active_tasks, 'get_active_tasks') def get_projects_pause_status(): result = {} for project_name, project in iteritems(self.projects): result[project_name] = project.paused return result application.register_function(get_projects_pause_status, 'get_projects_pause_status') def webui_update(): return { 'pause_status': get_projects_pause_status(), 'counter': { '5m_time': dump_counter('5m_time', 'avg'), '5m': dump_counter('5m', 'sum'), '1h': dump_counter('1h', 'sum'), '1d': dump_counter('1d', 'sum'), 'all': dump_counter('all', 'sum'), }, } application.register_function(webui_update, 'webui_update') import tornado.wsgi import tornado.ioloop import tornado.httpserver container = 
tornado.wsgi.WSGIContainer(application) self.xmlrpc_ioloop = tornado.ioloop.IOLoop() self.xmlrpc_server = tornado.httpserver.HTTPServer(container, io_loop=self.xmlrpc_ioloop) self.xmlrpc_server.listen(port=port, address=bind) logger.info('scheduler.xmlrpc listening on %s:%s', bind, port) self.xmlrpc_ioloop.start() def on_request(self, task): if self.INQUEUE_LIMIT and len(self.projects[task['project']].task_queue) >= self.INQUEUE_LIMIT: logger.debug('overflow task %(project)s:%(taskid)s %(url)s', task) return oldtask = self.taskdb.get_task(task['project'], task['taskid'], fields=self.merge_task_fields) if oldtask: return self.on_old_request(task, oldtask) else: return self.on_new_request(task) def on_new_request(self, task): '''Called when a new request is arrived''' task['status'] = self.taskdb.ACTIVE self.insert_task(task) self.put_task(task) project = task['project'] self._cnt['5m'].event((project, 'pending'), +1) self._cnt['1h'].event((project, 'pending'), +1) self._cnt['1d'].event((project, 'pending'), +1) self._cnt['all'].event((project, 'pending'), +1) logger.info('new task %(project)s:%(taskid)s %(url)s', task) return task def on_old_request(self, task, old_task): '''Called when a crawled task is arrived''' now = time.time() _schedule = task.get('schedule', self.default_schedule) old_schedule = old_task.get('schedule', {}) if _schedule.get('force_update') and self.projects[task['project']].task_queue.is_processing(task['taskid']): # when a task is in processing, the modify may conflict with the running task. # postpone the modify after task finished. logger.info('postpone modify task %(project)s:%(taskid)s %(url)s', task) self._postpone_request.append(task) return restart = False schedule_age = _schedule.get('age', self.default_schedule['age']) if _schedule.get('itag') and _schedule['itag'] != old_schedule.get('itag'): restart = True elif schedule_age >= 0 and schedule_age + (old_task.get('lastcrawltime', 0) or 0) < now: restart = True elif _schedule.get('force_update'): restart = True if not restart: logger.debug('ignore newtask %(project)s:%(taskid)s %(url)s', task) return if _schedule.get('cancel'): logger.info('cancel task %(project)s:%(taskid)s %(url)s', task) task['status'] = self.taskdb.BAD self.update_task(task) self.projects[task['project']].task_queue.delete(task['taskid']) return task task['status'] = self.taskdb.ACTIVE self.update_task(task) self.put_task(task) project = task['project'] if old_task['status'] != self.taskdb.ACTIVE: self._cnt['5m'].event((project, 'pending'), +1) self._cnt['1h'].event((project, 'pending'), +1) self._cnt['1d'].event((project, 'pending'), +1) if old_task['status'] == self.taskdb.SUCCESS: self._cnt['all'].event((project, 'success'), -1).event((project, 'pending'), +1) elif old_task['status'] == self.taskdb.FAILED: self._cnt['all'].event((project, 'failed'), -1).event((project, 'pending'), +1) logger.info('restart task %(project)s:%(taskid)s %(url)s', task) return task def on_task_status(self, task): '''Called when a status pack is arrived''' try: procesok = task['track']['process']['ok'] if not self.projects[task['project']].task_queue.done(task['taskid']): logging.error('not processing pack: %(project)s:%(taskid)s %(url)s', task) return None except KeyError as e: logger.error("Bad status pack: %s", e) return None if procesok: ret = self.on_task_done(task) else: ret = self.on_task_failed(task) if task['track']['fetch'].get('time'): self._cnt['5m_time'].event((task['project'], 'fetch_time'), task['track']['fetch']['time']) if 
task['track']['process'].get('time'): self._cnt['5m_time'].event((task['project'], 'process_time'), task['track']['process'].get('time')) self.projects[task['project']].active_tasks.appendleft((time.time(), task)) return ret def on_task_done(self, task): '''Called when a task is done and success, called by `on_task_status`''' task['status'] = self.taskdb.SUCCESS task['lastcrawltime'] = time.time() if 'schedule' in task: if task['schedule'].get('auto_recrawl') and 'age' in task['schedule']: task['status'] = self.taskdb.ACTIVE next_exetime = task['schedule'].get('age') task['schedule']['exetime'] = time.time() + next_exetime self.put_task(task) else: del task['schedule'] self.update_task(task) project = task['project'] self._cnt['5m'].event((project, 'success'), +1) self._cnt['1h'].event((project, 'success'), +1) self._cnt['1d'].event((project, 'success'), +1) self._cnt['all'].event((project, 'success'), +1).event((project, 'pending'), -1) logger.info('task done %(project)s:%(taskid)s %(url)s', task) return task def on_task_failed(self, task): '''Called when a task is failed, called by `on_task_status`''' if 'schedule' not in task: old_task = self.taskdb.get_task(task['project'], task['taskid'], fields=['schedule']) if old_task is None: logging.error('unknown status pack: %s' % task) return task['schedule'] = old_task.get('schedule', {}) retries = task['schedule'].get('retries', self.default_schedule['retries']) retried = task['schedule'].get('retried', 0) project_info = self.projects[task['project']] retry_delay = project_info.retry_delay or self.DEFAULT_RETRY_DELAY next_exetime = retry_delay.get(retried, retry_delay.get('', self.DEFAULT_RETRY_DELAY[''])) if task['schedule'].get('auto_recrawl') and 'age' in task['schedule']: next_exetime = min(next_exetime, task['schedule'].get('age')) else: if retried >= retries: next_exetime = -1 elif 'age' in task['schedule'] and next_exetime > task['schedule'].get('age'): next_exetime = task['schedule'].get('age') if next_exetime < 0: task['status'] = self.taskdb.FAILED task['lastcrawltime'] = time.time() self.update_task(task) project = task['project'] self._cnt['5m'].event((project, 'failed'), +1) self._cnt['1h'].event((project, 'failed'), +1) self._cnt['1d'].event((project, 'failed'), +1) self._cnt['all'].event((project, 'failed'), +1).event((project, 'pending'), -1) logger.info('task failed %(project)s:%(taskid)s %(url)s' % task) return task else: task['schedule']['retried'] = retried + 1 task['schedule']['exetime'] = time.time() + next_exetime task['lastcrawltime'] = time.time() self.update_task(task) self.put_task(task) project = task['project'] self._cnt['5m'].event((project, 'retry'), +1) self._cnt['1h'].event((project, 'retry'), +1) self._cnt['1d'].event((project, 'retry'), +1) # self._cnt['all'].event((project, 'retry'), +1) logger.info('task retry %d/%d %%(project)s:%%(taskid)s %%(url)s' % ( retried, retries), task) return task def on_select_task(self, task): '''Called when a task is selected to fetch & process''' # inject informations about project logger.info('select %(project)s:%(taskid)s %(url)s', task) project_info = self.projects.get(task['project']) assert project_info, 'no such project' task['type'] = self.TASK_PACK task['group'] = project_info.group task['project_md5sum'] = project_info.md5sum task['project_updatetime'] = project_info.updatetime # lazy join project.crawl_config if getattr(project_info, 'crawl_config', None): task = BaseHandler.task_join_crawl_config(task, project_info.crawl_config) 
project_info.active_tasks.appendleft((time.time(), task)) self.send_task(task) return task from tornado import gen class OneScheduler(Scheduler): """ Scheduler Mixin class for one mode overwirted send_task method call processor.on_task(fetcher.fetch(task)) instead of consuming queue """ def _check_select(self): """ interactive mode of select tasks """ if not self.interactive: return super(OneScheduler, self)._check_select() # waiting for running tasks if self.running_task > 0: return is_crawled = [] def run(project=None): return crawl('on_start', project=project) def crawl(url, project=None, **kwargs): """ Crawl given url, same parameters as BaseHandler.crawl url - url or taskid, parameters will be used if in taskdb project - can be ignored if only one project exists. """ # looking up the project instance if project is None: if len(self.projects) == 1: project = list(self.projects.keys())[0] else: raise LookupError('You need specify the project: %r' % list(self.projects.keys())) project_data = self.processor.project_manager.get(project) if not project_data: raise LookupError('no such project: %s' % project) # get task package instance = project_data['instance'] instance._reset() task = instance.crawl(url, **kwargs) if isinstance(task, list): raise Exception('url list is not allowed in interactive mode') # check task in taskdb if not kwargs: dbtask = self.taskdb.get_task(task['project'], task['taskid'], fields=self.request_task_fields) if not dbtask: dbtask = self.taskdb.get_task(task['project'], task['url'], fields=self.request_task_fields) if dbtask: task = dbtask # select the task self.on_select_task(task) is_crawled.append(True) shell.ask_exit() def quit_interactive(): '''Quit interactive mode''' is_crawled.append(True) self.interactive = False shell.ask_exit() def quit_pyspider(): '''Close pyspider''' is_crawled[:] = [] shell.ask_exit() shell = utils.get_python_console() banner = ( 'pyspider shell - Select task\n' 'crawl(url, project=None, **kwargs) - same parameters as BaseHandler.crawl\n' 'quit_interactive() - Quit interactive mode\n' 'quit_pyspider() - Close pyspider' ) if hasattr(shell, 'show_banner'): shell.show_banner(banner) shell.interact() else: shell.interact(banner) if not is_crawled: self.ioloop.add_callback(self.ioloop.stop) def __getattr__(self, name): """patch for crawl(url, callback=self.index_page) API""" if self.interactive: return name raise AttributeError(name) def on_task_status(self, task): """Ignore not processing error in interactive mode""" if not self.interactive: super(OneScheduler, self).on_task_status(task) try: procesok = task['track']['process']['ok'] except KeyError as e: logger.error("Bad status pack: %s", e) return None if procesok: ret = self.on_task_done(task) else: ret = self.on_task_failed(task) if task['track']['fetch'].get('time'): self._cnt['5m_time'].event((task['project'], 'fetch_time'), task['track']['fetch']['time']) if task['track']['process'].get('time'): self._cnt['5m_time'].event((task['project'], 'process_time'), task['track']['process'].get('time')) self.projects[task['project']].active_tasks.appendleft((time.time(), task)) return ret def init_one(self, ioloop, fetcher, processor, result_worker=None, interactive=False): self.ioloop = ioloop self.fetcher = fetcher self.processor = processor self.result_worker = result_worker self.interactive = interactive self.running_task = 0 @gen.coroutine def do_task(self, task): self.running_task += 1 result = yield gen.Task(self.fetcher.fetch, task) type, task, response = result.args 
self.processor.on_task(task, response) # do with message while not self.processor.inqueue.empty(): _task, _response = self.processor.inqueue.get() self.processor.on_task(_task, _response) # do with results while not self.processor.result_queue.empty(): _task, _result = self.processor.result_queue.get() if self.result_worker: self.result_worker.on_result(_task, _result) self.running_task -= 1 def send_task(self, task, force=True): if self.fetcher.http_client.free_size() <= 0: if force: self._send_buffer.appendleft(task) else: raise self.outqueue.Full self.ioloop.add_future(self.do_task(task), lambda x: x.result()) def run(self): import tornado.ioloop tornado.ioloop.PeriodicCallback(self.run_once, 100, io_loop=self.ioloop).start() self.ioloop.start() def quit(self): self.ioloop.stop() logger.info("scheduler exiting...") import random import threading from pyspider.database.sqlite.sqlitebase import SQLiteMixin class ThreadBaseScheduler(Scheduler): def __init__(self, threads=4, *args, **kwargs): self.local = threading.local() super(ThreadBaseScheduler, self).__init__(*args, **kwargs) if isinstance(self.taskdb, SQLiteMixin): self.threads = 1 else: self.threads = threads self._taskdb = self.taskdb self._projectdb = self.projectdb self._resultdb = self.resultdb self.thread_objs = [] self.thread_queues = [] self._start_threads() assert len(self.thread_queues) > 0 @property def taskdb(self): if not hasattr(self.local, 'taskdb'): self.taskdb = self._taskdb.copy() return self.local.taskdb @taskdb.setter def taskdb(self, taskdb): self.local.taskdb = taskdb @property def projectdb(self): if not hasattr(self.local, 'projectdb'): self.projectdb = self._projectdb.copy() return self.local.projectdb @projectdb.setter def projectdb(self, projectdb): self.local.projectdb = projectdb @property def resultdb(self): if not hasattr(self.local, 'resultdb'): self.resultdb = self._resultdb.copy() return self.local.resultdb @resultdb.setter def resultdb(self, resultdb): self.local.resultdb = resultdb def _start_threads(self): for i in range(self.threads): queue = Queue.Queue() thread = threading.Thread(target=self._thread_worker, args=(queue, )) thread.daemon = True thread.start() self.thread_objs.append(thread) self.thread_queues.append(queue) def _thread_worker(self, queue): while True: method, args, kwargs = queue.get() try: method(*args, **kwargs) except Exception as e: logger.exception(e) def _run_in_thread(self, method, *args, **kwargs): i = kwargs.pop('_i', None) block = kwargs.pop('_block', False) if i is None: while True: for queue in self.thread_queues: if queue.empty(): break else: if block: time.sleep(0.1) continue else: queue = self.thread_queues[random.randint(0, len(self.thread_queues)-1)] break else: queue = self.thread_queues[i % len(self.thread_queues)] queue.put((method, args, kwargs)) if block: self._wait_thread() def _wait_thread(self): while True: if all(queue.empty() for queue in self.thread_queues): break time.sleep(0.1) def _update_project(self, project): self._run_in_thread(Scheduler._update_project, self, project) def on_task_status(self, task): i = hash(task['taskid']) self._run_in_thread(Scheduler.on_task_status, self, task, _i=i) def on_request(self, task): i = hash(task['taskid']) self._run_in_thread(Scheduler.on_request, self, task, _i=i) def _load_put_task(self, project, taskid): i = hash(taskid) self._run_in_thread(Scheduler._load_put_task, self, project, taskid, _i=i) def run_once(self): super(ThreadBaseScheduler, self).run_once() self._wait_thread()
nicozhang/pyspider
pyspider/scheduler/scheduler.py
Python
apache-2.0
46109
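# For reference, the OneScheduler in the scheduler code above implements
# pyspider's "one mode": instead of handing tasks to separate fetcher /
# processor / result-worker processes through queues, it fetches and processes
# each task inline and then drains the processor's output queues itself.
# A simplified synchronous sketch of that single-process pipeline follows
# (the real do_task() uses a Tornado coroutine); OneModePipeline and the
# _Stub* classes are illustrative stand-ins, not pyspider's real components.
try:
    import queue  # Python 3
except ImportError:  # pragma: no cover
    import Queue as queue  # Python 2


class OneModePipeline(object):
    def __init__(self, fetcher, processor, result_worker=None):
        self.fetcher = fetcher
        self.processor = processor
        self.result_worker = result_worker

    def send_task(self, task):
        # fetch and process right away instead of enqueueing for another process
        response = self.fetcher.fetch(task)
        self.processor.on_task(task, response)
        # handle follow-up (task, response) pairs emitted while processing
        while not self.processor.inqueue.empty():
            _task, _response = self.processor.inqueue.get()
            self.processor.on_task(_task, _response)
        # hand finished results to the optional result worker
        while not self.processor.result_queue.empty():
            _task, _result = self.processor.result_queue.get()
            if self.result_worker:
                self.result_worker.on_result(_task, _result)


class _StubFetcher(object):
    def fetch(self, task):
        return {'url': task['url'], 'status_code': 200, 'content': ''}


class _StubProcessor(object):
    def __init__(self):
        self.inqueue = queue.Queue()       # follow-up (task, response) pairs
        self.result_queue = queue.Queue()  # finished (task, result) pairs

    def on_task(self, task, response):
        self.result_queue.put((task, {'fetched': response['url']}))


class _StubResultWorker(object):
    def on_result(self, task, result):
        print('result for %s: %r' % (task.get('taskid'), result))


if __name__ == '__main__':
    pipeline = OneModePipeline(_StubFetcher(), _StubProcessor(),
                               _StubResultWorker())
    pipeline.send_task({'taskid': 'demo', 'url': 'http://example.com/'})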
# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2011 Justin Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Handles all processes relating to instances (guest vms). The :py:class:`ComputeManager` class is a :py:class:`nova.manager.Manager` that handles RPC calls relating to creating instances. It is responsible for building a disk image, launching it via the underlying virtualization driver, responding to calls to check its state, attaching persistent storage, and terminating it. """ import base64 import contextlib import functools import socket import sys import time import traceback import uuid from cinderclient import exceptions as cinder_exception import eventlet.event from eventlet import greenthread import eventlet.timeout from oslo.config import cfg from oslo import messaging import six from nova import block_device from nova.cells import rpcapi as cells_rpcapi from nova.cloudpipe import pipelib from nova import compute from nova.compute import flavors from nova.compute import power_state from nova.compute import resource_tracker from nova.compute import rpcapi as compute_rpcapi from nova.compute import task_states from nova.compute import utils as compute_utils from nova.compute import vm_states from nova import conductor from nova import consoleauth import nova.context from nova import exception from nova import hooks from nova.i18n import _ from nova.i18n import _LE from nova.i18n import _LI from nova.i18n import _LW from nova import image from nova.image import glance from nova import manager from nova import network from nova.network import model as network_model from nova.network.security_group import openstack_driver from nova import objects from nova.objects import base as obj_base from nova.objects import instance as instance_obj from nova.objects import quotas as quotas_obj from nova.openstack.common import excutils from nova.openstack.common import jsonutils from nova.openstack.common import log as logging from nova.openstack.common import periodic_task from nova.openstack.common import strutils from nova.openstack.common import timeutils from nova import paths from nova import rpc from nova import safe_utils from nova.scheduler import rpcapi as scheduler_rpcapi from nova import utils from nova.virt import block_device as driver_block_device from nova.virt import driver from nova.virt import event as virtevent from nova.virt import storage_users from nova.virt import virtapi from nova import volume from nova.volume import encryptors compute_opts = [ cfg.StrOpt('console_host', default=socket.gethostname(), help='Console proxy host to use to connect ' 'to instances on this host.'), cfg.StrOpt('default_access_ip_network_name', help='Name of network to use to set access IPs for instances'), cfg.BoolOpt('defer_iptables_apply', default=False, help='Whether to batch up the application of IPTables rules' ' during a host restart and apply all at the end of the' 
' init phase'), cfg.StrOpt('instances_path', default=paths.state_path_def('instances'), help='Where instances are stored on disk'), cfg.BoolOpt('instance_usage_audit', default=False, help="Generate periodic compute.instance.exists" " notifications"), cfg.IntOpt('live_migration_retry_count', default=30, help="Number of 1 second retries needed in live_migration"), cfg.BoolOpt('resume_guests_state_on_host_boot', default=False, help='Whether to start guests that were running before the ' 'host rebooted'), cfg.IntOpt('network_allocate_retries', default=0, help="Number of times to retry network allocation on failures"), cfg.IntOpt('block_device_allocate_retries', default=60, help='Number of times to retry block device' ' allocation on failures') ] interval_opts = [ cfg.IntOpt('bandwidth_poll_interval', default=600, help='Interval to pull network bandwidth usage info. Not ' 'supported on all hypervisors. Set to -1 to disable. ' 'Setting this to 0 will disable, but this will change in ' 'the K release to mean "run at the default rate".'), # TODO(gilliard): Clean the above message after the K release cfg.IntOpt('sync_power_state_interval', default=600, help='Interval to sync power states between the database and ' 'the hypervisor. Set to -1 to disable. ' 'Setting this to 0 will disable, but this will change in ' 'Juno to mean "run at the default rate".'), # TODO(gilliard): Clean the above message after the K release cfg.IntOpt("heal_instance_info_cache_interval", default=60, help="Number of seconds between instance info_cache self " "healing updates"), cfg.IntOpt('reclaim_instance_interval', default=0, help='Interval in seconds for reclaiming deleted instances'), cfg.IntOpt('volume_usage_poll_interval', default=0, help='Interval in seconds for gathering volume usages'), cfg.IntOpt('shelved_poll_interval', default=3600, help='Interval in seconds for polling shelved instances to ' 'offload. Set to -1 to disable.' 'Setting this to 0 will disable, but this will change in ' 'Juno to mean "run at the default rate".'), # TODO(gilliard): Clean the above message after the K release cfg.IntOpt('shelved_offload_time', default=0, help='Time in seconds before a shelved instance is eligible ' 'for removing from a host. -1 never offload, 0 offload ' 'when shelved'), cfg.IntOpt('instance_delete_interval', default=300, help=('Interval in seconds for retrying failed instance file ' 'deletes')), cfg.IntOpt('block_device_allocate_retries_interval', default=3, help='Waiting time interval (seconds) between block' ' device allocation retries on failures') ] timeout_opts = [ cfg.IntOpt("reboot_timeout", default=0, help="Automatically hard reboot an instance if it has been " "stuck in a rebooting state longer than N seconds. " "Set to 0 to disable."), cfg.IntOpt("instance_build_timeout", default=0, help="Amount of time in seconds an instance can be in BUILD " "before going into ERROR status." "Set to 0 to disable."), cfg.IntOpt("rescue_timeout", default=0, help="Automatically unrescue an instance after N seconds. " "Set to 0 to disable."), cfg.IntOpt("resize_confirm_window", default=0, help="Automatically confirm resizes after N seconds. " "Set to 0 to disable."), ] running_deleted_opts = [ cfg.StrOpt("running_deleted_instance_action", default="reap", help="Action to take if a running deleted instance is detected." "Valid options are 'noop', 'log', 'shutdown', or 'reap'. 
" "Set to 'noop' to take no action."), cfg.IntOpt("running_deleted_instance_poll_interval", default=1800, help="Number of seconds to wait between runs of the cleanup " "task."), cfg.IntOpt("running_deleted_instance_timeout", default=0, help="Number of seconds after being deleted when a running " "instance should be considered eligible for cleanup."), ] instance_cleaning_opts = [ cfg.IntOpt('maximum_instance_delete_attempts', default=5, help=('The number of times to attempt to reap an instance\'s ' 'files.')), ] CONF = cfg.CONF CONF.register_opts(compute_opts) CONF.register_opts(interval_opts) CONF.register_opts(timeout_opts) CONF.register_opts(running_deleted_opts) CONF.register_opts(instance_cleaning_opts) CONF.import_opt('allow_resize_to_same_host', 'nova.compute.api') CONF.import_opt('console_topic', 'nova.console.rpcapi') CONF.import_opt('host', 'nova.netconf') CONF.import_opt('my_ip', 'nova.netconf') CONF.import_opt('vnc_enabled', 'nova.vnc') CONF.import_opt('enabled', 'nova.spice', group='spice') CONF.import_opt('enable', 'nova.cells.opts', group='cells') CONF.import_opt('image_cache_subdirectory_name', 'nova.virt.imagecache') CONF.import_opt('image_cache_manager_interval', 'nova.virt.imagecache') CONF.import_opt('enabled', 'nova.rdp', group='rdp') CONF.import_opt('html5_proxy_base_url', 'nova.rdp', group='rdp') LOG = logging.getLogger(__name__) get_notifier = functools.partial(rpc.get_notifier, service='compute') wrap_exception = functools.partial(exception.wrap_exception, get_notifier=get_notifier) @utils.expects_func_args('migration') def errors_out_migration(function): """Decorator to error out migration on failure.""" @functools.wraps(function) def decorated_function(self, context, *args, **kwargs): try: return function(self, context, *args, **kwargs) except Exception: with excutils.save_and_reraise_exception(): migration = kwargs['migration'] status = migration.status if status not in ['migrating', 'post-migrating']: return migration.status = 'error' try: migration.save(context.elevated()) except Exception: LOG.debug('Error setting migration status ' 'for instance %s.', migration.instance_uuid, exc_info=True) return decorated_function @utils.expects_func_args('instance') def reverts_task_state(function): """Decorator to revert task_state on failure.""" @functools.wraps(function) def decorated_function(self, context, *args, **kwargs): try: return function(self, context, *args, **kwargs) except exception.UnexpectedTaskStateError as e: # Note(maoy): unexpected task state means the current # task is preempted. Do not clear task state in this # case. with excutils.save_and_reraise_exception(): LOG.info(_("Task possibly preempted: %s") % e.format_message()) except Exception: with excutils.save_and_reraise_exception(): try: self._instance_update(context, kwargs['instance']['uuid'], task_state=None) except Exception: pass return decorated_function @utils.expects_func_args('instance') def wrap_instance_fault(function): """Wraps a method to catch exceptions related to instances. This decorator wraps a method to catch any exceptions having to do with an instance that may get thrown. It then logs an instance fault in the db. """ @functools.wraps(function) def decorated_function(self, context, *args, **kwargs): try: return function(self, context, *args, **kwargs) except exception.InstanceNotFound: raise except Exception as e: # NOTE(gtt): If argument 'instance' is in args rather than kwargs, # we will get a KeyError exception which will cover up the real # exception. 
So, we update kwargs with the values from args first. # then, we can get 'instance' from kwargs easily. kwargs.update(dict(zip(function.func_code.co_varnames[2:], args))) with excutils.save_and_reraise_exception(): compute_utils.add_instance_fault_from_exc(context, kwargs['instance'], e, sys.exc_info()) return decorated_function @utils.expects_func_args('instance') def wrap_instance_event(function): """Wraps a method to log the event taken on the instance, and result. This decorator wraps a method to log the start and result of an event, as part of an action taken on an instance. """ @functools.wraps(function) def decorated_function(self, context, *args, **kwargs): wrapped_func = utils.get_wrapped_function(function) keyed_args = safe_utils.getcallargs(wrapped_func, context, *args, **kwargs) instance_uuid = keyed_args['instance']['uuid'] event_name = 'compute_{0}'.format(function.func_name) with compute_utils.EventReporter(context, event_name, instance_uuid): return function(self, context, *args, **kwargs) return decorated_function @utils.expects_func_args('image_id', 'instance') def delete_image_on_error(function): """Used for snapshot related method to ensure the image created in compute.api is deleted when an error occurs. """ @functools.wraps(function) def decorated_function(self, context, image_id, instance, *args, **kwargs): try: return function(self, context, image_id, instance, *args, **kwargs) except Exception: with excutils.save_and_reraise_exception(): LOG.debug("Cleaning up image %s", image_id, exc_info=True, instance=instance) try: self.image_api.delete(context, image_id) except Exception: LOG.exception(_LE("Error while trying to clean up " "image %s"), image_id, instance=instance) return decorated_function # TODO(danms): Remove me after Icehouse # NOTE(mikal): if the method being decorated has more than one decorator, then # put this one first. Otherwise the various exception handling decorators do # not function correctly. def object_compat(function): """Wraps a method that expects a new-world instance This provides compatibility for callers passing old-style dict instances. 
""" @functools.wraps(function) def decorated_function(self, context, *args, **kwargs): def _load_instance(instance_or_dict): if isinstance(instance_or_dict, dict): instance = objects.Instance._from_db_object( context, objects.Instance(), instance_or_dict, expected_attrs=metas) instance._context = context return instance return instance_or_dict metas = ['metadata', 'system_metadata'] try: kwargs['instance'] = _load_instance(kwargs['instance']) except KeyError: args = (_load_instance(args[0]),) + args[1:] migration = kwargs.get('migration') if isinstance(migration, dict): migration = objects.Migration._from_db_object( context.elevated(), objects.Migration(), migration) kwargs['migration'] = migration return function(self, context, *args, **kwargs) return decorated_function # TODO(danms): Remove me after Icehouse def aggregate_object_compat(function): """Wraps a method that expects a new-world aggregate.""" @functools.wraps(function) def decorated_function(self, context, *args, **kwargs): aggregate = kwargs.get('aggregate') if isinstance(aggregate, dict): aggregate = objects.Aggregate._from_db_object( context.elevated(), objects.Aggregate(), aggregate) kwargs['aggregate'] = aggregate return function(self, context, *args, **kwargs) return decorated_function class InstanceEvents(object): def __init__(self): self._events = {} @staticmethod def _lock_name(instance): return '%s-%s' % (instance.uuid, 'events') def prepare_for_instance_event(self, instance, event_name): """Prepare to receive an event for an instance. This will register an event for the given instance that we will wait on later. This should be called before initiating whatever action will trigger the event. The resulting eventlet.event.Event object should be wait()'d on to ensure completion. :param instance: the instance for which the event will be generated :param event_name: the name of the event we're expecting :returns: an event object that should be wait()'d on """ @utils.synchronized(self._lock_name(instance)) def _create_or_get_event(): if instance.uuid not in self._events: self._events.setdefault(instance.uuid, {}) return self._events[instance.uuid].setdefault( event_name, eventlet.event.Event()) LOG.debug('Preparing to wait for external event %(event)s', {'event': event_name}, instance=instance) return _create_or_get_event() def pop_instance_event(self, instance, event): """Remove a pending event from the wait list. This will remove a pending event from the wait list so that it can be used to signal the waiters to wake up. :param instance: the instance for which the event was generated :param event: the nova.objects.external_event.InstanceExternalEvent that describes the event :returns: the eventlet.event.Event object on which the waiters are blocked """ @utils.synchronized(self._lock_name(instance)) def _pop_event(): events = self._events.get(instance.uuid) if not events: return None _event = events.pop(event.key, None) if not events: del self._events[instance.uuid] return _event return _pop_event() def clear_events_for_instance(self, instance): """Remove all pending events for an instance. This will remove all events currently pending for an instance and return them (indexed by event name). 
:param instance: the instance for which events should be purged :returns: a dictionary of {event_name: eventlet.event.Event} """ @utils.synchronized(self._lock_name(instance)) def _clear_events(): # NOTE(danms): Use getitem syntax for the instance until # all the callers are using objects return self._events.pop(instance['uuid'], {}) return _clear_events() class ComputeVirtAPI(virtapi.VirtAPI): def __init__(self, compute): super(ComputeVirtAPI, self).__init__() self._compute = compute def provider_fw_rule_get_all(self, context): return self._compute.conductor_api.provider_fw_rule_get_all(context) def _default_error_callback(self, event_name, instance): raise exception.NovaException(_('Instance event failed')) @contextlib.contextmanager def wait_for_instance_event(self, instance, event_names, deadline=300, error_callback=None): """Plan to wait for some events, run some code, then wait. This context manager will first create plans to wait for the provided event_names, yield, and then wait for all the scheduled events to complete. Note that this uses an eventlet.timeout.Timeout to bound the operation, so callers should be prepared to catch that failure and handle that situation appropriately. If the event is not received by the specified timeout deadline, eventlet.timeout.Timeout is raised. If the event is received but did not have a 'completed' status, a NovaException is raised. If an error_callback is provided, instead of raising an exception as detailed above for the failure case, the callback will be called with the event_name and instance, and can return True to continue waiting for the rest of the events, False to stop processing, or raise an exception which will bubble up to the waiter. :param instance: The instance for which an event is expected :param event_names: A list of event names. Each element can be a string event name or tuple of strings to indicate (name, tag). :param deadline: Maximum number of seconds we should wait for all of the specified events to arrive. 
:param error_callback: A function to be called if an event arrives """ if error_callback is None: error_callback = self._default_error_callback events = {} for event_name in event_names: if isinstance(event_name, tuple): name, tag = event_name event_name = objects.InstanceExternalEvent.make_key( name, tag) events[event_name] = ( self._compute.instance_events.prepare_for_instance_event( instance, event_name)) yield with eventlet.timeout.Timeout(deadline): for event_name, event in events.items(): actual_event = event.wait() if actual_event.status == 'completed': continue decision = error_callback(event_name, instance) if decision is False: break class ComputeManager(manager.Manager): """Manages the running instances from creation to destruction.""" target = messaging.Target(version='3.32') def __init__(self, compute_driver=None, *args, **kwargs): """Load configuration options and connect to the hypervisor.""" self.virtapi = ComputeVirtAPI(self) self.network_api = network.API() self.volume_api = volume.API() self.image_api = image.API() self._last_host_check = 0 self._last_bw_usage_poll = 0 self._bw_usage_supported = True self._last_bw_usage_cell_update = 0 self.compute_api = compute.API() self.compute_rpcapi = compute_rpcapi.ComputeAPI() self.conductor_api = conductor.API() self.compute_task_api = conductor.ComputeTaskAPI() self.is_neutron_security_groups = ( openstack_driver.is_neutron_security_groups()) self.consoleauth_rpcapi = consoleauth.rpcapi.ConsoleAuthAPI() self.cells_rpcapi = cells_rpcapi.CellsAPI() self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI() self._resource_tracker_dict = {} self.instance_events = InstanceEvents() super(ComputeManager, self).__init__(service_name="compute", *args, **kwargs) # NOTE(russellb) Load the driver last. It may call back into the # compute manager via the virtapi, so we want it to be fully # initialized before that happens. 
self.driver = driver.load_compute_driver(self.virtapi, compute_driver) self.use_legacy_block_device_info = \ self.driver.need_legacy_block_device_info def _get_resource_tracker(self, nodename): rt = self._resource_tracker_dict.get(nodename) if not rt: if not self.driver.node_is_available(nodename): raise exception.NovaException( _("%s is not a valid node managed by this " "compute host.") % nodename) rt = resource_tracker.ResourceTracker(self.host, self.driver, nodename) self._resource_tracker_dict[nodename] = rt return rt def _update_resource_tracker(self, context, instance): """Let the resource tracker know that an instance has changed state.""" if (instance['host'] == self.host and self.driver.node_is_available(instance['node'])): rt = self._get_resource_tracker(instance.get('node')) rt.update_usage(context, instance) def _instance_update(self, context, instance_uuid, **kwargs): """Update an instance in the database using kwargs as value.""" instance_ref = self.conductor_api.instance_update(context, instance_uuid, **kwargs) self._update_resource_tracker(context, instance_ref) return instance_ref def _set_instance_error_state(self, context, instance): instance_uuid = instance['uuid'] try: self._instance_update(context, instance_uuid, vm_state=vm_states.ERROR) except exception.InstanceNotFound: LOG.debug('Instance has been destroyed from under us while ' 'trying to set it to ERROR', instance_uuid=instance_uuid) def _set_instance_obj_error_state(self, context, instance): try: instance.vm_state = vm_states.ERROR instance.save() except exception.InstanceNotFound: LOG.debug('Instance has been destroyed from under us while ' 'trying to set it to ERROR', instance=instance) def _get_instances_on_driver(self, context, filters=None): """Return a list of instance records for the instances found on the hypervisor which satisfy the specified filters. If filters=None return a list of instance records for all the instances found on the hypervisor. """ if not filters: filters = {} try: driver_uuids = self.driver.list_instance_uuids() filters['uuid'] = driver_uuids local_instances = objects.InstanceList.get_by_filters( context, filters, use_slave=True) return local_instances except NotImplementedError: pass # The driver doesn't support uuids listing, so we'll have # to brute force. driver_instances = self.driver.list_instances() instances = objects.InstanceList.get_by_filters(context, filters, use_slave=True) name_map = dict((instance.name, instance) for instance in instances) local_instances = [] for driver_instance in driver_instances: instance = name_map.get(driver_instance) if not instance: continue local_instances.append(instance) return local_instances def _destroy_evacuated_instances(self, context): """Destroys evacuated instances. While nova-compute was down, the instances running on it could be evacuated to another host. Check that the instances reported by the driver are still associated with this host. If they are not, destroy them, with the exception of instances which are in the MIGRATING, RESIZE_MIGRATING, RESIZE_MIGRATED, RESIZE_FINISH task state or RESIZED vm state. 
""" our_host = self.host filters = {'deleted': False} local_instances = self._get_instances_on_driver(context, filters) for instance in local_instances: if instance.host != our_host: if (instance.task_state in [task_states.MIGRATING, task_states.RESIZE_MIGRATING, task_states.RESIZE_MIGRATED, task_states.RESIZE_FINISH] or instance.vm_state in [vm_states.RESIZED]): LOG.debug('Will not delete instance as its host (' '%(instance_host)s) is not equal to our ' 'host (%(our_host)s) but its task state is ' '(%(task_state)s) and vm state is ' '(%(vm_state)s)', {'instance_host': instance.host, 'our_host': our_host, 'task_state': instance.task_state, 'vm_state': instance.vm_state}, instance=instance) continue LOG.info(_('Deleting instance as its host (' '%(instance_host)s) is not equal to our ' 'host (%(our_host)s).'), {'instance_host': instance.host, 'our_host': our_host}, instance=instance) try: network_info = self._get_instance_nw_info(context, instance) bdi = self._get_instance_block_device_info(context, instance) destroy_disks = not (self._is_instance_storage_shared( context, instance)) except exception.InstanceNotFound: network_info = network_model.NetworkInfo() bdi = {} LOG.info(_('Instance has been marked deleted already, ' 'removing it from the hypervisor.'), instance=instance) # always destroy disks if the instance was deleted destroy_disks = True self.driver.destroy(context, instance, network_info, bdi, destroy_disks) def _is_instance_storage_shared(self, context, instance): shared_storage = True data = None try: data = self.driver.check_instance_shared_storage_local(context, instance) if data: shared_storage = (self.compute_rpcapi. check_instance_shared_storage(context, instance, data)) except NotImplementedError: LOG.warning(_('Hypervisor driver does not support ' 'instance shared storage check, ' 'assuming it\'s not on shared storage'), instance=instance) shared_storage = False except Exception: LOG.exception(_LE('Failed to check if instance shared'), instance=instance) finally: if data: self.driver.check_instance_shared_storage_cleanup(context, data) return shared_storage def _complete_partial_deletion(self, context, instance): """Complete deletion for instances in DELETED status but not marked as deleted in the DB """ instance.destroy() bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( context, instance.uuid) quotas = objects.Quotas(context) project_id, user_id = quotas_obj.ids_from_instance(context, instance) quotas.reserve(context, project_id=project_id, user_id=user_id, instances=-1, cores=-instance.vcpus, ram=-instance.memory_mb) self._complete_deletion(context, instance, bdms, quotas, instance.system_metadata) def _complete_deletion(self, context, instance, bdms, quotas, system_meta): if quotas: quotas.commit() # ensure block device mappings are not leaked for bdm in bdms: bdm.destroy() self._notify_about_instance_usage(context, instance, "delete.end", system_metadata=system_meta) if CONF.vnc_enabled or CONF.spice.enabled: if CONF.cells.enable: self.cells_rpcapi.consoleauth_delete_tokens(context, instance.uuid) else: self.consoleauth_rpcapi.delete_tokens_for_instance(context, instance.uuid) def _init_instance(self, context, instance): '''Initialize this instance during service init.''' # Instances that are shut down, or in an error state can not be # initialized and are not attempted to be recovered. The exception # to this are instances that are in RESIZE_MIGRATING or DELETING, # which are dealt with further down. 
if (instance.vm_state == vm_states.SOFT_DELETED or (instance.vm_state == vm_states.ERROR and instance.task_state not in (task_states.RESIZE_MIGRATING, task_states.DELETING))): LOG.debug("Instance is in %s state.", instance.vm_state, instance=instance) return if instance.vm_state == vm_states.DELETED: try: self._complete_partial_deletion(context, instance) except Exception: # we don't want that an exception blocks the init_host msg = _LE('Failed to complete a deletion') LOG.exception(msg, instance=instance) return if (instance.vm_state == vm_states.BUILDING or instance.task_state in [task_states.SCHEDULING, task_states.BLOCK_DEVICE_MAPPING, task_states.NETWORKING, task_states.SPAWNING]): # NOTE(dave-mcnally) compute stopped before instance was fully # spawned so set to ERROR state. This is safe to do as the state # may be set by the api but the host is not so if we get here the # instance has already been scheduled to this particular host. LOG.debug("Instance failed to spawn correctly, " "setting to ERROR state", instance=instance) instance.task_state = None instance.vm_state = vm_states.ERROR instance.save() return if (instance.vm_state != vm_states.ERROR and instance.task_state in [task_states.IMAGE_SNAPSHOT_PENDING, task_states.IMAGE_PENDING_UPLOAD, task_states.IMAGE_UPLOADING, task_states.IMAGE_SNAPSHOT]): LOG.debug("Instance in transitional state %s at start-up " "clearing task state", instance['task_state'], instance=instance) instance.task_state = None instance.save() if instance.task_state == task_states.DELETING: try: LOG.info(_('Service started deleting the instance during ' 'the previous run, but did not finish. Restarting ' 'the deletion now.'), instance=instance) instance.obj_load_attr('metadata') instance.obj_load_attr('system_metadata') bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( context, instance.uuid) # FIXME(comstud): This needs fixed. We should be creating # reservations and updating quotas, because quotas # wouldn't have been updated for this instance since it is # still in DELETING. See bug 1296414. # # Create a dummy quota object for now. 
quotas = objects.Quotas.from_reservations( context, None, instance=instance) self._delete_instance(context, instance, bdms, quotas) except Exception: # we don't want that an exception blocks the init_host msg = _LE('Failed to complete a deletion') LOG.exception(msg, instance=instance) self._set_instance_error_state(context, instance) return try_reboot, reboot_type = self._retry_reboot(context, instance) current_power_state = self._get_power_state(context, instance) if try_reboot: LOG.debug("Instance in transitional state (%(task_state)s) at " "start-up and power state is (%(power_state)s), " "triggering reboot", {'task_state': instance['task_state'], 'power_state': current_power_state}, instance=instance) self.compute_rpcapi.reboot_instance(context, instance, block_device_info=None, reboot_type=reboot_type) return elif (current_power_state == power_state.RUNNING and instance.task_state in [task_states.REBOOT_STARTED, task_states.REBOOT_STARTED_HARD]): LOG.warning(_("Instance in transitional state " "(%(task_state)s) at start-up and power state " "is (%(power_state)s), clearing task state"), {'task_state': instance['task_state'], 'power_state': current_power_state}, instance=instance) instance.task_state = None instance.vm_state = vm_states.ACTIVE instance.save() if instance.task_state == task_states.POWERING_OFF: try: LOG.debug("Instance in transitional state %s at start-up " "retrying stop request", instance['task_state'], instance=instance) self.stop_instance(context, instance) except Exception: # we don't want that an exception blocks the init_host msg = _LE('Failed to stop instance') LOG.exception(msg, instance=instance) return if instance.task_state == task_states.POWERING_ON: try: LOG.debug("Instance in transitional state %s at start-up " "retrying start request", instance['task_state'], instance=instance) self.start_instance(context, instance) except Exception: # we don't want that an exception blocks the init_host msg = _LE('Failed to start instance') LOG.exception(msg, instance=instance) return net_info = compute_utils.get_nw_info_for_instance(instance) try: self.driver.plug_vifs(instance, net_info) except NotImplementedError as e: LOG.debug(e, instance=instance) if instance.task_state == task_states.RESIZE_MIGRATING: # We crashed during resize/migration, so roll back for safety try: # NOTE(mriedem): check old_vm_state for STOPPED here, if it's # not in system_metadata we default to True for backwards # compatibility power_on = (instance.system_metadata.get('old_vm_state') != vm_states.STOPPED) block_dev_info = self._get_instance_block_device_info(context, instance) self.driver.finish_revert_migration(context, instance, net_info, block_dev_info, power_on) except Exception as e: LOG.exception(_LE('Failed to revert crashed migration'), instance=instance) finally: LOG.info(_('Instance found in migrating state during ' 'startup. 
Resetting task_state'), instance=instance) instance.task_state = None instance.save() db_state = instance.power_state drv_state = self._get_power_state(context, instance) expect_running = (db_state == power_state.RUNNING and drv_state != db_state) LOG.debug('Current state is %(drv_state)s, state in DB is ' '%(db_state)s.', {'drv_state': drv_state, 'db_state': db_state}, instance=instance) if expect_running and CONF.resume_guests_state_on_host_boot: LOG.info(_('Rebooting instance after nova-compute restart.'), instance=instance) block_device_info = \ self._get_instance_block_device_info(context, instance) try: self.driver.resume_state_on_host_boot( context, instance, net_info, block_device_info) except NotImplementedError: LOG.warning(_('Hypervisor driver does not support ' 'resume guests'), instance=instance) except Exception: # NOTE(vish): The instance failed to resume, so we set the # instance to error and attempt to continue. LOG.warning(_('Failed to resume instance'), instance=instance) self._set_instance_error_state(context, instance) elif drv_state == power_state.RUNNING: # VMwareAPI drivers will raise an exception try: self.driver.ensure_filtering_rules_for_instance( instance, net_info) except NotImplementedError: LOG.warning(_('Hypervisor driver does not support ' 'firewall rules'), instance=instance) def _retry_reboot(self, context, instance): current_power_state = self._get_power_state(context, instance) current_task_state = instance.task_state retry_reboot = False reboot_type = compute_utils.get_reboot_type(current_task_state, current_power_state) pending_soft = (current_task_state == task_states.REBOOT_PENDING and instance.vm_state in vm_states.ALLOW_SOFT_REBOOT) pending_hard = (current_task_state == task_states.REBOOT_PENDING_HARD and instance.vm_state in vm_states.ALLOW_HARD_REBOOT) started_not_running = (current_task_state in [task_states.REBOOT_STARTED, task_states.REBOOT_STARTED_HARD] and current_power_state != power_state.RUNNING) if pending_soft or pending_hard or started_not_running: retry_reboot = True return retry_reboot, reboot_type def handle_lifecycle_event(self, event): LOG.info(_("VM %(state)s (Lifecycle Event)") % {'state': event.get_name()}, instance_uuid=event.get_instance_uuid()) context = nova.context.get_admin_context(read_deleted='yes') instance = objects.Instance.get_by_uuid(context, event.get_instance_uuid()) vm_power_state = None if event.get_transition() == virtevent.EVENT_LIFECYCLE_STOPPED: vm_power_state = power_state.SHUTDOWN elif event.get_transition() == virtevent.EVENT_LIFECYCLE_STARTED: vm_power_state = power_state.RUNNING elif event.get_transition() == virtevent.EVENT_LIFECYCLE_PAUSED: vm_power_state = power_state.PAUSED elif event.get_transition() == virtevent.EVENT_LIFECYCLE_RESUMED: vm_power_state = power_state.RUNNING else: LOG.warning(_("Unexpected power state %d") % event.get_transition()) if vm_power_state is not None: self._sync_instance_power_state(context, instance, vm_power_state) def handle_events(self, event): if isinstance(event, virtevent.LifecycleEvent): try: self.handle_lifecycle_event(event) except exception.InstanceNotFound: LOG.debug("Event %s arrived for non-existent instance. 
The " "instance was probably deleted.", event) else: LOG.debug("Ignoring event %s", event) def init_virt_events(self): self.driver.register_event_listener(self.handle_events) def init_host(self): """Initialization for a standalone compute service.""" self.driver.init_host(host=self.host) context = nova.context.get_admin_context() instances = objects.InstanceList.get_by_host( context, self.host, expected_attrs=['info_cache']) if CONF.defer_iptables_apply: self.driver.filter_defer_apply_on() self.init_virt_events() try: # checking that instance was not already evacuated to other host self._destroy_evacuated_instances(context) for instance in instances: self._init_instance(context, instance) finally: if CONF.defer_iptables_apply: self.driver.filter_defer_apply_off() def cleanup_host(self): self.driver.cleanup_host(host=self.host) def pre_start_hook(self): """After the service is initialized, but before we fully bring the service up by listening on RPC queues, make sure to update our available resources (and indirectly our available nodes). """ self.update_available_resource(nova.context.get_admin_context()) def _get_power_state(self, context, instance): """Retrieve the power state for the given instance.""" LOG.debug('Checking state', instance=instance) try: return self.driver.get_info(instance)["state"] except exception.NotFound: return power_state.NOSTATE def get_console_topic(self, context): """Retrieves the console host for a project on this host. Currently this is just set in the flags for each compute host. """ # TODO(mdragon): perhaps make this variable by console_type? return '%s.%s' % (CONF.console_topic, CONF.console_host) def get_console_pool_info(self, context, console_type): return self.driver.get_console_pool_info(console_type) @wrap_exception() def refresh_security_group_rules(self, context, security_group_id): """Tell the virtualization driver to refresh security group rules. Passes straight through to the virtualization driver. """ return self.driver.refresh_security_group_rules(security_group_id) @wrap_exception() def refresh_security_group_members(self, context, security_group_id): """Tell the virtualization driver to refresh security group members. Passes straight through to the virtualization driver. """ return self.driver.refresh_security_group_members(security_group_id) @wrap_exception() def refresh_instance_security_rules(self, context, instance): """Tell the virtualization driver to refresh security rules for an instance. Passes straight through to the virtualization driver. Synchronise the call because we may still be in the middle of creating the instance. """ @utils.synchronized(instance['uuid']) def _sync_refresh(): try: return self.driver.refresh_instance_security_rules(instance) except NotImplementedError: LOG.warning(_('Hypervisor driver does not support ' 'security groups.'), instance=instance) return _sync_refresh() @wrap_exception() def refresh_provider_fw_rules(self, context): """This call passes straight through to the virtualization driver.""" return self.driver.refresh_provider_fw_rules() def _get_instance_nw_info(self, context, instance, use_slave=False): """Get a list of dictionaries of network data of an instance.""" if (not hasattr(instance, 'system_metadata') or len(instance['system_metadata']) == 0): # NOTE(danms): Several places in the code look up instances without # pulling system_metadata for performance, and call this function. 
# If we get an instance without it, re-fetch so that the call # to network_api (which requires it for instance_type) will # succeed. instance = objects.Instance.get_by_uuid(context, instance['uuid'], use_slave=use_slave) network_info = self.network_api.get_instance_nw_info(context, instance) return network_info def _await_block_device_map_created(self, context, vol_id): # TODO(yamahata): creating volume simultaneously # reduces creation time? # TODO(yamahata): eliminate dumb polling attempts = 0 start = time.time() while attempts < CONF.block_device_allocate_retries: volume = self.volume_api.get(context, vol_id) volume_status = volume['status'] if volume_status not in ['creating', 'downloading']: if volume_status != 'available': LOG.warn(_("Volume id: %s finished being created but was" " not set as 'available'"), vol_id) return attempts + 1 greenthread.sleep(CONF.block_device_allocate_retries_interval) attempts += 1 # NOTE(harlowja): Should only happen if we ran out of attempts raise exception.VolumeNotCreated(volume_id=vol_id, seconds=int(time.time() - start), attempts=attempts) def _decode_files(self, injected_files): """Base64 decode the list of files to inject.""" if not injected_files: return [] def _decode(f): path, contents = f try: decoded = base64.b64decode(contents) return path, decoded except TypeError: raise exception.Base64Exception(path=path) return [_decode(f) for f in injected_files] def _run_instance(self, context, request_spec, filter_properties, requested_networks, injected_files, admin_password, is_first_time, node, instance, legacy_bdm_in_spec): """Launch a new instance with specified options.""" extra_usage_info = {} def notify(status, msg="", fault=None, **kwargs): """Send a create.{start,error,end} notification.""" type_ = "create.%(status)s" % dict(status=status) info = extra_usage_info.copy() info['message'] = msg self._notify_about_instance_usage(context, instance, type_, extra_usage_info=info, fault=fault, **kwargs) try: self._prebuild_instance(context, instance) if request_spec and request_spec.get('image'): image_meta = request_spec['image'] else: image_meta = {} extra_usage_info = {"image_name": image_meta.get('name', '')} notify("start") # notify that build is starting instance, network_info = self._build_instance(context, request_spec, filter_properties, requested_networks, injected_files, admin_password, is_first_time, node, instance, image_meta, legacy_bdm_in_spec) notify("end", msg=_("Success"), network_info=network_info) except exception.RescheduledException as e: # Instance build encountered an error, and has been rescheduled. 
notify("error", fault=e) except exception.BuildAbortException as e: # Instance build aborted due to a non-failure LOG.info(e) notify("end", msg=e.format_message()) # notify that build is done except Exception as e: # Instance build encountered a non-recoverable error: with excutils.save_and_reraise_exception(): self._set_instance_error_state(context, instance) notify("error", fault=e) # notify that build failed def _prebuild_instance(self, context, instance): self._check_instance_exists(context, instance) try: self._start_building(context, instance) except (exception.InstanceNotFound, exception.UnexpectedDeletingTaskStateError): msg = _("Instance disappeared before we could start it") # Quickly bail out of here raise exception.BuildAbortException(instance_uuid=instance.uuid, reason=msg) def _validate_instance_group_policy(self, context, instance, filter_properties): # NOTE(russellb) Instance group policy is enforced by the scheduler. # However, there is a race condition with the enforcement of # anti-affinity. Since more than one instance may be scheduled at the # same time, it's possible that more than one instance with an # anti-affinity policy may end up here. This is a validation step to # make sure that starting the instance here doesn't violate the policy. scheduler_hints = filter_properties.get('scheduler_hints') or {} group_hint = scheduler_hints.get('group') if not group_hint: return @utils.synchronized(group_hint) def _do_validation(context, instance, group_hint): group = objects.InstanceGroup.get_by_hint(context, group_hint) if 'anti-affinity' not in group.policies: return group_hosts = group.get_hosts(context, exclude=[instance.uuid]) if self.host in group_hosts: msg = _("Anti-affinity instance group policy was violated.") raise exception.RescheduledException( instance_uuid=instance.uuid, reason=msg) _do_validation(context, instance, group_hint) def _build_instance(self, context, request_spec, filter_properties, requested_networks, injected_files, admin_password, is_first_time, node, instance, image_meta, legacy_bdm_in_spec): original_context = context context = context.elevated() # If neutron security groups pass requested security # groups to allocate_for_instance() if request_spec and self.is_neutron_security_groups: security_groups = request_spec.get('security_group') else: security_groups = [] if node is None: node = self.driver.get_available_nodes(refresh=True)[0] LOG.debug("No node specified, defaulting to %s", node) network_info = None bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( context, instance['uuid']) # b64 decode the files to inject: injected_files_orig = injected_files injected_files = self._decode_files(injected_files) rt = self._get_resource_tracker(node) try: limits = filter_properties.get('limits', {}) with rt.instance_claim(context, instance, limits): # NOTE(russellb) It's important that this validation be done # *after* the resource tracker instance claim, as that is where # the host is set on the instance. 
self._validate_instance_group_policy(context, instance, filter_properties) macs = self.driver.macs_for_instance(instance) dhcp_options = self.driver.dhcp_options_for_instance(instance) network_info = self._allocate_network(original_context, instance, requested_networks, macs, security_groups, dhcp_options) self._instance_update( context, instance['uuid'], vm_state=vm_states.BUILDING, task_state=task_states.BLOCK_DEVICE_MAPPING) # Verify that all the BDMs have a device_name set and assign a # default to the ones missing it with the help of the driver. self._default_block_device_names(context, instance, image_meta, bdms) block_device_info = self._prep_block_device( context, instance, bdms) set_access_ip = (is_first_time and not instance['access_ip_v4'] and not instance['access_ip_v6']) instance = self._spawn(context, instance, image_meta, network_info, block_device_info, injected_files, admin_password, set_access_ip=set_access_ip) except (exception.InstanceNotFound, exception.UnexpectedDeletingTaskStateError): # the instance got deleted during the spawn # Make sure the async call finishes if network_info is not None: network_info.wait(do_raise=False) try: self._deallocate_network(context, instance) except Exception: msg = _LE('Failed to dealloc network ' 'for deleted instance') LOG.exception(msg, instance=instance) raise exception.BuildAbortException( instance_uuid=instance['uuid'], reason=_("Instance disappeared during build")) except (exception.UnexpectedTaskStateError, exception.VirtualInterfaceCreateException) as e: # Don't try to reschedule, just log and reraise. with excutils.save_and_reraise_exception(): LOG.debug(e.format_message(), instance=instance) # Make sure the async call finishes if network_info is not None: network_info.wait(do_raise=False) except exception.InvalidBDM: with excutils.save_and_reraise_exception(): if network_info is not None: network_info.wait(do_raise=False) try: self._deallocate_network(context, instance) except Exception: msg = _LE('Failed to dealloc network ' 'for failed instance') LOG.exception(msg, instance=instance) except Exception: exc_info = sys.exc_info() # try to re-schedule instance: # Make sure the async call finishes if network_info is not None: network_info.wait(do_raise=False) rescheduled = self._reschedule_or_error(context, instance, exc_info, requested_networks, admin_password, injected_files_orig, is_first_time, request_spec, filter_properties, bdms, legacy_bdm_in_spec) if rescheduled: # log the original build error self._log_original_error(exc_info, instance['uuid']) raise exception.RescheduledException( instance_uuid=instance['uuid'], reason=unicode(exc_info[1])) else: # not re-scheduling, go to error: raise exc_info[0], exc_info[1], exc_info[2] # spawn success return instance, network_info def _log_original_error(self, exc_info, instance_uuid): LOG.error(_('Error: %s') % exc_info[1], instance_uuid=instance_uuid, exc_info=exc_info) def _reschedule_or_error(self, context, instance, exc_info, requested_networks, admin_password, injected_files, is_first_time, request_spec, filter_properties, bdms=None, legacy_bdm_in_spec=True): """Try to re-schedule the build or re-raise the original build error to error out the instance. 
""" instance_uuid = instance['uuid'] rescheduled = False compute_utils.add_instance_fault_from_exc(context, instance, exc_info[1], exc_info=exc_info) self._notify_about_instance_usage(context, instance, 'instance.create.error', fault=exc_info[1]) try: LOG.debug("Clean up resource before rescheduling.", instance=instance) if bdms is None: bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( context, instance.uuid) self._shutdown_instance(context, instance, bdms, requested_networks) self._cleanup_volumes(context, instance['uuid'], bdms) except Exception: # do not attempt retry if clean up failed: with excutils.save_and_reraise_exception(): self._log_original_error(exc_info, instance_uuid) try: method_args = (request_spec, admin_password, injected_files, requested_networks, is_first_time, filter_properties, legacy_bdm_in_spec) task_state = task_states.SCHEDULING rescheduled = self._reschedule(context, request_spec, filter_properties, instance, self.scheduler_rpcapi.run_instance, method_args, task_state, exc_info) except Exception: rescheduled = False LOG.exception(_LE("Error trying to reschedule"), instance_uuid=instance_uuid) return rescheduled def _reschedule(self, context, request_spec, filter_properties, instance, reschedule_method, method_args, task_state, exc_info=None): """Attempt to re-schedule a compute operation.""" instance_uuid = instance['uuid'] retry = filter_properties.get('retry', None) if not retry: # no retry information, do not reschedule. LOG.debug("Retry info not present, will not reschedule", instance_uuid=instance_uuid) return if not request_spec: LOG.debug("No request spec, will not reschedule", instance_uuid=instance_uuid) return request_spec['instance_uuids'] = [instance_uuid] LOG.debug("Re-scheduling %(method)s: attempt %(num)d", {'method': reschedule_method.func_name, 'num': retry['num_attempts']}, instance_uuid=instance_uuid) # reset the task state: self._instance_update(context, instance_uuid, task_state=task_state) if exc_info: # stringify to avoid circular ref problem in json serialization: retry['exc'] = traceback.format_exception_only(exc_info[0], exc_info[1]) reschedule_method(context, *method_args) return True @periodic_task.periodic_task def _check_instance_build_time(self, context): """Ensure that instances are not stuck in build.""" timeout = CONF.instance_build_timeout if timeout == 0: return filters = {'vm_state': vm_states.BUILDING, 'host': self.host} building_insts = objects.InstanceList.get_by_filters(context, filters, expected_attrs=[], use_slave=True) for instance in building_insts: if timeutils.is_older_than(instance['created_at'], timeout): self._set_instance_error_state(context, instance) LOG.warn(_("Instance build timed out. Set to error state."), instance=instance) def _check_instance_exists(self, context, instance): """Ensure an instance with the same name is not already present.""" if self.driver.instance_exists(instance): raise exception.InstanceExists(name=instance.name) def _start_building(self, context, instance): """Save the host and launched_on fields and log appropriately.""" LOG.audit(_('Starting instance...'), context=context, instance=instance) self._instance_update(context, instance.uuid, vm_state=vm_states.BUILDING, task_state=None, expected_task_state=(task_states.SCHEDULING, None)) def _allocate_network_async(self, context, instance, requested_networks, macs, security_groups, is_vpn, dhcp_options): """Method used to allocate networks in the background. Broken out for testing. 
""" LOG.debug("Allocating IP information in the background.", instance=instance) retries = CONF.network_allocate_retries if retries < 0: LOG.warn(_("Treating negative config value (%(retries)s) for " "'network_allocate_retries' as 0."), {'retries': retries}) attempts = retries > 1 and retries + 1 or 1 retry_time = 1 for attempt in range(1, attempts + 1): try: nwinfo = self.network_api.allocate_for_instance( context, instance, vpn=is_vpn, requested_networks=requested_networks, macs=macs, security_groups=security_groups, dhcp_options=dhcp_options) LOG.debug('Instance network_info: |%s|', nwinfo, instance=instance) sys_meta = instance.system_metadata sys_meta['network_allocated'] = 'True' self._instance_update(context, instance.uuid, system_metadata=sys_meta) return nwinfo except Exception: exc_info = sys.exc_info() log_info = {'attempt': attempt, 'attempts': attempts} if attempt == attempts: LOG.exception(_LE('Instance failed network setup ' 'after %(attempts)d attempt(s)'), log_info) raise exc_info[0], exc_info[1], exc_info[2] LOG.warn(_('Instance failed network setup ' '(attempt %(attempt)d of %(attempts)d)'), log_info, instance=instance) time.sleep(retry_time) retry_time *= 2 if retry_time > 30: retry_time = 30 # Not reached. def _build_networks_for_instance(self, context, instance, requested_networks, security_groups): # If we're here from a reschedule the network may already be allocated. if strutils.bool_from_string( instance.system_metadata.get('network_allocated', 'False')): return self._get_instance_nw_info(context, instance) if not self.is_neutron_security_groups: security_groups = [] macs = self.driver.macs_for_instance(instance) dhcp_options = self.driver.dhcp_options_for_instance(instance) network_info = self._allocate_network(context, instance, requested_networks, macs, security_groups, dhcp_options) if not instance.access_ip_v4 and not instance.access_ip_v6: # If CONF.default_access_ip_network_name is set, grab the # corresponding network and set the access ip values accordingly. # Note that when there are multiple ips to choose from, an # arbitrary one will be chosen. network_name = CONF.default_access_ip_network_name if not network_name: return network_info for vif in network_info: if vif['network']['label'] == network_name: for ip in vif.fixed_ips(): if ip['version'] == 4: instance.access_ip_v4 = ip['address'] if ip['version'] == 6: instance.access_ip_v6 = ip['address'] instance.save() break return network_info def _allocate_network(self, context, instance, requested_networks, macs, security_groups, dhcp_options): """Start network allocation asynchronously. Return an instance of NetworkInfoAsyncWrapper that can be used to retrieve the allocated networks when the operation has finished. """ # NOTE(comstud): Since we're allocating networks asynchronously, # this task state has little meaning, as we won't be in this # state for very long. 
instance.vm_state = vm_states.BUILDING instance.task_state = task_states.NETWORKING instance.save(expected_task_state=[None]) self._update_resource_tracker(context, instance) is_vpn = pipelib.is_vpn_image(instance.image_ref) return network_model.NetworkInfoAsyncWrapper( self._allocate_network_async, context, instance, requested_networks, macs, security_groups, is_vpn, dhcp_options) def _default_root_device_name(self, instance, image_meta, root_bdm): try: return self.driver.default_root_device_name(instance, image_meta, root_bdm) except NotImplementedError: return compute_utils.get_next_device_name(instance, []) def _default_device_names_for_instance(self, instance, root_device_name, *block_device_lists): try: self.driver.default_device_names_for_instance(instance, root_device_name, *block_device_lists) except NotImplementedError: compute_utils.default_device_names_for_instance( instance, root_device_name, *block_device_lists) def _default_block_device_names(self, context, instance, image_meta, block_devices): """Verify that all the devices have the device_name set. If not, provide a default name. It also ensures that there is a root_device_name and is set to the first block device in the boot sequence (boot_index=0). """ root_bdm = block_device.get_root_bdm(block_devices) if not root_bdm: return # Get the root_device_name from the root BDM or the instance root_device_name = None update_instance = False update_root_bdm = False if root_bdm.device_name: root_device_name = root_bdm.device_name instance['root_device_name'] = root_device_name update_instance = True elif instance['root_device_name']: root_device_name = instance['root_device_name'] root_bdm.device_name = root_device_name update_root_bdm = True else: root_device_name = self._default_root_device_name(instance, image_meta, root_bdm) instance['root_device_name'] = root_device_name root_bdm.device_name = root_device_name update_instance = update_root_bdm = True if update_instance: self._instance_update(context, instance['uuid'], root_device_name=root_device_name) if update_root_bdm: root_bdm.save() def _is_mapping(bdm): return (bdm.source_type in ('image', 'volume', 'snapshot', 'blank') and bdm.destination_type == 'volume' and driver_block_device.is_implemented(bdm)) ephemerals = filter(block_device.new_format_is_ephemeral, block_devices) swap = filter(block_device.new_format_is_swap, block_devices) block_device_mapping = filter(_is_mapping, block_devices) self._default_device_names_for_instance(instance, root_device_name, ephemerals, swap, block_device_mapping) def _prep_block_device(self, context, instance, bdms, do_check_attach=True): """Set up the block device for an instance with error logging.""" try: block_device_info = { 'root_device_name': instance['root_device_name'], 'swap': driver_block_device.convert_swap(bdms), 'ephemerals': driver_block_device.convert_ephemerals(bdms), 'block_device_mapping': ( driver_block_device.attach_block_devices( driver_block_device.convert_volumes(bdms), context, instance, self.volume_api, self.driver, do_check_attach=do_check_attach) + driver_block_device.attach_block_devices( driver_block_device.convert_snapshots(bdms), context, instance, self.volume_api, self.driver, self._await_block_device_map_created, do_check_attach=do_check_attach) + driver_block_device.attach_block_devices( driver_block_device.convert_images(bdms), context, instance, self.volume_api, self.driver, self._await_block_device_map_created, do_check_attach=do_check_attach) + driver_block_device.attach_block_devices( 
driver_block_device.convert_blanks(bdms), context, instance, self.volume_api, self.driver, self._await_block_device_map_created, do_check_attach=do_check_attach)) } if self.use_legacy_block_device_info: for bdm_type in ('swap', 'ephemerals', 'block_device_mapping'): block_device_info[bdm_type] = \ driver_block_device.legacy_block_devices( block_device_info[bdm_type]) # Get swap out of the list block_device_info['swap'] = driver_block_device.get_swap( block_device_info['swap']) return block_device_info except exception.OverQuota: msg = ('Failed to create block device for instance due to being ' 'over volume resource quota') LOG.debug(msg, instance=instance) raise exception.InvalidBDM() except Exception: LOG.exception(_LE('Instance failed block device setup'), instance=instance) raise exception.InvalidBDM() @object_compat def _spawn(self, context, instance, image_meta, network_info, block_device_info, injected_files, admin_password, set_access_ip=False): """Spawn an instance with error logging and update its power state.""" instance.vm_state = vm_states.BUILDING instance.task_state = task_states.SPAWNING instance.save(expected_task_state=task_states.BLOCK_DEVICE_MAPPING) try: self.driver.spawn(context, instance, image_meta, injected_files, admin_password, network_info, block_device_info) except Exception: with excutils.save_and_reraise_exception(): LOG.exception(_LE('Instance failed to spawn'), instance=instance) current_power_state = self._get_power_state(context, instance) instance.power_state = current_power_state instance.vm_state = vm_states.ACTIVE instance.task_state = None instance.launched_at = timeutils.utcnow() def _set_access_ip_values(): """Add access ip values for a given instance. If CONF.default_access_ip_network_name is set, this method will grab the corresponding network and set the access ip values accordingly. Note that when there are multiple ips to choose from, an arbitrary one will be chosen. 
""" network_name = CONF.default_access_ip_network_name if not network_name: return for vif in network_info: if vif['network']['label'] == network_name: for ip in vif.fixed_ips(): if ip['version'] == 4: instance.access_ip_v4 = ip['address'] if ip['version'] == 6: instance.access_ip_v6 = ip['address'] return if set_access_ip: _set_access_ip_values() network_info.wait(do_raise=True) instance.info_cache.network_info = network_info instance.save(expected_task_state=task_states.SPAWNING) return instance def _notify_about_instance_usage(self, context, instance, event_suffix, network_info=None, system_metadata=None, extra_usage_info=None, fault=None): compute_utils.notify_about_instance_usage( self.notifier, context, instance, event_suffix, network_info=network_info, system_metadata=system_metadata, extra_usage_info=extra_usage_info, fault=fault) def _deallocate_network(self, context, instance, requested_networks=None): LOG.debug('Deallocating network for instance', instance=instance) self.network_api.deallocate_for_instance( context, instance, requested_networks=requested_networks) def _get_instance_block_device_info(self, context, instance, refresh_conn_info=False, bdms=None): """Transform block devices to the driver block_device format.""" if not bdms: bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( context, instance['uuid']) swap = driver_block_device.convert_swap(bdms) ephemerals = driver_block_device.convert_ephemerals(bdms) block_device_mapping = ( driver_block_device.convert_volumes(bdms) + driver_block_device.convert_snapshots(bdms) + driver_block_device.convert_images(bdms)) if not refresh_conn_info: # if the block_device_mapping has no value in connection_info # (returned as None), don't include in the mapping block_device_mapping = [ bdm for bdm in block_device_mapping if bdm.get('connection_info')] else: block_device_mapping = driver_block_device.refresh_conn_infos( block_device_mapping, context, instance, self.volume_api, self.driver) if self.use_legacy_block_device_info: swap = driver_block_device.legacy_block_devices(swap) ephemerals = driver_block_device.legacy_block_devices(ephemerals) block_device_mapping = driver_block_device.legacy_block_devices( block_device_mapping) # Get swap out of the list swap = driver_block_device.get_swap(swap) return {'swap': swap, 'ephemerals': ephemerals, 'block_device_mapping': block_device_mapping} # NOTE(mikal): No object_compat wrapper on this method because its # callers all pass objects already @wrap_exception() @reverts_task_state @wrap_instance_event @wrap_instance_fault def build_and_run_instance(self, context, instance, image, request_spec, filter_properties, admin_password=None, injected_files=None, requested_networks=None, security_groups=None, block_device_mapping=None, node=None, limits=None): @utils.synchronized(instance.uuid) def do_build_and_run_instance(context, instance, image, request_spec, filter_properties, admin_password, injected_files, requested_networks, security_groups, block_device_mapping, node=None, limits=None): try: LOG.audit(_('Starting instance...'), context=context, instance=instance) instance.vm_state = vm_states.BUILDING instance.task_state = None instance.save(expected_task_state= (task_states.SCHEDULING, None)) except exception.InstanceNotFound: msg = 'Instance disappeared before build.' 
LOG.debug(msg, instance=instance) return except exception.UnexpectedTaskStateError as e: LOG.debug(e.format_message(), instance=instance) return # b64 decode the files to inject: decoded_files = self._decode_files(injected_files) if limits is None: limits = {} if node is None: node = self.driver.get_available_nodes(refresh=True)[0] LOG.debug('No node specified, defaulting to %s', node, instance=instance) try: self._build_and_run_instance(context, instance, image, decoded_files, admin_password, requested_networks, security_groups, block_device_mapping, node, limits, filter_properties) except exception.RescheduledException as e: LOG.debug(e.format_message(), instance=instance) retry = filter_properties.get('retry', None) if not retry: # no retry information, do not reschedule. LOG.debug("Retry info not present, will not reschedule", instance=instance) self._cleanup_allocated_networks(context, instance, requested_networks) self._set_instance_error_state(context, instance.uuid) return retry['exc'] = traceback.format_exception(*sys.exc_info()) # NOTE(comstud): Deallocate networks if the driver wants # us to do so. if self.driver.deallocate_networks_on_reschedule(instance): self._cleanup_allocated_networks(context, instance, requested_networks) instance.task_state = task_states.SCHEDULING instance.save() self.compute_task_api.build_instances(context, [instance], image, filter_properties, admin_password, injected_files, requested_networks, security_groups, block_device_mapping) except (exception.InstanceNotFound, exception.UnexpectedDeletingTaskStateError): msg = 'Instance disappeared during build.' LOG.debug(msg, instance=instance) self._cleanup_allocated_networks(context, instance, requested_networks) except exception.BuildAbortException as e: LOG.exception(e.format_message(), instance=instance) self._cleanup_allocated_networks(context, instance, requested_networks) self._cleanup_volumes(context, instance.uuid, block_device_mapping, raise_exc=False) self._set_instance_error_state(context, instance) except Exception: # Should not reach here. msg = _LE('Unexpected build failure, not rescheduling build.') LOG.exception(msg, instance=instance) self._cleanup_allocated_networks(context, instance, requested_networks) self._cleanup_volumes(context, instance.uuid, block_device_mapping, raise_exc=False) self._set_instance_error_state(context, instance) do_build_and_run_instance(context, instance, image, request_spec, filter_properties, admin_password, injected_files, requested_networks, security_groups, block_device_mapping, node, limits) def _build_and_run_instance(self, context, instance, image, injected_files, admin_password, requested_networks, security_groups, block_device_mapping, node, limits, filter_properties): image_name = image.get('name') self._notify_about_instance_usage(context, instance, 'create.start', extra_usage_info={'image_name': image_name}) try: rt = self._get_resource_tracker(node) with rt.instance_claim(context, instance, limits): # NOTE(russellb) It's important that this validation be done # *after* the resource tracker instance claim, as that is where # the host is set on the instance. 
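                # The claim above is a context manager: if anything inside
                # this block raises, the tracker drops the claim so the
                # host's resource usage is not left inflated.  Conceptually
                # (a loose sketch, not the tracker's actual code):
                #
                #     claim = rt.instance_claim(context, instance, limits)
                #     try:
                #         build()
                #     except Exception:
                #         claim.abort()
                #         raise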
self._validate_instance_group_policy(context, instance, filter_properties) with self._build_resources(context, instance, requested_networks, security_groups, image, block_device_mapping) as resources: instance.vm_state = vm_states.BUILDING instance.task_state = task_states.SPAWNING instance.save(expected_task_state= task_states.BLOCK_DEVICE_MAPPING) block_device_info = resources['block_device_info'] network_info = resources['network_info'] self.driver.spawn(context, instance, image, injected_files, admin_password, network_info=network_info, block_device_info=block_device_info) except (exception.InstanceNotFound, exception.UnexpectedDeletingTaskStateError) as e: with excutils.save_and_reraise_exception(): self._notify_about_instance_usage(context, instance, 'create.end', fault=e) except exception.ComputeResourcesUnavailable as e: LOG.debug(e.format_message(), instance=instance) self._notify_about_instance_usage(context, instance, 'create.error', fault=e) raise exception.RescheduledException( instance_uuid=instance.uuid, reason=e.format_message()) except exception.BuildAbortException as e: with excutils.save_and_reraise_exception(): LOG.debug(e.format_message(), instance=instance) self._notify_about_instance_usage(context, instance, 'create.error', fault=e) except (exception.FixedIpLimitExceeded, exception.NoMoreNetworks) as e: LOG.warn(_LW('No more network or fixed IP to be allocated'), instance=instance) self._notify_about_instance_usage(context, instance, 'create.error', fault=e) msg = _('Failed to allocate the network(s) with error %s, ' 'not rescheduling.') % e.format_message() raise exception.BuildAbortException(instance_uuid=instance.uuid, reason=msg) except (exception.VirtualInterfaceCreateException, exception.VirtualInterfaceMacAddressException) as e: LOG.exception(_LE('Failed to allocate network(s)'), instance=instance) self._notify_about_instance_usage(context, instance, 'create.error', fault=e) msg = _('Failed to allocate the network(s), not rescheduling.') raise exception.BuildAbortException(instance_uuid=instance.uuid, reason=msg) except (exception.FlavorDiskTooSmall, exception.FlavorMemoryTooSmall, exception.ImageNotActive, exception.ImageUnacceptable) as e: self._notify_about_instance_usage(context, instance, 'create.error', fault=e) raise exception.BuildAbortException(instance_uuid=instance.uuid, reason=e.format_message()) except Exception as e: self._notify_about_instance_usage(context, instance, 'create.error', fault=e) raise exception.RescheduledException( instance_uuid=instance.uuid, reason=str(e)) # NOTE(alaski): This is only useful during reschedules, remove it now. 
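        # 'network_allocated' was the hint that let a rescheduled build skip
        # re-allocating ports (see _build_networks_for_instance above); once
        # the instance has spawned successfully it is no longer needed.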
        instance.system_metadata.pop('network_allocated', None)

        instance.power_state = self._get_power_state(context, instance)
        instance.vm_state = vm_states.ACTIVE
        instance.task_state = None
        instance.launched_at = timeutils.utcnow()
        instance.save(expected_task_state=task_states.SPAWNING)

        self._notify_about_instance_usage(context, instance, 'create.end',
                extra_usage_info={'message': _('Success')},
                network_info=network_info)

    @contextlib.contextmanager
    def _build_resources(self, context, instance, requested_networks,
                         security_groups, image, block_device_mapping):
        resources = {}

        try:
            network_info = self._build_networks_for_instance(context,
                    instance, requested_networks, security_groups)
            resources['network_info'] = network_info
        except (exception.InstanceNotFound,
                exception.UnexpectedDeletingTaskStateError):
            raise
        except exception.UnexpectedTaskStateError as e:
            raise exception.BuildAbortException(instance_uuid=instance.uuid,
                    reason=e.format_message())
        except Exception:
            # Because this allocation is async any failures are likely to occur
            # when the driver accesses network_info during spawn().
            LOG.exception(_LE('Failed to allocate network(s)'),
                          instance=instance)
            msg = _('Failed to allocate the network(s), not rescheduling.')
            raise exception.BuildAbortException(instance_uuid=instance.uuid,
                    reason=msg)

        try:
            # Verify that all the BDMs have a device_name set and assign a
            # default to the ones missing it with the help of the driver.
            self._default_block_device_names(context, instance, image,
                    block_device_mapping)

            instance.vm_state = vm_states.BUILDING
            instance.task_state = task_states.BLOCK_DEVICE_MAPPING
            instance.save()

            block_device_info = self._prep_block_device(context, instance,
                    block_device_mapping)
            resources['block_device_info'] = block_device_info
        except (exception.InstanceNotFound,
                exception.UnexpectedDeletingTaskStateError):
            raise
        except exception.UnexpectedTaskStateError as e:
            raise exception.BuildAbortException(instance_uuid=instance.uuid,
                    reason=e.format_message())
        except Exception:
            LOG.exception(_LE('Failure prepping block device'),
                          instance=instance)
            msg = _('Failure prepping block device.')
            raise exception.BuildAbortException(instance_uuid=instance.uuid,
                    reason=msg)

        try:
            yield resources
        except Exception as exc:
            with excutils.save_and_reraise_exception() as ctxt:
                if not isinstance(exc, (exception.InstanceNotFound,
                        exception.UnexpectedDeletingTaskStateError)):
                    LOG.exception(_LE('Instance failed to spawn'),
                                  instance=instance)
                # Make sure the async call finishes
                if network_info is not None:
                    network_info.wait(do_raise=False)
                try:
                    self._shutdown_instance(context, instance,
                            block_device_mapping, requested_networks,
                            try_deallocate_networks=False)
                except Exception:
                    ctxt.reraise = False
                    msg = _('Could not clean up failed build,'
                            ' not rescheduling')
                    raise exception.BuildAbortException(
                            instance_uuid=instance.uuid, reason=msg)

    def _cleanup_allocated_networks(self, context, instance,
                                    requested_networks):
        try:
            self._deallocate_network(context, instance, requested_networks)
        except Exception:
            msg = _LE('Failed to deallocate networks')
            LOG.exception(msg, instance=instance)
            return

        instance.system_metadata['network_allocated'] = 'False'
        try:
            instance.save()
        except exception.InstanceNotFound:
            # NOTE(alaski): It's possible that we're cleaning up the networks
            # because the instance was deleted.  If that's the case then this
            # exception will be raised by instance.save()
            pass

    @object_compat
    @messaging.expected_exceptions(exception.BuildAbortException,
                                   exception.UnexpectedTaskStateError,
                                   exception.VirtualInterfaceCreateException,
                                   exception.RescheduledException)
    @wrap_exception()
    @reverts_task_state
    @wrap_instance_event
    @wrap_instance_fault
    def run_instance(self, context, instance, request_spec,
                     filter_properties, requested_networks,
                     injected_files, admin_password,
                     is_first_time, node, legacy_bdm_in_spec):
        # NOTE(alaski) This method should be deprecated when the scheduler and
        # compute rpc interfaces are bumped to 4.x, and slated for removal in
        # 5.x as it is no longer used.

        if filter_properties is None:
            filter_properties = {}

        @utils.synchronized(instance.uuid)
        def do_run_instance():
            self._run_instance(context, request_spec,
                    filter_properties, requested_networks, injected_files,
                    admin_password, is_first_time, node, instance,
                    legacy_bdm_in_spec)
        do_run_instance()

    def _try_deallocate_network(self, context, instance,
                                requested_networks=None):
        try:
            # tear down allocated network structure
            self._deallocate_network(context, instance, requested_networks)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(_('Failed to deallocate network for instance.'),
                          instance=instance)
                self._set_instance_error_state(context, instance)

    def _shutdown_instance(self, context, instance, bdms,
                           requested_networks=None, notify=True,
                           try_deallocate_networks=True):
        """Shutdown an instance on this host.

        :param:context: security context
        :param:instance: a nova.objects.Instance object
        :param:bdms: the block devices for the instance to be torn down
        :param:requested_networks: the networks on which the instance has
                                   ports
        :param:notify: true if a final usage notification should be emitted
        :param:try_deallocate_networks: false if we should avoid trying to
                                        teardown networking
        """
        context = context.elevated()
        LOG.audit(_('%(action_str)s instance') %
                  {'action_str': 'Terminating'},
                  context=context, instance=instance)

        if notify:
            self._notify_about_instance_usage(context, instance,
                                              "shutdown.start")

        network_info = compute_utils.get_nw_info_for_instance(instance)

        # NOTE(vish) get bdms before destroying the instance
        vol_bdms = [bdm for bdm in bdms if bdm.is_volume]
        block_device_info = self._get_instance_block_device_info(
            context, instance, bdms=bdms)

        # NOTE(melwitt): attempt driver destroy before releasing ip, may
        #                want to keep ip allocated for certain failures
        try:
            self.driver.destroy(context, instance, network_info,
                                block_device_info)
        except exception.InstancePowerOffFailure:
            # if the instance can't power off, don't release the ip
            with excutils.save_and_reraise_exception():
                pass
        except Exception:
            with excutils.save_and_reraise_exception():
                # deallocate ip and fail without proceeding to
                # volume api calls, preserving current behavior
                if try_deallocate_networks:
                    self._try_deallocate_network(context, instance,
                                                 requested_networks)

        if try_deallocate_networks:
            self._try_deallocate_network(context, instance,
                                         requested_networks)

        for bdm in vol_bdms:
            try:
                # NOTE(vish): actual driver detach done in driver.destroy, so
                #             just tell cinder that we are done with it.
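                # Roughly, the Cinder-side teardown is ordered as:
                # terminate_connection() drops the export for this host's
                # connector, then detach() flips the volume back to
                # 'available'; the hypervisor-side detach already happened
                # in driver.destroy() above.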
connector = self.driver.get_volume_connector(instance) self.volume_api.terminate_connection(context, bdm.volume_id, connector) self.volume_api.detach(context, bdm.volume_id) except exception.DiskNotFound as exc: LOG.debug('Ignoring DiskNotFound: %s', exc, instance=instance) except exception.VolumeNotFound as exc: LOG.debug('Ignoring VolumeNotFound: %s', exc, instance=instance) except cinder_exception.EndpointNotFound as exc: LOG.warn(_LW('Ignoring EndpointNotFound: %s'), exc, instance=instance) if notify: self._notify_about_instance_usage(context, instance, "shutdown.end") def _cleanup_volumes(self, context, instance_uuid, bdms, raise_exc=True): exc_info = None for bdm in bdms: LOG.debug("terminating bdm %s", bdm, instance_uuid=instance_uuid) if bdm.volume_id and bdm.delete_on_termination: try: self.volume_api.delete(context, bdm.volume_id) except Exception as exc: exc_info = sys.exc_info() LOG.warn(_LW('Failed to delete volume: %(volume_id)s due ' 'to %(exc)s'), {'volume_id': bdm.volume_id, 'exc': unicode(exc)}) if exc_info is not None and raise_exc: six.reraise(exc_info[0], exc_info[1], exc_info[2]) @hooks.add_hook("delete_instance") def _delete_instance(self, context, instance, bdms, quotas): """Delete an instance on this host. Commit or rollback quotas as necessary. """ instance_uuid = instance['uuid'] was_soft_deleted = instance['vm_state'] == vm_states.SOFT_DELETED if was_soft_deleted: # Instances in SOFT_DELETED vm_state have already had quotas # decremented. try: quotas.rollback() except Exception: pass try: events = self.instance_events.clear_events_for_instance(instance) if events: LOG.debug('Events pending at deletion: %(events)s', {'events': ','.join(events.keys())}, instance=instance) instance.info_cache.delete() self._notify_about_instance_usage(context, instance, "delete.start") self._shutdown_instance(context, instance, bdms) # NOTE(vish): We have already deleted the instance, so we have # to ignore problems cleaning up the volumes. It # would be nice to let the user know somehow that # the volume deletion failed, but it is not # acceptable to have an instance that can not be # deleted. Perhaps this could be reworked in the # future to set an instance fault the first time # and to only ignore the failure if the instance # is already in ERROR. self._cleanup_volumes(context, instance_uuid, bdms, raise_exc=False) # if a delete task succeed, always update vm state and task # state without expecting task state to be DELETING instance.vm_state = vm_states.DELETED instance.task_state = None instance.terminated_at = timeutils.utcnow() instance.save() self._update_resource_tracker(context, instance) system_meta = instance.system_metadata instance.destroy() except Exception: with excutils.save_and_reraise_exception(): quotas.rollback() self._complete_deletion(context, instance, bdms, quotas, system_meta) @wrap_exception() @reverts_task_state @wrap_instance_event @wrap_instance_fault def terminate_instance(self, context, instance, bdms, reservations): """Terminate an instance on this host.""" # NOTE (ndipanov): If we get non-object BDMs, just get them from the # db again, as this means they are sent in the old format and we want # to avoid converting them back when we can just get them. 
# Remove this when we bump the RPC major version to 4.0 if (bdms and any(not isinstance(bdm, obj_base.NovaObject) for bdm in bdms)): bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( context, instance.uuid) quotas = objects.Quotas.from_reservations(context, reservations, instance=instance) @utils.synchronized(instance['uuid']) def do_terminate_instance(instance, bdms): try: self._delete_instance(context, instance, bdms, quotas) except exception.InstanceNotFound: LOG.info(_("Instance disappeared during terminate"), instance=instance) except Exception: # As we're trying to delete always go to Error if something # goes wrong that _delete_instance can't handle. with excutils.save_and_reraise_exception(): LOG.exception(_LE('Setting instance vm_state to ERROR'), instance=instance) self._set_instance_error_state(context, instance) do_terminate_instance(instance, bdms) # NOTE(johannes): This is probably better named power_off_instance # so it matches the driver method, but because of other issues, we # can't use that name in grizzly. @wrap_exception() @reverts_task_state @wrap_instance_event @wrap_instance_fault def stop_instance(self, context, instance): """Stopping an instance on this host.""" @utils.synchronized(instance.uuid) def do_stop_instance(): self._notify_about_instance_usage(context, instance, "power_off.start") self.driver.power_off(instance) current_power_state = self._get_power_state(context, instance) instance.power_state = current_power_state instance.vm_state = vm_states.STOPPED instance.task_state = None instance.save(expected_task_state=task_states.POWERING_OFF) self._notify_about_instance_usage(context, instance, "power_off.end") do_stop_instance() def _power_on(self, context, instance): network_info = self._get_instance_nw_info(context, instance) block_device_info = self._get_instance_block_device_info(context, instance) self.driver.power_on(context, instance, network_info, block_device_info) # NOTE(johannes): This is probably better named power_on_instance # so it matches the driver method, but because of other issues, we # can't use that name in grizzly. 
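    # stop_instance below serializes on the instance UUID with the same
    # pattern used throughout this manager, roughly:
    #
    #     @utils.synchronized(instance.uuid)
    #     def do_stop_instance():
    #         ...power off, then save power/vm/task state...
    #     do_stop_instance()
    #
    # so concurrent start/stop/delete requests for one instance cannot
    # interleave on this host.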
@wrap_exception() @reverts_task_state @wrap_instance_event @wrap_instance_fault def start_instance(self, context, instance): """Starting an instance on this host.""" self._notify_about_instance_usage(context, instance, "power_on.start") self._power_on(context, instance) current_power_state = self._get_power_state(context, instance) instance.power_state = current_power_state instance.vm_state = vm_states.ACTIVE instance.task_state = None instance.save(expected_task_state=task_states.POWERING_ON) self._notify_about_instance_usage(context, instance, "power_on.end") @wrap_exception() @reverts_task_state @wrap_instance_event @wrap_instance_fault def soft_delete_instance(self, context, instance, reservations): """Soft delete an instance on this host.""" quotas = objects.Quotas.from_reservations(context, reservations, instance=instance) try: self._notify_about_instance_usage(context, instance, "soft_delete.start") try: self.driver.soft_delete(instance) except NotImplementedError: # Fallback to just powering off the instance if the # hypervisor doesn't implement the soft_delete method self.driver.power_off(instance) current_power_state = self._get_power_state(context, instance) instance.power_state = current_power_state instance.vm_state = vm_states.SOFT_DELETED instance.task_state = None instance.save(expected_task_state=[task_states.SOFT_DELETING]) except Exception: with excutils.save_and_reraise_exception(): quotas.rollback() quotas.commit() self._notify_about_instance_usage(context, instance, "soft_delete.end") @object_compat @wrap_exception() @reverts_task_state @wrap_instance_event @wrap_instance_fault def restore_instance(self, context, instance): """Restore a soft-deleted instance on this host.""" self._notify_about_instance_usage(context, instance, "restore.start") try: self.driver.restore(instance) except NotImplementedError: # Fallback to just powering on the instance if the hypervisor # doesn't implement the restore method self._power_on(context, instance) current_power_state = self._get_power_state(context, instance) instance.power_state = current_power_state instance.vm_state = vm_states.ACTIVE instance.task_state = None instance.save(expected_task_state=task_states.RESTORING) self._notify_about_instance_usage(context, instance, "restore.end") def _rebuild_default_impl(self, context, instance, image_meta, injected_files, admin_password, bdms, detach_block_devices, attach_block_devices, network_info=None, recreate=False, block_device_info=None, preserve_ephemeral=False): if preserve_ephemeral: # The default code path does not support preserving ephemeral # partitions. 
raise exception.PreserveEphemeralNotSupported() detach_block_devices(context, bdms) if not recreate: self.driver.destroy(context, instance, network_info, block_device_info=block_device_info) instance.task_state = task_states.REBUILD_BLOCK_DEVICE_MAPPING instance.save(expected_task_state=[task_states.REBUILDING]) new_block_device_info = attach_block_devices(context, instance, bdms) instance.task_state = task_states.REBUILD_SPAWNING instance.save( expected_task_state=[task_states.REBUILD_BLOCK_DEVICE_MAPPING]) self.driver.spawn(context, instance, image_meta, injected_files, admin_password, network_info=network_info, block_device_info=new_block_device_info) @object_compat @messaging.expected_exceptions(exception.PreserveEphemeralNotSupported) @wrap_exception() @reverts_task_state @wrap_instance_event @wrap_instance_fault def rebuild_instance(self, context, instance, orig_image_ref, image_ref, injected_files, new_pass, orig_sys_metadata, bdms, recreate, on_shared_storage, preserve_ephemeral=False): """Destroy and re-make this instance. A 'rebuild' effectively purges all existing data from the system and remakes the VM with given 'metadata' and 'personalities'. :param context: `nova.RequestContext` object :param instance: Instance object :param orig_image_ref: Original image_ref before rebuild :param image_ref: New image_ref for rebuild :param injected_files: Files to inject :param new_pass: password to set on rebuilt instance :param orig_sys_metadata: instance system metadata from pre-rebuild :param bdms: block-device-mappings to use for rebuild :param recreate: True if the instance is being recreated (e.g. the hypervisor it was on failed) - cleanup of old state will be skipped. :param on_shared_storage: True if instance files on shared storage :param preserve_ephemeral: True if the default ephemeral storage partition must be preserved on rebuild """ context = context.elevated() # NOTE (ndipanov): If we get non-object BDMs, just get them from the # db again, as this means they are sent in the old format and we want # to avoid converting them back when we can just get them. # Remove this on the next major RPC version bump if (bdms and any(not isinstance(bdm, obj_base.NovaObject) for bdm in bdms)): bdms = None orig_vm_state = instance.vm_state with self._error_out_instance_on_exception(context, instance): LOG.audit(_("Rebuilding instance"), context=context, instance=instance) if recreate: if not self.driver.capabilities["supports_recreate"]: raise exception.InstanceRecreateNotSupported self._check_instance_exists(context, instance) # To cover case when admin expects that instance files are on # shared storage, but not accessible and vice versa if on_shared_storage != self.driver.instance_on_disk(instance): raise exception.InvalidSharedStorage( _("Invalid state of instance files on shared" " storage")) if on_shared_storage: LOG.info(_('disk on shared storage, recreating using' ' existing disk')) else: image_ref = orig_image_ref = instance.image_ref LOG.info(_("disk not on shared storage, rebuilding from:" " '%s'") % str(image_ref)) # NOTE(mriedem): On a recreate (evacuate), we need to update # the instance's host and node properties to reflect it's # destination node for the recreate. 
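            # The try/except/finally that follows always stamps
            # instance.host with this host (node_name stays None if the
            # compute node lookup fails), so the evacuated instance is owned
            # here even when hypervisor_hostname cannot be resolved.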
node_name = None try: compute_node = self._get_compute_info(context, self.host) node_name = compute_node.hypervisor_hostname except exception.NotFound: LOG.exception(_LE('Failed to get compute_info for %s'), self.host) finally: instance.host = self.host instance.node = node_name instance.save() if image_ref: image_meta = self.image_api.get(context, image_ref) else: image_meta = {} # This instance.exists message should contain the original # image_ref, not the new one. Since the DB has been updated # to point to the new one... we have to override it. # TODO(jaypipes): Move generate_image_url() into the nova.image.api orig_image_ref_url = glance.generate_image_url(orig_image_ref) extra_usage_info = {'image_ref_url': orig_image_ref_url} self.conductor_api.notify_usage_exists(context, obj_base.obj_to_primitive(instance), current_period=True, system_metadata=orig_sys_metadata, extra_usage_info=extra_usage_info) # This message should contain the new image_ref extra_usage_info = {'image_name': image_meta.get('name', '')} self._notify_about_instance_usage(context, instance, "rebuild.start", extra_usage_info=extra_usage_info) instance.power_state = self._get_power_state(context, instance) instance.task_state = task_states.REBUILDING instance.save(expected_task_state=[task_states.REBUILDING]) if recreate: self.network_api.setup_networks_on_host( context, instance, self.host) network_info = compute_utils.get_nw_info_for_instance(instance) if bdms is None: bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( context, instance.uuid) block_device_info = \ self._get_instance_block_device_info( context, instance, bdms=bdms) def detach_block_devices(context, bdms): for bdm in bdms: if bdm.is_volume: self.volume_api.detach(context, bdm.volume_id) files = self._decode_files(injected_files) kwargs = dict( context=context, instance=instance, image_meta=image_meta, injected_files=files, admin_password=new_pass, bdms=bdms, detach_block_devices=detach_block_devices, attach_block_devices=self._prep_block_device, block_device_info=block_device_info, network_info=network_info, preserve_ephemeral=preserve_ephemeral) try: self.driver.rebuild(**kwargs) except NotImplementedError: # NOTE(rpodolyaka): driver doesn't provide specialized version # of rebuild, fall back to the default implementation self._rebuild_default_impl(**kwargs) instance.power_state = self._get_power_state(context, instance) instance.vm_state = vm_states.ACTIVE instance.task_state = None instance.launched_at = timeutils.utcnow() instance.save(expected_task_state=[task_states.REBUILD_SPAWNING]) if orig_vm_state == vm_states.STOPPED: LOG.info(_LI("bringing vm to original state: '%s'"), orig_vm_state, instance=instance) instance.vm_state = vm_states.ACTIVE instance.task_state = task_states.POWERING_OFF instance.progress = 0 instance.save() self.stop_instance(context, instance) self._notify_about_instance_usage( context, instance, "rebuild.end", network_info=network_info, extra_usage_info=extra_usage_info) def _handle_bad_volumes_detached(self, context, instance, bad_devices, block_device_info): """Handle cases where the virt-layer had to detach non-working volumes in order to complete an operation. """ for bdm in block_device_info['block_device_mapping']: if bdm.get('mount_device') in bad_devices: try: volume_id = bdm['connection_info']['data']['volume_id'] except KeyError: continue # NOTE(sirp): ideally we'd just call # `compute_api.detach_volume` here but since that hits the # DB directly, that's off limits from within the # compute-manager. 
                #
                # API-detach
                LOG.info(_("Detaching from volume api: %s") % volume_id)
                volume = self.volume_api.get(context, volume_id)
                self.volume_api.check_detach(context, volume)
                self.volume_api.begin_detaching(context, volume_id)

                # Manager-detach
                self.detach_volume(context, volume_id, instance)

    @wrap_exception()
    @reverts_task_state
    @wrap_instance_event
    @wrap_instance_fault
    def reboot_instance(self, context, instance, block_device_info,
                        reboot_type):
        """Reboot an instance on this host."""
        # acknowledge the request made it to the manager
        if reboot_type == "SOFT":
            instance.task_state = task_states.REBOOT_PENDING
            expected_states = (task_states.REBOOTING,
                               task_states.REBOOT_PENDING,
                               task_states.REBOOT_STARTED)
        else:
            instance.task_state = task_states.REBOOT_PENDING_HARD
            expected_states = (task_states.REBOOTING_HARD,
                               task_states.REBOOT_PENDING_HARD,
                               task_states.REBOOT_STARTED_HARD)

        context = context.elevated()
        LOG.audit(_("Rebooting instance"), context=context,
                  instance=instance)

        block_device_info = self._get_instance_block_device_info(context,
                                                                 instance)

        network_info = self._get_instance_nw_info(context, instance)

        self._notify_about_instance_usage(context, instance, "reboot.start")

        current_power_state = self._get_power_state(context, instance)

        instance.power_state = current_power_state
        instance.save(expected_task_state=expected_states)

        if instance['power_state'] != power_state.RUNNING:
            state = instance['power_state']
            running = power_state.RUNNING

            LOG.warn(_('trying to reboot a non-running instance:'
                       ' (state: %(state)s expected: %(running)s)'),
                     {'state': state, 'running': running},
                     context=context, instance=instance)

        def bad_volumes_callback(bad_devices):
            self._handle_bad_volumes_detached(
                    context, instance, bad_devices, block_device_info)

        try:
            # Don't change it out of rescue mode
            if instance['vm_state'] == vm_states.RESCUED:
                new_vm_state = vm_states.RESCUED
            else:
                new_vm_state = vm_states.ACTIVE
            new_power_state = None
            if reboot_type == "SOFT":
                instance.task_state = task_states.REBOOT_STARTED
                expected_state = task_states.REBOOT_PENDING
            else:
                instance.task_state = task_states.REBOOT_STARTED_HARD
                expected_state = task_states.REBOOT_PENDING_HARD
            instance.save(expected_task_state=expected_state)

            self.driver.reboot(context, instance,
                               network_info,
                               reboot_type,
                               block_device_info=block_device_info,
                               bad_volumes_callback=bad_volumes_callback)

        except Exception as error:
            with excutils.save_and_reraise_exception() as ctxt:
                exc_info = sys.exc_info()
                # if the reboot failed but the VM is running don't
                # put it into an error state
                new_power_state = self._get_power_state(context, instance)
                if new_power_state == power_state.RUNNING:
                    LOG.warning(_('Reboot failed but instance is running'),
                                context=context, instance=instance)
                    compute_utils.add_instance_fault_from_exc(context,
                            instance, error, exc_info)
                    self._notify_about_instance_usage(context, instance,
                            'reboot.error', fault=error)
                    ctxt.reraise = False
                else:
                    LOG.error(_('Cannot reboot instance: %s'), error,
                              context=context, instance=instance)
                    self._set_instance_obj_error_state(context, instance)

        if not new_power_state:
            new_power_state = self._get_power_state(context, instance)
        try:
            instance.power_state = new_power_state
            instance.vm_state = new_vm_state
            instance.task_state = None
            instance.save()
        except exception.InstanceNotFound:
            LOG.warn(_("Instance disappeared during reboot"),
                     context=context, instance=instance)

        self._notify_about_instance_usage(context, instance, "reboot.end")

    @delete_image_on_error
    def _do_snapshot_instance(self, context, image_id, instance, rotation):
        if rotation < 0:
            raise exception.RotationRequiredForBackup()
        self._snapshot_instance(context, image_id, instance,
                                task_states.IMAGE_BACKUP)

    @wrap_exception()
    @reverts_task_state
    @wrap_instance_fault
    def backup_instance(self, context, image_id, instance, backup_type,
                        rotation):
        """Backup an instance on this host.

        :param backup_type: daily | weekly
        :param rotation: int representing how many backups to keep around
        """
        self._do_snapshot_instance(context, image_id, instance, rotation)
        self._rotate_backups(context, instance, backup_type, rotation)

    @wrap_exception()
    @reverts_task_state
    @wrap_instance_fault
    @delete_image_on_error
    def snapshot_instance(self, context, image_id, instance):
        """Snapshot an instance on this host.

        :param context: security context
        :param instance: a nova.objects.instance.Instance object
        :param image_id: glance.db.sqlalchemy.models.Image.Id
        """
        # NOTE(dave-mcnally) the task state will already be set by the api
        # but if the compute manager has crashed/been restarted prior to the
        # request getting here the task state may have been cleared so we set
        # it again and things continue normally
        try:
            instance.task_state = task_states.IMAGE_SNAPSHOT
            instance.save(
                expected_task_state=task_states.IMAGE_SNAPSHOT_PENDING)
        except exception.InstanceNotFound:
            # possibility instance no longer exists, no point in continuing
            LOG.debug("Instance not found, could not set state %s "
                      "for instance.",
                      task_states.IMAGE_SNAPSHOT, instance=instance)
            return

        except exception.UnexpectedDeletingTaskStateError:
            LOG.debug("Instance being deleted, snapshot cannot continue",
                      instance=instance)
            return

        self._snapshot_instance(context, image_id, instance,
                                task_states.IMAGE_SNAPSHOT)

    def _snapshot_instance(self, context, image_id, instance,
                           expected_task_state):
        context = context.elevated()

        current_power_state = self._get_power_state(context, instance)
        try:
            instance.power_state = current_power_state
            instance.save()

            LOG.audit(_('instance snapshotting'), context=context,
                      instance=instance)

            if instance.power_state != power_state.RUNNING:
                state = instance.power_state
                running = power_state.RUNNING
                LOG.warn(_('trying to snapshot a non-running instance: '
                           '(state: %(state)s expected: %(running)s)'),
                         {'state': state, 'running': running},
                         instance=instance)

            self._notify_about_instance_usage(
                context, instance, "snapshot.start")

            def update_task_state(task_state,
                                  expected_state=expected_task_state):
                instance.task_state = task_state
                instance.save(expected_task_state=expected_state)

            self.driver.snapshot(context, instance, image_id,
                                 update_task_state)

            instance.task_state = None
            instance.save(expected_task_state=task_states.IMAGE_UPLOADING)

            self._notify_about_instance_usage(context, instance,
                                              "snapshot.end")
        except (exception.InstanceNotFound,
                exception.UnexpectedDeletingTaskStateError):
            # the instance got deleted during the snapshot
            # Quickly bail out of here
            msg = 'Instance disappeared during snapshot'
            LOG.debug(msg, instance=instance)

            try:
                image_service = glance.get_default_image_service()
                image = image_service.show(context, image_id)
                if image['status'] != 'active':
                    image_service.delete(context, image_id)
            except Exception:
                LOG.warning(_("Error while trying to clean up image %s"),
                            image_id, instance=instance)
        except exception.ImageNotFound:
            instance.task_state = None
            instance.save()
            msg = _("Image not found during snapshot")
            LOG.warn(msg, instance=instance)

    @object_compat
    @messaging.expected_exceptions(NotImplementedError)
    def volume_snapshot_create(self, context, instance, volume_id,
                               create_info):
self.driver.volume_snapshot_create(context, instance, volume_id, create_info) @object_compat @messaging.expected_exceptions(NotImplementedError) def volume_snapshot_delete(self, context, instance, volume_id, snapshot_id, delete_info): self.driver.volume_snapshot_delete(context, instance, volume_id, snapshot_id, delete_info) @wrap_instance_fault def _rotate_backups(self, context, instance, backup_type, rotation): """Delete excess backups associated to an instance. Instances are allowed a fixed number of backups (the rotation number); this method deletes the oldest backups that exceed the rotation threshold. :param context: security context :param instance: Instance dict :param backup_type: daily | weekly :param rotation: int representing how many backups to keep around; None if rotation shouldn't be used (as in the case of snapshots) """ filters = {'property-image_type': 'backup', 'property-backup_type': backup_type, 'property-instance_uuid': instance.uuid} images = self.image_api.get_all(context, filters=filters, sort_key='created_at', sort_dir='desc') num_images = len(images) LOG.debug("Found %(num_images)d images (rotation: %(rotation)d)", {'num_images': num_images, 'rotation': rotation}, instance=instance) if num_images > rotation: # NOTE(sirp): this deletes all backups that exceed the rotation # limit excess = len(images) - rotation LOG.debug("Rotating out %d backups", excess, instance=instance) for i in xrange(excess): image = images.pop() image_id = image['id'] LOG.debug("Deleting image %s", image_id, instance=instance) self.image_api.delete(context, image_id) @object_compat @wrap_exception() @reverts_task_state @wrap_instance_event @wrap_instance_fault def set_admin_password(self, context, instance, new_pass): """Set the root/admin password for an instance on this host. This is generally only called by API password resets after an image has been built. @param context: Nova auth context. @param instance: Nova instance object. @param new_pass: The admin password for the instance. """ context = context.elevated() if new_pass is None: # Generate a random password new_pass = utils.generate_password() current_power_state = self._get_power_state(context, instance) expected_state = power_state.RUNNING if current_power_state != expected_state: instance.task_state = None instance.save(expected_task_state=task_states.UPDATING_PASSWORD) _msg = _('Failed to set admin password. Instance %s is not' ' running') % instance.uuid raise exception.InstancePasswordSetFailed( instance=instance.uuid, reason=_msg) try: self.driver.set_admin_password(instance, new_pass) LOG.audit(_("Root password set"), instance=instance) instance.task_state = None instance.save( expected_task_state=task_states.UPDATING_PASSWORD) except NotImplementedError: _msg = _('set_admin_password is not implemented ' 'by this driver or guest instance.') LOG.warn(_msg, instance=instance) instance.task_state = None instance.save( expected_task_state=task_states.UPDATING_PASSWORD) raise NotImplementedError(_msg) except exception.UnexpectedTaskStateError: # interrupted by another (most likely delete) task # do not retry raise except Exception as e: # Catch all here because this could be anything. LOG.exception(_LE('set_admin_password failed: %s'), e, instance=instance) self._set_instance_obj_error_state(context, instance) # We create a new exception here so that we won't # potentially reveal password information to the # API caller. 
The real exception is logged above _msg = _('error setting admin password') raise exception.InstancePasswordSetFailed( instance=instance.uuid, reason=_msg) @wrap_exception() @reverts_task_state @wrap_instance_fault def inject_file(self, context, path, file_contents, instance): """Write a file to the specified path in an instance on this host.""" # NOTE(russellb) Remove this method, as well as the underlying virt # driver methods, when the compute rpc interface is bumped to 4.x # as it is no longer used. context = context.elevated() current_power_state = self._get_power_state(context, instance) expected_state = power_state.RUNNING if current_power_state != expected_state: LOG.warn(_('trying to inject a file into a non-running (state: ' '%(current_state)s expected: %(expected_state)s)'), {'current_state': current_power_state, 'expected_state': expected_state}, instance=instance) LOG.audit(_('injecting file to %s'), path, instance=instance) self.driver.inject_file(instance, path, file_contents) def _get_rescue_image(self, context, instance, rescue_image_ref=None): """Determine what image should be used to boot the rescue VM.""" # 1. If rescue_image_ref is passed in, use that for rescue. # 2. Else, use the base image associated with instance's current image. # The idea here is to provide the customer with a rescue # environment which they are familiar with. # So, if they built their instance off of a Debian image, # their rescue VM will also be Debian. # 3. As a last resort, use instance's current image. if not rescue_image_ref: system_meta = utils.instance_sys_meta(instance) rescue_image_ref = system_meta.get('image_base_image_ref') if not rescue_image_ref: LOG.warn(_('Unable to find a different image to use for rescue VM,' ' using instance\'s current image'), instance=instance) rescue_image_ref = instance.image_ref image_meta = compute_utils.get_image_metadata(context, self.image_api, rescue_image_ref, instance) # NOTE(belliott) bug #1227350 - xenapi needs the actual image id image_meta['id'] = rescue_image_ref return image_meta @object_compat @wrap_exception() @reverts_task_state @wrap_instance_event @wrap_instance_fault def rescue_instance(self, context, instance, rescue_password, rescue_image_ref=None): context = context.elevated() LOG.audit(_('Rescuing'), context=context, instance=instance) admin_password = (rescue_password if rescue_password else utils.generate_password()) network_info = self._get_instance_nw_info(context, instance) rescue_image_meta = self._get_rescue_image(context, instance, rescue_image_ref) extra_usage_info = {'rescue_image_name': rescue_image_meta.get('name', '')} self._notify_about_instance_usage(context, instance, "rescue.start", extra_usage_info=extra_usage_info, network_info=network_info) try: self.driver.rescue(context, instance, network_info, rescue_image_meta, admin_password) except Exception as e: LOG.exception(_LE("Error trying to Rescue Instance"), instance=instance) raise exception.InstanceNotRescuable( instance_id=instance.uuid, reason=_("Driver Error: %s") % unicode(e)) self.conductor_api.notify_usage_exists(context, instance, current_period=True) current_power_state = self._get_power_state(context, instance) instance.vm_state = vm_states.RESCUED instance.task_state = None instance.power_state = current_power_state instance.launched_at = timeutils.utcnow() instance.save(expected_task_state=task_states.RESCUING) self._notify_about_instance_usage(context, instance, "rescue.end", extra_usage_info=extra_usage_info, network_info=network_info) @object_compat 
@wrap_exception() @reverts_task_state @wrap_instance_event @wrap_instance_fault def unrescue_instance(self, context, instance): context = context.elevated() LOG.audit(_('Unrescuing'), context=context, instance=instance) network_info = self._get_instance_nw_info(context, instance) self._notify_about_instance_usage(context, instance, "unrescue.start", network_info=network_info) with self._error_out_instance_on_exception(context, instance): self.driver.unrescue(instance, network_info) current_power_state = self._get_power_state(context, instance) instance.vm_state = vm_states.ACTIVE instance.task_state = None instance.power_state = current_power_state instance.save(expected_task_state=task_states.UNRESCUING) self._notify_about_instance_usage(context, instance, "unrescue.end", network_info=network_info) @object_compat @wrap_exception() @wrap_instance_fault def change_instance_metadata(self, context, diff, instance): """Update the metadata published to the instance.""" LOG.debug("Changing instance metadata according to %r", diff, instance=instance) self.driver.change_instance_metadata(context, instance, diff) def _cleanup_stored_instance_types(self, migration, instance, restore_old=False): """Clean up "old" and "new" instance_type information stored in instance's system_metadata. Optionally update the "current" instance_type to the saved old one first. Returns the updated system_metadata as a dict, as well as the post-cleanup current instance type. """ sys_meta = instance.system_metadata if restore_old: instance_type = flavors.extract_flavor(instance, 'old_') sys_meta = flavors.save_flavor_info(sys_meta, instance_type) else: instance_type = flavors.extract_flavor(instance) flavors.delete_flavor_info(sys_meta, 'old_') flavors.delete_flavor_info(sys_meta, 'new_') return sys_meta, instance_type @wrap_exception() @wrap_instance_event @wrap_instance_fault def confirm_resize(self, context, instance, reservations, migration): quotas = objects.Quotas.from_reservations(context, reservations, instance=instance) @utils.synchronized(instance['uuid']) def do_confirm_resize(context, instance, migration_id): # NOTE(wangpan): Get the migration status from db, if it has been # confirmed, we do nothing and return here LOG.debug("Going to confirm migration %s", migration_id, context=context, instance=instance) try: # TODO(russellb) Why are we sending the migration object just # to turn around and look it up from the db again? 
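                # Whatever the answer, re-reading the Migration here keeps
                # the confirm idempotent: the status checks below simply
                # return if another request already confirmed (or otherwise
                # finished) this migration.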
migration = objects.Migration.get_by_id( context.elevated(), migration_id) except exception.MigrationNotFound: LOG.error(_("Migration %s is not found during confirmation") % migration_id, context=context, instance=instance) return if migration.status == 'confirmed': LOG.info(_("Migration %s is already confirmed") % migration_id, context=context, instance=instance) return elif migration.status not in ('finished', 'confirming'): LOG.warn(_("Unexpected confirmation status '%(status)s' of " "migration %(id)s, exit confirmation process") % {"status": migration.status, "id": migration_id}, context=context, instance=instance) return # NOTE(wangpan): Get the instance from db, if it has been # deleted, we do nothing and return here expected_attrs = ['metadata', 'system_metadata'] try: instance = objects.Instance.get_by_uuid( context, instance.uuid, expected_attrs=expected_attrs) except exception.InstanceNotFound: LOG.info(_("Instance is not found during confirmation"), context=context, instance=instance) return self._confirm_resize(context, instance, quotas, migration=migration) do_confirm_resize(context, instance, migration.id) def _confirm_resize(self, context, instance, quotas, migration=None): """Destroys the source instance.""" self._notify_about_instance_usage(context, instance, "resize.confirm.start") with self._error_out_instance_on_exception(context, instance, quotas=quotas): # NOTE(danms): delete stashed migration information sys_meta, instance_type = self._cleanup_stored_instance_types( migration, instance) sys_meta.pop('old_vm_state', None) instance.system_metadata = sys_meta instance.save() # NOTE(tr3buchet): tear down networks on source host self.network_api.setup_networks_on_host(context, instance, migration.source_compute, teardown=True) network_info = self._get_instance_nw_info(context, instance) self.driver.confirm_migration(migration, instance, network_info) migration.status = 'confirmed' migration.save(context.elevated()) rt = self._get_resource_tracker(migration.source_node) rt.drop_resize_claim(instance, prefix='old_') # NOTE(mriedem): The old_vm_state could be STOPPED but the user # might have manually powered up the instance to confirm the # resize/migrate, so we need to check the current power state # on the instance and set the vm_state appropriately. We default # to ACTIVE because if the power state is not SHUTDOWN, we # assume _sync_instance_power_state will clean it up. p_state = instance.power_state vm_state = None if p_state == power_state.SHUTDOWN: vm_state = vm_states.STOPPED LOG.debug("Resized/migrated instance is powered off. " "Setting vm_state to '%s'.", vm_state, instance=instance) else: vm_state = vm_states.ACTIVE instance.vm_state = vm_state instance.task_state = None instance.save(expected_task_state=[None, task_states.DELETING]) self._notify_about_instance_usage( context, instance, "resize.confirm.end", network_info=network_info) quotas.commit() @wrap_exception() @reverts_task_state @wrap_instance_event @wrap_instance_fault def revert_resize(self, context, instance, migration, reservations): """Destroys the new instance on the destination machine. Reverts the model changes, and powers on the old instance on the source machine. """ quotas = quotas_obj.Quotas.from_reservations(context, reservations, instance=instance) # NOTE(comstud): A revert_resize is essentially a resize back to # the old size, so we need to send a usage event here. 
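        # The revert itself is split across two RPC hops: this (destination)
        # host tears down its copy below, then finish_revert_resize() is
        # cast to the source host to restore the old flavor, networking and
        # power state.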
        self.conductor_api.notify_usage_exists(
            context, instance, current_period=True)

        with self._error_out_instance_on_exception(context, instance,
                                                   quotas=quotas):
            # NOTE(tr3buchet): tear down networks on destination host
            self.network_api.setup_networks_on_host(context, instance,
                                                    teardown=True)

            instance_p = obj_base.obj_to_primitive(instance)
            migration_p = obj_base.obj_to_primitive(migration)
            self.network_api.migrate_instance_start(context,
                                                    instance_p,
                                                    migration_p)

            network_info = self._get_instance_nw_info(context, instance)
            bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
                    context, instance.uuid)
            block_device_info = self._get_instance_block_device_info(
                    context, instance, bdms=bdms)

            self.driver.destroy(context, instance, network_info,
                                block_device_info)

            self._terminate_volume_connections(context, instance, bdms)

            migration.status = 'reverted'
            migration.save(context.elevated())

            rt = self._get_resource_tracker(instance.node)
            rt.drop_resize_claim(instance)

            self.compute_rpcapi.finish_revert_resize(context, instance,
                    migration, migration.source_compute,
                    quotas.reservations)

    @wrap_exception()
    @reverts_task_state
    @wrap_instance_event
    @wrap_instance_fault
    def finish_revert_resize(self, context, instance, reservations,
                             migration):
        """Finishes the second half of reverting a resize.

        Bring the original source instance state back (active/shutoff) and
        revert the resized attributes in the database.
        """
        quotas = quotas_obj.Quotas.from_reservations(context,
                                                     reservations,
                                                     instance=instance)

        with self._error_out_instance_on_exception(context, instance,
                                                   quotas=quotas):
            network_info = self._get_instance_nw_info(context, instance)

            self._notify_about_instance_usage(
                    context, instance, "resize.revert.start")

            sys_meta, instance_type = self._cleanup_stored_instance_types(
                migration, instance, True)

            # NOTE(mriedem): delete stashed old_vm_state information; we
            # default to ACTIVE for backwards compatibility if old_vm_state
            # is not set
            old_vm_state = sys_meta.pop('old_vm_state', vm_states.ACTIVE)

            instance.system_metadata = sys_meta
            instance.memory_mb = instance_type['memory_mb']
            instance.vcpus = instance_type['vcpus']
            instance.root_gb = instance_type['root_gb']
            instance.ephemeral_gb = instance_type['ephemeral_gb']
            instance.instance_type_id = instance_type['id']
            instance.host = migration['source_compute']
            instance.node = migration['source_node']
            instance.save()

            self.network_api.setup_networks_on_host(context, instance,
                                            migration['source_compute'])

            block_device_info = self._get_instance_block_device_info(
                    context, instance, refresh_conn_info=True)

            power_on = old_vm_state != vm_states.STOPPED
            self.driver.finish_revert_migration(context, instance,
                                                network_info,
                                                block_device_info, power_on)

            instance.launched_at = timeutils.utcnow()
            instance.save(expected_task_state=task_states.RESIZE_REVERTING)

            instance_p = obj_base.obj_to_primitive(instance)
            migration_p = obj_base.obj_to_primitive(migration)
            self.network_api.migrate_instance_finish(context,
                                                     instance_p,
                                                     migration_p)

            # if the original vm state was STOPPED, set it back to STOPPED
            LOG.info(_("Updating instance to original state: '%s'") %
                     old_vm_state)
            if power_on:
                instance.vm_state = vm_states.ACTIVE
                instance.task_state = None
                instance.save()
            else:
                instance.task_state = task_states.POWERING_OFF
                instance.save()
                self.stop_instance(context, instance=instance)

            self._notify_about_instance_usage(
                    context, instance, "resize.revert.end")

            quotas.commit()

    def _prep_resize(self, context, image, instance, instance_type,
                     quotas, request_spec, filter_properties, node):

        if not filter_properties:
            filter_properties = {}

        if not instance['host']:
            self._set_instance_error_state(context, instance)
            msg = _('Instance has no source host')
            raise exception.MigrationError(msg)

        same_host = instance['host'] == self.host
        if same_host and not CONF.allow_resize_to_same_host:
            self._set_instance_error_state(context, instance)
            msg = _('destination same as source!')
            raise exception.MigrationError(msg)

        # NOTE(danms): Stash the new instance_type to avoid having to
        # look it up in the database later
        sys_meta = instance.system_metadata
        flavors.save_flavor_info(sys_meta, instance_type, prefix='new_')
        # NOTE(mriedem): Stash the old vm_state so we can set the
        # resized/reverted instance back to the same state later.
        vm_state = instance['vm_state']
        LOG.debug('Stashing vm_state: %s', vm_state, instance=instance)
        sys_meta['old_vm_state'] = vm_state
        instance.save()

        limits = filter_properties.get('limits', {})
        rt = self._get_resource_tracker(node)
        with rt.resize_claim(context, instance, instance_type,
                             limits=limits) as claim:
            LOG.audit(_('Migrating'), context=context, instance=instance)
            self.compute_rpcapi.resize_instance(
                    context, instance, claim.migration, image,
                    instance_type, quotas.reservations)

    @wrap_exception()
    @reverts_task_state
    @wrap_instance_event
    @wrap_instance_fault
    def prep_resize(self, context, image, instance, instance_type,
                    reservations, request_spec, filter_properties, node):
        """Initiates the process of moving a running instance to another
        host.

        Possibly changes the RAM and disk size in the process.
        """
        if node is None:
            node = self.driver.get_available_nodes(refresh=True)[0]
            LOG.debug("No node specified, defaulting to %s", node,
                      instance=instance)

        quotas = quotas_obj.Quotas.from_reservations(context,
                                                     reservations,
                                                     instance=instance)
        with self._error_out_instance_on_exception(context, instance,
                                                   quotas=quotas):
            self.conductor_api.notify_usage_exists(
                    context, instance, current_period=True)
            self._notify_about_instance_usage(
                    context, instance, "resize.prep.start")
            try:
                self._prep_resize(context, image, instance,
                                  instance_type, quotas,
                                  request_spec, filter_properties,
                                  node)
            # NOTE(dgenin): This is thrown in LibvirtDriver when the
            #               instance to be migrated is backed by LVM.
            #               Remove when LVM migration is implemented.
            except exception.MigrationPreCheckError:
                raise
            except Exception:
                # try to re-schedule the resize elsewhere:
                exc_info = sys.exc_info()
                self._reschedule_resize_or_reraise(context, image, instance,
                        exc_info, instance_type, quotas, request_spec,
                        filter_properties)
            finally:
                extra_usage_info = dict(
                        new_instance_type=instance_type['name'],
                        new_instance_type_id=instance_type['id'])

                self._notify_about_instance_usage(
                    context, instance, "resize.prep.end",
                    extra_usage_info=extra_usage_info)

    def _reschedule_resize_or_reraise(self, context, image, instance,
            exc_info, instance_type, quotas, request_spec,
            filter_properties):
        """Try to re-schedule the resize or re-raise the original error to
        error out the instance.
""" if not request_spec: request_spec = {} if not filter_properties: filter_properties = {} rescheduled = False instance_uuid = instance['uuid'] try: reschedule_method = self.compute_task_api.resize_instance scheduler_hint = dict(filter_properties=filter_properties) method_args = (instance, None, scheduler_hint, instance_type, quotas.reservations) task_state = task_states.RESIZE_PREP rescheduled = self._reschedule(context, request_spec, filter_properties, instance, reschedule_method, method_args, task_state, exc_info) except Exception as error: rescheduled = False LOG.exception(_LE("Error trying to reschedule"), instance_uuid=instance_uuid) compute_utils.add_instance_fault_from_exc(context, instance, error, exc_info=sys.exc_info()) self._notify_about_instance_usage(context, instance, 'resize.error', fault=error) if rescheduled: self._log_original_error(exc_info, instance_uuid) compute_utils.add_instance_fault_from_exc(context, instance, exc_info[1], exc_info=exc_info) self._notify_about_instance_usage(context, instance, 'resize.error', fault=exc_info[1]) else: # not re-scheduling raise exc_info[0], exc_info[1], exc_info[2] @wrap_exception() @reverts_task_state @wrap_instance_event @errors_out_migration @wrap_instance_fault def resize_instance(self, context, instance, image, reservations, migration, instance_type): """Starts the migration of a running instance to another host.""" quotas = quotas_obj.Quotas.from_reservations(context, reservations, instance=instance) with self._error_out_instance_on_exception(context, instance, quotas=quotas): if not instance_type: instance_type = objects.Flavor.get_by_id( context, migration['new_instance_type_id']) network_info = self._get_instance_nw_info(context, instance) migration.status = 'migrating' migration.save(context.elevated()) instance.task_state = task_states.RESIZE_MIGRATING instance.save(expected_task_state=task_states.RESIZE_PREP) self._notify_about_instance_usage( context, instance, "resize.start", network_info=network_info) bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( context, instance.uuid) block_device_info = self._get_instance_block_device_info( context, instance, bdms=bdms) disk_info = self.driver.migrate_disk_and_power_off( context, instance, migration.dest_host, instance_type, network_info, block_device_info) self._terminate_volume_connections(context, instance, bdms) migration_p = obj_base.obj_to_primitive(migration) instance_p = obj_base.obj_to_primitive(instance) self.network_api.migrate_instance_start(context, instance_p, migration_p) migration.status = 'post-migrating' migration.save(context.elevated()) instance.host = migration.dest_compute instance.node = migration.dest_node instance.task_state = task_states.RESIZE_MIGRATED instance.save(expected_task_state=task_states.RESIZE_MIGRATING) self.compute_rpcapi.finish_resize(context, instance, migration, image, disk_info, migration.dest_compute, reservations=quotas.reservations) self._notify_about_instance_usage(context, instance, "resize.end", network_info=network_info) self.instance_events.clear_events_for_instance(instance) def _terminate_volume_connections(self, context, instance, bdms): connector = self.driver.get_volume_connector(instance) for bdm in bdms: if bdm.is_volume: self.volume_api.terminate_connection(context, bdm.volume_id, connector) @staticmethod def _save_instance_info(instance, instance_type, sys_meta): flavors.save_flavor_info(sys_meta, instance_type) instance.instance_type_id = instance_type['id'] instance.memory_mb = instance_type['memory_mb'] 
instance.vcpus = instance_type['vcpus'] instance.root_gb = instance_type['root_gb'] instance.ephemeral_gb = instance_type['ephemeral_gb'] instance.system_metadata = sys_meta instance.save() def _finish_resize(self, context, instance, migration, disk_info, image): resize_instance = False old_instance_type_id = migration['old_instance_type_id'] new_instance_type_id = migration['new_instance_type_id'] old_instance_type = flavors.extract_flavor(instance) sys_meta = instance.system_metadata # NOTE(mriedem): Get the old_vm_state so we know if we should # power on the instance. If old_vm_state is not set we need to default # to ACTIVE for backwards compatibility old_vm_state = sys_meta.get('old_vm_state', vm_states.ACTIVE) flavors.save_flavor_info(sys_meta, old_instance_type, prefix='old_') if old_instance_type_id != new_instance_type_id: instance_type = flavors.extract_flavor(instance, prefix='new_') self._save_instance_info(instance, instance_type, sys_meta) resize_instance = True # NOTE(tr3buchet): setup networks on destination host self.network_api.setup_networks_on_host(context, instance, migration['dest_compute']) instance_p = obj_base.obj_to_primitive(instance) migration_p = obj_base.obj_to_primitive(migration) self.network_api.migrate_instance_finish(context, instance_p, migration_p) network_info = self._get_instance_nw_info(context, instance) instance.task_state = task_states.RESIZE_FINISH instance.system_metadata = sys_meta instance.save(expected_task_state=task_states.RESIZE_MIGRATED) self._notify_about_instance_usage( context, instance, "finish_resize.start", network_info=network_info) block_device_info = self._get_instance_block_device_info( context, instance, refresh_conn_info=True) # NOTE(mriedem): If the original vm_state was STOPPED, we don't # automatically power on the instance after it's migrated power_on = old_vm_state != vm_states.STOPPED try: self.driver.finish_migration(context, migration, instance, disk_info, network_info, image, resize_instance, block_device_info, power_on) except Exception: with excutils.save_and_reraise_exception(): if resize_instance: self._save_instance_info(instance, old_instance_type, sys_meta) migration.status = 'finished' migration.save(context.elevated()) instance.vm_state = vm_states.RESIZED instance.task_state = None instance.launched_at = timeutils.utcnow() instance.save(expected_task_state=task_states.RESIZE_FINISH) self._notify_about_instance_usage( context, instance, "finish_resize.end", network_info=network_info) @wrap_exception() @reverts_task_state @wrap_instance_event @errors_out_migration @wrap_instance_fault def finish_resize(self, context, disk_info, image, instance, reservations, migration): """Completes the migration process. Sets up the newly transferred disk and turns on the instance at its new host machine. 
""" quotas = quotas_obj.Quotas.from_reservations(context, reservations, instance=instance) try: self._finish_resize(context, instance, migration, disk_info, image) quotas.commit() except Exception: LOG.exception(_LE('Setting instance vm_state to ERROR'), instance=instance) with excutils.save_and_reraise_exception(): try: quotas.rollback() except Exception as qr_error: LOG.exception(_LE("Failed to rollback quota for failed " "finish_resize: %s"), qr_error, instance=instance) self._set_instance_error_state(context, instance) @object_compat @wrap_exception() @wrap_instance_fault def add_fixed_ip_to_instance(self, context, network_id, instance): """Calls network_api to add new fixed_ip to instance then injects the new network info and resets instance networking. """ self._notify_about_instance_usage( context, instance, "create_ip.start") network_info = self.network_api.add_fixed_ip_to_instance(context, instance, network_id) self._inject_network_info(context, instance, network_info) self.reset_network(context, instance) # NOTE(russellb) We just want to bump updated_at. See bug 1143466. instance.updated_at = timeutils.utcnow() instance.save() self._notify_about_instance_usage( context, instance, "create_ip.end", network_info=network_info) @object_compat @wrap_exception() @wrap_instance_fault def remove_fixed_ip_from_instance(self, context, address, instance): """Calls network_api to remove existing fixed_ip from instance by injecting the altered network info and resetting instance networking. """ self._notify_about_instance_usage( context, instance, "delete_ip.start") network_info = self.network_api.remove_fixed_ip_from_instance(context, instance, address) self._inject_network_info(context, instance, network_info) self.reset_network(context, instance) # NOTE(russellb) We just want to bump updated_at. See bug 1143466. 
        instance.updated_at = timeutils.utcnow()
        instance.save()

        self._notify_about_instance_usage(
            context, instance, "delete_ip.end", network_info=network_info)

    @wrap_exception()
    @reverts_task_state
    @wrap_instance_event
    @wrap_instance_fault
    def pause_instance(self, context, instance):
        """Pause an instance on this host."""
        context = context.elevated()
        LOG.audit(_('Pausing'), context=context, instance=instance)
        self._notify_about_instance_usage(context, instance, 'pause.start')
        self.driver.pause(instance)
        current_power_state = self._get_power_state(context, instance)
        instance.power_state = current_power_state
        instance.vm_state = vm_states.PAUSED
        instance.task_state = None
        instance.save(expected_task_state=task_states.PAUSING)
        self._notify_about_instance_usage(context, instance, 'pause.end')

    @wrap_exception()
    @reverts_task_state
    @wrap_instance_event
    @wrap_instance_fault
    def unpause_instance(self, context, instance):
        """Unpause a paused instance on this host."""
        context = context.elevated()
        LOG.audit(_('Unpausing'), context=context, instance=instance)
        self._notify_about_instance_usage(context, instance, 'unpause.start')
        self.driver.unpause(instance)
        current_power_state = self._get_power_state(context, instance)
        instance.power_state = current_power_state
        instance.vm_state = vm_states.ACTIVE
        instance.task_state = None
        instance.save(expected_task_state=task_states.UNPAUSING)
        self._notify_about_instance_usage(context, instance, 'unpause.end')

    @wrap_exception()
    def host_power_action(self, context, action):
        """Reboots, shuts down or powers up the host."""
        # TODO(russellb) Remove the unused host parameter from the driver API
        return self.driver.host_power_action(None, action)

    @wrap_exception()
    def host_maintenance_mode(self, context, host, mode):
        """Start/Stop host maintenance window. On start, it triggers
        guest VMs evacuation.
""" return self.driver.host_maintenance_mode(host, mode) @wrap_exception() def set_host_enabled(self, context, enabled): """Sets the specified host's ability to accept new instances.""" # TODO(russellb) Remove the unused host parameter from the driver API return self.driver.set_host_enabled(None, enabled) @wrap_exception() def get_host_uptime(self, context): """Returns the result of calling "uptime" on the target host.""" return self.driver.get_host_uptime(self.host) @object_compat @wrap_exception() @wrap_instance_fault def get_diagnostics(self, context, instance): """Retrieve diagnostics for an instance on this host.""" current_power_state = self._get_power_state(context, instance) if current_power_state == power_state.RUNNING: LOG.audit(_("Retrieving diagnostics"), context=context, instance=instance) return self.driver.get_diagnostics(instance) else: raise exception.InstanceInvalidState( attr='power_state', instance_uuid=instance.uuid, state=instance.power_state, method='get_diagnostics') @object_compat @wrap_exception() @wrap_instance_fault def get_instance_diagnostics(self, context, instance): """Retrieve diagnostics for an instance on this host.""" current_power_state = self._get_power_state(context, instance) if current_power_state == power_state.RUNNING: LOG.audit(_("Retrieving diagnostics"), context=context, instance=instance) diags = self.driver.get_instance_diagnostics(instance) return diags.serialize() else: raise exception.InstanceInvalidState( attr='power_state', instance_uuid=instance.uuid, state=instance.power_state, method='get_diagnostics') @wrap_exception() @reverts_task_state @wrap_instance_event @wrap_instance_fault def suspend_instance(self, context, instance): """Suspend the given instance.""" context = context.elevated() with self._error_out_instance_on_exception(context, instance, instance_state=instance['vm_state']): self.driver.suspend(instance) current_power_state = self._get_power_state(context, instance) instance.power_state = current_power_state instance.vm_state = vm_states.SUSPENDED instance.task_state = None instance.save(expected_task_state=task_states.SUSPENDING) self._notify_about_instance_usage(context, instance, 'suspend') @wrap_exception() @reverts_task_state @wrap_instance_event @wrap_instance_fault def resume_instance(self, context, instance): """Resume the given suspended instance.""" context = context.elevated() LOG.audit(_('Resuming'), context=context, instance=instance) network_info = self._get_instance_nw_info(context, instance) block_device_info = self._get_instance_block_device_info( context, instance) self.driver.resume(context, instance, network_info, block_device_info) instance.power_state = self._get_power_state(context, instance) instance.vm_state = vm_states.ACTIVE instance.task_state = None instance.save(expected_task_state=task_states.RESUMING) self._notify_about_instance_usage(context, instance, 'resume') @wrap_exception() @reverts_task_state @wrap_instance_event @wrap_instance_fault def shelve_instance(self, context, instance, image_id): """Shelve an instance. This should be used when you want to take a snapshot of the instance. It also adds system_metadata that can be used by a periodic task to offload the shelved instance after a period of time. :param context: request context :param instance: an Instance object :param image_id: an image id to snapshot to. 
""" self.conductor_api.notify_usage_exists( context, obj_base.obj_to_primitive(instance), current_period=True) self._notify_about_instance_usage(context, instance, 'shelve.start') def update_task_state(task_state, expected_state=task_states.SHELVING): shelving_state_map = { task_states.IMAGE_PENDING_UPLOAD: task_states.SHELVING_IMAGE_PENDING_UPLOAD, task_states.IMAGE_UPLOADING: task_states.SHELVING_IMAGE_UPLOADING, task_states.SHELVING: task_states.SHELVING} task_state = shelving_state_map[task_state] expected_state = shelving_state_map[expected_state] instance.task_state = task_state instance.save(expected_task_state=expected_state) self.driver.power_off(instance) current_power_state = self._get_power_state(context, instance) self.driver.snapshot(context, instance, image_id, update_task_state) instance.system_metadata['shelved_at'] = timeutils.strtime() instance.system_metadata['shelved_image_id'] = image_id instance.system_metadata['shelved_host'] = self.host instance.vm_state = vm_states.SHELVED instance.task_state = None if CONF.shelved_offload_time == 0: instance.task_state = task_states.SHELVING_OFFLOADING instance.power_state = current_power_state instance.save(expected_task_state=[ task_states.SHELVING, task_states.SHELVING_IMAGE_UPLOADING]) self._notify_about_instance_usage(context, instance, 'shelve.end') if CONF.shelved_offload_time == 0: self.shelve_offload_instance(context, instance) @wrap_exception() @reverts_task_state @wrap_instance_fault def shelve_offload_instance(self, context, instance): """Remove a shelved instance from the hypervisor. This frees up those resources for use by other instances, but may lead to slower unshelve times for this instance. This method is used by volume backed instances since restoring them doesn't involve the potentially large download of an image. :param context: request context :param instance: nova.objects.instance.Instance """ self._notify_about_instance_usage(context, instance, 'shelve_offload.start') self.driver.power_off(instance) current_power_state = self._get_power_state(context, instance) network_info = self._get_instance_nw_info(context, instance) block_device_info = self._get_instance_block_device_info(context, instance) self.driver.destroy(context, instance, network_info, block_device_info) instance.power_state = current_power_state instance.host = None instance.node = None instance.vm_state = vm_states.SHELVED_OFFLOADED instance.task_state = None instance.save(expected_task_state=[task_states.SHELVING, task_states.SHELVING_OFFLOADING]) self._notify_about_instance_usage(context, instance, 'shelve_offload.end') @wrap_exception() @reverts_task_state @wrap_instance_event @wrap_instance_fault def unshelve_instance(self, context, instance, image, filter_properties=None, node=None): """Unshelve the instance. :param context: request context :param instance: a nova.objects.instance.Instance object :param image: an image to build from. If None we assume a volume backed instance. :param filter_properties: dict containing limits, retry info etc. 
:param node: target compute node """ if filter_properties is None: filter_properties = {} @utils.synchronized(instance['uuid']) def do_unshelve_instance(): self._unshelve_instance(context, instance, image, filter_properties, node) do_unshelve_instance() def _unshelve_instance_key_scrub(self, instance): """Remove data from the instance that may cause side effects.""" cleaned_keys = dict( key_data=instance.key_data, auto_disk_config=instance.auto_disk_config) instance.key_data = None instance.auto_disk_config = False return cleaned_keys def _unshelve_instance_key_restore(self, instance, keys): """Restore previously scrubbed keys before saving the instance.""" instance.update(keys) def _unshelve_instance(self, context, instance, image, filter_properties, node): self._notify_about_instance_usage(context, instance, 'unshelve.start') instance.task_state = task_states.SPAWNING instance.save() bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( context, instance.uuid) block_device_info = self._prep_block_device(context, instance, bdms, do_check_attach=False) scrubbed_keys = self._unshelve_instance_key_scrub(instance) if node is None: node = self.driver.get_available_nodes()[0] LOG.debug('No node specified, defaulting to %s', node, instance=instance) rt = self._get_resource_tracker(node) limits = filter_properties.get('limits', {}) if image: shelved_image_ref = instance.image_ref instance.image_ref = image['id'] self.network_api.migrate_instance_finish(context, instance, {'source_compute': '', 'dest_compute': self.host}) network_info = self._get_instance_nw_info(context, instance) try: with rt.instance_claim(context, instance, limits): self.driver.spawn(context, instance, image, injected_files=[], admin_password=None, network_info=network_info, block_device_info=block_device_info) except Exception: with excutils.save_and_reraise_exception(): LOG.exception(_LE('Instance failed to spawn'), instance=instance) if image: instance.image_ref = shelved_image_ref self.image_api.delete(context, image['id']) self._unshelve_instance_key_restore(instance, scrubbed_keys) instance.power_state = self._get_power_state(context, instance) instance.vm_state = vm_states.ACTIVE instance.task_state = None instance.launched_at = timeutils.utcnow() instance.save(expected_task_state=task_states.SPAWNING) self._notify_about_instance_usage(context, instance, 'unshelve.end') @wrap_instance_fault def reset_network(self, context, instance): """Reset networking on the given instance.""" LOG.debug('Reset network', context=context, instance=instance) self.driver.reset_network(instance) def _inject_network_info(self, context, instance, network_info): """Inject network info for the given instance.""" LOG.debug('Inject network info', context=context, instance=instance) LOG.debug('network_info to inject: |%s|', network_info, instance=instance) self.driver.inject_network_info(instance, network_info) @wrap_instance_fault def inject_network_info(self, context, instance): """Inject network info, but don't return the info.""" network_info = self._get_instance_nw_info(context, instance) self._inject_network_info(context, instance, network_info) @object_compat @messaging.expected_exceptions(NotImplementedError, exception.InstanceNotFound) @wrap_exception() @wrap_instance_fault def get_console_output(self, context, instance, tail_length): """Send the console output for the given instance.""" context = context.elevated() LOG.audit(_("Get console output"), context=context, instance=instance) output = 
self.driver.get_console_output(context, instance) if tail_length is not None: output = self._tail_log(output, tail_length) return output.decode('utf-8', 'replace').encode('ascii', 'replace') def _tail_log(self, log, length): try: length = int(length) except ValueError: length = 0 if length == 0: return '' else: return '\n'.join(log.split('\n')[-int(length):]) @messaging.expected_exceptions(exception.ConsoleTypeInvalid, exception.InstanceNotReady, exception.InstanceNotFound, exception.ConsoleTypeUnavailable, NotImplementedError) @object_compat @wrap_exception() @wrap_instance_fault def get_vnc_console(self, context, console_type, instance): """Return connection information for a vnc console.""" context = context.elevated() LOG.debug("Getting vnc console", instance=instance) token = str(uuid.uuid4()) if not CONF.vnc_enabled: raise exception.ConsoleTypeUnavailable(console_type=console_type) if console_type == 'novnc': # For essex, novncproxy_base_url must include the full path # including the html file (like http://myhost/vnc_auto.html) access_url = '%s?token=%s' % (CONF.novncproxy_base_url, token) elif console_type == 'xvpvnc': access_url = '%s?token=%s' % (CONF.xvpvncproxy_base_url, token) else: raise exception.ConsoleTypeInvalid(console_type=console_type) try: # Retrieve connect info from driver, and then decorate with our # access info token connect_info = self.driver.get_vnc_console(context, instance) connect_info['token'] = token connect_info['access_url'] = access_url except exception.InstanceNotFound: if instance['vm_state'] != vm_states.BUILDING: raise raise exception.InstanceNotReady(instance_id=instance['uuid']) return connect_info @object_compat @messaging.expected_exceptions(exception.ConsoleTypeInvalid, exception.InstanceNotReady, exception.InstanceNotFound, exception.ConsoleTypeUnavailable) @wrap_exception() @wrap_instance_fault def get_spice_console(self, context, console_type, instance): """Return connection information for a spice console.""" context = context.elevated() LOG.debug("Getting spice console", instance=instance) token = str(uuid.uuid4()) if not CONF.spice.enabled: raise exception.ConsoleTypeUnavailable(console_type=console_type) if console_type == 'spice-html5': # For essex, spicehtml5proxy_base_url must include the full path # including the html file (like http://myhost/spice_auto.html) access_url = '%s?token=%s' % (CONF.spice.html5proxy_base_url, token) else: raise exception.ConsoleTypeInvalid(console_type=console_type) try: # Retrieve connect info from driver, and then decorate with our # access info token connect_info = self.driver.get_spice_console(context, instance) connect_info['token'] = token connect_info['access_url'] = access_url except exception.InstanceNotFound: if instance['vm_state'] != vm_states.BUILDING: raise raise exception.InstanceNotReady(instance_id=instance['uuid']) return connect_info @object_compat @messaging.expected_exceptions(exception.ConsoleTypeInvalid, exception.InstanceNotReady, exception.InstanceNotFound, exception.ConsoleTypeUnavailable, NotImplementedError) @wrap_exception() @wrap_instance_fault def get_rdp_console(self, context, console_type, instance): """Return connection information for a RDP console.""" context = context.elevated() LOG.debug("Getting RDP console", instance=instance) token = str(uuid.uuid4()) if not CONF.rdp.enabled: raise exception.ConsoleTypeUnavailable(console_type=console_type) if console_type == 'rdp-html5': access_url = '%s?token=%s' % (CONF.rdp.html5_proxy_base_url, token) else: raise 
exception.ConsoleTypeInvalid(console_type=console_type) try: # Retrieve connect info from driver, and then decorate with our # access info token connect_info = self.driver.get_rdp_console(context, instance) connect_info['token'] = token connect_info['access_url'] = access_url except exception.InstanceNotFound: if instance['vm_state'] != vm_states.BUILDING: raise raise exception.InstanceNotReady(instance_id=instance['uuid']) return connect_info @messaging.expected_exceptions(exception.ConsoleTypeInvalid, exception.InstanceNotReady, exception.InstanceNotFound) @object_compat @wrap_exception() @wrap_instance_fault def validate_console_port(self, ctxt, instance, port, console_type): if console_type == "spice-html5": console_info = self.driver.get_spice_console(ctxt, instance) elif console_type == "rdp-html5": console_info = self.driver.get_rdp_console(ctxt, instance) else: console_info = self.driver.get_vnc_console(ctxt, instance) return console_info['port'] == port @object_compat @wrap_exception() @reverts_task_state @wrap_instance_fault def reserve_block_device_name(self, context, instance, device, volume_id, disk_bus=None, device_type=None): # NOTE(ndipanov): disk_bus and device_type will be set to None if not # passed (by older clients) and defaulted by the virt driver. Remove # default values on the next major RPC version bump. @utils.synchronized(instance['uuid']) def do_reserve(): bdms = ( objects.BlockDeviceMappingList.get_by_instance_uuid( context, instance.uuid)) device_name = compute_utils.get_device_name_for_instance( context, instance, bdms, device) # NOTE(vish): create bdm here to avoid race condition bdm = objects.BlockDeviceMapping( source_type='volume', destination_type='volume', instance_uuid=instance.uuid, volume_id=volume_id or 'reserved', device_name=device_name, disk_bus=disk_bus, device_type=device_type) bdm.create(context) return device_name return do_reserve() @object_compat @wrap_exception() @reverts_task_state @wrap_instance_fault def attach_volume(self, context, volume_id, mountpoint, instance, bdm=None): """Attach a volume to an instance.""" if not bdm: bdm = objects.BlockDeviceMapping.get_by_volume_id( context, volume_id) driver_bdm = driver_block_device.DriverVolumeBlockDevice(bdm) try: return self._attach_volume(context, instance, driver_bdm) except Exception: with excutils.save_and_reraise_exception(): bdm.destroy(context) def _attach_volume(self, context, instance, bdm): context = context.elevated() LOG.audit(_('Attaching volume %(volume_id)s to %(mountpoint)s'), {'volume_id': bdm.volume_id, 'mountpoint': bdm['mount_device']}, context=context, instance=instance) try: bdm.attach(context, instance, self.volume_api, self.driver, do_check_attach=False, do_driver_attach=True) except Exception: # pylint: disable=W0702 with excutils.save_and_reraise_exception(): LOG.exception(_LE("Failed to attach %(volume_id)s " "at %(mountpoint)s"), {'volume_id': bdm.volume_id, 'mountpoint': bdm['mount_device']}, context=context, instance=instance) self.volume_api.unreserve_volume(context, bdm.volume_id) info = {'volume_id': bdm.volume_id} self._notify_about_instance_usage( context, instance, "volume.attach", extra_usage_info=info) def _detach_volume(self, context, instance, bdm): """Do the actual driver detach using block device mapping.""" mp = bdm.device_name volume_id = bdm.volume_id LOG.audit(_('Detach volume %(volume_id)s from mountpoint %(mp)s'), {'volume_id': volume_id, 'mp': mp}, context=context, instance=instance) connection_info = jsonutils.loads(bdm.connection_info) # 
NOTE(vish): We currently don't use the serial when disconnecting, # but added for completeness in case we ever do. if connection_info and 'serial' not in connection_info: connection_info['serial'] = volume_id try: if not self.driver.instance_exists(instance): LOG.warn(_('Detaching volume from unknown instance'), context=context, instance=instance) encryption = encryptors.get_encryption_metadata( context, self.volume_api, volume_id, connection_info) self.driver.detach_volume(connection_info, instance, mp, encryption=encryption) except Exception: # pylint: disable=W0702 with excutils.save_and_reraise_exception(): LOG.exception(_LE('Failed to detach volume %(volume_id)s ' 'from %(mp)s'), {'volume_id': volume_id, 'mp': mp}, context=context, instance=instance) self.volume_api.roll_detaching(context, volume_id) @object_compat @wrap_exception() @reverts_task_state @wrap_instance_fault def detach_volume(self, context, volume_id, instance): """Detach a volume from an instance.""" bdm = objects.BlockDeviceMapping.get_by_volume_id( context, volume_id) if CONF.volume_usage_poll_interval > 0: vol_stats = [] mp = bdm.device_name # Handle bootable volumes which will not contain /dev/ if '/dev/' in mp: mp = mp[5:] try: vol_stats = self.driver.block_stats(instance.name, mp) except NotImplementedError: pass if vol_stats: LOG.debug("Updating volume usage cache with totals", instance=instance) rd_req, rd_bytes, wr_req, wr_bytes, flush_ops = vol_stats self.conductor_api.vol_usage_update(context, volume_id, rd_req, rd_bytes, wr_req, wr_bytes, instance, update_totals=True) self._detach_volume(context, instance, bdm) connector = self.driver.get_volume_connector(instance) self.volume_api.terminate_connection(context, volume_id, connector) bdm.destroy() info = dict(volume_id=volume_id) self._notify_about_instance_usage( context, instance, "volume.detach", extra_usage_info=info) self.volume_api.detach(context.elevated(), volume_id) def _init_volume_connection(self, context, new_volume_id, old_volume_id, connector, instance, bdm): new_cinfo = self.volume_api.initialize_connection(context, new_volume_id, connector) old_cinfo = jsonutils.loads(bdm['connection_info']) if old_cinfo and 'serial' not in old_cinfo: old_cinfo['serial'] = old_volume_id new_cinfo['serial'] = old_cinfo['serial'] return (old_cinfo, new_cinfo) def _swap_volume(self, context, instance, bdm, connector, old_volume_id, new_volume_id): mountpoint = bdm['device_name'] failed = False new_cinfo = None resize_to = 0 try: old_cinfo, new_cinfo = self._init_volume_connection(context, new_volume_id, old_volume_id, connector, instance, bdm) old_vol_size = self.volume_api.get(context, old_volume_id)['size'] new_vol_size = self.volume_api.get(context, new_volume_id)['size'] if new_vol_size > old_vol_size: resize_to = new_vol_size self.driver.swap_volume(old_cinfo, new_cinfo, instance, mountpoint, resize_to) except Exception: # pylint: disable=W0702 failed = True with excutils.save_and_reraise_exception(): if new_cinfo: msg = _LE("Failed to swap volume %(old_volume_id)s " "for %(new_volume_id)s") LOG.exception(msg, {'old_volume_id': old_volume_id, 'new_volume_id': new_volume_id}, context=context, instance=instance) else: msg = _LE("Failed to connect to volume %(volume_id)s " "with volume at %(mountpoint)s") LOG.exception(msg, {'volume_id': new_volume_id, 'mountpoint': bdm['device_name']}, context=context, instance=instance) self.volume_api.roll_detaching(context, old_volume_id) self.volume_api.unreserve_volume(context, new_volume_id) finally: conn_volume = 
new_volume_id if failed else old_volume_id if new_cinfo: self.volume_api.terminate_connection(context, conn_volume, connector) # If Cinder initiated the swap, it will keep # the original ID comp_ret = self.volume_api.migrate_volume_completion( context, old_volume_id, new_volume_id, error=failed) return (comp_ret, new_cinfo) @wrap_exception() @reverts_task_state @wrap_instance_fault def swap_volume(self, context, old_volume_id, new_volume_id, instance): """Swap volume for an instance.""" context = context.elevated() bdm = objects.BlockDeviceMapping.get_by_volume_id( context, old_volume_id, instance_uuid=instance.uuid) connector = self.driver.get_volume_connector(instance) comp_ret, new_cinfo = self._swap_volume(context, instance, bdm, connector, old_volume_id, new_volume_id) save_volume_id = comp_ret['save_volume_id'] mountpoint = bdm.device_name # Update bdm values = { 'connection_info': jsonutils.dumps(new_cinfo), 'delete_on_termination': False, 'source_type': 'volume', 'destination_type': 'volume', 'snapshot_id': None, 'volume_id': save_volume_id, 'volume_size': None, 'no_device': None} bdm.update(values) bdm.save() self.volume_api.attach(context, new_volume_id, instance.uuid, mountpoint) # Remove old connection self.volume_api.detach(context.elevated(), old_volume_id) @wrap_exception() def remove_volume_connection(self, context, volume_id, instance): """Remove a volume connection using the volume api.""" # NOTE(vish): We don't want to actually mark the volume # detached, or delete the bdm, just remove the # connection from this host. # NOTE(PhilDay): Can't use object_compat decorator here as # instance is not the second parameter if isinstance(instance, dict): metas = ['metadata', 'system_metadata'] instance = objects.Instance._from_db_object( context, objects.Instance(), instance, expected_attrs=metas) instance._context = context try: bdm = objects.BlockDeviceMapping.get_by_volume_id( context, volume_id) self._detach_volume(context, instance, bdm) connector = self.driver.get_volume_connector(instance) self.volume_api.terminate_connection(context, volume_id, connector) except exception.NotFound: pass @object_compat @wrap_exception() @reverts_task_state @wrap_instance_fault def attach_interface(self, context, instance, network_id, port_id, requested_ip): """Use hotplug to add an network adapter to an instance.""" network_info = self.network_api.allocate_port_for_instance( context, instance, port_id, network_id, requested_ip) if len(network_info) != 1: LOG.error(_('allocate_port_for_instance returned %(ports)s ports') % dict(ports=len(network_info))) raise exception.InterfaceAttachFailed( instance_uuid=instance.uuid) image_ref = instance.get('image_ref') image_meta = compute_utils.get_image_metadata( context, self.image_api, image_ref, instance) self.driver.attach_interface(instance, image_meta, network_info[0]) return network_info[0] @object_compat @wrap_exception() @reverts_task_state @wrap_instance_fault def detach_interface(self, context, instance, port_id): """Detach an network adapter from an instance.""" network_info = instance.info_cache.network_info condemned = None for vif in network_info: if vif['id'] == port_id: condemned = vif break if condemned is None: raise exception.PortNotFound(_("Port %s is not " "attached") % port_id) self.network_api.deallocate_port_for_instance(context, instance, port_id) self.driver.detach_interface(instance, condemned) def _get_compute_info(self, context, host): service = objects.Service.get_by_compute_host(context, host) try: return 
service.compute_node except IndexError: raise exception.NotFound(_("Host %s not found") % host) @wrap_exception() def check_instance_shared_storage(self, ctxt, instance, data): """Check if the instance files are shared :param context: security context :param data: result of driver.check_instance_shared_storage_local Returns True if instance disks located on shared storage and False otherwise. """ return self.driver.check_instance_shared_storage_remote(ctxt, data) @wrap_exception() @wrap_instance_fault def check_can_live_migrate_destination(self, ctxt, instance, block_migration, disk_over_commit): """Check if it is possible to execute live migration. This runs checks on the destination host, and then calls back to the source host to check the results. :param context: security context :param instance: dict of instance data :param block_migration: if true, prepare for block migration :param disk_over_commit: if true, allow disk over commit :returns: a dict containing migration info """ src_compute_info = obj_base.obj_to_primitive( self._get_compute_info(ctxt, instance.host)) dst_compute_info = obj_base.obj_to_primitive( self._get_compute_info(ctxt, CONF.host)) dest_check_data = self.driver.check_can_live_migrate_destination(ctxt, instance, src_compute_info, dst_compute_info, block_migration, disk_over_commit) migrate_data = {} try: migrate_data = self.compute_rpcapi.\ check_can_live_migrate_source(ctxt, instance, dest_check_data) finally: self.driver.check_can_live_migrate_destination_cleanup(ctxt, dest_check_data) if 'migrate_data' in dest_check_data: migrate_data.update(dest_check_data['migrate_data']) return migrate_data @wrap_exception() @wrap_instance_fault def check_can_live_migrate_source(self, ctxt, instance, dest_check_data): """Check if it is possible to execute live migration. This checks if the live migration can succeed, based on the results from check_can_live_migrate_destination. :param context: security context :param instance: dict of instance data :param dest_check_data: result of check_can_live_migrate_destination :returns: a dict containing migration info """ is_volume_backed = self.compute_api.is_volume_backed_instance(ctxt, instance) dest_check_data['is_volume_backed'] = is_volume_backed return self.driver.check_can_live_migrate_source(ctxt, instance, dest_check_data) @object_compat @wrap_exception() @wrap_instance_fault def pre_live_migration(self, context, instance, block_migration, disk, migrate_data): """Preparations for live migration at dest host. :param context: security context :param instance: dict of instance data :param block_migration: if true, prepare for block migration :param migrate_data: if not None, it is a dict which holds data required for live migration without shared storage. """ block_device_info = self._get_instance_block_device_info( context, instance, refresh_conn_info=True) network_info = self._get_instance_nw_info(context, instance) self._notify_about_instance_usage( context, instance, "live_migration.pre.start", network_info=network_info) pre_live_migration_data = self.driver.pre_live_migration(context, instance, block_device_info, network_info, disk, migrate_data) # NOTE(tr3buchet): setup networks on destination host self.network_api.setup_networks_on_host(context, instance, self.host) # Creating filters to hypervisors and firewalls. # An example is that nova-instance-instance-xxx, # which is written to libvirt.xml(Check "virsh nwfilter-list") # This nwfilter is necessary on the destination host. 
        # In addition, this method is creating filtering rule
        # onto destination host.
        self.driver.ensure_filtering_rules_for_instance(instance,
                                                        network_info)

        self._notify_about_instance_usage(
            context, instance, "live_migration.pre.end",
            network_info=network_info)

        return pre_live_migration_data

    @wrap_exception()
    @wrap_instance_fault
    def live_migration(self, context, dest, instance, block_migration,
                       migrate_data):
        """Executing live migration.

        :param context: security context
        :param instance: a nova.objects.instance.Instance object
        :param dest: destination host
        :param block_migration: if true, prepare for block migration
        :param migrate_data: implementation specific params

        """
        # NOTE(danms): since instance is not the first parameter, we can't
        # use @object_compat on this method. Since this is the only example,
        # we do this manually instead of complicating the decorator
        if not isinstance(instance, obj_base.NovaObject):
            expected = ['metadata', 'system_metadata',
                        'security_groups', 'info_cache']
            instance = objects.Instance._from_db_object(
                context, objects.Instance(), instance,
                expected_attrs=expected)

        # Create a local copy since we'll be modifying the dictionary
        migrate_data = dict(migrate_data or {})
        try:
            if block_migration:
                disk = self.driver.get_instance_disk_info(instance.name)
            else:
                disk = None

            pre_migration_data = self.compute_rpcapi.pre_live_migration(
                context, instance,
                block_migration, disk, dest, migrate_data)
            migrate_data['pre_live_migration_result'] = pre_migration_data

        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE('Pre live migration failed at %s'),
                              dest, instance=instance)
                self._rollback_live_migration(context, instance, dest,
                                              block_migration, migrate_data)

        # Executing live migration
        # live_migration might raise exceptions, but
        # nothing must be recovered in this version.
        self.driver.live_migration(context, instance, dest,
                                   self._post_live_migration,
                                   self._rollback_live_migration,
                                   block_migration, migrate_data)

    def _live_migration_cleanup_flags(self, block_migration, migrate_data):
        """Determine whether disks or instance path need to be cleaned up
        after live migration (at source on success, at destination on
        rollback)

        Block migration needs empty image at destination host before
        migration starts, so if any failure occurs, any empty images have
        to be deleted.
        Also Volume backed live migration w/o shared storage needs to delete
        newly created instance-xxx dir on the destination as a part of its
        rollback process

        :param block_migration: if true, it was a block migration
        :param migrate_data: implementation specific data
        :returns: (bool, bool) -- do_cleanup, destroy_disks
        """
        # NOTE(angdraug): block migration wouldn't have been allowed if either
        # block storage or instance path were shared
        is_shared_block_storage = not block_migration
        is_shared_instance_path = not block_migration
        if migrate_data:
            is_shared_block_storage = migrate_data.get(
                'is_shared_block_storage', is_shared_block_storage)
            is_shared_instance_path = migrate_data.get(
                'is_shared_instance_path', is_shared_instance_path)

        # No instance booting at source host, but instance dir
        # must be deleted for preparing next block migration
        # or next live migration without shared storage
        do_cleanup = block_migration or not is_shared_instance_path
        destroy_disks = not is_shared_block_storage

        return (do_cleanup, destroy_disks)

    @wrap_exception()
    @wrap_instance_fault
    def _post_live_migration(self, ctxt, instance,
                             dest, block_migration=False, migrate_data=None):
        """Post operations for live migration.

        This method is called from live_migration and mainly updates the
        database record.

        :param ctxt: security context
        :param instance: instance dict
        :param dest: destination host
        :param block_migration: if true, prepare for block migration
        :param migrate_data: if not None, it is a dict which has data
        required for live migration without shared storage

        """
        LOG.info(_('_post_live_migration() is started..'),
                 instance=instance)

        bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
            ctxt, instance['uuid'])

        # Cleanup source host post live-migration
        block_device_info = self._get_instance_block_device_info(
            ctxt, instance, bdms=bdms)
        self.driver.post_live_migration(ctxt, instance, block_device_info,
                                        migrate_data)

        # Detaching volumes.
        connector = self.driver.get_volume_connector(instance)
        for bdm in bdms:
            # NOTE(vish): We don't want to actually mark the volume
            #             detached, or delete the bdm, just remove the
            #             connection from this host.

            # remove the volume connection without detaching from hypervisor
            # because the instance is not running anymore on the current host
            if bdm.is_volume:
                self.volume_api.terminate_connection(ctxt, bdm.volume_id,
                                                     connector)

        # Releasing vlan.
        # (not necessary in current implementation?)

        network_info = self._get_instance_nw_info(ctxt, instance)
        self._notify_about_instance_usage(ctxt, instance,
                                          "live_migration._post.start",
                                          network_info=network_info)
        # Releasing security group ingress rule.
        self.driver.unfilter_instance(instance,
                                      network_info)

        migration = {'source_compute': self.host,
                     'dest_compute': dest, }
        self.network_api.migrate_instance_start(ctxt,
                                                instance,
                                                migration)

        destroy_vifs = False
        try:
            self.driver.post_live_migration_at_source(ctxt, instance,
                                                      network_info)
        except NotImplementedError as ex:
            LOG.debug(ex, instance=instance)
            # For all hypervisors other than libvirt, there is a possibility
            # they are unplugging networks from source node in the cleanup
            # method
            destroy_vifs = True

        # Define domain at destination host, without doing it,
        # pause/suspend/terminate do not work.
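        # Descriptive note on the remaining source-side flow: the destination
        # is asked to finish via post_live_migration_at_destination, then
        # _live_migration_cleanup_flags() decides whether the local instance
        # directory and disks may be removed, and finally networking for the
        # instance is torn down on this (source) host.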
self.compute_rpcapi.post_live_migration_at_destination(ctxt, instance, block_migration, dest) do_cleanup, destroy_disks = self._live_migration_cleanup_flags( block_migration, migrate_data) if do_cleanup: self.driver.cleanup(ctxt, instance, network_info, destroy_disks=destroy_disks, migrate_data=migrate_data, destroy_vifs=destroy_vifs) # NOTE(tr3buchet): tear down networks on source host self.network_api.setup_networks_on_host(ctxt, instance, self.host, teardown=True) self.instance_events.clear_events_for_instance(instance) self._notify_about_instance_usage(ctxt, instance, "live_migration._post.end", network_info=network_info) LOG.info(_('Migrating instance to %s finished successfully.'), dest, instance=instance) LOG.info(_("You may see the error \"libvirt: QEMU error: " "Domain not found: no domain with matching name.\" " "This error can be safely ignored."), instance=instance) if CONF.vnc_enabled or CONF.spice.enabled or CONF.rdp.enabled: if CONF.cells.enable: self.cells_rpcapi.consoleauth_delete_tokens(ctxt, instance['uuid']) else: self.consoleauth_rpcapi.delete_tokens_for_instance(ctxt, instance['uuid']) @object_compat @wrap_exception() @wrap_instance_fault def post_live_migration_at_destination(self, context, instance, block_migration): """Post operations for live migration . :param context: security context :param instance: Instance dict :param block_migration: if true, prepare for block migration """ LOG.info(_('Post operation of migration started'), instance=instance) # NOTE(tr3buchet): setup networks on destination host # this is called a second time because # multi_host does not create the bridge in # plug_vifs self.network_api.setup_networks_on_host(context, instance, self.host) migration = {'source_compute': instance['host'], 'dest_compute': self.host, } self.network_api.migrate_instance_finish(context, instance, migration) network_info = self._get_instance_nw_info(context, instance) self._notify_about_instance_usage( context, instance, "live_migration.post.dest.start", network_info=network_info) block_device_info = self._get_instance_block_device_info(context, instance) self.driver.post_live_migration_at_destination(context, instance, network_info, block_migration, block_device_info) # Restore instance state current_power_state = self._get_power_state(context, instance) node_name = None try: compute_node = self._get_compute_info(context, self.host) node_name = compute_node.hypervisor_hostname except exception.NotFound: LOG.exception(_LE('Failed to get compute_info for %s'), self.host) finally: instance.host = self.host instance.power_state = current_power_state instance.vm_state = vm_states.ACTIVE instance.task_state = None instance.node = node_name instance.save(expected_task_state=task_states.MIGRATING) # NOTE(vish): this is necessary to update dhcp self.network_api.setup_networks_on_host(context, instance, self.host) self._notify_about_instance_usage( context, instance, "live_migration.post.dest.end", network_info=network_info) @wrap_exception() @wrap_instance_fault def _rollback_live_migration(self, context, instance, dest, block_migration, migrate_data=None): """Recovers Instance/volume state from migrating -> running. :param context: security context :param instance: nova.db.sqlalchemy.models.Instance :param dest: This method is called from live migration src host. This param specifies destination host. :param block_migration: if true, prepare for block migration :param migrate_data: if not none, contains implementation specific data. 
""" instance.vm_state = vm_states.ACTIVE instance.task_state = None instance.save(expected_task_state=[task_states.MIGRATING]) # NOTE(tr3buchet): setup networks on source host (really it's re-setup) self.network_api.setup_networks_on_host(context, instance, self.host) bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( context, instance['uuid']) for bdm in bdms: if bdm.is_volume: self.compute_rpcapi.remove_volume_connection( context, instance, bdm.volume_id, dest) self._notify_about_instance_usage(context, instance, "live_migration._rollback.start") do_cleanup, destroy_disks = self._live_migration_cleanup_flags( block_migration, migrate_data) if do_cleanup: self.compute_rpcapi.rollback_live_migration_at_destination( context, instance, dest, destroy_disks=destroy_disks, migrate_data=migrate_data) self._notify_about_instance_usage(context, instance, "live_migration._rollback.end") @object_compat @wrap_exception() @wrap_instance_fault def rollback_live_migration_at_destination(self, context, instance, destroy_disks=True, migrate_data=None): """Cleaning up image directory that is created pre_live_migration. :param context: security context :param instance: a nova.objects.instance.Instance object sent over rpc """ network_info = self._get_instance_nw_info(context, instance) self._notify_about_instance_usage( context, instance, "live_migration.rollback.dest.start", network_info=network_info) # NOTE(tr3buchet): tear down networks on destination host self.network_api.setup_networks_on_host(context, instance, self.host, teardown=True) # NOTE(vish): The mapping is passed in so the driver can disconnect # from remote volumes if necessary block_device_info = self._get_instance_block_device_info(context, instance) self.driver.rollback_live_migration_at_destination( context, instance, network_info, block_device_info, destroy_disks=destroy_disks, migrate_data=migrate_data) self._notify_about_instance_usage( context, instance, "live_migration.rollback.dest.end", network_info=network_info) @periodic_task.periodic_task( spacing=CONF.heal_instance_info_cache_interval) def _heal_instance_info_cache(self, context): """Called periodically. On every call, try to update the info_cache's network information for another instance by calling to the network manager. This is implemented by keeping a cache of uuids of instances that live on this host. On each call, we pop one off of a list, pull the DB record, and try the call to the network API. If anything errors don't fail, as it's possible the instance has been deleted, etc. """ heal_interval = CONF.heal_instance_info_cache_interval if not heal_interval: return instance_uuids = getattr(self, '_instance_uuids_to_heal', []) instance = None LOG.debug('Starting heal instance info cache') if not instance_uuids: # The list of instances to heal is empty so rebuild it LOG.debug('Rebuilding the list of instances to heal') db_instances = objects.InstanceList.get_by_host( context, self.host, expected_attrs=[], use_slave=True) for inst in db_instances: # We don't want to refersh the cache for instances # which are building or deleting so don't put them # in the list. If they are building they will get # added to the list next time we build it. 
if (inst.vm_state == vm_states.BUILDING): LOG.debug('Skipping network cache update for instance ' 'because it is Building.', instance=inst) continue if (inst.task_state == task_states.DELETING): LOG.debug('Skipping network cache update for instance ' 'because it is being deleted.', instance=inst) continue if not instance: # Save the first one we find so we don't # have to get it again instance = inst else: instance_uuids.append(inst['uuid']) self._instance_uuids_to_heal = instance_uuids else: # Find the next valid instance on the list while instance_uuids: try: inst = objects.Instance.get_by_uuid( context, instance_uuids.pop(0), expected_attrs=['system_metadata', 'info_cache'], use_slave=True) except exception.InstanceNotFound: # Instance is gone. Try to grab another. continue # Check the instance hasn't been migrated if inst.host != self.host: LOG.debug('Skipping network cache update for instance ' 'because it has been migrated to another ' 'host.', instance=inst) # Check the instance isn't being deleting elif inst.task_state == task_states.DELETING: LOG.debug('Skipping network cache update for instance ' 'because it is being deleted.', instance=inst) else: instance = inst break if instance: # We have an instance now to refresh try: # Call to network API to get instance info.. this will # force an update to the instance's info_cache self._get_instance_nw_info(context, instance, use_slave=True) LOG.debug('Updated the network info_cache for instance', instance=instance) except Exception: LOG.error(_('An error occurred while refreshing the network ' 'cache.'), instance=instance, exc_info=True) else: LOG.debug("Didn't find any instances for network info cache " "update.") @periodic_task.periodic_task def _poll_rebooting_instances(self, context): if CONF.reboot_timeout > 0: filters = {'task_state': task_states.REBOOTING, 'host': self.host} rebooting = objects.InstanceList.get_by_filters( context, filters, expected_attrs=[], use_slave=True) to_poll = [] for instance in rebooting: if timeutils.is_older_than(instance['updated_at'], CONF.reboot_timeout): to_poll.append(instance) self.driver.poll_rebooting_instances(CONF.reboot_timeout, to_poll) @periodic_task.periodic_task def _poll_rescued_instances(self, context): if CONF.rescue_timeout > 0: filters = {'vm_state': vm_states.RESCUED, 'host': self.host} rescued_instances = objects.InstanceList.get_by_filters( context, filters, expected_attrs=["system_metadata"], use_slave=True) to_unrescue = [] for instance in rescued_instances: if timeutils.is_older_than(instance['launched_at'], CONF.rescue_timeout): to_unrescue.append(instance) for instance in to_unrescue: self.compute_api.unrescue(context, instance) @periodic_task.periodic_task def _poll_unconfirmed_resizes(self, context): if CONF.resize_confirm_window == 0: return migrations = objects.MigrationList.get_unconfirmed_by_dest_compute( context, CONF.resize_confirm_window, self.host, use_slave=True) migrations_info = dict(migration_count=len(migrations), confirm_window=CONF.resize_confirm_window) if migrations_info["migration_count"] > 0: LOG.info(_("Found %(migration_count)d unconfirmed migrations " "older than %(confirm_window)d seconds"), migrations_info) def _set_migration_to_error(migration, reason, **kwargs): LOG.warn(_("Setting migration %(migration_id)s to error: " "%(reason)s"), {'migration_id': migration['id'], 'reason': reason}, **kwargs) migration.status = 'error' migration.save(context.elevated()) for migration in migrations: instance_uuid = migration.instance_uuid 
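            # The migration is auto-confirmed below unless the instance is
            # missing, is in ERROR, or is no longer in the RESIZED/None
            # vm_state/task_state combination; in those cases the migration
            # is marked as errored instead.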
LOG.info(_("Automatically confirming migration " "%(migration_id)s for instance %(instance_uuid)s"), {'migration_id': migration.id, 'instance_uuid': instance_uuid}) expected_attrs = ['metadata', 'system_metadata'] try: instance = objects.Instance.get_by_uuid(context, instance_uuid, expected_attrs=expected_attrs, use_slave=True) except exception.InstanceNotFound: reason = (_("Instance %s not found") % instance_uuid) _set_migration_to_error(migration, reason) continue if instance['vm_state'] == vm_states.ERROR: reason = _("In ERROR state") _set_migration_to_error(migration, reason, instance=instance) continue vm_state = instance['vm_state'] task_state = instance['task_state'] if vm_state != vm_states.RESIZED or task_state is not None: reason = (_("In states %(vm_state)s/%(task_state)s, not " "RESIZED/None") % {'vm_state': vm_state, 'task_state': task_state}) _set_migration_to_error(migration, reason, instance=instance) continue try: self.compute_api.confirm_resize(context, instance, migration=migration) except Exception as e: LOG.info(_("Error auto-confirming resize: %s. " "Will retry later."), e, instance=instance) @compute_utils.periodic_task_spacing_warn("shelved_poll_interval") @periodic_task.periodic_task(spacing=CONF.shelved_poll_interval) def _poll_shelved_instances(self, context): if CONF.shelved_offload_time <= 0: return filters = {'vm_state': vm_states.SHELVED, 'host': self.host} shelved_instances = objects.InstanceList.get_by_filters( context, filters=filters, expected_attrs=['system_metadata'], use_slave=True) to_gc = [] for instance in shelved_instances: sys_meta = instance.system_metadata shelved_at = timeutils.parse_strtime(sys_meta['shelved_at']) if timeutils.is_older_than(shelved_at, CONF.shelved_offload_time): to_gc.append(instance) for instance in to_gc: try: instance.task_state = task_states.SHELVING_OFFLOADING instance.save() self.shelve_offload_instance(context, instance) except Exception: LOG.exception(_LE('Periodic task failed to offload instance.'), instance=instance) @periodic_task.periodic_task def _instance_usage_audit(self, context): if not CONF.instance_usage_audit: return if compute_utils.has_audit_been_run(context, self.conductor_api, self.host): return begin, end = utils.last_completed_audit_period() instances = objects.InstanceList.get_active_by_window_joined( context, begin, end, host=self.host, expected_attrs=['system_metadata', 'info_cache', 'metadata']) num_instances = len(instances) errors = 0 successes = 0 LOG.info(_("Running instance usage audit for" " host %(host)s from %(begin_time)s to " "%(end_time)s. %(number_instances)s" " instances."), dict(host=self.host, begin_time=begin, end_time=end, number_instances=num_instances)) start_time = time.time() compute_utils.start_instance_usage_audit(context, self.conductor_api, begin, end, self.host, num_instances) for instance in instances: try: self.conductor_api.notify_usage_exists( context, instance, ignore_missing_network_data=False) successes += 1 except Exception: LOG.exception(_LE('Failed to generate usage ' 'audit for instance ' 'on host %s'), self.host, instance=instance) errors += 1 compute_utils.finish_instance_usage_audit(context, self.conductor_api, begin, end, self.host, errors, "Instance usage audit ran " "for host %s, %s instances " "in %s seconds." 
% ( self.host, num_instances, time.time() - start_time)) @compute_utils.periodic_task_spacing_warn("bandwidth_poll_interval") @periodic_task.periodic_task(spacing=CONF.bandwidth_poll_interval) def _poll_bandwidth_usage(self, context): if (CONF.bandwidth_poll_interval <= 0 or not self._bw_usage_supported): return prev_time, start_time = utils.last_completed_audit_period() curr_time = time.time() if (curr_time - self._last_bw_usage_poll > CONF.bandwidth_poll_interval): self._last_bw_usage_poll = curr_time LOG.info(_("Updating bandwidth usage cache")) cells_update_interval = CONF.cells.bandwidth_update_interval if (cells_update_interval > 0 and curr_time - self._last_bw_usage_cell_update > cells_update_interval): self._last_bw_usage_cell_update = curr_time update_cells = True else: update_cells = False instances = objects.InstanceList.get_by_host(context, self.host, use_slave=True) try: bw_counters = self.driver.get_all_bw_counters(instances) except NotImplementedError: # NOTE(mdragon): Not all hypervisors have bandwidth polling # implemented yet. If they don't it doesn't break anything, # they just don't get the info in the usage events. # NOTE(PhilDay): Record that its not supported so we can # skip fast on future calls rather than waste effort getting # the list of instances. LOG.warning(_("Bandwidth usage not supported by hypervisor.")) self._bw_usage_supported = False return refreshed = timeutils.utcnow() for bw_ctr in bw_counters: # Allow switching of greenthreads between queries. greenthread.sleep(0) bw_in = 0 bw_out = 0 last_ctr_in = None last_ctr_out = None # TODO(geekinutah): Once bw_usage_cache object is created # need to revisit this and slaveify. usage = self.conductor_api.bw_usage_get(context, bw_ctr['uuid'], start_time, bw_ctr['mac_address']) if usage: bw_in = usage['bw_in'] bw_out = usage['bw_out'] last_ctr_in = usage['last_ctr_in'] last_ctr_out = usage['last_ctr_out'] else: # TODO(geekinutah): Same here, pls slaveify usage = self.conductor_api.bw_usage_get( context, bw_ctr['uuid'], prev_time, bw_ctr['mac_address']) if usage: last_ctr_in = usage['last_ctr_in'] last_ctr_out = usage['last_ctr_out'] if last_ctr_in is not None: if bw_ctr['bw_in'] < last_ctr_in: # counter rollover bw_in += bw_ctr['bw_in'] else: bw_in += (bw_ctr['bw_in'] - last_ctr_in) if last_ctr_out is not None: if bw_ctr['bw_out'] < last_ctr_out: # counter rollover bw_out += bw_ctr['bw_out'] else: bw_out += (bw_ctr['bw_out'] - last_ctr_out) self.conductor_api.bw_usage_update(context, bw_ctr['uuid'], bw_ctr['mac_address'], start_time, bw_in, bw_out, bw_ctr['bw_in'], bw_ctr['bw_out'], last_refreshed=refreshed, update_cells=update_cells) def _get_host_volume_bdms(self, context): """Return all block device mappings on a compute host.""" compute_host_bdms = [] instances = objects.InstanceList.get_by_host(context, self.host) for instance in instances: bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( context, instance.uuid) instance_bdms = [bdm for bdm in bdms if bdm.is_volume] compute_host_bdms.append(dict(instance=instance, instance_bdms=instance_bdms)) return compute_host_bdms def _update_volume_usage_cache(self, context, vol_usages): """Updates the volume usage cache table with a list of stats.""" for usage in vol_usages: # Allow switching of greenthreads between queries. 
greenthread.sleep(0) self.conductor_api.vol_usage_update(context, usage['volume'], usage['rd_req'], usage['rd_bytes'], usage['wr_req'], usage['wr_bytes'], usage['instance']) @periodic_task.periodic_task(spacing=CONF.volume_usage_poll_interval) def _poll_volume_usage(self, context, start_time=None): if CONF.volume_usage_poll_interval == 0: return if not start_time: start_time = utils.last_completed_audit_period()[1] compute_host_bdms = self._get_host_volume_bdms(context) if not compute_host_bdms: return LOG.debug("Updating volume usage cache") try: vol_usages = self.driver.get_all_volume_usage(context, compute_host_bdms) except NotImplementedError: return self._update_volume_usage_cache(context, vol_usages) @compute_utils.periodic_task_spacing_warn("sync_power_state_interval") @periodic_task.periodic_task(spacing=CONF.sync_power_state_interval, run_immediately=True) def _sync_power_states(self, context): """Align power states between the database and the hypervisor. To sync power state data we make a DB call to get the number of virtual machines known by the hypervisor and if the number matches the number of virtual machines known by the database, we proceed in a lazy loop, one database record at a time, checking if the hypervisor has the same power state as is in the database. """ db_instances = objects.InstanceList.get_by_host(context, self.host, use_slave=True) num_vm_instances = self.driver.get_num_instances() num_db_instances = len(db_instances) if num_vm_instances != num_db_instances: LOG.warn(_("Found %(num_db_instances)s in the database and " "%(num_vm_instances)s on the hypervisor."), {'num_db_instances': num_db_instances, 'num_vm_instances': num_vm_instances}) for db_instance in db_instances: # NOTE(melwitt): This must be synchronized as we query state from # two separate sources, the driver and the database. # They are set (in stop_instance) and read, in sync. @utils.synchronized(db_instance.uuid) def query_driver_power_state_and_sync(): self._query_driver_power_state_and_sync(context, db_instance) try: query_driver_power_state_and_sync() except Exception: LOG.exception(_LE("Periodic sync_power_state task had an " "error while processing an instance."), instance=db_instance) def _query_driver_power_state_and_sync(self, context, db_instance): if db_instance.task_state is not None: LOG.info(_LI("During sync_power_state the instance has a " "pending task (%(task)s). Skip."), {'task': db_instance.task_state}, instance=db_instance) return # No pending tasks. Now try to figure out the real vm_power_state. try: vm_instance = self.driver.get_info(db_instance) vm_power_state = vm_instance['state'] except exception.InstanceNotFound: vm_power_state = power_state.NOSTATE # Note(maoy): the above get_info call might take a long time, # for example, because of a broken libvirt driver. try: self._sync_instance_power_state(context, db_instance, vm_power_state, use_slave=True) except exception.InstanceNotFound: # NOTE(hanlind): If the instance gets deleted during sync, # silently ignore. pass def _sync_instance_power_state(self, context, db_instance, vm_power_state, use_slave=False): """Align instance power state between the database and hypervisor. If the instance is not found on the hypervisor, but is in the database, then a stop() API will be called on the instance. """ # We re-query the DB to get the latest instance info to minimize # (not eliminate) race condition. 
db_instance.refresh(use_slave=use_slave) db_power_state = db_instance.power_state vm_state = db_instance.vm_state if self.host != db_instance.host: # on the sending end of nova-compute _sync_power_state # may have yielded to the greenthread performing a live # migration; this in turn has changed the resident-host # for the VM; However, the instance is still active, it # is just in the process of migrating to another host. # This implies that the compute source must relinquish # control to the compute destination. LOG.info(_("During the sync_power process the " "instance has moved from " "host %(src)s to host %(dst)s") % {'src': self.host, 'dst': db_instance.host}, instance=db_instance) return elif db_instance.task_state is not None: # on the receiving end of nova-compute, it could happen # that the DB instance already report the new resident # but the actual VM has not showed up on the hypervisor # yet. In this case, let's allow the loop to continue # and run the state sync in a later round LOG.info(_("During sync_power_state the instance has a " "pending task (%(task)s). Skip."), {'task': db_instance.task_state}, instance=db_instance) return if vm_power_state != db_power_state: # power_state is always updated from hypervisor to db db_instance.power_state = vm_power_state db_instance.save() db_power_state = vm_power_state # Note(maoy): Now resolve the discrepancy between vm_state and # vm_power_state. We go through all possible vm_states. if vm_state in (vm_states.BUILDING, vm_states.RESCUED, vm_states.RESIZED, vm_states.SUSPENDED, vm_states.ERROR): # TODO(maoy): we ignore these vm_state for now. pass elif vm_state == vm_states.ACTIVE: # The only rational power state should be RUNNING if vm_power_state in (power_state.SHUTDOWN, power_state.CRASHED): LOG.warn(_("Instance shutdown by itself. Calling " "the stop API."), instance=db_instance) try: # Note(maoy): here we call the API instead of # brutally updating the vm_state in the database # to allow all the hooks and checks to be performed. if db_instance.shutdown_terminate: self.compute_api.delete(context, db_instance) else: self.compute_api.stop(context, db_instance) except Exception: # Note(maoy): there is no need to propagate the error # because the same power_state will be retrieved next # time and retried. # For example, there might be another task scheduled. LOG.exception(_LE("error during stop() in " "sync_power_state."), instance=db_instance) elif vm_power_state == power_state.SUSPENDED: LOG.warn(_("Instance is suspended unexpectedly. Calling " "the stop API."), instance=db_instance) try: self.compute_api.stop(context, db_instance) except Exception: LOG.exception(_LE("error during stop() in " "sync_power_state."), instance=db_instance) elif vm_power_state == power_state.PAUSED: # Note(maoy): a VM may get into the paused state not only # because the user request via API calls, but also # due to (temporary) external instrumentations. # Before the virt layer can reliably report the reason, # we simply ignore the state discrepancy. In many cases, # the VM state will go back to running after the external # instrumentation is done. See bug 1097806 for details. LOG.warn(_("Instance is paused unexpectedly. Ignore."), instance=db_instance) elif vm_power_state == power_state.NOSTATE: # Occasionally, depending on the status of the hypervisor, # which could be restarting for example, an instance may # not be found. Therefore just log the condition. LOG.warn(_("Instance is unexpectedly not found. 
Ignore."), instance=db_instance) elif vm_state == vm_states.STOPPED: if vm_power_state not in (power_state.NOSTATE, power_state.SHUTDOWN, power_state.CRASHED): LOG.warn(_("Instance is not stopped. Calling " "the stop API."), instance=db_instance) try: # NOTE(russellb) Force the stop, because normally the # compute API would not allow an attempt to stop a stopped # instance. self.compute_api.force_stop(context, db_instance) except Exception: LOG.exception(_LE("error during stop() in " "sync_power_state."), instance=db_instance) elif vm_state == vm_states.PAUSED: if vm_power_state in (power_state.SHUTDOWN, power_state.CRASHED): LOG.warn(_("Paused instance shutdown by itself. Calling " "the stop API."), instance=db_instance) try: self.compute_api.force_stop(context, db_instance) except Exception: LOG.exception(_LE("error during stop() in " "sync_power_state."), instance=db_instance) elif vm_state in (vm_states.SOFT_DELETED, vm_states.DELETED): if vm_power_state not in (power_state.NOSTATE, power_state.SHUTDOWN): # Note(maoy): this should be taken care of periodically in # _cleanup_running_deleted_instances(). LOG.warn(_("Instance is not (soft-)deleted."), instance=db_instance) @periodic_task.periodic_task def _reclaim_queued_deletes(self, context): """Reclaim instances that are queued for deletion.""" interval = CONF.reclaim_instance_interval if interval <= 0: LOG.debug("CONF.reclaim_instance_interval <= 0, skipping...") return # TODO(comstud, jichenjc): Dummy quota object for now See bug 1296414. # The only case that the quota might be inconsistent is # the compute node died between set instance state to SOFT_DELETED # and quota commit to DB. When compute node starts again # it will have no idea the reservation is committed or not or even # expired, since it's a rare case, so marked as todo. quotas = quotas_obj.Quotas.from_reservations(context, None) filters = {'vm_state': vm_states.SOFT_DELETED, 'task_state': None, 'host': self.host} instances = objects.InstanceList.get_by_filters( context, filters, expected_attrs=instance_obj.INSTANCE_DEFAULT_FIELDS, use_slave=True) for instance in instances: if self._deleted_old_enough(instance, interval): bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( context, instance.uuid) LOG.info(_('Reclaiming deleted instance'), instance=instance) try: self._delete_instance(context, instance, bdms, quotas) except Exception as e: LOG.warning(_("Periodic reclaim failed to delete " "instance: %s"), unicode(e), instance=instance) @periodic_task.periodic_task def update_available_resource(self, context): """See driver.get_available_resource() Periodic process that keeps that the compute host's understanding of resource availability and usage in sync with the underlying hypervisor. 
:param context: security context """ new_resource_tracker_dict = {} nodenames = set(self.driver.get_available_nodes()) for nodename in nodenames: rt = self._get_resource_tracker(nodename) rt.update_available_resource(context) new_resource_tracker_dict[nodename] = rt # Delete orphan compute node not reported by driver but still in db compute_nodes_in_db = self._get_compute_nodes_in_db(context) for cn in compute_nodes_in_db: if cn.hypervisor_hostname not in nodenames: LOG.audit(_("Deleting orphan compute node %s") % cn.id) cn.destroy() self._resource_tracker_dict = new_resource_tracker_dict def _get_compute_nodes_in_db(self, context): service = objects.Service.get_by_compute_host(context, self.host) if not service: LOG.error(_("No service record for host %s"), self.host) return [] return objects.ComputeNodeList.get_by_service(context, service) @periodic_task.periodic_task( spacing=CONF.running_deleted_instance_poll_interval) def _cleanup_running_deleted_instances(self, context): """Cleanup any instances which are erroneously still running after having been deleted. Valid actions to take are: 1. noop - do nothing 2. log - log which instances are erroneously running 3. reap - shutdown and cleanup any erroneously running instances 4. shutdown - power off *and disable* any erroneously running instances The use-case for this cleanup task is: for various reasons, it may be possible for the database to show an instance as deleted but for that instance to still be running on a host machine (see bug https://bugs.launchpad.net/nova/+bug/911366). This cleanup task is a cross-hypervisor utility for finding these zombied instances and either logging the discrepancy (likely what you should do in production), or automatically reaping the instances (more appropriate for dev environments). 
""" action = CONF.running_deleted_instance_action if action == "noop": return # NOTE(sirp): admin contexts don't ordinarily return deleted records with utils.temporary_mutation(context, read_deleted="yes"): for instance in self._running_deleted_instances(context): bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( context, instance.uuid, use_slave=True) if action == "log": LOG.warning(_("Detected instance with name label " "'%s' which is marked as " "DELETED but still present on host."), instance['name'], instance=instance) elif action == 'shutdown': LOG.info(_("Powering off instance with name label " "'%s' which is marked as " "DELETED but still present on host."), instance['name'], instance=instance) try: try: # disable starting the instance self.driver.set_bootable(instance, False) except NotImplementedError: LOG.warn(_("set_bootable is not implemented for " "the current driver")) # and power it off self.driver.power_off(instance) except Exception: msg = _("Failed to power off instance") LOG.warn(msg, instance=instance, exc_info=True) elif action == 'reap': LOG.info(_("Destroying instance with name label " "'%s' which is marked as " "DELETED but still present on host."), instance['name'], instance=instance) self.instance_events.clear_events_for_instance(instance) try: self._shutdown_instance(context, instance, bdms, notify=False) self._cleanup_volumes(context, instance['uuid'], bdms) except Exception as e: LOG.warning(_("Periodic cleanup failed to delete " "instance: %s"), unicode(e), instance=instance) else: raise Exception(_("Unrecognized value '%s'" " for CONF.running_deleted_" "instance_action") % action) def _running_deleted_instances(self, context): """Returns a list of instances nova thinks is deleted, but the hypervisor thinks is still running. 
""" timeout = CONF.running_deleted_instance_timeout filters = {'deleted': True, 'soft_deleted': False, 'host': self.host} instances = self._get_instances_on_driver(context, filters) return [i for i in instances if self._deleted_old_enough(i, timeout)] def _deleted_old_enough(self, instance, timeout): deleted_at = instance['deleted_at'] if isinstance(instance, obj_base.NovaObject) and deleted_at: deleted_at = deleted_at.replace(tzinfo=None) return (not deleted_at or timeutils.is_older_than(deleted_at, timeout)) @contextlib.contextmanager def _error_out_instance_on_exception(self, context, instance, quotas=None, instance_state=vm_states.ACTIVE): instance_uuid = instance['uuid'] try: yield except NotImplementedError as error: with excutils.save_and_reraise_exception(): if quotas: quotas.rollback() LOG.info(_("Setting instance back to %(state)s after: " "%(error)s") % {'state': instance_state, 'error': error}, instance_uuid=instance_uuid) self._instance_update(context, instance_uuid, vm_state=instance_state, task_state=None) except exception.InstanceFaultRollback as error: if quotas: quotas.rollback() LOG.info(_("Setting instance back to ACTIVE after: %s"), error, instance_uuid=instance_uuid) self._instance_update(context, instance_uuid, vm_state=vm_states.ACTIVE, task_state=None) raise error.inner_exception except Exception: LOG.exception(_LE('Setting instance vm_state to ERROR'), instance_uuid=instance_uuid) with excutils.save_and_reraise_exception(): if quotas: quotas.rollback() self._set_instance_error_state(context, instance) @aggregate_object_compat @wrap_exception() def add_aggregate_host(self, context, aggregate, host, slave_info): """Notify hypervisor of change (for hypervisor pools).""" try: self.driver.add_to_aggregate(context, aggregate, host, slave_info=slave_info) except NotImplementedError: LOG.debug('Hypervisor driver does not support ' 'add_aggregate_host') except exception.AggregateError: with excutils.save_and_reraise_exception(): self.driver.undo_aggregate_operation( context, aggregate.delete_host, aggregate, host) @aggregate_object_compat @wrap_exception() def remove_aggregate_host(self, context, host, slave_info, aggregate): """Removes a host from a physical hypervisor pool.""" try: self.driver.remove_from_aggregate(context, aggregate, host, slave_info=slave_info) except NotImplementedError: LOG.debug('Hypervisor driver does not support ' 'remove_aggregate_host') except (exception.AggregateError, exception.InvalidAggregateAction) as e: with excutils.save_and_reraise_exception(): self.driver.undo_aggregate_operation( context, aggregate.add_host, aggregate, host, isinstance(e, exception.AggregateError)) def _process_instance_event(self, instance, event): _event = self.instance_events.pop_instance_event(instance, event) if _event: LOG.debug('Processing event %(event)s', {'event': event.key}, instance=instance) _event.send(event) @wrap_exception() def external_instance_event(self, context, instances, events): # NOTE(danms): Some event types are handled by the manager, such # as when we're asked to update the instance's info_cache. If it's # not one of those, look for some thread(s) waiting for the event and # unblock them if so. 
for event in events: instance = [inst for inst in instances if inst.uuid == event.instance_uuid][0] if event.name == 'network-changed': self.network_api.get_instance_nw_info(context, instance) else: self._process_instance_event(instance, event) @compute_utils.periodic_task_spacing_warn("image_cache_manager_interval") @periodic_task.periodic_task(spacing=CONF.image_cache_manager_interval, external_process_ok=True) def _run_image_cache_manager_pass(self, context): """Run a single pass of the image cache manager.""" if not self.driver.capabilities["has_imagecache"]: return if CONF.image_cache_manager_interval == 0: return # Determine what other nodes use this storage storage_users.register_storage_use(CONF.instances_path, CONF.host) nodes = storage_users.get_storage_users(CONF.instances_path) # Filter all_instances to only include those nodes which share this # storage path. # TODO(mikal): this should be further refactored so that the cache # cleanup code doesn't know what those instances are, just a remote # count, and then this logic should be pushed up the stack. filters = {'deleted': False, 'soft_deleted': True, 'host': nodes} filtered_instances = objects.InstanceList.get_by_filters(context, filters, expected_attrs=[], use_slave=True) self.driver.manage_image_cache(context, filtered_instances) @periodic_task.periodic_task(spacing=CONF.instance_delete_interval) def _run_pending_deletes(self, context): """Retry any pending instance file deletes.""" if CONF.instance_delete_interval == 0: return LOG.debug('Cleaning up deleted instances') filters = {'deleted': True, 'soft_deleted': False, 'host': CONF.host, 'cleaned': False} attrs = ['info_cache', 'security_groups', 'system_metadata'] with utils.temporary_mutation(context, read_deleted='yes'): instances = objects.InstanceList.get_by_filters( context, filters, expected_attrs=attrs) LOG.debug('There are %d instances to clean', len(instances)) for instance in instances: attempts = int(instance.system_metadata.get('clean_attempts', '0')) LOG.debug('Instance has had %(attempts)s of %(max)s ' 'cleanup attempts', {'attempts': attempts, 'max': CONF.maximum_instance_delete_attempts}, instance=instance) if attempts < CONF.maximum_instance_delete_attempts: success = self.driver.delete_instance_files(instance) instance.system_metadata['clean_attempts'] = str(attempts + 1) if success: instance.cleaned = True with utils.temporary_mutation(context, read_deleted='yes'): instance.save(context)
viggates/nova
nova/compute/manager.py
Python
apache-2.0
269,840
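A minimal standalone sketch (not taken from Nova) of the counter-rollover handling used in _poll_bandwidth_usage above: when the hypervisor's cumulative byte counter comes back lower than the previous sample, the counter is assumed to have wrapped or been reset, so the raw value is counted instead of the difference. The names bw_delta, curr_ctr and last_ctr are illustrative only.

def bw_delta(curr_ctr, last_ctr):
    """Return the bytes transferred since the previous sample.

    Illustrative sketch of the rollover handling in _poll_bandwidth_usage,
    not Nova code: if the current cumulative counter is lower than the last
    sample, the counter wrapped or was reset, so count the raw value;
    otherwise count the difference.
    """
    if last_ctr is None:
        # No previous sample yet; attribute nothing to this period.
        return 0
    if curr_ctr < last_ctr:
        # counter rollover
        return curr_ctr
    return curr_ctr - last_ctr


# 500 bytes seen last time, 800 now -> 300 new bytes;
# a reset counter (800 -> 100) contributes its raw value.
assert bw_delta(800, 500) == 300
assert bw_delta(100, 800) == 100
assert bw_delta(100, None) == 0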
# coding: utf-8 """ Wavefront REST API Documentation <p>The Wavefront REST API enables you to interact with Wavefront servers using standard REST API tools. You can use the REST API to automate commonly executed operations such as automatically tagging sources.</p><p>When you make REST API calls outside the Wavefront REST API documentation you must add the header \"Authorization: Bearer &lt;&lt;API-TOKEN&gt;&gt;\" to your HTTP requests.</p> # noqa: E501 OpenAPI spec version: v2 Contact: [email protected] Generated by: https://github.com/swagger-api/swagger-codegen.git """ import pprint import re # noqa: F401 import six from wavefront_api_client.configuration import Configuration class ResponseContainerPagedRecentTracesSearch(object): """NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'response': 'PagedRecentTracesSearch', 'status': 'ResponseStatus' } attribute_map = { 'response': 'response', 'status': 'status' } def __init__(self, response=None, status=None, _configuration=None): # noqa: E501 """ResponseContainerPagedRecentTracesSearch - a model defined in Swagger""" # noqa: E501 if _configuration is None: _configuration = Configuration() self._configuration = _configuration self._response = None self._status = None self.discriminator = None if response is not None: self.response = response self.status = status @property def response(self): """Gets the response of this ResponseContainerPagedRecentTracesSearch. # noqa: E501 :return: The response of this ResponseContainerPagedRecentTracesSearch. # noqa: E501 :rtype: PagedRecentTracesSearch """ return self._response @response.setter def response(self, response): """Sets the response of this ResponseContainerPagedRecentTracesSearch. :param response: The response of this ResponseContainerPagedRecentTracesSearch. # noqa: E501 :type: PagedRecentTracesSearch """ self._response = response @property def status(self): """Gets the status of this ResponseContainerPagedRecentTracesSearch. # noqa: E501 :return: The status of this ResponseContainerPagedRecentTracesSearch. # noqa: E501 :rtype: ResponseStatus """ return self._status @status.setter def status(self, status): """Sets the status of this ResponseContainerPagedRecentTracesSearch. :param status: The status of this ResponseContainerPagedRecentTracesSearch. 
# noqa: E501 :type: ResponseStatus """ if self._configuration.client_side_validation and status is None: raise ValueError("Invalid value for `status`, must not be `None`") # noqa: E501 self._status = status def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(ResponseContainerPagedRecentTracesSearch, dict): for key, value in self.items(): result[key] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, ResponseContainerPagedRecentTracesSearch): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, ResponseContainerPagedRecentTracesSearch): return True return self.to_dict() != other.to_dict()
wavefrontHQ/python-client
wavefront_api_client/models/response_container_paged_recent_traces_search.py
Python
apache-2.0
4,945
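A small self-contained sketch of the serialization pattern the generated model above relies on: swagger_types drives to_dict(), and nested models are flattened recursively through their own to_dict(). The _Status and _Container classes below are stand-ins for illustration, not the real wavefront_api_client classes.

import pprint

import six


class _Status(object):
    # Stand-in for a nested model such as ResponseStatus.
    swagger_types = {'result': 'str', 'code': 'int'}

    def __init__(self, result=None, code=None):
        self.result = result
        self.code = code

    def to_dict(self):
        return {attr: getattr(self, attr) for attr in self.swagger_types}


class _Container(object):
    # Stand-in mirroring the generated container: each declared attribute is
    # walked, and anything exposing to_dict() is serialized recursively.
    swagger_types = {'response': 'object', 'status': '_Status'}

    def __init__(self, response=None, status=None):
        self.response = response
        self.status = status

    def to_dict(self):
        result = {}
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            else:
                result[attr] = value
        return result


# Usage sketch: nested models collapse into plain dicts.
container = _Container(response={'items': []}, status=_Status(result='OK', code=200))
pprint.pprint(container.to_dict())
# {'response': {'items': []}, 'status': {'code': 200, 'result': 'OK'}}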
#! /usr/bin/python # -*- coding: utf-8 -*- """ - 1. This model has 1,068,298 paramters and quantization compression strategy(weight:8 bits, active: 8 bits here, you can change the setting), after 705 epoches' training with GPU, test accurcy of 84.0% was found. - 2. For simplified CNN layers see "Convolutional layer (Simplified)" in read the docs website. - 3. Data augmentation without TFRecord see `tutorial_image_preprocess.py` !! Links ------- .. paper:https://arxiv.org/abs/1712.05877 Note ------ The optimizers between official code and this code are different. Description ----------- The images are processed as follows: .. They are cropped to 24 x 24 pixels, centrally for evaluation or randomly for training. .. They are approximately whitened to make the model insensitive to dynamic range. For training, we additionally apply a series of random distortions to artificially increase the data set size: .. Randomly flip the image from left to right. .. Randomly distort the image brightness. .. Randomly distort the image contrast. Speed Up -------- Reading images from disk and distorting them can use a non-trivial amount of processing time. To prevent these operations from slowing down training, we run them inside 16 separate threads which continuously fill a TensorFlow queue. """ import multiprocessing import time import numpy as np import tensorflow as tf import tensorlayer as tl from tensorlayer.layers import (Dense, Flatten, Input, MaxPool2d, QuanConv2dWithBN, QuanDense) from tensorlayer.models import Model tl.logging.set_verbosity(tl.logging.DEBUG) # Download data, and convert to TFRecord format, see ```tutorial_tfrecord.py``` # prepare cifar10 data X_train, y_train, X_test, y_test = tl.files.load_cifar10_dataset(shape=(-1, 32, 32, 3), plotable=False) def model(input_shape, n_classes, bitW, bitA): in_net = Input(shape=input_shape, name='input') net = QuanConv2dWithBN(64, (5, 5), (1, 1), act='relu', padding='SAME', bitW=bitW, bitA=bitA, name='qcnnbn1')(in_net) net = MaxPool2d((3, 3), (2, 2), padding='SAME', name='pool1')(net) net = QuanConv2dWithBN(64, (5, 5), (1, 1), padding='SAME', act='relu', bitW=bitW, bitA=bitA, name='qcnnbn2')(net) net = MaxPool2d((3, 3), (2, 2), padding='SAME', name='pool2')(net) net = Flatten(name='flatten')(net) net = QuanDense(384, act=tf.nn.relu, bitW=bitW, bitA=bitA, name='qd1relu')(net) net = QuanDense(192, act=tf.nn.relu, bitW=bitW, bitA=bitA, name='qd2relu')(net) net = Dense(n_classes, act=None, name='output')(net) net = Model(inputs=in_net, outputs=net, name='dorefanet') return net # training settings bitW = 8 bitA = 8 net = model([None, 24, 24, 3], n_classes=10, bitW=bitW, bitA=bitA) batch_size = 128 n_epoch = 50000 learning_rate = 0.0001 print_freq = 5 n_step_epoch = int(len(y_train) / batch_size) n_step = n_epoch * n_step_epoch shuffle_buffer_size = 128 optimizer = tf.optimizers.Adam(learning_rate) cost = tl.cost.cross_entropy def generator_train(): inputs = X_train targets = y_train if len(inputs) != len(targets): raise AssertionError("The length of inputs and targets should be equal") for _input, _target in zip(inputs, targets): # yield _input.encode('utf-8'), _target.encode('utf-8') yield _input, _target def generator_test(): inputs = X_test targets = y_test if len(inputs) != len(targets): raise AssertionError("The length of inputs and targets should be equal") for _input, _target in zip(inputs, targets): # yield _input.encode('utf-8'), _target.encode('utf-8') yield _input, _target def _map_fn_train(img, target): # 1. 
Randomly crop a [height, width] section of the image. img = tf.image.random_crop(img, [24, 24, 3]) # 2. Randomly flip the image horizontally. img = tf.image.random_flip_left_right(img) # 3. Randomly change brightness. img = tf.image.random_brightness(img, max_delta=63) # 4. Randomly change contrast. img = tf.image.random_contrast(img, lower=0.2, upper=1.8) # 5. Subtract off the mean and divide by the variance of the pixels. img = tf.image.per_image_standardization(img) target = tf.reshape(target, ()) return img, target def _map_fn_test(img, target): # 1. Crop the central [height, width] of the image. img = tf.image.resize_with_pad(img, 24, 24) # 2. Subtract off the mean and divide by the variance of the pixels. img = tf.image.per_image_standardization(img) img = tf.reshape(img, (24, 24, 3)) target = tf.reshape(target, ()) return img, target def _train_step(network, X_batch, y_batch, cost, train_op=tf.optimizers.Adam(learning_rate=0.0001), acc=None): with tf.GradientTape() as tape: y_pred = network(X_batch) _loss = cost(y_pred, y_batch) grad = tape.gradient(_loss, network.trainable_weights) train_op.apply_gradients(zip(grad, network.trainable_weights)) if acc is not None: _acc = acc(y_pred, y_batch) return _loss, _acc else: return _loss, None def accuracy(_logits, y_batch): return np.mean(np.equal(np.argmax(_logits, 1), y_batch)) # dataset API and augmentation train_ds = tf.data.Dataset.from_generator( generator_train, output_types=(tf.float32, tf.int32) ) # , output_shapes=((24, 24, 3), (1))) # train_ds = train_ds.repeat(n_epoch) train_ds = train_ds.shuffle(shuffle_buffer_size) train_ds = train_ds.prefetch(buffer_size=4096) train_ds = train_ds.batch(batch_size) train_ds = train_ds.map(_map_fn_train, num_parallel_calls=multiprocessing.cpu_count()) # value = train_ds.make_one_shot_iterator().get_next() test_ds = tf.data.Dataset.from_generator( generator_test, output_types=(tf.float32, tf.int32) ) # , output_shapes=((24, 24, 3), (1))) # test_ds = test_ds.shuffle(shuffle_buffer_size) # test_ds = test_ds.repeat(n_epoch) test_ds = test_ds.prefetch(buffer_size=4096) test_ds = test_ds.batch(batch_size) test_ds = test_ds.map(_map_fn_test, num_parallel_calls=multiprocessing.cpu_count()) # value_test = test_ds.make_one_shot_iterator().get_next() for epoch in range(n_epoch): start_time = time.time() train_loss, train_acc, n_iter = 0, 0, 0 net.train() for X_batch, y_batch in train_ds: _loss, acc = _train_step(net, X_batch, y_batch, cost=cost, train_op=optimizer, acc=accuracy) train_loss += _loss train_acc += acc n_iter += 1 # use training and evaluation sets to evaluate the model every print_freq epoch if epoch + 1 == 1 or (epoch + 1) % print_freq == 0: print("Epoch {} of {} took {}".format(epoch + 1, n_epoch, time.time() - start_time)) print(" train loss: {}".format(train_loss / n_iter)) print(" train acc: {}".format(train_acc / n_iter)) net.eval() val_loss, val_acc, n_val_iter = 0, 0, 0 for X_batch, y_batch in test_ds: _logits = net(X_batch) # is_train=False, disable dropout val_loss += tl.cost.cross_entropy(_logits, y_batch, name='eval_loss') val_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch)) n_val_iter += 1 print(" val loss: {}".format(val_loss / n_val_iter)) print(" val acc: {}".format(val_acc / n_val_iter)) # use testing data to evaluate the model net.eval() test_loss, test_acc, n_iter = 0, 0, 0 for X_batch, y_batch in test_ds: _logits = net(X_batch) test_loss += tl.cost.cross_entropy(_logits, y_batch, name='test_loss') test_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch)) 
n_iter += 1 print(" test loss: {}".format(test_loss / n_iter)) print(" test acc: {}".format(test_acc / n_iter))
zsdonghao/tensorlayer
examples/quantized_net/tutorial_quanconv_cifar10.py
Python
apache-2.0
7,696
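A quick numpy-only check of the accuracy helper defined in the script above (np.mean(np.equal(np.argmax(_logits, 1), y_batch))): argmax picks the predicted class per row, and the mean of the element-wise comparison is the batch accuracy. The toy logits and labels below are made up for illustration.

import numpy as np


def accuracy(_logits, y_batch):
    # Same formula as in the tutorial script above.
    return np.mean(np.equal(np.argmax(_logits, 1), y_batch))


# Three samples, three classes; predicted classes are 2, 0, 1.
logits = np.array([[0.1, 0.2, 0.7],
                   [0.8, 0.1, 0.1],
                   [0.3, 0.4, 0.3]])
labels = np.array([2, 0, 2])  # the last sample is misclassified
print(accuracy(logits, labels))  # 0.666...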
__author__ = 'thorsteinn'


def get_all_ship_fields(db):
    """Return every distinct field name found across all ship records in db.

    ``db`` maps a ship identifier to a dict of that ship's fields; the result
    lists each field name once, in the order it is first encountered.
    """
    fields = []
    for ship in db.keys():
        ship_record = db[ship]
        for key in ship_record.keys():
            if key not in fields:
                # Collect each field name only once.
                fields.append(key)
    return fields
ThorsteinnAdal/webcrawls_in_singapore_shippinglane
db_format_helpers/get_all_ship_fields.py
Python
apache-2.0
302
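A small usage sketch for get_all_ship_fields above. The toy ship records are invented for illustration, and the import assumes the module layout shown in the path above is importable as a package.

from db_format_helpers.get_all_ship_fields import get_all_ship_fields  # assumes package layout above

# A toy "db": ship name -> record dict, as the function expects.
db = {
    'MV Example': {'imo': '1234567', 'flag': 'SG', 'dwt': 52000},
    'SS Sample': {'imo': '7654321', 'flag': 'PA', 'draft': 11.5},
}

print(get_all_ship_fields(db))
# ['imo', 'flag', 'dwt', 'draft']  (exact order depends on dict iteration order)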
# Numbers
print(2 + 2)  # 4
print(50 - 5*6)  # 20
print((50 - 5*6) / 4)  # 5.0
print(8/5)  # 1.6
print(17 / 3)  # 5.666666666666667, a float
print(17 // 3)  # 5, floor division
print(17 % 3)  # 2, modulo (remainder)
print(5*3+2)  # 17, multiplication/division before addition/subtraction
print(2+5*3)  # 17, multiplication/division before addition/subtraction
print(5**2)  # 5 squared: 25
print(5**3)  # 5 cubed: 125
print(2**7)  # 2 to the 7th power: 128
print("--separator--")

# Assign values to variables with "="; no type declaration is needed
width = 50
height = 10*10
print(width*height)  # 5000
# n
# n is not defined: NameError: name 'n' is not defined
print(4 * 3.75 - 1)
tax = 12.5 / 100
price = 100.50
print(price * tax)
# print(price+_)  # works in the interactive console, but not in this file: NameError: name '_' is not defined
# round(_, 2)     # works in the interactive console, but not in this file: NameError: name '_' is not defined
print("--separator--")

# Strings
print('spam eggs')
print('doesn\'t')  # \' escapes the single quote
print("doesn't")  # double quotes also work; the ' needs no escaping here
print('"Yes," he said.')  # double quotes inside single quotes are plain characters
print("\"Yes,\" he said.")  # double quotes inside double quotes must be escaped
print('"Isn\'t," she said.')  # a single quote inside single quotes must be escaped; the bare console echo shows '"Isn\'t," she said.'
s = 'First line.\nSecond line.'
print(s)  # print() interprets \n as a newline; the bare console echo shows First line.\nSecond line.
print("----")
print('C:\some\name')  # here \n is interpreted as an escape sequence
print(r'C:\some\name')  # the r prefix makes a raw string, so nothing is escaped
print("""\
Usage: thingy [OPTIONS]
     -h                        Display this usage message
     -H hostname               Hostname to connect to
""")
# """...""" or '''...''' behaves like an HTML <p> block: multiple lines, layout preserved
# without the trailing \ an extra blank line is printed
print(3 * 'un' + 'ium')  # strings can be repeated with an integer
print('Py' 'thon')  # Python: adjacent string literals are concatenated
prefix = 'Py'
# prefix 'thon'  # a variable and a string literal cannot be joined this way
# print(('un' * 3)* 'ium')
print(prefix + 'thon')  # strings are concatenated with +
text = ('Put several strings within parentheses to have them joined together.')
print(text)
word = 'Python'
print(word[0])  # strings can be indexed like arrays
print(word[5])
print(word[-1])  # last character
print(word[-2])  # second-to-last character
print(word[-6])  # sixth character from the end
print(word[0:2])  # 2 characters starting at index 0
print(word[2:5])  # characters from index 2 up to (not including) index 5
print(word[:2] + word[2:])  # s[:i] + s[i:] = s for any integer i
print(word[:4] + word[4:])
print(word[:70] + word[70:])
print(word[:2])  # first 2 characters
print(word[4:])  # from index 4 to the end
print(word[-2:])  # from the second-to-last character to the end
print("---------")
#  +---+---+---+---+---+---+
#  | P | y | t | h | o | n |
#  +---+---+---+---+---+---+
#  0   1   2   3   4   5   6
# -6  -5  -4  -3  -2  -1
print(word[4:42])  # from index 4 to the end
print(word[42:])  # from index 42 to the end: empty string
# word[0] = 'J'
# word[2:] = 'py'  strings cannot be modified in place
print('J' + word[1:])  # but a new string can be built by concatenation
print(word[:2] + 'py')
s = 'supercalifragilisticexpialidocious'
print(len(s))  # length of the string
wurainren/leaningPython
com/wurainren/base/Calculator.py
Python
apache-2.0
3,849
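A short follow-up to the slicing comments in the tutorial above: for any integer i, s[:i] + s[i:] == s, and out-of-range slices return an empty string instead of raising, unlike out-of-range indexing.

word = 'Python'

# s[:i] + s[i:] == s holds for any i, including out-of-range values.
for i in (0, 2, 4, 42, -3):
    assert word[:i] + word[i:] == word

print(word[42:])   # '' -- slicing never raises IndexError, unlike word[42]
print(word[-2:])   # 'on'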
# coding: utf-8 """ Server API Reference for Server API (REST/Json) OpenAPI spec version: 2.0.6 Generated by: https://github.com/swagger-api/swagger-codegen.git """ from __future__ import absolute_import import sys import os import re # python 2 and python 3 compatibility library from six import iteritems from ..configuration import Configuration from ..api_client import ApiClient class DirectorsApi(object): """ NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. Ref: https://github.com/swagger-api/swagger-codegen """ def __init__(self, api_client=None): config = Configuration() if api_client: self.api_client = api_client else: if not config.api_client: config.api_client = ApiClient() self.api_client = config.api_client def attach_director_to_category(self, category_id, director_id, **kwargs): """ Attach director to category This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.attach_director_to_category(category_id, director_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int category_id: Category ID to fetch (required) :param int director_id: Director ID to attach (required) :return: None If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.attach_director_to_category_with_http_info(category_id, director_id, **kwargs) else: (data) = self.attach_director_to_category_with_http_info(category_id, director_id, **kwargs) return data def attach_director_to_category_with_http_info(self, category_id, director_id, **kwargs): """ Attach director to category This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.attach_director_to_category_with_http_info(category_id, director_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int category_id: Category ID to fetch (required) :param int director_id: Director ID to attach (required) :return: None If the method is called asynchronously, returns the request thread. 
""" all_params = ['category_id', 'director_id'] all_params.append('callback') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method attach_director_to_category" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'category_id' is set if ('category_id' not in params) or (params['category_id'] is None): raise ValueError("Missing the required parameter `category_id` when calling `attach_director_to_category`") # verify the required parameter 'director_id' is set if ('director_id' not in params) or (params['director_id'] is None): raise ValueError("Missing the required parameter `director_id` when calling `attach_director_to_category`") collection_formats = {} resource_path = '/categories/{category_id}/directors'.replace('{format}', 'json') path_params = {} if 'category_id' in params: path_params['category_id'] = params['category_id'] query_params = {} header_params = {} form_params = [] local_var_files = {} if 'director_id' in params: form_params.append(('director_id', params['director_id'])) self.api_client.set_default_header('Content-Type', 'application/x-www-form-urlencoded') body_params = None # Authentication setting auth_settings = ['ApiClientId', 'ApiClientSecret'] return self.api_client.call_api(resource_path, 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type=None, auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def attach_director_to_product(self, product_id, director_id, **kwargs): """ Attach director to product This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.attach_director_to_product(product_id, director_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int product_id: Product ID to fetch (required) :param int director_id: Director ID to attach (required) :return: None If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.attach_director_to_product_with_http_info(product_id, director_id, **kwargs) else: (data) = self.attach_director_to_product_with_http_info(product_id, director_id, **kwargs) return data def attach_director_to_product_with_http_info(self, product_id, director_id, **kwargs): """ Attach director to product This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.attach_director_to_product_with_http_info(product_id, director_id, callback=callback_function) :param callback function: The callback function for asynchronous request. 
(optional) :param int product_id: Product ID to fetch (required) :param int director_id: Director ID to attach (required) :return: None If the method is called asynchronously, returns the request thread. """ all_params = ['product_id', 'director_id'] all_params.append('callback') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method attach_director_to_product" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'product_id' is set if ('product_id' not in params) or (params['product_id'] is None): raise ValueError("Missing the required parameter `product_id` when calling `attach_director_to_product`") # verify the required parameter 'director_id' is set if ('director_id' not in params) or (params['director_id'] is None): raise ValueError("Missing the required parameter `director_id` when calling `attach_director_to_product`") collection_formats = {} resource_path = '/products/{product_id}/directors'.replace('{format}', 'json') path_params = {} if 'product_id' in params: path_params['product_id'] = params['product_id'] query_params = {} header_params = {} form_params = [] local_var_files = {} if 'director_id' in params: form_params.append(('director_id', params['director_id'])) self.api_client.set_default_header('Content-Type', 'application/x-www-form-urlencoded') body_params = None # Authentication setting auth_settings = ['ApiClientId', 'ApiClientSecret'] return self.api_client.call_api(resource_path, 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type=None, auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def create_director(self, body, **kwargs): """ Create new director This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.create_director(body, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param CreateDirectorRequest body: Directory settings (required) :return: Director If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.create_director_with_http_info(body, **kwargs) else: (data) = self.create_director_with_http_info(body, **kwargs) return data def create_director_with_http_info(self, body, **kwargs): """ Create new director This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.create_director_with_http_info(body, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param CreateDirectorRequest body: Directory settings (required) :return: Director If the method is called asynchronously, returns the request thread. 
""" all_params = ['body'] all_params.append('callback') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method create_director" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'body' is set if ('body' not in params) or (params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `create_director`") collection_formats = {} resource_path = '/directors'.replace('{format}', 'json') path_params = {} query_params = {} header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] self.api_client.set_default_header('Content-Type', 'application/json') # Authentication setting auth_settings = ['ApiClientId', 'ApiClientSecret'] return self.api_client.call_api(resource_path, 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='Director', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def delete_director(self, director_id, **kwargs): """ Delete director This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.delete_director(director_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int director_id: (required) :return: None If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.delete_director_with_http_info(director_id, **kwargs) else: (data) = self.delete_director_with_http_info(director_id, **kwargs) return data def delete_director_with_http_info(self, director_id, **kwargs): """ Delete director This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.delete_director_with_http_info(director_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int director_id: (required) :return: None If the method is called asynchronously, returns the request thread. 
""" all_params = ['director_id'] all_params.append('callback') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method delete_director" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'director_id' is set if ('director_id' not in params) or (params['director_id'] is None): raise ValueError("Missing the required parameter `director_id` when calling `delete_director`") collection_formats = {} resource_path = '/directors/{director_id}'.replace('{format}', 'json') path_params = {} if 'director_id' in params: path_params['director_id'] = params['director_id'] query_params = {} header_params = {} form_params = [] local_var_files = {} body_params = None # Authentication setting auth_settings = ['ApiClientId', 'ApiClientSecret'] return self.api_client.call_api(resource_path, 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type=None, auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def detach_director_from_category(self, category_id, director_id, **kwargs): """ Detach director from category This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.detach_director_from_category(category_id, director_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int category_id: Category ID to fetch (required) :param int director_id: Director ID to detach (required) :return: None If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.detach_director_from_category_with_http_info(category_id, director_id, **kwargs) else: (data) = self.detach_director_from_category_with_http_info(category_id, director_id, **kwargs) return data def detach_director_from_category_with_http_info(self, category_id, director_id, **kwargs): """ Detach director from category This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.detach_director_from_category_with_http_info(category_id, director_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int category_id: Category ID to fetch (required) :param int director_id: Director ID to detach (required) :return: None If the method is called asynchronously, returns the request thread. 
""" all_params = ['category_id', 'director_id'] all_params.append('callback') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method detach_director_from_category" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'category_id' is set if ('category_id' not in params) or (params['category_id'] is None): raise ValueError("Missing the required parameter `category_id` when calling `detach_director_from_category`") # verify the required parameter 'director_id' is set if ('director_id' not in params) or (params['director_id'] is None): raise ValueError("Missing the required parameter `director_id` when calling `detach_director_from_category`") collection_formats = {} resource_path = '/categories/{category_id}/directors/{director_id}'.replace('{format}', 'json') path_params = {} if 'category_id' in params: path_params['category_id'] = params['category_id'] if 'director_id' in params: path_params['director_id'] = params['director_id'] query_params = {} header_params = {} form_params = [] local_var_files = {} body_params = None # Authentication setting auth_settings = ['ApiClientId', 'ApiClientSecret'] return self.api_client.call_api(resource_path, 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type=None, auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def get_category_directors(self, category_id, **kwargs): """ Get directors attached to category This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_category_directors(category_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int category_id: Category ID to fetch (required) :param int page: :param int per_page: :return: CategoryDirectorsListResponse If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.get_category_directors_with_http_info(category_id, **kwargs) else: (data) = self.get_category_directors_with_http_info(category_id, **kwargs) return data def get_category_directors_with_http_info(self, category_id, **kwargs): """ Get directors attached to category This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_category_directors_with_http_info(category_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int category_id: Category ID to fetch (required) :param int page: :param int per_page: :return: CategoryDirectorsListResponse If the method is called asynchronously, returns the request thread. 
""" all_params = ['category_id', 'page', 'per_page'] all_params.append('callback') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_category_directors" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'category_id' is set if ('category_id' not in params) or (params['category_id'] is None): raise ValueError("Missing the required parameter `category_id` when calling `get_category_directors`") collection_formats = {} resource_path = '/categories/{category_id}/directors'.replace('{format}', 'json') path_params = {} if 'category_id' in params: path_params['category_id'] = params['category_id'] query_params = {} if 'page' in params: query_params['page'] = params['page'] if 'per_page' in params: query_params['per_page'] = params['per_page'] header_params = {} form_params = [] local_var_files = {} body_params = None # Authentication setting auth_settings = ['ApiClientId', 'ApiClientSecret'] return self.api_client.call_api(resource_path, 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='CategoryDirectorsListResponse', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def get_director(self, director_id, **kwargs): """ Get Director This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_director(director_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int director_id: Director ID to fetch (required) :return: DirectorResponse If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.get_director_with_http_info(director_id, **kwargs) else: (data) = self.get_director_with_http_info(director_id, **kwargs) return data def get_director_with_http_info(self, director_id, **kwargs): """ Get Director This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_director_with_http_info(director_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int director_id: Director ID to fetch (required) :return: DirectorResponse If the method is called asynchronously, returns the request thread. 
""" all_params = ['director_id'] all_params.append('callback') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_director" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'director_id' is set if ('director_id' not in params) or (params['director_id'] is None): raise ValueError("Missing the required parameter `director_id` when calling `get_director`") collection_formats = {} resource_path = '/directors/{director_id}'.replace('{format}', 'json') path_params = {} if 'director_id' in params: path_params['director_id'] = params['director_id'] query_params = {} header_params = {} form_params = [] local_var_files = {} body_params = None # Authentication setting auth_settings = ['ApiClientId', 'ApiClientSecret'] return self.api_client.call_api(resource_path, 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='DirectorResponse', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def get_director_cover_image(self, director_id, **kwargs): """ Get cover image of a director This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_director_cover_image(director_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int director_id: Director ID to fetch (required) :return: ImageResponse If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.get_director_cover_image_with_http_info(director_id, **kwargs) else: (data) = self.get_director_cover_image_with_http_info(director_id, **kwargs) return data def get_director_cover_image_with_http_info(self, director_id, **kwargs): """ Get cover image of a director This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_director_cover_image_with_http_info(director_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int director_id: Director ID to fetch (required) :return: ImageResponse If the method is called asynchronously, returns the request thread. 
""" all_params = ['director_id'] all_params.append('callback') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_director_cover_image" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'director_id' is set if ('director_id' not in params) or (params['director_id'] is None): raise ValueError("Missing the required parameter `director_id` when calling `get_director_cover_image`") collection_formats = {} resource_path = '/directors/{director_id}/cover'.replace('{format}', 'json') path_params = {} if 'director_id' in params: path_params['director_id'] = params['director_id'] query_params = {} header_params = {} form_params = [] local_var_files = {} body_params = None # Authentication setting auth_settings = ['ApiClientId', 'ApiClientSecret'] return self.api_client.call_api(resource_path, 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='ImageResponse', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def get_director_products(self, director_id, **kwargs): """ Get director products This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_director_products(director_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int director_id: Director ID to fetch (required) :param int page: :param int per_page: :param str sort_by: Sort by this attribute (id by default) :param str sort_direction: Sorting direction (asc by default) :param str ip: Filter by user IP :param str features: ``` features[*][value]=string&features[*][operator]=strict&features[1][value]=string&features[1][operator]=strict _______________ { \"*\": { \"value\": \"string\", \"operator\": \"strict\" }, \"1\": { \"value\": \"string\", \"operator\": \"contains\" } } ``` Operator can be: strict, contains, between, in, gt (greater than), lt (lower than). To search on all features, you can pass * as featureId. :param str filters: ``` name[value]=string&name][operator]=contains&date_add[value]=string&date_add[operator]=lt _______________ { \"name\": { \"value\": \"string\", \"operator\": \"contains\" }, \"date_add\": { \"value\": \"string\", \"operator\": \"lt\" } } ``` Operator can be: strict, contains, between, in, gt (greater than), lt (lower than). :return: DirectorProductListResponse If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.get_director_products_with_http_info(director_id, **kwargs) else: (data) = self.get_director_products_with_http_info(director_id, **kwargs) return data def get_director_products_with_http_info(self, director_id, **kwargs): """ Get director products This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_director_products_with_http_info(director_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int director_id: Director ID to fetch (required) :param int page: :param int per_page: :param str sort_by: Sort by this attribute (id by default) :param str sort_direction: Sorting direction (asc by default) :param str ip: Filter by user IP :param str features: ``` features[*][value]=string&features[*][operator]=strict&features[1][value]=string&features[1][operator]=strict _______________ { \"*\": { \"value\": \"string\", \"operator\": \"strict\" }, \"1\": { \"value\": \"string\", \"operator\": \"contains\" } } ``` Operator can be: strict, contains, between, in, gt (greater than), lt (lower than). To search on all features, you can pass * as featureId. :param str filters: ``` name[value]=string&name][operator]=contains&date_add[value]=string&date_add[operator]=lt _______________ { \"name\": { \"value\": \"string\", \"operator\": \"contains\" }, \"date_add\": { \"value\": \"string\", \"operator\": \"lt\" } } ``` Operator can be: strict, contains, between, in, gt (greater than), lt (lower than). :return: DirectorProductListResponse If the method is called asynchronously, returns the request thread. """ all_params = ['director_id', 'page', 'per_page', 'sort_by', 'sort_direction', 'ip', 'features', 'filters'] all_params.append('callback') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_director_products" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'director_id' is set if ('director_id' not in params) or (params['director_id'] is None): raise ValueError("Missing the required parameter `director_id` when calling `get_director_products`") collection_formats = {} resource_path = '/directors/{director_id}/products'.replace('{format}', 'json') path_params = {} if 'director_id' in params: path_params['director_id'] = params['director_id'] query_params = {} if 'page' in params: query_params['page'] = params['page'] if 'per_page' in params: query_params['per_page'] = params['per_page'] if 'sort_by' in params: query_params['sort_by'] = params['sort_by'] if 'sort_direction' in params: query_params['sort_direction'] = params['sort_direction'] if 'ip' in params: query_params['ip'] = params['ip'] if 'features' in params: query_params['features'] = params['features'] if 'filters' in params: query_params['filters'] = params['filters'] header_params = {} form_params = [] local_var_files = {} body_params = None # Authentication setting auth_settings = ['ApiClientId', 'ApiClientSecret'] return self.api_client.call_api(resource_path, 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='DirectorProductListResponse', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def get_director_products_role(self, 
director_id, **kwargs): """ Get Products linked to Product with their role This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_director_products_role(director_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int director_id: Director ID to fetch (required) :param int page: :param int per_page: :return: DirectorProductRoleListResponse If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.get_director_products_role_with_http_info(director_id, **kwargs) else: (data) = self.get_director_products_role_with_http_info(director_id, **kwargs) return data def get_director_products_role_with_http_info(self, director_id, **kwargs): """ Get Products linked to Product with their role This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_director_products_role_with_http_info(director_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int director_id: Director ID to fetch (required) :param int page: :param int per_page: :return: DirectorProductRoleListResponse If the method is called asynchronously, returns the request thread. """ all_params = ['director_id', 'page', 'per_page'] all_params.append('callback') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_director_products_role" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'director_id' is set if ('director_id' not in params) or (params['director_id'] is None): raise ValueError("Missing the required parameter `director_id` when calling `get_director_products_role`") collection_formats = {} resource_path = '/directors/{director_id}/products-role'.replace('{format}', 'json') path_params = {} if 'director_id' in params: path_params['director_id'] = params['director_id'] query_params = {} if 'page' in params: query_params['page'] = params['page'] if 'per_page' in params: query_params['per_page'] = params['per_page'] header_params = {} form_params = [] local_var_files = {} body_params = None # Authentication setting auth_settings = ['ApiClientId', 'ApiClientSecret'] return self.api_client.call_api(resource_path, 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='DirectorProductRoleListResponse', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def get_directors(self, **kwargs): """ Get directors list This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_directors(callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int page: :param int per_page: :return: DirectorListResponse If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.get_directors_with_http_info(**kwargs) else: (data) = self.get_directors_with_http_info(**kwargs) return data def get_directors_with_http_info(self, **kwargs): """ Get directors list This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_directors_with_http_info(callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int page: :param int per_page: :return: DirectorListResponse If the method is called asynchronously, returns the request thread. """ all_params = ['page', 'per_page'] all_params.append('callback') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_directors" % key ) params[key] = val del params['kwargs'] collection_formats = {} resource_path = '/directors'.replace('{format}', 'json') path_params = {} query_params = {} if 'page' in params: query_params['page'] = params['page'] if 'per_page' in params: query_params['per_page'] = params['per_page'] header_params = {} form_params = [] local_var_files = {} body_params = None # Authentication setting auth_settings = ['ApiClientId', 'ApiClientSecret'] return self.api_client.call_api(resource_path, 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='DirectorListResponse', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def get_product_directors(self, product_id, **kwargs): """ Get directors of a product This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_product_directors(product_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int product_id: Product ID to fetch (required) :param int page: :param int per_page: :param str image_type: :return: DirectorListResponse If the method is called asynchronously, returns the request thread. 
""" kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.get_product_directors_with_http_info(product_id, **kwargs) else: (data) = self.get_product_directors_with_http_info(product_id, **kwargs) return data def get_product_directors_with_http_info(self, product_id, **kwargs): """ Get directors of a product This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_product_directors_with_http_info(product_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int product_id: Product ID to fetch (required) :param int page: :param int per_page: :param str image_type: :return: DirectorListResponse If the method is called asynchronously, returns the request thread. """ all_params = ['product_id', 'page', 'per_page', 'image_type'] all_params.append('callback') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_product_directors" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'product_id' is set if ('product_id' not in params) or (params['product_id'] is None): raise ValueError("Missing the required parameter `product_id` when calling `get_product_directors`") collection_formats = {} resource_path = '/products/{product_id}/directors'.replace('{format}', 'json') path_params = {} if 'product_id' in params: path_params['product_id'] = params['product_id'] query_params = {} if 'page' in params: query_params['page'] = params['page'] if 'per_page' in params: query_params['per_page'] = params['per_page'] if 'image_type' in params: query_params['image_type'] = params['image_type'] header_params = {} form_params = [] local_var_files = {} body_params = None # Authentication setting auth_settings = ['ApiClientId', 'ApiClientSecret'] return self.api_client.call_api(resource_path, 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='DirectorListResponse', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def get_product_directors_role(self, product_id, **kwargs): """ Get Directors attached to Product with their role This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_product_directors_role(product_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int product_id: Product ID to fetch (required) :param int page: :param int per_page: :return: DirectorRoleListResponse If the method is called asynchronously, returns the request thread. 
""" kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.get_product_directors_role_with_http_info(product_id, **kwargs) else: (data) = self.get_product_directors_role_with_http_info(product_id, **kwargs) return data def get_product_directors_role_with_http_info(self, product_id, **kwargs): """ Get Directors attached to Product with their role This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_product_directors_role_with_http_info(product_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int product_id: Product ID to fetch (required) :param int page: :param int per_page: :return: DirectorRoleListResponse If the method is called asynchronously, returns the request thread. """ all_params = ['product_id', 'page', 'per_page'] all_params.append('callback') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_product_directors_role" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'product_id' is set if ('product_id' not in params) or (params['product_id'] is None): raise ValueError("Missing the required parameter `product_id` when calling `get_product_directors_role`") collection_formats = {} resource_path = '/products/{product_id}/directors-role'.replace('{format}', 'json') path_params = {} if 'product_id' in params: path_params['product_id'] = params['product_id'] query_params = {} if 'page' in params: query_params['page'] = params['page'] if 'per_page' in params: query_params['per_page'] = params['per_page'] header_params = {} form_params = [] local_var_files = {} body_params = None # Authentication setting auth_settings = ['ApiClientId', 'ApiClientSecret'] return self.api_client.call_api(resource_path, 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='DirectorRoleListResponse', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def update_director(self, director_id, body, **kwargs): """ Update director This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.update_director(director_id, body, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int director_id: (required) :param UpdateDirectorRequest body: Directory settings (required) :return: None If the method is called asynchronously, returns the request thread. 
""" kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.update_director_with_http_info(director_id, body, **kwargs) else: (data) = self.update_director_with_http_info(director_id, body, **kwargs) return data def update_director_with_http_info(self, director_id, body, **kwargs): """ Update director This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.update_director_with_http_info(director_id, body, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int director_id: (required) :param UpdateDirectorRequest body: Directory settings (required) :return: None If the method is called asynchronously, returns the request thread. """ all_params = ['director_id', 'body'] all_params.append('callback') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method update_director" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'director_id' is set if ('director_id' not in params) or (params['director_id'] is None): raise ValueError("Missing the required parameter `director_id` when calling `update_director`") # verify the required parameter 'body' is set if ('body' not in params) or (params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `update_director`") collection_formats = {} resource_path = '/directors/{director_id}'.replace('{format}', 'json') path_params = {} if 'director_id' in params: path_params['director_id'] = params['director_id'] query_params = {} header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] self.api_client.set_default_header('Content-Type', 'application/json') # Authentication setting auth_settings = ['ApiClientId', 'ApiClientSecret'] return self.api_client.call_api(resource_path, 'PUT', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type=None, auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def upload_director_cover(self, director_id, **kwargs): """ Upload director cover This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.upload_director_cover(director_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param float director_id: Director ID to fetch (required) :param file file: :param str hash: :param str hash_algorithm: Hash algorithm to check the hash file (default value is: sha256) :return: ImageResponse If the method is called asynchronously, returns the request thread. 
""" kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.upload_director_cover_with_http_info(director_id, **kwargs) else: (data) = self.upload_director_cover_with_http_info(director_id, **kwargs) return data def upload_director_cover_with_http_info(self, director_id, **kwargs): """ Upload director cover This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.upload_director_cover_with_http_info(director_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param float director_id: Director ID to fetch (required) :param file file: :param str hash: :param str hash_algorithm: Hash algorithm to check the hash file (default value is: sha256) :return: ImageResponse If the method is called asynchronously, returns the request thread. """ all_params = ['director_id', 'file', 'hash', 'hash_algorithm'] all_params.append('callback') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method upload_director_cover" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'director_id' is set if ('director_id' not in params) or (params['director_id'] is None): raise ValueError("Missing the required parameter `director_id` when calling `upload_director_cover`") collection_formats = {} resource_path = '/directors/{director_id}/cover'.replace('{format}', 'json') path_params = {} if 'director_id' in params: path_params['director_id'] = params['director_id'] query_params = {} header_params = {} form_params = [] local_var_files = {} if 'file' in params: local_var_files['file'] = params['file'] self.api_client.set_default_header('Content-Type', 'application/x-www-form-urlencoded') if 'hash' in params: form_params.append(('hash', params['hash'])) self.api_client.set_default_header('Content-Type', 'application/x-www-form-urlencoded') if 'hash_algorithm' in params: form_params.append(('hash_algorithm', params['hash_algorithm'])) self.api_client.set_default_header('Content-Type', 'application/x-www-form-urlencoded') body_params = None # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['multipart/form-data']) # Authentication setting auth_settings = ['ApiClientId', 'ApiClientSecret'] return self.api_client.call_api(resource_path, 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='ImageResponse', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats)
kinow-io/kinow-python-sdk
kinow_client/apis/directors_api.py
Python
apache-2.0
71,299
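The directors_api.py methods above all share the same generated call pattern: a public wrapper plus a _with_http_info variant, with an optional callback kwarg for asynchronous use. A minimal usage sketch follows; the Configuration/ApiClient wiring assumes the usual swagger-codegen package layout for kinow_client, and the credential and id values are placeholders rather than real ones.

import kinow_client
from kinow_client.rest import ApiException  # assumed swagger-codegen module layout

# Placeholder credentials for the ApiClientId / ApiClientSecret auth settings.
configuration = kinow_client.Configuration()
configuration.api_key['ApiClientId'] = 'YOUR_CLIENT_ID'
configuration.api_key['ApiClientSecret'] = 'YOUR_CLIENT_SECRET'

api = kinow_client.DirectorsApi(kinow_client.ApiClient(configuration))

try:
    directors = api.get_directors(page=1, per_page=10)           # DirectorListResponse
    director = api.get_director(director_id=42)                  # DirectorResponse
    products = api.get_director_products(director_id=42,
                                         sort_by='id',
                                         sort_direction='asc')   # DirectorProductListResponse
except ApiException as err:
    print("Directors API call failed: %s" % err)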
#!/usr/bin/python """ .. module:: shellscribe Shell-Scribe run.py @author: Keith E. Miller <[email protected]> Expected issues: - cd command is shell-scribe specific so commands that use cd in a non-trivial way might break the cd command """ import cmd import os import sys import argparse as ap import datetime import json from twilio.rest import TwilioRestClient ## Set to false to get rid of debug print statements DEBUG = False ### PASTE FUNCTION DEFINITIONS HERE def bashinator_9000(filename): dic={} inc=1 title = '' author = '' date = datetime.datetime.now() title = raw_input("What is the title: ") author = raw_input("Who is the author: ") dic['welcome']= raw_input("Input a description for the lesson: ") date = datetime.datetime.now() if title =="": title = 'lesson' if author=="": author = 'N/A' dic["title"] = title dic["author"] = author with open(filename,'r') as file: for row in file: print '\033[91m' + "\nCode for the row: " + '\033[96m' + row + '\033[92m' comment=raw_input('- ') tempDic = {'comment':comment,'command':row} dic.update({inc:tempDic}) inc+=1 print('\033[0m') dic['command_count'] = inc - 1 with open(title+'.json','w') as file: json.dump(dic,file) def bashinator_10000(filename): #need sleeeeeep #fname = filename.readFile() #attempting to have json file read-in with open(filename, 'r') as f: json_dict = json.load(f) print json_dict inc=1 # Welcomes them to Hell print json_dict["welcome"], "\n" for x in range(json_dict["command_count"]): x = x + 1 print '\033[91m' +"Line: ", x,'\n' print '\033[92m'+ "Comment: ", json_dict[str(x)]["comment"],'\n' print '\033[96m' + "Input: ", json_dict[str(x)]["command"][:-1] outfile = os.popen(json_dict[str(x)]["command"]) output = outfile.read() return_val = outfile.close() if return_val != None: shell-scribe().send_call() print '\033[93m' + "Output: ", os.popen(json_dict[str(x)]["command"]).read() + '\033[0m' raw_input("-Press Enter-\n") #not sure what to do with the rest of this code. whether or not it is even necessary #with open('test.sh','r') as file: # for row in file: # print '\033[91m' + "\nCode for the row: " + '\033[96m' + row + '\033[92m' # comment=raw_input('- ') # tempDic = {'comment':comment,'command':row} # dic.update({inc:tempDic}) # inc+=1 #dic['welcome']="""This is a welcome message""" #print('\033[0m') #with open(title+'.json','w') as file: # json.dump(dic,file) class Shell_Scribe(cmd.Cmd): """ Shell_Scribe is a commandline interface that automatically saves a history of what commands were typed to a text file as well as creating a shell script for them. 
""" ## Return value for each command (None == 0) return_value = None ## The prompt to the user prompt = '\033[96m'+'S'+'\033[33m'+'hell-'+'\033[96m'+'S'+'\033[33m'+ \ 'cribe>'+'\033[0m' ## Set to True for Working Directory as prompt" location_prompt = False ## This is a list of commands that will not be stored by Shell-Scribe storage_blacklist = ["ls", "pwd", ""] ## Config File Name config_filename = "config.json" ## Twilio Attributes TWILIO = False ACCOUNT_SID = None AUTH_TOKEN = None message_recipient = None message_sender = None call_url = None alert_type = None ## Properties script_filename = "shell-scribe.sh" script = None def bashinator_9000(self, filename): dic={} inc=1 title = '' author = '' date = datetime.datetime.now() title = raw_input("What is the title: ") author = raw_input("Who is the author: ") dic['welcome']= raw_input("Input a description for the lesson: ") date = datetime.datetime.now() if title =="": title = 'lesson' if author=="": author = 'N/A' dic["title"] = title dic["author"] = author with open(filename,'r') as file: for row in file: print '\033[91m' + "\nCode for the row: " + '\033[96m' + row + '\033[92m' comment=raw_input('- ') tempDic = {'comment':comment,'command':row} dic.update({inc:tempDic}) inc+=1 print('\033[0m') dic['command_count'] = inc - 1 with open(title+'.json','w') as file: json.dump(dic,file) def bashinator_10000(self, filename): #need sleeeeeep #fname = filename.readFile() #attempting to have json file read-in with open(filename, 'r') as f: json_dict = json.load(f) print json_dict inc=1 # Welcomes them to Hell print json_dict["welcome"], "\n" for x in range(json_dict["command_count"]): x = x + 1 print '\033[91m' +"Line: ", x,'\n' print '\033[92m'+ "Comment: ", json_dict[str(x)]["comment"],'\n' print '\033[96m' + "Input: ", json_dict[str(x)]["command"][:-1] outfile = os.popen(json_dict[str(x)]["command"]) output = outfile.read() return_val = outfile.close() if return_val != None: self.send_call() print '\033[93m' + "Output: ", os.popen(json_dict[str(x)]["command"]).read() + '\033[0m' raw_input("-Press Enter-\n") ## File Editing Methods def store_to_script(self, line): """ Stores the shell command to the script """ self.script.write(line + "\n") def load_config_json(self): """ Configures Shell-Scribe based on the JSON configuration file """ with open(self.config_filename, 'r') as f: json_dict = json.load(f) #print "Dict from Json:", json_dict self.TWILIO = (1 == json_dict["twilio"]["TWILIO"]) if self.TWILIO: self.ACCOUNT_SID = json_dict["twilio"]["ACCOUNT_SID"] self.AUTH_TOKEN = json_dict["twilio"]["AUTH_TOKEN"] self.message_recipient = json_dict["twilio"]["TO"] self.message_sender = json_dict["twilio"]["FROM"] if json_dict["twilio"]["ALERT_TYPE"].lower() == "call": self.alert_type = json_dict["twilio"]["ALERT_TYPE"].lower() self.call_url = json_dict["twilio"]["CALL_URL"] if json_dict["appearance"]["prompt"].lower() == 'location': self.location_prompt = True def no_config_subroutine(self): """ Method that is called when there is no config found """ gen_config = input("Generate Default Config File? (Y/n)") if gen_config == "": gen_conifg = "Y" if gen_config.lower() == 'y': self.generate_config() self.load_config_json else: "No Configuration File. 
Running basic mode" ## Send text via Twilio def send_text(self, line): """ Sends a text message via Twilio """ client = TwilioRestClient(self.ACCOUNT_SID, self.AUTH_TOKEN) client.messages.create(to=self.message_recipient, from_=self.message_sender, body="Failed on command: " + line) def send_call(self): """ Sends said call via Twilio """ print "Calling" client = TwilioRestClient(self.ACCOUNT_SID, self.AUTH_TOKEN) call = client.calls.create(to=self.message_recipient, from_=self.message_sender, url=self.call_url, method="GET", fallback_method="GET", status_callback_method="GET", record="false") print call.sid ## Explicit Shell-Scribe Commands def do_cd(self, line): """ Runs the cd equivalent """ if os.path.isdir(line): os.chdir(line) else: print "Directory ", line, " does not exist" def do_exit(self, line): """ Exits Shell-Scribe """ os.system("chmod +x %s" % self.script_filename) sys.exit() def do_quit(self, line): """ Exits Shell Scribe """ os.system("chmod +x %s" % self.script_filename) sys.exit() ## Misc. Functions def command_not_blank(self, line): """ Checks to make sure the command is not all space characters """ print "line:",line for char in line: if char != " ": return True return False ## CMD Overloads def do_EOF(self, line): """ Method that is called at the end of a batch job. """ return True def precmd(self, line): """ Method that is run just before the shell command is run """ return line def emptyline(self): """ Controls what happens if the user enters an empty line. This is addded to because without overloading this method it defaults to rerunning the command which is not what we are looking for. """ return "" def postcmd(self, stop, line): """ Method that is called after each of command is run """ if self.location_prompt: self.prompt = os.getcwd() + " >" if self.return_value == None: if (line not in self.storage_blacklist) and self.command_not_blank(line): self.store_to_script(line) print "Stored!" 
    def default(self, line):
        """ This is the default method that is called if the shell command is
            not a specific shell command (a do_ method)
        """
        cmd_file = os.popen(line)
        output = cmd_file.read()
        self.return_value = cmd_file.close()
        if self.return_value != None:
            if self.alert_type == 'text':
                self.send_text(line)
            if self.alert_type == 'call':
                self.send_call()
        if self.command_not_blank(line):
            print output

    def preloop(self):
        """ Method that is called before the CMD loop begins
        """
        if self.location_prompt:
            self.prompt = os.getcwd() + " >"
        if os.path.isfile(self.script_filename):
            pass
        self.script = open(self.script_filename, 'a')


if __name__ == '__main__':
    parser = ap.ArgumentParser(description="Documents Shell-Commands")
    parser.add_argument('--location-prompt', action='store_true')
    parser.add_argument('-config', help="The name of the configuration JSON file")
    parser.add_argument('-create-lesson',
                        help="The name of the script that we are building a lesson for")
    parser.add_argument('-run-lesson',
                        help="The name of the lesson (JSON file) that we are running in shell-scribe")
    args = parser.parse_args()

    ss = Shell_Scribe()
    ss.location_prompt = args.location_prompt

    if args.config is not None:
        if os.path.isfile(args.config):
            print "Using configuration from file ", args.config
            ss.config_filename = args.config
            ss.load_config_json()
        else:
            print "Config does not exist"
            ss.no_config_subroutine()
    elif os.path.isfile("config.json"):
        print "Found config.json"
        ss.load_config_json()
    else:
        ss.no_config_subroutine()

    if DEBUG:
        print args

    if args.create_lesson != None:
        ss.bashinator_9000(args.create_lesson)
        print "RUNNING CREATE LESSON BLOCK"
    elif args.run_lesson != None:
        # Run Lesson Function
        ss.bashinator_10000(args.run_lesson)
    else:
        ss.cmdloop()
keithemiller/shell-scribe
shell-scribe.py
Python
apache-2.0
12,528
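Shell_Scribe.load_config_json() above expects a config.json with "twilio" and "appearance" sections. The sketch below writes a file with the exact keys the loader reads; every value is a placeholder, and the Twilio credentials are not real.

import json

config = {
    "twilio": {
        "TWILIO": 1,                       # 1 enables Twilio alerts, anything else disables them
        "ACCOUNT_SID": "ACxxxxxxxx",       # placeholder
        "AUTH_TOKEN": "your-auth-token",   # placeholder
        "TO": "+15551234567",              # alert recipient (placeholder)
        "FROM": "+15557654321",            # Twilio number (placeholder)
        "ALERT_TYPE": "text",              # "text" or "call"
        "CALL_URL": "http://example.com/twiml.xml",  # only read when ALERT_TYPE is "call"
    },
    "appearance": {
        "prompt": "location",              # "location" shows the working directory as the prompt
    },
}

with open("config.json", "w") as f:
    json.dump(config, f, indent=4)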
#!/usr/bin/env python
import os
import sys
import etcd
import subprocess
import signal
import time

if len(sys.argv) < 2:
    print("Please provide a server argument")
    sys.exit(1)

def siginthandler(signum, stackframe):
    sys.exit(-1)

signal.signal(signal.SIGINT, siginthandler)

logpath = "/log"
if len(sys.argv) > 2:
    logpath = sys.argv[2]

while True:
    try:
        idx = 0
        time.sleep(1)
        p = 2379
        print("Connect to {}:{}".format(sys.argv[1], p))
        keyval = etcd.Client(host=sys.argv[1], port=p)
        while keyval:
            res = keyval.watch(logpath, index=idx, recursive=True)
            for e in res.leaves:
                if e.key == logpath:
                    idx = 0
                    break
                print(e.value)
                idx = e.createdIndex + 1
    except Exception as e:
        print(e)
rasros/CloudTranscoderLTH2
readlog.py
Python
apache-2.0
723
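readlog.py above is started as `python readlog.py <etcd-host> [watch-path]` and prints every value written under the watched key. A companion sketch, assuming the same python-etcd client and a placeholder host, appends an entry the watcher would pick up:

import etcd

client = etcd.Client(host="etcd.example.com", port=2379)  # placeholder host
# append=True creates an in-order child key under /log, which the recursive watch sees
client.write("/log", "transcode job 42 finished", append=True)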
from mido import MidiFile
from time import sleep
import pibrella

"""
fade test
pibrella.light.red.fade(0,100,10)
sleep(11)
pibrella.light.red.fade(100,0,10)
sleep(11)
"""

"""
start
pibrella.buzzer.note(-9)
sleep(.9)
pibrella.buzzer.off()
sleep(0.1)
pibrella.buzzer.note(-9)
sleep(0.9)
pibrella.buzzer.off()
sleep(0.1)
pibrella.buzzer.note(-9)
sleep(0.9)
pibrella.buzzer.off()
sleep(0.1)
pibrella.buzzer.note(3)
sleep(0.9)
pibrella.buzzer.off()
"""

"""
fail
pibrella.buzzer.note(0)
sleep(1.25)
pibrella.buzzer.note(-7)
sleep(2)
pibrella.buzzer.off()
"""

"""
Mike notes:
for success, likely the Bond theme.
Also need a calibration mode: push the button, yellow goes on,
then as you turn it the light can change until the light changes;
press the red button again to go back to the operational state.
"""

"""
it knows it is a comment
"""

mid = MidiFile('bond.mid')

for i, track in enumerate(mid.tracks):
    print('Track ')
    print(track.name)
    if track.name == '':
        for message in track:
            if message.type == 'note_on':
                # print('Turn on ')
                note = message.note - 69
                print(note)
                pibrella.buzzer.note(note)
                duration = 0.0 + message.time
            elif message.type == 'note_off':
                print(duration)
                duration = message.time - duration
                if duration > 0:
                    sleep(duration/1000.0)
                pibrella.buzzer.off()

pibrella.buzzer.off()
brata-hsdc/brata.station
bin/pibrellaMidi.py
Python
apache-2.0
1,422
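The `message.note - 69` arithmetic above treats pibrella.buzzer.note() as taking a semitone offset from concert A (MIDI note 69, 440 Hz). The quick standalone check below makes that mapping explicit using the equal-temperament formula; no Pibrella hardware is needed to run it.

def midi_to_buzzer_offset(midi_note):
    # Same shift the script applies before calling pibrella.buzzer.note()
    return midi_note - 69

def offset_to_frequency(offset):
    # Equal temperament: each semitone multiplies the frequency by 2**(1/12)
    return 440.0 * (2 ** (offset / 12.0))

assert midi_to_buzzer_offset(69) == 0       # A4 maps to offset 0 (440 Hz)
print(round(offset_to_frequency(3), 1))     # offset 3 is C5, roughly 523.3 Hz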
from model.contact import Contact
import random


def test_delete_some_contact(app, db, check_ui):
    if len(db.get_contact_list()) == 0:
        app.contact.add(Contact(firstname="test"))
    old_contacts = db.get_contact_list()
    contact = random.choice(old_contacts)
    app.contact.delete_contact_by_id(contact.id)
    assert len(old_contacts) - 1 == app.contact.count()
    new_contacts = db.get_contact_list()
    old_contacts.remove(contact)
    assert old_contacts == new_contacts
    if check_ui:
        assert sorted(new_contacts, key=Contact.id_or_max) == sorted(app.contact.get_contact_list(),
                                                                     key=Contact.id_or_max)
AndreyBalabanov/python_training
test/test_del_contact.py
Python
apache-2.0
633
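The check_ui branch above sorts both lists with Contact.id_or_max, which is defined elsewhere in this suite. A plausible sketch of that helper, assuming contacts scraped from the UI may still lack an id, would be:

import sys

class Contact:
    def __init__(self, id=None, firstname=None):
        self.id = id
        self.firstname = firstname

    def id_or_max(self):
        # Contacts without an id sort last instead of failing on int(None)
        return int(self.id) if self.id else sys.maxsize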
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import paddle.fluid as fluid
from utility import get_gpu_num


class NpairsLoss():
    def __init__(self,
                 train_batch_size=160,
                 samples_each_class=2,
                 reg_lambda=0.01):
        self.samples_each_class = samples_each_class
        assert(self.samples_each_class == 2)
        self.train_batch_size = train_batch_size
        num_gpus = get_gpu_num()
        assert(train_batch_size % num_gpus == 0)
        self.cal_loss_batch_size = train_batch_size // num_gpus
        assert(self.cal_loss_batch_size % samples_each_class == 0)
        self.reg_lambda = reg_lambda

    def loss(self, input, label=None):
        reg_lambda = self.reg_lambda
        samples_each_class = self.samples_each_class
        batch_size = self.cal_loss_batch_size
        num_class = batch_size // samples_each_class
        fea_dim = input.shape[1]

        input = fluid.layers.reshape(input, shape=[-1, fea_dim])
        feature = fluid.layers.reshape(input, shape=[-1, samples_each_class, fea_dim])
        label = fluid.layers.reshape(label, shape=[-1, samples_each_class])
        label = fluid.layers.cast(label, dtype='float32')
        if samples_each_class == 2:
            anchor_fea, positive_fea = fluid.layers.split(feature, num_or_sections=2, dim=1)
            anchor_lab, positive_lab = fluid.layers.split(label, num_or_sections=2, dim=1)
        else:
            anchor_fea, positive_fea = fluid.layers.split(
                feature, num_or_sections=[1, samples_each_class - 1], dim=1)
            anchor_lab, positive_lab = fluid.layers.split(
                label, num_or_sections=[1, samples_each_class - 1], dim=1)

        anchor_fea = fluid.layers.reshape(anchor_fea, shape=[-1, fea_dim])
        positive_fea = fluid.layers.reshape(positive_fea, shape=[-1, fea_dim])
        positive_fea_trans = fluid.layers.transpose(positive_fea, perm=[1, 0])
        similarity_matrix = fluid.layers.mul(anchor_fea, positive_fea_trans)

        anchor_lab = fluid.layers.expand(x=anchor_lab, expand_times=[1, batch_size - num_class])
        positive_lab_tran = fluid.layers.transpose(positive_lab, perm=[1, 0])
        positive_lab_tran = fluid.layers.expand(x=positive_lab_tran, expand_times=[num_class, 1])
        label_remapped = fluid.layers.equal(anchor_lab, positive_lab_tran)
        label_remapped = fluid.layers.cast(label_remapped, dtype='float32') / (samples_each_class - 1)
        label_remapped.stop_gradient = True

        out = fluid.layers.softmax(input=similarity_matrix, use_cudnn=False)
        xentloss = fluid.layers.cross_entropy(input=out, label=label_remapped, soft_label=True)
        xentloss = fluid.layers.mean(x=xentloss)

        reg = fluid.layers.reduce_mean(fluid.layers.reduce_sum(fluid.layers.square(input), dim=1))
        l2loss = 0.5 * reg_lambda * reg
        return xentloss + l2loss
kuke/models
fluid/PaddleCV/metric_learning/losses/npairsloss.py
Python
apache-2.0
2,981
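For readers who want to sanity-check the graph above outside of PaddlePaddle, here is a plain NumPy sketch of the same N-pairs computation. It assumes the batch is laid out as [anchor, positive] pairs per class, exactly as the samples_each_class == 2 branch does; it is a reference sketch, not part of the original repository.

import numpy as np

def npairs_loss_numpy(features, labels, reg_lambda=0.01):
    """features: (batch, dim) with rows ordered anchor0, pos0, anchor1, pos1, ...
    labels: (batch,) integer class ids, identical within each pair."""
    feats = features.reshape(-1, 2, features.shape[1])
    labs = labels.reshape(-1, 2)
    anchor, positive = feats[:, 0], feats[:, 1]
    anchor_lab, positive_lab = labs[:, 0], labs[:, 1]

    similarity = anchor @ positive.T                    # (num_class, num_class)
    targets = (anchor_lab[:, None] == positive_lab[None, :]).astype(np.float64)

    # softmax cross-entropy with soft labels, numerically stabilised
    logits = similarity - similarity.max(axis=1, keepdims=True)
    log_softmax = logits - np.log(np.exp(logits).sum(axis=1, keepdims=True))
    xent = -(targets * log_softmax).sum(axis=1).mean()

    # same L2 term as the graph: 0.5 * lambda * mean of per-row squared norms
    l2 = 0.5 * reg_lambda * np.square(features).sum(axis=1).mean()
    return xent + l2

rng = np.random.default_rng(0)
demo_feats = rng.normal(size=(8, 4))          # 4 classes, [anchor, positive] per class
demo_labels = np.repeat(np.arange(4), 2)
print(npairs_loss_numpy(demo_feats, demo_labels))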
"""Support for INSTEON Modems (PLM and Hub).""" import asyncio from contextlib import suppress import logging from pyinsteon import async_close, async_connect, devices from homeassistant.config_entries import SOURCE_IMPORT from homeassistant.const import CONF_PLATFORM, EVENT_HOMEASSISTANT_STOP from homeassistant.exceptions import ConfigEntryNotReady from . import api from .const import ( CONF_CAT, CONF_DIM_STEPS, CONF_HOUSECODE, CONF_OVERRIDE, CONF_SUBCAT, CONF_UNITCODE, CONF_X10, DOMAIN, INSTEON_PLATFORMS, ON_OFF_EVENTS, ) from .schemas import convert_yaml_to_config_flow from .utils import ( add_on_off_event_device, async_register_services, get_device_platforms, register_new_device_callback, ) _LOGGER = logging.getLogger(__name__) OPTIONS = "options" async def async_get_device_config(hass, config_entry): """Initiate the connection and services.""" # Make a copy of addresses due to edge case where the list of devices could change during status update # Cannot be done concurrently due to issues with the underlying protocol. for address in list(devices): if devices[address].is_battery: continue with suppress(AttributeError): await devices[address].async_status() await devices.async_load(id_devices=1) for addr in devices: device = devices[addr] flags = True for name in device.operating_flags: if not device.operating_flags[name].is_loaded: flags = False break if flags: for name in device.properties: if not device.properties[name].is_loaded: flags = False break # Cannot be done concurrently due to issues with the underlying protocol. if not device.aldb.is_loaded or not flags: await device.async_read_config() await devices.async_save(workdir=hass.config.config_dir) async def close_insteon_connection(*args): """Close the Insteon connection.""" await async_close() async def async_setup(hass, config): """Set up the Insteon platform.""" if DOMAIN not in config: return True conf = config[DOMAIN] data, options = convert_yaml_to_config_flow(conf) if options: hass.data[DOMAIN] = {} hass.data[DOMAIN][OPTIONS] = options # Create a config entry with the connection data hass.async_create_task( hass.config_entries.flow.async_init( DOMAIN, context={"source": SOURCE_IMPORT}, data=data ) ) return True async def async_setup_entry(hass, entry): """Set up an Insteon entry.""" if not devices.modem: try: await async_connect(**entry.data) except ConnectionError as exception: _LOGGER.error("Could not connect to Insteon modem") raise ConfigEntryNotReady from exception entry.async_on_unload( hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, close_insteon_connection) ) await devices.async_load( workdir=hass.config.config_dir, id_devices=0, load_modem_aldb=0 ) # If options existed in YAML and have not already been saved to the config entry # add them now if ( not entry.options and entry.source == SOURCE_IMPORT and hass.data.get(DOMAIN) and hass.data[DOMAIN].get(OPTIONS) ): hass.config_entries.async_update_entry( entry=entry, options=hass.data[DOMAIN][OPTIONS], ) for device_override in entry.options.get(CONF_OVERRIDE, []): # Override the device default capabilities for a specific address address = device_override.get("address") if not devices.get(address): cat = device_override[CONF_CAT] subcat = device_override[CONF_SUBCAT] devices.set_id(address, cat, subcat, 0) for device in entry.options.get(CONF_X10, []): housecode = device.get(CONF_HOUSECODE) unitcode = device.get(CONF_UNITCODE) x10_type = "on_off" steps = device.get(CONF_DIM_STEPS, 22) if device.get(CONF_PLATFORM) == "light": x10_type = "dimmable" elif 
device.get(CONF_PLATFORM) == "binary_sensor": x10_type = "sensor" _LOGGER.debug( "Adding X10 device to Insteon: %s %d %s", housecode, unitcode, x10_type ) device = devices.add_x10_device(housecode, unitcode, x10_type, steps) for platform in INSTEON_PLATFORMS: hass.async_create_task( hass.config_entries.async_forward_entry_setup(entry, platform) ) for address in devices: device = devices[address] platforms = get_device_platforms(device) if ON_OFF_EVENTS in platforms: add_on_off_event_device(hass, device) _LOGGER.debug("Insteon device count: %s", len(devices)) register_new_device_callback(hass) async_register_services(hass) device_registry = await hass.helpers.device_registry.async_get_registry() device_registry.async_get_or_create( config_entry_id=entry.entry_id, identifiers={(DOMAIN, str(devices.modem.address))}, manufacturer="Smart Home", name=f"{devices.modem.description} {devices.modem.address}", model=f"{devices.modem.model} ({devices.modem.cat!r}, 0x{devices.modem.subcat:02x})", sw_version=f"{devices.modem.firmware:02x} Engine Version: {devices.modem.engine_version}", ) api.async_load_api(hass) asyncio.create_task(async_get_device_config(hass, entry)) return True
home-assistant/home-assistant
homeassistant/components/insteon/__init__.py
Python
apache-2.0
5,664
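async_setup_entry above iterates two option lists: device overrides and X10 devices. Below is a sketch of the options structure it expects, using the constants the module itself imports; the address, category and house/unit code values are placeholders.

from homeassistant.components.insteon.const import (
    CONF_CAT,
    CONF_DIM_STEPS,
    CONF_HOUSECODE,
    CONF_OVERRIDE,
    CONF_SUBCAT,
    CONF_UNITCODE,
    CONF_X10,
)
from homeassistant.const import CONF_PLATFORM

example_options = {
    CONF_OVERRIDE: [
        # Pin a device's category/subcategory when it cannot be identified automatically.
        {"address": "1A2B3C", CONF_CAT: 0x02, CONF_SUBCAT: 0x2A},
    ],
    CONF_X10: [
        # platform "light" -> dimmable, "binary_sensor" -> sensor, anything else -> on_off
        {CONF_HOUSECODE: "a", CONF_UNITCODE: 1, CONF_PLATFORM: "light", CONF_DIM_STEPS: 22},
    ],
}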
"""Config flow for ReCollect Waste integration.""" from __future__ import annotations from typing import Any from aiorecollect.client import Client from aiorecollect.errors import RecollectError import voluptuous as vol from homeassistant import config_entries from homeassistant.const import CONF_FRIENDLY_NAME from homeassistant.core import callback from homeassistant.data_entry_flow import FlowResult from homeassistant.helpers import aiohttp_client from .const import CONF_PLACE_ID, CONF_SERVICE_ID, DOMAIN, LOGGER DATA_SCHEMA = vol.Schema( {vol.Required(CONF_PLACE_ID): str, vol.Required(CONF_SERVICE_ID): str} ) class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN): """Handle a config flow for ReCollect Waste.""" VERSION = 1 @staticmethod @callback def async_get_options_flow( config_entry: config_entries.ConfigEntry, ) -> config_entries.OptionsFlow: """Define the config flow to handle options.""" return RecollectWasteOptionsFlowHandler(config_entry) async def async_step_import( self, import_config: dict[str, Any] | None = None ) -> FlowResult: """Handle configuration via YAML import.""" return await self.async_step_user(import_config) async def async_step_user( self, user_input: dict[str, Any] | None = None ) -> FlowResult: """Handle configuration via the UI.""" if user_input is None: return self.async_show_form( step_id="user", data_schema=DATA_SCHEMA, errors={} ) unique_id = f"{user_input[CONF_PLACE_ID]}, {user_input[CONF_SERVICE_ID]}" await self.async_set_unique_id(unique_id) self._abort_if_unique_id_configured() session = aiohttp_client.async_get_clientsession(self.hass) client = Client( user_input[CONF_PLACE_ID], user_input[CONF_SERVICE_ID], session=session ) try: await client.async_get_next_pickup_event() except RecollectError as err: LOGGER.error("Error during setup of integration: %s", err) return self.async_show_form( step_id="user", data_schema=DATA_SCHEMA, errors={"base": "invalid_place_or_service_id"}, ) return self.async_create_entry( title=unique_id, data={ CONF_PLACE_ID: user_input[CONF_PLACE_ID], CONF_SERVICE_ID: user_input[CONF_SERVICE_ID], }, ) class RecollectWasteOptionsFlowHandler(config_entries.OptionsFlow): """Handle a Recollect Waste options flow.""" def __init__(self, entry: config_entries.ConfigEntry) -> None: """Initialize.""" self._entry = entry async def async_step_init( self, user_input: dict[str, Any] | None = None ) -> FlowResult: """Manage the options.""" if user_input is not None: return self.async_create_entry(title="", data=user_input) return self.async_show_form( step_id="init", data_schema=vol.Schema( { vol.Optional( CONF_FRIENDLY_NAME, default=self._entry.options.get(CONF_FRIENDLY_NAME), ): bool } ), )
sander76/home-assistant
homeassistant/components/recollect_waste/config_flow.py
Python
apache-2.0
3,365
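The validation step in async_step_user above boils down to a single aiorecollect call. A standalone sketch of the same check follows, with placeholder ids; unlike the config flow, which passes Home Assistant's shared session, this sketch assumes the client can manage its own HTTP session.

import asyncio

from aiorecollect.client import Client
from aiorecollect.errors import RecollectError

async def check_ids(place_id: str, service_id: str) -> bool:
    """Return True if ReCollect recognises the place/service id pair."""
    client = Client(place_id, service_id)
    try:
        await client.async_get_next_pickup_event()
    except RecollectError:
        return False
    return True

if __name__ == "__main__":
    print(asyncio.run(check_ids("12345", "678")))  # placeholder ids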
import robot_util


def sendSettings(ser, args):
    if args.right_wheel_forward_speed is not None:
        robot_util.sendSerialCommand(ser, "rwfs " + str(args.right_wheel_forward_speed))
    if args.right_wheel_backward_speed is not None:
        robot_util.sendSerialCommand(ser, "rwbs " + str(args.right_wheel_backward_speed))
    if args.left_wheel_forward_speed is not None:
        robot_util.sendSerialCommand(ser, "lwfs " + str(args.left_wheel_forward_speed))
    if args.left_wheel_backward_speed is not None:
        robot_util.sendSerialCommand(ser, "lwbs " + str(args.left_wheel_backward_speed))
    if args.straight_delay is not None:
        robot_util.sendSerialCommand(ser, "straight-distance " + str(int(args.straight_delay * 255)))
    if args.turn_delay is not None:
        robot_util.sendSerialCommand(ser, "turn-distance " + str(int(args.turn_delay * 255)))
    if args.led_max_brightness is not None:
        robot_util.sendSerialCommand(ser, "led-max-brightness " + str(args.led_max_brightness))
runmyrobot/runmyrobot
telly.py
Python
apache-2.0
1,037
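sendSettings() above reads its values off an argparse-style namespace and needs an already-open serial handle. The sketch below shows how a caller might wire that up; the argument names mirror the attributes read above, while the serial port, baud rate and the use of pyserial are assumptions.

import argparse

import serial  # pyserial; assumed to be the handle robot_util writes to

import telly

parser = argparse.ArgumentParser()
parser.add_argument('--right-wheel-forward-speed', type=int)
parser.add_argument('--right-wheel-backward-speed', type=int)
parser.add_argument('--left-wheel-forward-speed', type=int)
parser.add_argument('--left-wheel-backward-speed', type=int)
parser.add_argument('--straight-delay', type=float)   # seconds, scaled by 255 before sending
parser.add_argument('--turn-delay', type=float)       # seconds, scaled by 255 before sending
parser.add_argument('--led-max-brightness', type=int)
args = parser.parse_args()

ser = serial.Serial('/dev/ttyACM0', 9600)  # placeholder port and baud rate
telly.sendSettings(ser, args)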
""" Cisco_IOS_XR_infra_objmgr_cfg This module contains a collection of YANG definitions for Cisco IOS\-XR infra\-objmgr package configuration. This module contains definitions for the following management objects\: object\-group\: Object\-group configuration Copyright (c) 2013\-2016 by Cisco Systems, Inc. All rights reserved. """ import re import collections from enum import Enum from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict from ydk.errors import YPYError, YPYModelError class EndPortEnum(Enum): """ EndPortEnum End port .. data:: echo = 7 Echo (7) .. data:: discard = 9 Discard (9) .. data:: daytime = 13 Daytime (13) .. data:: chargen = 19 Character generator (19) .. data:: ftp_data = 20 FTP data connections (used infrequently, 20) .. data:: ftp = 21 File Transfer Protocol (21) .. data:: ssh = 22 Secure Shell (22) .. data:: telnet = 23 Telnet (23) .. data:: smtp = 25 Simple Mail Transport Protocol (25) .. data:: time = 37 Time (37) .. data:: nicname = 43 Nicname (43) .. data:: tacacs = 49 TAC Access Control System (49) .. data:: domain = 53 Domain Name Service (53) .. data:: gopher = 70 Gopher (70) .. data:: finger = 79 Finger (79) .. data:: www = 80 World Wide Web (HTTP, 80) .. data:: host_name = 101 NIC hostname server (101) .. data:: pop2 = 109 Post Office Protocol v2 (109) .. data:: pop3 = 110 Post Office Protocol v3 (110) .. data:: sun_rpc = 111 Sun Remote Procedure Call (111) .. data:: ident = 113 Ident Protocol (113) .. data:: nntp = 119 Network News Transport Protocol (119) .. data:: bgp = 179 Border Gateway Protocol (179) .. data:: irc = 194 Internet Relay Chat (194) .. data:: pim_auto_rp = 496 PIM Auto-RP (496) .. data:: exec_ = 512 Exec (rsh, 512) .. data:: login = 513 Login (rlogin, 513) .. data:: cmd = 514 Remote commands (rcmd, 514) .. data:: lpd = 515 Printer service (515) .. data:: uucp = 540 Unix-to-Unix Copy Program (540) .. data:: klogin = 543 Kerberos login (543) .. data:: kshell = 544 Kerberos shell (544) .. data:: talk = 517 Talk (517) .. data:: ldp = 646 LDP session connection attempts (MPLS, 646) """ echo = 7 discard = 9 daytime = 13 chargen = 19 ftp_data = 20 ftp = 21 ssh = 22 telnet = 23 smtp = 25 time = 37 nicname = 43 tacacs = 49 domain = 53 gopher = 70 finger = 79 www = 80 host_name = 101 pop2 = 109 pop3 = 110 sun_rpc = 111 ident = 113 nntp = 119 bgp = 179 irc = 194 pim_auto_rp = 496 exec_ = 512 login = 513 cmd = 514 lpd = 515 uucp = 540 klogin = 543 kshell = 544 talk = 517 ldp = 646 @staticmethod def _meta_info(): from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_objmgr_cfg as meta return meta._meta_table['EndPortEnum'] class PortEnum(Enum): """ PortEnum Port .. data:: echo = 7 Echo (7) .. data:: discard = 9 Discard (9) .. data:: daytime = 13 Daytime (13) .. data:: chargen = 19 Character generator (19) .. data:: ftp_data = 20 FTP data connections (used infrequently, 20) .. data:: ftp = 21 File Transfer Protocol (21) .. data:: ssh = 22 Secure Shell (22) .. data:: telnet = 23 Telnet (23) .. data:: smtp = 25 Simple Mail Transport Protocol (25) .. data:: time = 37 Time (37) .. data:: nicname = 43 Nicname (43) .. data:: tacacs = 49 TAC Access Control System (49) .. data:: domain = 53 Domain Name Service (53) .. data:: gopher = 70 Gopher (70) .. data:: finger = 79 Finger (79) .. data:: www = 80 World Wide Web (HTTP, 80) .. data:: host_name = 101 NIC hostname server (101) .. data:: pop2 = 109 Post Office Protocol v2 (109) .. data:: pop3 = 110 Post Office Protocol v3 (110) .. 
data:: sun_rpc = 111 Sun Remote Procedure Call (111) .. data:: ident = 113 Ident Protocol (113) .. data:: nntp = 119 Network News Transport Protocol (119) .. data:: bgp = 179 Border Gateway Protocol (179) .. data:: irc = 194 Internet Relay Chat (194) .. data:: pim_auto_rp = 496 PIM Auto-RP (496) .. data:: exec_ = 512 Exec (rsh, 512) .. data:: login = 513 Login (rlogin, 513) .. data:: cmd = 514 Remote commands (rcmd, 514) .. data:: lpd = 515 Printer service (515) .. data:: uucp = 540 Unix-to-Unix Copy Program (540) .. data:: klogin = 543 Kerberos login (543) .. data:: kshell = 544 Kerberos shell (544) .. data:: talk = 517 Talk (517) .. data:: ldp = 646 LDP session connection attempts (MPLS, 646) """ echo = 7 discard = 9 daytime = 13 chargen = 19 ftp_data = 20 ftp = 21 ssh = 22 telnet = 23 smtp = 25 time = 37 nicname = 43 tacacs = 49 domain = 53 gopher = 70 finger = 79 www = 80 host_name = 101 pop2 = 109 pop3 = 110 sun_rpc = 111 ident = 113 nntp = 119 bgp = 179 irc = 194 pim_auto_rp = 496 exec_ = 512 login = 513 cmd = 514 lpd = 515 uucp = 540 klogin = 543 kshell = 544 talk = 517 ldp = 646 @staticmethod def _meta_info(): from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_objmgr_cfg as meta return meta._meta_table['PortEnum'] class PortOperatorEnum(Enum): """ PortOperatorEnum Port operator .. data:: equal = 0 Match packets on ports equal to entered port number .. data:: not_equal = 1 Match packets on ports not equal to entered port number .. data:: greater_than = 2 Match packets on ports greater than entered port number .. data:: less_than = 3 Match packets on ports less than entered port number """ equal = 0 not_equal = 1 greater_than = 2 less_than = 3 @staticmethod def _meta_info(): from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_objmgr_cfg as meta return meta._meta_table['PortOperatorEnum'] class StartPortEnum(Enum): """ StartPortEnum Start port .. data:: echo = 7 Echo (7) .. data:: discard = 9 Discard (9) .. data:: daytime = 13 Daytime (13) .. data:: chargen = 19 Character generator (19) .. data:: ftp_data = 20 FTP data connections (used infrequently, 20) .. data:: ftp = 21 File Transfer Protocol (21) .. data:: ssh = 22 Secure Shell (22) .. data:: telnet = 23 Telnet (23) .. data:: smtp = 25 Simple Mail Transport Protocol (25) .. data:: time = 37 Time (37) .. data:: nicname = 43 Nicname (43) .. data:: tacacs = 49 TAC Access Control System (49) .. data:: domain = 53 Domain Name Service (53) .. data:: gopher = 70 Gopher (70) .. data:: finger = 79 Finger (79) .. data:: www = 80 World Wide Web (HTTP, 80) .. data:: host_name = 101 NIC hostname server (101) .. data:: pop2 = 109 Post Office Protocol v2 (109) .. data:: pop3 = 110 Post Office Protocol v3 (110) .. data:: sun_rpc = 111 Sun Remote Procedure Call (111) .. data:: ident = 113 Ident Protocol (113) .. data:: nntp = 119 Network News Transport Protocol (119) .. data:: bgp = 179 Border Gateway Protocol (179) .. data:: irc = 194 Internet Relay Chat (194) .. data:: pim_auto_rp = 496 PIM Auto-RP (496) .. data:: exec_ = 512 Exec (rsh, 512) .. data:: login = 513 Login (rlogin, 513) .. data:: cmd = 514 Remote commands (rcmd, 514) .. data:: lpd = 515 Printer service (515) .. data:: uucp = 540 Unix-to-Unix Copy Program (540) .. data:: klogin = 543 Kerberos login (543) .. data:: kshell = 544 Kerberos shell (544) .. data:: talk = 517 Talk (517) .. 
data:: ldp = 646 LDP session connection attempts (MPLS, 646) """ echo = 7 discard = 9 daytime = 13 chargen = 19 ftp_data = 20 ftp = 21 ssh = 22 telnet = 23 smtp = 25 time = 37 nicname = 43 tacacs = 49 domain = 53 gopher = 70 finger = 79 www = 80 host_name = 101 pop2 = 109 pop3 = 110 sun_rpc = 111 ident = 113 nntp = 119 bgp = 179 irc = 194 pim_auto_rp = 496 exec_ = 512 login = 513 cmd = 514 lpd = 515 uucp = 540 klogin = 543 kshell = 544 talk = 517 ldp = 646 @staticmethod def _meta_info(): from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_objmgr_cfg as meta return meta._meta_table['StartPortEnum'] class ObjectGroup(object): """ Object\-group configuration .. attribute:: network Network object group **type**\: :py:class:`Network <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg.ObjectGroup.Network>` .. attribute:: port Port object group **type**\: :py:class:`Port <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg.ObjectGroup.Port>` """ _prefix = 'infra-objmgr-cfg' _revision = '2015-11-09' def __init__(self): self.network = ObjectGroup.Network() self.network.parent = self self.port = ObjectGroup.Port() self.port.parent = self class Port(object): """ Port object group .. attribute:: udf_objects Table of port objects groups **type**\: :py:class:`UdfObjects <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg.ObjectGroup.Port.UdfObjects>` """ _prefix = 'infra-objmgr-cfg' _revision = '2015-11-09' def __init__(self): self.parent = None self.udf_objects = ObjectGroup.Port.UdfObjects() self.udf_objects.parent = self class UdfObjects(object): """ Table of port objects groups .. attribute:: udf_object Port object group **type**\: list of :py:class:`UdfObject <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg.ObjectGroup.Port.UdfObjects.UdfObject>` """ _prefix = 'infra-objmgr-cfg' _revision = '2015-11-09' def __init__(self): self.parent = None self.udf_object = YList() self.udf_object.parent = self self.udf_object.name = 'udf_object' class UdfObject(object): """ Port object group .. attribute:: object_name <key> Port object group name \- maximum 64 characters **type**\: str **length:** 1..64 .. attribute:: description Up to 100 characters describing this object **type**\: str **length:** 1..100 .. attribute:: nested_groups Table of nested port object groups **type**\: :py:class:`NestedGroups <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg.ObjectGroup.Port.UdfObjects.UdfObject.NestedGroups>` .. attribute:: operators Table of port operators **type**\: :py:class:`Operators <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg.ObjectGroup.Port.UdfObjects.UdfObject.Operators>` .. attribute:: port_ranges Table of port range addresses **type**\: :py:class:`PortRanges <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg.ObjectGroup.Port.UdfObjects.UdfObject.PortRanges>` """ _prefix = 'infra-objmgr-cfg' _revision = '2015-11-09' def __init__(self): self.parent = None self.object_name = None self.description = None self.nested_groups = ObjectGroup.Port.UdfObjects.UdfObject.NestedGroups() self.nested_groups.parent = self self.operators = ObjectGroup.Port.UdfObjects.UdfObject.Operators() self.operators.parent = self self.port_ranges = ObjectGroup.Port.UdfObjects.UdfObject.PortRanges() self.port_ranges.parent = self class Operators(object): """ Table of port operators .. 
attribute:: operator op class **type**\: list of :py:class:`Operator <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg.ObjectGroup.Port.UdfObjects.UdfObject.Operators.Operator>` """ _prefix = 'infra-objmgr-cfg' _revision = '2015-11-09' def __init__(self): self.parent = None self.operator = YList() self.operator.parent = self self.operator.name = 'operator' class Operator(object): """ op class .. attribute:: operator_type <key> operation for ports **type**\: :py:class:`PortOperatorEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg.PortOperatorEnum>` .. attribute:: port <key> Port number **type**\: one of the below types: **type**\: :py:class:`PortEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg.PortEnum>` ---- **type**\: int **range:** 0..65535 ---- """ _prefix = 'infra-objmgr-cfg' _revision = '2015-11-09' def __init__(self): self.parent = None self.operator_type = None self.port = None @property def _common_path(self): if self.parent is None: raise YPYModelError('parent is not set . Cannot derive path.') if self.operator_type is None: raise YPYModelError('Key property operator_type is None') if self.port is None: raise YPYModelError('Key property port is None') return self.parent._common_path +'/Cisco-IOS-XR-infra-objmgr-cfg:operator[Cisco-IOS-XR-infra-objmgr-cfg:operator-type = ' + str(self.operator_type) + '][Cisco-IOS-XR-infra-objmgr-cfg:port = ' + str(self.port) + ']' def is_config(self): ''' Returns True if this instance represents config data else returns False ''' return True def _has_data(self): if self.operator_type is not None: return True if self.port is not None: return True return False @staticmethod def _meta_info(): from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_objmgr_cfg as meta return meta._meta_table['ObjectGroup.Port.UdfObjects.UdfObject.Operators.Operator']['meta_info'] @property def _common_path(self): if self.parent is None: raise YPYModelError('parent is not set . Cannot derive path.') return self.parent._common_path +'/Cisco-IOS-XR-infra-objmgr-cfg:operators' def is_config(self): ''' Returns True if this instance represents config data else returns False ''' return True def _has_data(self): if self.operator is not None: for child_ref in self.operator: if child_ref._has_data(): return True return False @staticmethod def _meta_info(): from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_objmgr_cfg as meta return meta._meta_table['ObjectGroup.Port.UdfObjects.UdfObject.Operators']['meta_info'] class NestedGroups(object): """ Table of nested port object groups .. attribute:: nested_group nested object group **type**\: list of :py:class:`NestedGroup <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg.ObjectGroup.Port.UdfObjects.UdfObject.NestedGroups.NestedGroup>` """ _prefix = 'infra-objmgr-cfg' _revision = '2015-11-09' def __init__(self): self.parent = None self.nested_group = YList() self.nested_group.parent = self self.nested_group.name = 'nested_group' class NestedGroup(object): """ nested object group .. attribute:: nested_group_name <key> Name of a nested object group **type**\: str **length:** 1..64 """ _prefix = 'infra-objmgr-cfg' _revision = '2015-11-09' def __init__(self): self.parent = None self.nested_group_name = None @property def _common_path(self): if self.parent is None: raise YPYModelError('parent is not set . 
Cannot derive path.') if self.nested_group_name is None: raise YPYModelError('Key property nested_group_name is None') return self.parent._common_path +'/Cisco-IOS-XR-infra-objmgr-cfg:nested-group[Cisco-IOS-XR-infra-objmgr-cfg:nested-group-name = ' + str(self.nested_group_name) + ']' def is_config(self): ''' Returns True if this instance represents config data else returns False ''' return True def _has_data(self): if self.nested_group_name is not None: return True return False @staticmethod def _meta_info(): from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_objmgr_cfg as meta return meta._meta_table['ObjectGroup.Port.UdfObjects.UdfObject.NestedGroups.NestedGroup']['meta_info'] @property def _common_path(self): if self.parent is None: raise YPYModelError('parent is not set . Cannot derive path.') return self.parent._common_path +'/Cisco-IOS-XR-infra-objmgr-cfg:nested-groups' def is_config(self): ''' Returns True if this instance represents config data else returns False ''' return True def _has_data(self): if self.nested_group is not None: for child_ref in self.nested_group: if child_ref._has_data(): return True return False @staticmethod def _meta_info(): from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_objmgr_cfg as meta return meta._meta_table['ObjectGroup.Port.UdfObjects.UdfObject.NestedGroups']['meta_info'] class PortRanges(object): """ Table of port range addresses .. attribute:: port_range Match only packets on a given port range **type**\: list of :py:class:`PortRange <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg.ObjectGroup.Port.UdfObjects.UdfObject.PortRanges.PortRange>` """ _prefix = 'infra-objmgr-cfg' _revision = '2015-11-09' def __init__(self): self.parent = None self.port_range = YList() self.port_range.parent = self self.port_range.name = 'port_range' class PortRange(object): """ Match only packets on a given port range .. attribute:: start_port <key> Port number **type**\: one of the below types: **type**\: :py:class:`StartPortEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg.StartPortEnum>` ---- **type**\: int **range:** 0..65535 ---- .. attribute:: end_port <key> Port number **type**\: one of the below types: **type**\: :py:class:`EndPortEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg.EndPortEnum>` ---- **type**\: int **range:** 0..65535 ---- """ _prefix = 'infra-objmgr-cfg' _revision = '2015-11-09' def __init__(self): self.parent = None self.start_port = None self.end_port = None @property def _common_path(self): if self.parent is None: raise YPYModelError('parent is not set . Cannot derive path.') if self.start_port is None: raise YPYModelError('Key property start_port is None') if self.end_port is None: raise YPYModelError('Key property end_port is None') return self.parent._common_path +'/Cisco-IOS-XR-infra-objmgr-cfg:port-range[Cisco-IOS-XR-infra-objmgr-cfg:start-port = ' + str(self.start_port) + '][Cisco-IOS-XR-infra-objmgr-cfg:end-port = ' + str(self.end_port) + ']' def is_config(self): ''' Returns True if this instance represents config data else returns False ''' return True def _has_data(self): if self.start_port is not None: return True if self.end_port is not None: return True return False @staticmethod def _meta_info(): from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_objmgr_cfg as meta return meta._meta_table['ObjectGroup.Port.UdfObjects.UdfObject.PortRanges.PortRange']['meta_info'] @property def _common_path(self): if self.parent is None: raise YPYModelError('parent is not set . 
Cannot derive path.') return self.parent._common_path +'/Cisco-IOS-XR-infra-objmgr-cfg:port-ranges' def is_config(self): ''' Returns True if this instance represents config data else returns False ''' return True def _has_data(self): if self.port_range is not None: for child_ref in self.port_range: if child_ref._has_data(): return True return False @staticmethod def _meta_info(): from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_objmgr_cfg as meta return meta._meta_table['ObjectGroup.Port.UdfObjects.UdfObject.PortRanges']['meta_info'] @property def _common_path(self): if self.object_name is None: raise YPYModelError('Key property object_name is None') return '/Cisco-IOS-XR-infra-objmgr-cfg:object-group/Cisco-IOS-XR-infra-objmgr-cfg:port/Cisco-IOS-XR-infra-objmgr-cfg:udf-objects/Cisco-IOS-XR-infra-objmgr-cfg:udf-object[Cisco-IOS-XR-infra-objmgr-cfg:object-name = ' + str(self.object_name) + ']' def is_config(self): ''' Returns True if this instance represents config data else returns False ''' return True def _has_data(self): if self.object_name is not None: return True if self.description is not None: return True if self.nested_groups is not None and self.nested_groups._has_data(): return True if self.operators is not None and self.operators._has_data(): return True if self.port_ranges is not None and self.port_ranges._has_data(): return True return False @staticmethod def _meta_info(): from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_objmgr_cfg as meta return meta._meta_table['ObjectGroup.Port.UdfObjects.UdfObject']['meta_info'] @property def _common_path(self): return '/Cisco-IOS-XR-infra-objmgr-cfg:object-group/Cisco-IOS-XR-infra-objmgr-cfg:port/Cisco-IOS-XR-infra-objmgr-cfg:udf-objects' def is_config(self): ''' Returns True if this instance represents config data else returns False ''' return True def _has_data(self): if self.udf_object is not None: for child_ref in self.udf_object: if child_ref._has_data(): return True return False @staticmethod def _meta_info(): from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_objmgr_cfg as meta return meta._meta_table['ObjectGroup.Port.UdfObjects']['meta_info'] @property def _common_path(self): return '/Cisco-IOS-XR-infra-objmgr-cfg:object-group/Cisco-IOS-XR-infra-objmgr-cfg:port' def is_config(self): ''' Returns True if this instance represents config data else returns False ''' return True def _has_data(self): if self.udf_objects is not None and self.udf_objects._has_data(): return True return False @staticmethod def _meta_info(): from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_objmgr_cfg as meta return meta._meta_table['ObjectGroup.Port']['meta_info'] class Network(object): """ Network object group .. attribute:: ipv4 IPv4 object group **type**\: :py:class:`Ipv4 <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg.ObjectGroup.Network.Ipv4>` .. attribute:: ipv6 IPv6 object group **type**\: :py:class:`Ipv6 <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg.ObjectGroup.Network.Ipv6>` """ _prefix = 'infra-objmgr-cfg' _revision = '2015-11-09' def __init__(self): self.parent = None self.ipv4 = ObjectGroup.Network.Ipv4() self.ipv4.parent = self self.ipv6 = ObjectGroup.Network.Ipv6() self.ipv6.parent = self class Ipv6(object): """ IPv6 object group .. 
attribute:: udf_objects Table of ipv6 object groups **type**\: :py:class:`UdfObjects <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg.ObjectGroup.Network.Ipv6.UdfObjects>` """ _prefix = 'infra-objmgr-cfg' _revision = '2015-11-09' def __init__(self): self.parent = None self.udf_objects = ObjectGroup.Network.Ipv6.UdfObjects() self.udf_objects.parent = self class UdfObjects(object): """ Table of ipv6 object groups .. attribute:: udf_object IPv6 object group **type**\: list of :py:class:`UdfObject <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg.ObjectGroup.Network.Ipv6.UdfObjects.UdfObject>` """ _prefix = 'infra-objmgr-cfg' _revision = '2015-11-09' def __init__(self): self.parent = None self.udf_object = YList() self.udf_object.parent = self self.udf_object.name = 'udf_object' class UdfObject(object): """ IPv6 object group .. attribute:: object_name <key> IPv6 object group name \- maximum 64 characters **type**\: str **length:** 1..64 .. attribute:: address_ranges Table of ipv6 address ranges **type**\: :py:class:`AddressRanges <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg.ObjectGroup.Network.Ipv6.UdfObjects.UdfObject.AddressRanges>` .. attribute:: addresses Table of ipv6 addresses **type**\: :py:class:`Addresses <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg.ObjectGroup.Network.Ipv6.UdfObjects.UdfObject.Addresses>` .. attribute:: description Up to 100 characters describing this object **type**\: str **length:** 1..100 .. attribute:: hosts Table of ipv6 host addresses **type**\: :py:class:`Hosts <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg.ObjectGroup.Network.Ipv6.UdfObjects.UdfObject.Hosts>` .. attribute:: nested_groups Table of nested ipv6 object groups **type**\: :py:class:`NestedGroups <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg.ObjectGroup.Network.Ipv6.UdfObjects.UdfObject.NestedGroups>` """ _prefix = 'infra-objmgr-cfg' _revision = '2015-11-09' def __init__(self): self.parent = None self.object_name = None self.address_ranges = ObjectGroup.Network.Ipv6.UdfObjects.UdfObject.AddressRanges() self.address_ranges.parent = self self.addresses = ObjectGroup.Network.Ipv6.UdfObjects.UdfObject.Addresses() self.addresses.parent = self self.description = None self.hosts = ObjectGroup.Network.Ipv6.UdfObjects.UdfObject.Hosts() self.hosts.parent = self self.nested_groups = ObjectGroup.Network.Ipv6.UdfObjects.UdfObject.NestedGroups() self.nested_groups.parent = self class NestedGroups(object): """ Table of nested ipv6 object groups .. attribute:: nested_group nested object group **type**\: list of :py:class:`NestedGroup <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg.ObjectGroup.Network.Ipv6.UdfObjects.UdfObject.NestedGroups.NestedGroup>` """ _prefix = 'infra-objmgr-cfg' _revision = '2015-11-09' def __init__(self): self.parent = None self.nested_group = YList() self.nested_group.parent = self self.nested_group.name = 'nested_group' class NestedGroup(object): """ nested object group .. attribute:: nested_group_name <key> Enter the name of a nested object group **type**\: str **length:** 1..64 """ _prefix = 'infra-objmgr-cfg' _revision = '2015-11-09' def __init__(self): self.parent = None self.nested_group_name = None @property def _common_path(self): if self.parent is None: raise YPYModelError('parent is not set . 
Cannot derive path.') if self.nested_group_name is None: raise YPYModelError('Key property nested_group_name is None') return self.parent._common_path +'/Cisco-IOS-XR-infra-objmgr-cfg:nested-group[Cisco-IOS-XR-infra-objmgr-cfg:nested-group-name = ' + str(self.nested_group_name) + ']' def is_config(self): ''' Returns True if this instance represents config data else returns False ''' return True def _has_data(self): if self.nested_group_name is not None: return True return False @staticmethod def _meta_info(): from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_objmgr_cfg as meta return meta._meta_table['ObjectGroup.Network.Ipv6.UdfObjects.UdfObject.NestedGroups.NestedGroup']['meta_info'] @property def _common_path(self): if self.parent is None: raise YPYModelError('parent is not set . Cannot derive path.') return self.parent._common_path +'/Cisco-IOS-XR-infra-objmgr-cfg:nested-groups' def is_config(self): ''' Returns True if this instance represents config data else returns False ''' return True def _has_data(self): if self.nested_group is not None: for child_ref in self.nested_group: if child_ref._has_data(): return True return False @staticmethod def _meta_info(): from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_objmgr_cfg as meta return meta._meta_table['ObjectGroup.Network.Ipv6.UdfObjects.UdfObject.NestedGroups']['meta_info'] class AddressRanges(object): """ Table of ipv6 address ranges .. attribute:: address_range Range of host addresses **type**\: list of :py:class:`AddressRange <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg.ObjectGroup.Network.Ipv6.UdfObjects.UdfObject.AddressRanges.AddressRange>` """ _prefix = 'infra-objmgr-cfg' _revision = '2015-11-09' def __init__(self): self.parent = None self.address_range = YList() self.address_range.parent = self self.address_range.name = 'address_range' class AddressRange(object): """ Range of host addresses .. attribute:: start_address <key> IPv6 address **type**\: one of the below types: **type**\: str **pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)? ---- **type**\: str **pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)? ---- .. attribute:: end_address <key> IPv6 address **type**\: one of the below types: **type**\: str **pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)? ---- **type**\: str **pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)? ---- """ _prefix = 'infra-objmgr-cfg' _revision = '2015-11-09' def __init__(self): self.parent = None self.start_address = None self.end_address = None @property def _common_path(self): if self.parent is None: raise YPYModelError('parent is not set . 
Cannot derive path.') if self.start_address is None: raise YPYModelError('Key property start_address is None') if self.end_address is None: raise YPYModelError('Key property end_address is None') return self.parent._common_path +'/Cisco-IOS-XR-infra-objmgr-cfg:address-range[Cisco-IOS-XR-infra-objmgr-cfg:start-address = ' + str(self.start_address) + '][Cisco-IOS-XR-infra-objmgr-cfg:end-address = ' + str(self.end_address) + ']' def is_config(self): ''' Returns True if this instance represents config data else returns False ''' return True def _has_data(self): if self.start_address is not None: return True if self.end_address is not None: return True return False @staticmethod def _meta_info(): from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_objmgr_cfg as meta return meta._meta_table['ObjectGroup.Network.Ipv6.UdfObjects.UdfObject.AddressRanges.AddressRange']['meta_info'] @property def _common_path(self): if self.parent is None: raise YPYModelError('parent is not set . Cannot derive path.') return self.parent._common_path +'/Cisco-IOS-XR-infra-objmgr-cfg:address-ranges' def is_config(self): ''' Returns True if this instance represents config data else returns False ''' return True def _has_data(self): if self.address_range is not None: for child_ref in self.address_range: if child_ref._has_data(): return True return False @staticmethod def _meta_info(): from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_objmgr_cfg as meta return meta._meta_table['ObjectGroup.Network.Ipv6.UdfObjects.UdfObject.AddressRanges']['meta_info'] class Addresses(object): """ Table of ipv6 addresses .. attribute:: address IPv6 address **type**\: list of :py:class:`Address <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg.ObjectGroup.Network.Ipv6.UdfObjects.UdfObject.Addresses.Address>` """ _prefix = 'infra-objmgr-cfg' _revision = '2015-11-09' def __init__(self): self.parent = None self.address = YList() self.address.parent = self self.address.name = 'address' class Address(object): """ IPv6 address .. attribute:: prefix <key> IPv6 prefix x\:x\:\:x/y **type**\: one of the below types: **type**\: str **pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)? ---- **type**\: str **pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)? ---- .. attribute:: prefix_length <key> Prefix of the IP Address **type**\: int **range:** 0..128 """ _prefix = 'infra-objmgr-cfg' _revision = '2015-11-09' def __init__(self): self.parent = None self.prefix = None self.prefix_length = None @property def _common_path(self): if self.parent is None: raise YPYModelError('parent is not set . 
Cannot derive path.') if self.prefix is None: raise YPYModelError('Key property prefix is None') if self.prefix_length is None: raise YPYModelError('Key property prefix_length is None') return self.parent._common_path +'/Cisco-IOS-XR-infra-objmgr-cfg:address[Cisco-IOS-XR-infra-objmgr-cfg:prefix = ' + str(self.prefix) + '][Cisco-IOS-XR-infra-objmgr-cfg:prefix-length = ' + str(self.prefix_length) + ']' def is_config(self): ''' Returns True if this instance represents config data else returns False ''' return True def _has_data(self): if self.prefix is not None: return True if self.prefix_length is not None: return True return False @staticmethod def _meta_info(): from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_objmgr_cfg as meta return meta._meta_table['ObjectGroup.Network.Ipv6.UdfObjects.UdfObject.Addresses.Address']['meta_info'] @property def _common_path(self): if self.parent is None: raise YPYModelError('parent is not set . Cannot derive path.') return self.parent._common_path +'/Cisco-IOS-XR-infra-objmgr-cfg:addresses' def is_config(self): ''' Returns True if this instance represents config data else returns False ''' return True def _has_data(self): if self.address is not None: for child_ref in self.address: if child_ref._has_data(): return True return False @staticmethod def _meta_info(): from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_objmgr_cfg as meta return meta._meta_table['ObjectGroup.Network.Ipv6.UdfObjects.UdfObject.Addresses']['meta_info'] class Hosts(object): """ Table of ipv6 host addresses .. attribute:: host A single host address **type**\: list of :py:class:`Host <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg.ObjectGroup.Network.Ipv6.UdfObjects.UdfObject.Hosts.Host>` """ _prefix = 'infra-objmgr-cfg' _revision = '2015-11-09' def __init__(self): self.parent = None self.host = YList() self.host.parent = self self.host.name = 'host' class Host(object): """ A single host address .. attribute:: host_address <key> host ipv6 address **type**\: one of the below types: **type**\: str **pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)? ---- **type**\: str **pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)? ---- """ _prefix = 'infra-objmgr-cfg' _revision = '2015-11-09' def __init__(self): self.parent = None self.host_address = None @property def _common_path(self): if self.parent is None: raise YPYModelError('parent is not set . Cannot derive path.') if self.host_address is None: raise YPYModelError('Key property host_address is None') return self.parent._common_path +'/Cisco-IOS-XR-infra-objmgr-cfg:host[Cisco-IOS-XR-infra-objmgr-cfg:host-address = ' + str(self.host_address) + ']' def is_config(self): ''' Returns True if this instance represents config data else returns False ''' return True def _has_data(self): if self.host_address is not None: return True return False @staticmethod def _meta_info(): from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_objmgr_cfg as meta return meta._meta_table['ObjectGroup.Network.Ipv6.UdfObjects.UdfObject.Hosts.Host']['meta_info'] @property def _common_path(self): if self.parent is None: raise YPYModelError('parent is not set . 
Cannot derive path.') return self.parent._common_path +'/Cisco-IOS-XR-infra-objmgr-cfg:hosts' def is_config(self): ''' Returns True if this instance represents config data else returns False ''' return True def _has_data(self): if self.host is not None: for child_ref in self.host: if child_ref._has_data(): return True return False @staticmethod def _meta_info(): from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_objmgr_cfg as meta return meta._meta_table['ObjectGroup.Network.Ipv6.UdfObjects.UdfObject.Hosts']['meta_info'] @property def _common_path(self): if self.object_name is None: raise YPYModelError('Key property object_name is None') return '/Cisco-IOS-XR-infra-objmgr-cfg:object-group/Cisco-IOS-XR-infra-objmgr-cfg:network/Cisco-IOS-XR-infra-objmgr-cfg:ipv6/Cisco-IOS-XR-infra-objmgr-cfg:udf-objects/Cisco-IOS-XR-infra-objmgr-cfg:udf-object[Cisco-IOS-XR-infra-objmgr-cfg:object-name = ' + str(self.object_name) + ']' def is_config(self): ''' Returns True if this instance represents config data else returns False ''' return True def _has_data(self): if self.object_name is not None: return True if self.address_ranges is not None and self.address_ranges._has_data(): return True if self.addresses is not None and self.addresses._has_data(): return True if self.description is not None: return True if self.hosts is not None and self.hosts._has_data(): return True if self.nested_groups is not None and self.nested_groups._has_data(): return True return False @staticmethod def _meta_info(): from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_objmgr_cfg as meta return meta._meta_table['ObjectGroup.Network.Ipv6.UdfObjects.UdfObject']['meta_info'] @property def _common_path(self): return '/Cisco-IOS-XR-infra-objmgr-cfg:object-group/Cisco-IOS-XR-infra-objmgr-cfg:network/Cisco-IOS-XR-infra-objmgr-cfg:ipv6/Cisco-IOS-XR-infra-objmgr-cfg:udf-objects' def is_config(self): ''' Returns True if this instance represents config data else returns False ''' return True def _has_data(self): if self.udf_object is not None: for child_ref in self.udf_object: if child_ref._has_data(): return True return False @staticmethod def _meta_info(): from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_objmgr_cfg as meta return meta._meta_table['ObjectGroup.Network.Ipv6.UdfObjects']['meta_info'] @property def _common_path(self): return '/Cisco-IOS-XR-infra-objmgr-cfg:object-group/Cisco-IOS-XR-infra-objmgr-cfg:network/Cisco-IOS-XR-infra-objmgr-cfg:ipv6' def is_config(self): ''' Returns True if this instance represents config data else returns False ''' return True def _has_data(self): if self.udf_objects is not None and self.udf_objects._has_data(): return True return False @staticmethod def _meta_info(): from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_objmgr_cfg as meta return meta._meta_table['ObjectGroup.Network.Ipv6']['meta_info'] class Ipv4(object): """ IPv4 object group .. attribute:: udf_objects Table of ipv4 object groups **type**\: :py:class:`UdfObjects <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg.ObjectGroup.Network.Ipv4.UdfObjects>` """ _prefix = 'infra-objmgr-cfg' _revision = '2015-11-09' def __init__(self): self.parent = None self.udf_objects = ObjectGroup.Network.Ipv4.UdfObjects() self.udf_objects.parent = self class UdfObjects(object): """ Table of ipv4 object groups .. 
attribute:: udf_object IPv4 object group **type**\: list of :py:class:`UdfObject <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg.ObjectGroup.Network.Ipv4.UdfObjects.UdfObject>` """ _prefix = 'infra-objmgr-cfg' _revision = '2015-11-09' def __init__(self): self.parent = None self.udf_object = YList() self.udf_object.parent = self self.udf_object.name = 'udf_object' class UdfObject(object): """ IPv4 object group .. attribute:: object_name <key> IPv4 object group name \- maximum 64 characters **type**\: str **length:** 1..64 .. attribute:: address_ranges Table of ipv4 host address ranges **type**\: :py:class:`AddressRanges <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg.ObjectGroup.Network.Ipv4.UdfObjects.UdfObject.AddressRanges>` .. attribute:: addresses Table of addresses **type**\: :py:class:`Addresses <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg.ObjectGroup.Network.Ipv4.UdfObjects.UdfObject.Addresses>` .. attribute:: description Up to 100 characters describing this object **type**\: str **length:** 1..100 .. attribute:: hosts Table of host addresses **type**\: :py:class:`Hosts <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg.ObjectGroup.Network.Ipv4.UdfObjects.UdfObject.Hosts>` .. attribute:: nested_groups Table of nested ipv4 object groups **type**\: :py:class:`NestedGroups <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg.ObjectGroup.Network.Ipv4.UdfObjects.UdfObject.NestedGroups>` """ _prefix = 'infra-objmgr-cfg' _revision = '2015-11-09' def __init__(self): self.parent = None self.object_name = None self.address_ranges = ObjectGroup.Network.Ipv4.UdfObjects.UdfObject.AddressRanges() self.address_ranges.parent = self self.addresses = ObjectGroup.Network.Ipv4.UdfObjects.UdfObject.Addresses() self.addresses.parent = self self.description = None self.hosts = ObjectGroup.Network.Ipv4.UdfObjects.UdfObject.Hosts() self.hosts.parent = self self.nested_groups = ObjectGroup.Network.Ipv4.UdfObjects.UdfObject.NestedGroups() self.nested_groups.parent = self class NestedGroups(object): """ Table of nested ipv4 object groups .. attribute:: nested_group Nested object group **type**\: list of :py:class:`NestedGroup <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg.ObjectGroup.Network.Ipv4.UdfObjects.UdfObject.NestedGroups.NestedGroup>` """ _prefix = 'infra-objmgr-cfg' _revision = '2015-11-09' def __init__(self): self.parent = None self.nested_group = YList() self.nested_group.parent = self self.nested_group.name = 'nested_group' class NestedGroup(object): """ Nested object group .. attribute:: nested_group_name <key> Nested object group **type**\: str **length:** 1..64 """ _prefix = 'infra-objmgr-cfg' _revision = '2015-11-09' def __init__(self): self.parent = None self.nested_group_name = None @property def _common_path(self): if self.parent is None: raise YPYModelError('parent is not set . 
Cannot derive path.') if self.nested_group_name is None: raise YPYModelError('Key property nested_group_name is None') return self.parent._common_path +'/Cisco-IOS-XR-infra-objmgr-cfg:nested-group[Cisco-IOS-XR-infra-objmgr-cfg:nested-group-name = ' + str(self.nested_group_name) + ']' def is_config(self): ''' Returns True if this instance represents config data else returns False ''' return True def _has_data(self): if self.nested_group_name is not None: return True return False @staticmethod def _meta_info(): from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_objmgr_cfg as meta return meta._meta_table['ObjectGroup.Network.Ipv4.UdfObjects.UdfObject.NestedGroups.NestedGroup']['meta_info'] @property def _common_path(self): if self.parent is None: raise YPYModelError('parent is not set . Cannot derive path.') return self.parent._common_path +'/Cisco-IOS-XR-infra-objmgr-cfg:nested-groups' def is_config(self): ''' Returns True if this instance represents config data else returns False ''' return True def _has_data(self): if self.nested_group is not None: for child_ref in self.nested_group: if child_ref._has_data(): return True return False @staticmethod def _meta_info(): from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_objmgr_cfg as meta return meta._meta_table['ObjectGroup.Network.Ipv4.UdfObjects.UdfObject.NestedGroups']['meta_info'] class AddressRanges(object): """ Table of ipv4 host address ranges .. attribute:: address_range Range of host addresses **type**\: list of :py:class:`AddressRange <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg.ObjectGroup.Network.Ipv4.UdfObjects.UdfObject.AddressRanges.AddressRange>` """ _prefix = 'infra-objmgr-cfg' _revision = '2015-11-09' def __init__(self): self.parent = None self.address_range = YList() self.address_range.parent = self self.address_range.name = 'address_range' class AddressRange(object): """ Range of host addresses .. attribute:: start_address <key> IPv4 address **type**\: one of the below types: **type**\: str **pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)? ---- **type**\: str **pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)? ---- .. attribute:: end_address <key> IPv4 address **type**\: one of the below types: **type**\: str **pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)? ---- **type**\: str **pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)? ---- """ _prefix = 'infra-objmgr-cfg' _revision = '2015-11-09' def __init__(self): self.parent = None self.start_address = None self.end_address = None @property def _common_path(self): if self.parent is None: raise YPYModelError('parent is not set . 
Cannot derive path.') if self.start_address is None: raise YPYModelError('Key property start_address is None') if self.end_address is None: raise YPYModelError('Key property end_address is None') return self.parent._common_path +'/Cisco-IOS-XR-infra-objmgr-cfg:address-range[Cisco-IOS-XR-infra-objmgr-cfg:start-address = ' + str(self.start_address) + '][Cisco-IOS-XR-infra-objmgr-cfg:end-address = ' + str(self.end_address) + ']' def is_config(self): ''' Returns True if this instance represents config data else returns False ''' return True def _has_data(self): if self.start_address is not None: return True if self.end_address is not None: return True return False @staticmethod def _meta_info(): from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_objmgr_cfg as meta return meta._meta_table['ObjectGroup.Network.Ipv4.UdfObjects.UdfObject.AddressRanges.AddressRange']['meta_info'] @property def _common_path(self): if self.parent is None: raise YPYModelError('parent is not set . Cannot derive path.') return self.parent._common_path +'/Cisco-IOS-XR-infra-objmgr-cfg:address-ranges' def is_config(self): ''' Returns True if this instance represents config data else returns False ''' return True def _has_data(self): if self.address_range is not None: for child_ref in self.address_range: if child_ref._has_data(): return True return False @staticmethod def _meta_info(): from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_objmgr_cfg as meta return meta._meta_table['ObjectGroup.Network.Ipv4.UdfObjects.UdfObject.AddressRanges']['meta_info'] class Addresses(object): """ Table of addresses .. attribute:: address IPv4 address **type**\: list of :py:class:`Address <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg.ObjectGroup.Network.Ipv4.UdfObjects.UdfObject.Addresses.Address>` """ _prefix = 'infra-objmgr-cfg' _revision = '2015-11-09' def __init__(self): self.parent = None self.address = YList() self.address.parent = self self.address.name = 'address' class Address(object): """ IPv4 address .. attribute:: prefix <key> IPv4 address/prefix **type**\: one of the below types: **type**\: str **pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)? ---- **type**\: str **pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)? ---- .. attribute:: prefix_length <key> Prefix of the IP Address **type**\: int **range:** 0..32 """ _prefix = 'infra-objmgr-cfg' _revision = '2015-11-09' def __init__(self): self.parent = None self.prefix = None self.prefix_length = None @property def _common_path(self): if self.parent is None: raise YPYModelError('parent is not set . 
Cannot derive path.') if self.prefix is None: raise YPYModelError('Key property prefix is None') if self.prefix_length is None: raise YPYModelError('Key property prefix_length is None') return self.parent._common_path +'/Cisco-IOS-XR-infra-objmgr-cfg:address[Cisco-IOS-XR-infra-objmgr-cfg:prefix = ' + str(self.prefix) + '][Cisco-IOS-XR-infra-objmgr-cfg:prefix-length = ' + str(self.prefix_length) + ']' def is_config(self): ''' Returns True if this instance represents config data else returns False ''' return True def _has_data(self): if self.prefix is not None: return True if self.prefix_length is not None: return True return False @staticmethod def _meta_info(): from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_objmgr_cfg as meta return meta._meta_table['ObjectGroup.Network.Ipv4.UdfObjects.UdfObject.Addresses.Address']['meta_info'] @property def _common_path(self): if self.parent is None: raise YPYModelError('parent is not set . Cannot derive path.') return self.parent._common_path +'/Cisco-IOS-XR-infra-objmgr-cfg:addresses' def is_config(self): ''' Returns True if this instance represents config data else returns False ''' return True def _has_data(self): if self.address is not None: for child_ref in self.address: if child_ref._has_data(): return True return False @staticmethod def _meta_info(): from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_objmgr_cfg as meta return meta._meta_table['ObjectGroup.Network.Ipv4.UdfObjects.UdfObject.Addresses']['meta_info'] class Hosts(object): """ Table of host addresses .. attribute:: host A single host address **type**\: list of :py:class:`Host <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg.ObjectGroup.Network.Ipv4.UdfObjects.UdfObject.Hosts.Host>` """ _prefix = 'infra-objmgr-cfg' _revision = '2015-11-09' def __init__(self): self.parent = None self.host = YList() self.host.parent = self self.host.name = 'host' class Host(object): """ A single host address .. attribute:: host_address <key> Host ipv4 address **type**\: one of the below types: **type**\: str **pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)? ---- **type**\: str **pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)? ---- """ _prefix = 'infra-objmgr-cfg' _revision = '2015-11-09' def __init__(self): self.parent = None self.host_address = None @property def _common_path(self): if self.parent is None: raise YPYModelError('parent is not set . Cannot derive path.') if self.host_address is None: raise YPYModelError('Key property host_address is None') return self.parent._common_path +'/Cisco-IOS-XR-infra-objmgr-cfg:host[Cisco-IOS-XR-infra-objmgr-cfg:host-address = ' + str(self.host_address) + ']' def is_config(self): ''' Returns True if this instance represents config data else returns False ''' return True def _has_data(self): if self.host_address is not None: return True return False @staticmethod def _meta_info(): from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_objmgr_cfg as meta return meta._meta_table['ObjectGroup.Network.Ipv4.UdfObjects.UdfObject.Hosts.Host']['meta_info'] @property def _common_path(self): if self.parent is None: raise YPYModelError('parent is not set . 
Cannot derive path.') return self.parent._common_path +'/Cisco-IOS-XR-infra-objmgr-cfg:hosts' def is_config(self): ''' Returns True if this instance represents config data else returns False ''' return True def _has_data(self): if self.host is not None: for child_ref in self.host: if child_ref._has_data(): return True return False @staticmethod def _meta_info(): from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_objmgr_cfg as meta return meta._meta_table['ObjectGroup.Network.Ipv4.UdfObjects.UdfObject.Hosts']['meta_info'] @property def _common_path(self): if self.object_name is None: raise YPYModelError('Key property object_name is None') return '/Cisco-IOS-XR-infra-objmgr-cfg:object-group/Cisco-IOS-XR-infra-objmgr-cfg:network/Cisco-IOS-XR-infra-objmgr-cfg:ipv4/Cisco-IOS-XR-infra-objmgr-cfg:udf-objects/Cisco-IOS-XR-infra-objmgr-cfg:udf-object[Cisco-IOS-XR-infra-objmgr-cfg:object-name = ' + str(self.object_name) + ']' def is_config(self): ''' Returns True if this instance represents config data else returns False ''' return True def _has_data(self): if self.object_name is not None: return True if self.address_ranges is not None and self.address_ranges._has_data(): return True if self.addresses is not None and self.addresses._has_data(): return True if self.description is not None: return True if self.hosts is not None and self.hosts._has_data(): return True if self.nested_groups is not None and self.nested_groups._has_data(): return True return False @staticmethod def _meta_info(): from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_objmgr_cfg as meta return meta._meta_table['ObjectGroup.Network.Ipv4.UdfObjects.UdfObject']['meta_info'] @property def _common_path(self): return '/Cisco-IOS-XR-infra-objmgr-cfg:object-group/Cisco-IOS-XR-infra-objmgr-cfg:network/Cisco-IOS-XR-infra-objmgr-cfg:ipv4/Cisco-IOS-XR-infra-objmgr-cfg:udf-objects' def is_config(self): ''' Returns True if this instance represents config data else returns False ''' return True def _has_data(self): if self.udf_object is not None: for child_ref in self.udf_object: if child_ref._has_data(): return True return False @staticmethod def _meta_info(): from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_objmgr_cfg as meta return meta._meta_table['ObjectGroup.Network.Ipv4.UdfObjects']['meta_info'] @property def _common_path(self): return '/Cisco-IOS-XR-infra-objmgr-cfg:object-group/Cisco-IOS-XR-infra-objmgr-cfg:network/Cisco-IOS-XR-infra-objmgr-cfg:ipv4' def is_config(self): ''' Returns True if this instance represents config data else returns False ''' return True def _has_data(self): if self.udf_objects is not None and self.udf_objects._has_data(): return True return False @staticmethod def _meta_info(): from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_objmgr_cfg as meta return meta._meta_table['ObjectGroup.Network.Ipv4']['meta_info'] @property def _common_path(self): return '/Cisco-IOS-XR-infra-objmgr-cfg:object-group/Cisco-IOS-XR-infra-objmgr-cfg:network' def is_config(self): ''' Returns True if this instance represents config data else returns False ''' return True def _has_data(self): if self.ipv4 is not None and self.ipv4._has_data(): return True if self.ipv6 is not None and self.ipv6._has_data(): return True return False @staticmethod def _meta_info(): from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_objmgr_cfg as meta return meta._meta_table['ObjectGroup.Network']['meta_info'] @property def _common_path(self): return '/Cisco-IOS-XR-infra-objmgr-cfg:object-group' def 
is_config(self): ''' Returns True if this instance represents config data else returns False ''' return True def _has_data(self): if self.network is not None and self.network._has_data(): return True if self.port is not None and self.port._has_data(): return True return False @staticmethod def _meta_info(): from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_objmgr_cfg as meta return meta._meta_table['ObjectGroup']['meta_info']
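The generated classes above mirror the YANG object-group tree one-to-one. Below is a small, hedged sketch of building a configuration payload with them; the object-group names and addresses are invented, and pushing the object to a device (typically done with ydk-py's CRUD service over NETCONF) is only mentioned in a comment, since the provider and service setup are outside this module. Exact list/parent handling may differ between ydk-py releases, so parents are set explicitly here.

# Hedged sketch: uses only the classes defined in the generated module above.
# Names, addresses, and ports are illustrative; sending the object to a device
# would typically go through ydk-py's CRUDService, which is not shown here.
from ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg import (
    ObjectGroup, PortOperatorEnum, PortEnum)

object_group = ObjectGroup()

# One port object-group with a single "equal to www (80)" operator.
port_obj = ObjectGroup.Port.UdfObjects.UdfObject()
port_obj.object_name = 'WEB-PORTS'
port_obj.description = 'Well-known web ports'

operator = ObjectGroup.Port.UdfObjects.UdfObject.Operators.Operator()
operator.operator_type = PortOperatorEnum.equal
operator.port = PortEnum.www
operator.parent = port_obj.operators          # _common_path needs a parent
port_obj.operators.operator.append(operator)

object_group.port.udf_objects.udf_object.append(port_obj)

# One IPv4 network object-group containing a single host address.
ipv4_obj = ObjectGroup.Network.Ipv4.UdfObjects.UdfObject()
ipv4_obj.object_name = 'WEB-SERVERS'

host = ObjectGroup.Network.Ipv4.UdfObjects.UdfObject.Hosts.Host()
host.host_address = '192.0.2.10'
host.parent = ipv4_obj.hosts
ipv4_obj.hosts.host.append(host)

object_group.network.ipv4.udf_objects.udf_object.append(ipv4_obj)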
111pontes/ydk-py
cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_infra_objmgr_cfg.py
Python
apache-2.0
90,489
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.db import models

from model_utils import Choices
from model_utils.models import TimeStampedModel


class History(TimeStampedModel):
    RESOLUTIONS = Choices('second', 'minute', 'hour', 'day', 'week', 'month', 'year')

    resolution = models.CharField(choices=RESOLUTIONS, default=RESOLUTIONS.day, max_length=6)
    tag = models.SlugField()
    datetime = models.DateTimeField()

    source_type = models.ForeignKey(ContentType)
    source_id = models.PositiveIntegerField(blank=True, null=True)
    source_object = GenericForeignKey('source_type', 'source_id')

    sum = models.IntegerField(default=0)
    delta = models.IntegerField(default=0)

    class Meta:
        get_latest_by = 'datetime'
        verbose_name_plural = 'histories'

    def __unicode__(self):
        return u'%s' % (self.tag)

    def save(self, *args, **kwargs):
        try:
            filters = {'resolution': self.resolution, 'tag': self.tag}
            previous = self._default_manager.filter(**filters).latest()
        except self._meta.model.DoesNotExist:
            pass
        else:
            self.delta = self.sum - previous.sum
        super(History, self).save(*args, **kwargs)
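A minimal usage sketch for the History model above, assuming the app is importable as apps.history (as the project layout suggests) and that a hypothetical artists app with an Artist model exists; both are assumptions for illustration. It shows how save() derives delta from the latest earlier record with the same resolution and tag.

# Hedged sketch: the 'artists'/'artist' content type and the import path are
# assumptions; only the History behaviour itself comes from the model above.
from django.contrib.contenttypes.models import ContentType
from django.utils import timezone

from apps.history.models import History

artist_type = ContentType.objects.get(app_label='artists', model='artist')

# First snapshot: no earlier record with this resolution/tag, so delta stays 0.
History.objects.create(
    resolution=History.RESOLUTIONS.day,
    tag='follower-count',
    datetime=timezone.now(),
    source_type=artist_type,
    source_id=1,
    sum=100,
)

# Second snapshot: save() looks up the latest matching record and stores
# delta = 120 - 100 = 20 before writing the row.
History.objects.create(
    resolution=History.RESOLUTIONS.day,
    tag='follower-count',
    datetime=timezone.now(),
    source_type=artist_type,
    source_id=1,
    sum=120,
)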
hello-base/web
apps/history/models.py
Python
apache-2.0
1,311
#FLM: --------- pass
impallari/Impallari-Fontlab-Macros
IMP Kerning/20 ---.py
Python
apache-2.0
27
#!/usr/bin/python
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0

# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/

import sys
import re

"""Baby Names exercise

Define the extract_names() function below and change main()
to call it.

For writing regex, it's nice to include a copy of the target
text for inspiration. Here's what the html looks like in the
baby.html files:
...
<h3 align="center">Popularity in 1990</h3>
....
<tr align="right"><td>1</td><td>Michael</td><td>Jessica</td>
<tr align="right"><td>2</td><td>Christopher</td><td>Ashley</td>
<tr align="right"><td>3</td><td>Matthew</td><td>Brittany</td>
...

Suggested milestones for incremental development:
 -Extract the year and print it
 -Extract the names and rank numbers and just print them
 -Get the names data into a dict and print it
 -Build the [year, 'name rank', ... ] list and print it
 -Fix main() to use the extract_names list
"""

def extract_names(filename):
  """
  Given a file name for baby.html, returns a list starting with the year string
  followed by the name-rank strings in alphabetical order.
  ['2006', 'Aaliyah 91', 'Aaron 57', 'Abagail 895', ...]
  """
  # +++your code here+++
  # LAB(begin solution)
  # The list [year, name_and_rank, name_and_rank, ...] we'll eventually return.
  names = []

  # Open and read the file.
  f = open(filename, 'rU')
  text = f.read()

  # Could process the file line-by-line, but regex on the whole text
  # at once is even easier.

  # Get the year.
  year_match = re.search(r'Popularity\sin\s(\d\d\d\d)', text)
  if not year_match:
    # We didn't find a year, so we'll exit with an error message.
    sys.stderr.write('Couldn\'t find the year!\n')
    sys.exit(1)
  year = year_match.group(1)
  names.append(year)

  # Extract all the data tuples with a findall()
  # each tuple is: (rank, boy-name, girl-name)
  tuples = re.findall(r'<td>(\d+)</td><td>(\w+)</td><td>(\w+)</td>', text)
  #print tuples

  # Store data into a dict using each name as a key and that
  # name's rank number as the value.
  # (if the name is already in there, don't add it, since
  # this new rank will be bigger than the previous rank).
  names_to_rank = {}
  for rank_tuple in tuples:
    (rank, boyname, girlname) = rank_tuple  # unpack the tuple into 3 vars
    if boyname not in names_to_rank:
      names_to_rank[boyname] = rank
    if girlname not in names_to_rank:
      names_to_rank[girlname] = rank

  # You can also write:
  #   for rank, boyname, girlname in tuples:
  #     ...
  # To unpack the tuples inside a for-loop.

  # Get the names, sorted in the right order
  sorted_names = sorted(names_to_rank.keys())

  # Build up result list, one element per line
  for name in sorted_names:
    names.append(name + " " + names_to_rank[name])

  return names
  # LAB(replace solution)
  # return
  # LAB(end solution)


def main():
  # This command-line parsing code is provided.
  # Make a list of command line arguments, omitting the [0] element
  # which is the script itself.
  args = sys.argv[1:]

  if not args:
    print 'usage: [--summaryfile] file [file ...]'
    sys.exit(1)

  # Notice the summary flag and remove it from args if it is present.
  summary = False
  if args[0] == '--summaryfile':
    summary = True
    del args[0]

  # +++your code here+++
  # For each filename, get the names, then either print the text output
  # or write it to a summary file
  # LAB(begin solution)
  for filename in args:
    names = extract_names(filename)

    # Make text out of the whole list
    text = '\n'.join(names)

    if summary:
      outf = open(filename + '.summary', 'w')
      outf.write(text + '\n')
      outf.close()
    else:
      print text
  # LAB(end solution)


if __name__ == '__main__':
  main()
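To make the two patterns used by extract_names() above concrete, here is a small, hedged demonstration against an invented snippet shaped like the markup quoted in the module docstring; the snippet itself is made up for illustration, and the print statements follow the Python 2 style of the file above.

# Hedged sketch: the HTML below is invented but mirrors the structure described
# in the docstring; the regexes are the ones extract_names() uses.
import re

snippet = (
    '<h3 align="center">Popularity in 1990</h3>'
    '<tr align="right"><td>1</td><td>Michael</td><td>Jessica</td>'
    '<tr align="right"><td>2</td><td>Christopher</td><td>Ashley</td>'
)

year_match = re.search(r'Popularity\sin\s(\d\d\d\d)', snippet)
print year_match.group(1)  # -> 1990

tuples = re.findall(r'<td>(\d+)</td><td>(\w+)</td><td>(\w+)</td>', snippet)
print tuples  # -> [('1', 'Michael', 'Jessica'), ('2', 'Christopher', 'Ashley')]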
chintak/google-python-ex
babynames/solution/babynames.py
Python
apache-2.0
3,874
# -*- coding: utf-8 -*-
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException, TimeoutException
from bs4 import BeautifulSoup
from datetime import datetime
from decimal import Decimal, DecimalException
import sys

#phantonPath = "/home/jmartinz/00.py/phantomjs/phantomjs"
phantonPath = "../phantomjs/phantomjs"

contratacionPage = "https://contrataciondelestado.es/wps/portal/!ut/p/b1/lZDLDoIwEEU_aaYParssrwLxAVZQujEsjMH42Bi_30rcGCPq7CZz7pzkgoOWKC6kYBPYgDt3t37fXfvLuTs-die2PFlEUZpRlJbFSKdxXYvMrybwQOsB_DAah3xopdQh0YislqhFVUXK_0HFnvmARbwpmlLY3CDmWRpPaxKgoeI3_4jgxW_sjPhzwkRAkRhLn_mPAvqn_13wJb8GNyBjDQzAWMXjEgrz7HLaQeuxyVY3SaVzxXARLj1WlLNVaShB5LCCNoGTO6Z-VH7g3R2UoLEz/dl4/d5/L2dBISEvZ0FBIS9nQSEh/pw/Z7_AVEQAI930OBRD02JPMTPG21004/act/id=0/p=javax.servlet.include.path_info=QCPjspQCPbusquedaQCPBusquedaVIS_UOE.jsp/299420689304/-/"
#contratacionPage="https://contrataciondelestado.es"


class detalleContrato():
    """Retrieves the details of a public contract from contrataciondelestado.es,
    looked up by tender file number and contracting body.

    numExpediente: tender file number (numero de expediente)
    OrgContratacion: contracting body (organo de contratacion)
    driverType: 1 (Firefox, interactive) / 2 (PhantomJS, headless)
    """
    driver = ""
    driverType = 1
    estadoLic = ""
    procedimiento = ""
    enlacelic = ''
    codigocpv = ''
    resultado = ''
    adjudicatario = ''
    numlicitadores = 0
    impadjudicacion = ''

    def __init__(self, numExpediente, OrgContratacion, driverType=1):
        self.driverType = driverType
        self.numExpediente = numExpediente
        self.OrgContratacion = OrgContratacion
        if driverType == 1:
            self.driver = webdriver.Firefox()
        elif driverType == 2:
            self.driver = webdriver.PhantomJS(phantonPath, service_args=['--ignore-ssl-errors=true'])
            self.driver.set_window_size(1120, 550)
        self.extraeDetalles()

    def cargaPagina(self):
        # Load the search page
        if self.driverType == 2:
            self.driver.implicitly_wait(10)
            self.driver.set_page_load_timeout(10)
        try:
            self.driver.get(contratacionPage)
        except TimeoutException as e:
            # Handle the timeout here; for now just report it
            print(e)

    def debugPhanton(self):
        # Dump the page source to check that PhantomJS loaded the page
        self.cargaPagina()
        print(self.driver.page_source)

    def extraeDetalles(self):
        self.cargaPagina()

        # Enter the tender file number
        contrato = self.driver.find_element_by_id('viewns_Z7_AVEQAI930OBRD02JPMTPG21004_:form1:text71ExpMAQ')
        contrato.send_keys(self.numExpediente)

        # Enter the contracting body
        orgcont = self.driver.find_element_by_id('viewns_Z7_AVEQAI930OBRD02JPMTPG21004_:form1:texoorganoMAQ')
        orgcont.send_keys(self.OrgContratacion)

        # Click the search button
        self.driver.find_element_by_id('viewns_Z7_AVEQAI930OBRD02JPMTPG21004_:form1:button1').click()

        # Open the result link (only works for the first result, which is the case here)
        self.driver.find_element_by_id('viewns_Z7_AVEQAI930OBRD02JPMTPG21004_:form1:enlaceExpediente_0').click()

        # Read the basic details
        self.estadoLic = self.driver.find_element_by_id('viewns_Z7_AVEQAI930OBRD02JPMTPG21006_:form1:text_Estado').text
        self.procedimiento = self.driver.find_element_by_id('viewns_Z7_AVEQAI930OBRD02JPMTPG21006_:form1:text_Procedimiento').text
        self.enlacelic = self.driver.find_element_by_id('viewns_Z7_AVEQAI930OBRD02JPMTPG21006_:form1:text_EnlaceLicPLACE').text
        self.codigocpv = self.driver.find_element_by_id('viewns_Z7_AVEQAI930OBRD02JPMTPG21006_:form1:text_CPV').text

        # Depending on the state of the tender, the following elements may or may not exist
        try:
            self.resultado = self.driver.find_element_by_id('viewns_Z7_AVEQAI930OBRD02JPMTPG21006_:form1:text_Resultado').text
            self.adjudicatario = self.driver.find_element_by_id('viewns_Z7_AVEQAI930OBRD02JPMTPG21006_:form1:text_Adjudicatario').text
            importe_text = self.driver.find_element_by_id('viewns_Z7_AVEQAI930OBRD02JPMTPG21006_:form1:text_ImporteAdjudicacion').text.replace(".", "").replace(",", ".")
            try:
                self.impadjudicacion = Decimal(importe_text.strip(' "'))
            except (ValueError, TypeError, DecimalException) as e:
                self.impadjudicacion = 0
            numlicitadores_text = self.driver.find_element_by_id('viewns_Z7_AVEQAI930OBRD02JPMTPG21006_:form1:text_NumeroLicitadores').text
            try:
                self.numlicitadores = int(numlicitadores_text)
            except ValueError:
                self.numlicitadores = 0
            print("numlic= ", self.numlicitadores)
        except NoSuchElementException:
            # Award details are not published yet; fall back to empty values
            self.resultado = ''
            self.adjudicatario = ''
            self.numlicitadores = 0
            self.impadjudicacion = ''

        # Scrape the published documents from the page
        html_page = self.driver.page_source
        soup = BeautifulSoup(html_page, "html5lib")
        self.Documento = {}
        for row in soup.findAll("tr", {'class': ['rowClass1', 'rowClass2']}):
            try:
                fechadoc = datetime.strptime(row.find("td", {'class': 'fechaPubLeft'}).text, '%d/%m/%Y %H:%M:%S')
                tipodoc = row.find("td", {'class': 'tipoDocumento'}).text
                docs = row.find("td", {'class': 'documentosPub'}).findAll('div')
                enlacedoc = docs[0].find('a', href=True)['href']
                self.Documento[tipodoc] = [fechadoc, enlacedoc]
            except:
                # Additional documents use different element ids
                try:
                    fechadoc = datetime.strptime(row.find(id='viewns_Z7_AVEQAI930OBRD02JPMTPG21006_:form1:TableEx1_Aux:0:textSfecha1PadreGen').text, '%d/%m/%Y %H:%M:%S')
                    tipodoc = row.find(id='viewns_Z7_AVEQAI930OBRD02JPMTPG21006_:form1:TableEx1_Aux:0:textStipo1PadreGen').text
                    enlacedoc = row.find(id='viewns_Z7_AVEQAI930OBRD02JPMTPG21006_:form1:TableEx1_Aux:0:linkVerDocPadreGen')['href']
                    self.Documento[tipodoc] = [fechadoc, enlacedoc]
                except:
                    pass

        # Close the driver
        self.driver.quit()


# Quick manual test
def main(nExp, orgCon):
    # detalles=detalleContrato(numExpediente = u'2015/213/00008', OrgContratacion=u'Secretaría General de la Agencia Española de Medicamentos y Productos Sanitarios', driverType=2)
    # detalles=detalleContrato(numExpediente = u'CR0228/2012', OrgContratacion=u'Secretaría General del Instituto de Salud Carlos III', driverType=2)
    detalles = detalleContrato(numExpediente=nExp, OrgContratacion=orgCon, driverType=2)
    print(detalles.estadoLic)
    print(detalles.procedimiento)
    print(detalles.enlacelic)
    print(detalles.codigocpv)
    print(detalles.resultado)
    print(detalles.adjudicatario)
    print(detalles.numlicitadores)
    print(detalles.impadjudicacion)
    for docs in detalles.Documento.keys():
        print(docs, "-", detalles.Documento[docs][0], detalles.Documento[docs][1])


if __name__ == "__main__":
    if not len(sys.argv) == 3:
        print('Usage: pce_extrae_detalle_contrato.py numExpediente orgContratacion')
        sys.exit(1)
    sys.exit(main(sys.argv[1],  # TODO: validate (1 or 2)
                  sys.argv[2],  # TODO: validate (between 6 and 20)
                  ))
jmartinz/pyCrawler
10.contratacionE/pce_extrae_detalle_contrato.py
Python
apache-2.0
7,393
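The scraper above normalises Spanish-formatted award amounts (thousands separated by '.' and decimals by ',') before converting them to Decimal. A minimal standalone sketch of that conversion, using a hypothetical helper name rather than the scraper's own API:

from decimal import Decimal, DecimalException

def parse_importe(text):
    # '1.234,56' -> '1234.56' -> Decimal('1234.56'); falls back to 0 on bad input
    normalized = text.replace(".", "").replace(",", ".").strip(' "')
    try:
        return Decimal(normalized)
    except (ValueError, TypeError, DecimalException):
        return Decimal(0)

print(parse_importe('1.234,56'))   # 1234.56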
from python_kemptech_api import * # Specify the LoadMaster connection credentials here: loadmaster_ip = "" username = "" password = "" vs_ip_1 = "" vs_ip_2 = "" rs_ip_1 = "" rs_ip_2 = "" vs_port = "" rs_port = "" class RealServerPool(object): healthcheck_parameters = [ "checktype", "checkport", "checkurl", "checkheaders", "checkuse1_1", "checkuseget", "checkpostdata", "checkpattern", "checkcodes", "matchlen", "enhancedhealthchecks", "rsminimum" ] rs_parameters = [ "enable", "forward", "weight", "limit", "critical", "follow" ] def __init__(self, rs_list=None, vs=None): if rs_list is not None: self.rs = [] for rs in rs_list: if isinstance(rs, RealServer): self.rs.append(rs) else: ip, port = rs.split(":") mock_lm = {"endpoint": "", "ip_address": "", "vs": ""} self.rs.append(RealServer(mock_lm, ip, port)) self.checktype = None self.checkport = None self.checkurl = None self.checkheaders = None self.checkuse1_1 = None self.checkuseget = None self.checkpostdata = None self.checkpattern = None self.checkcodes = None self.matchlen = None self.enhancedhealthchecks = None self.rsminimum = None elif vs is not None: self.rs = vs.servers.values() self.checktype = vs.checktype self.checkport = vs.checkport self.checkurl = vs.checkurl self.checkheaders = vs.checkheaders self.checkuse1_1 = vs.checkuse1_1 self.checkuseget = vs.checkuseget self.checkpostdata = vs.checkpostdata self.checkpattern = vs.checkpattern self.checkcodes = vs.checkcodes self.matchlen = vs.matchlen self.enhancedhealthchecks = vs.enhancedhealthchecks self.rsminimum = vs.rsminimum def apply(self, vs): [rs.delete() for rs in vs.servers.values()] for rs in self.rs: new_rs = vs.create_real_server(rs.rs, rs.rsport) # Apply other settings new_rs.save() for attr in self.rs_parameters: print("attr: {}".format(attr)) if hasattr(rs, attr) and rs.__getattribute__(attr) is not None: print("set attr: {}={}".format(attr, rs.__getattribute__(attr))) new_rs.__setattr__(attr, rs.__getattribute__(attr)) new_rs.update() for attr in self.healthcheck_parameters: print("attr: {}".format(attr)) if hasattr(self, attr) and self.__getattribute__(attr) is not None: print("set attr: {}={}".format(attr, self.__getattribute__(attr))) vs.__setattr__(attr, self.__getattribute__(attr)) vs.update() # Create the LoadMaster object lm = LoadMaster(loadmaster_ip, username, password) # Delete all the existing VSs [vs.delete() for vs in lm.vs.values()] # Create a new VS vs = lm.create_virtual_service(vs_ip_1, vs_port, "tcp") vs.save() # Configure some healthcheck options vs.checktype = 'HTTPS' vs.checkport = "8443" vs.update() # Add and save the first real server rs1 = vs.create_real_server(rs_ip_1, rs_port) rs1.save() # Configure the weighting rs1.weight = 200 rs1.update() # Add and save the second real server rs2 = vs.create_real_server(rs_ip_2, rs_port) rs2.save() # Disable the server rs2.enable = 'N' rs2.update() # This will create a pool based on the VS and healthcheck settings of the VS pool1 = RealServerPool(vs=vs) # Create the second VS vs2 = lm.create_virtual_service(vs_ip_2, vs_port, "tcp") vs2.save() # Apply the pool to the new VS. The RS and healthcheck settings will be applied pool1.apply(vs2) # Alternately, you can use a list of IP and ports to create a pool rs_list = ["172.22.100.6:88", "172.22.100.7:88", "172.22.100.8:88", "172.22.100.9:88"] pool2 = RealServerPool(rs_list) # You can also apply healthcheck settings directly to a pool pool2.checktype = "ICMP" # Apply the pool to both VSs pool2.apply(vs) pool2.apply(vs2)
KEMPtechnologies/python-kemptech-api
examples/real_server_pooling.py
Python
apache-2.0
4,377
# Copyright (c) 2015 Mellanox Technologies, Ltd # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from neutron_lib import constants as common_constants from neutron_lib import context from neutron_lib.db import constants as db_consts from neutron_lib.services.qos import constants as qos_consts from oslo_utils import uuidutils from neutron.agent.l2.extensions import qos from neutron.agent.l2.extensions import qos_linux from neutron.api.rpc.callbacks.consumer import registry from neutron.api.rpc.callbacks import events from neutron.api.rpc.callbacks import resources from neutron.api.rpc.handlers import resources_rpc from neutron import manager from neutron.objects.qos import policy from neutron.objects.qos import rule from neutron.plugins.ml2.drivers.openvswitch.agent import ( ovs_agent_extension_api as ovs_ext_api) from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.ovs_ofctl import ( ovs_bridge) from neutron.tests import base BASE_TEST_POLICY = {'context': None, 'name': 'test1', 'id': uuidutils.generate_uuid()} TEST_POLICY = policy.QosPolicy(**BASE_TEST_POLICY) TEST_POLICY_DESCR = policy.QosPolicy(description='fake_descr', **BASE_TEST_POLICY) TEST_POLICY2 = policy.QosPolicy(context=None, name='test2', id=uuidutils.generate_uuid()) TEST_PORT = {'port_id': 'test_port_id', 'qos_policy_id': TEST_POLICY.id} TEST_PORT2 = {'port_id': 'test_port_id_2', 'qos_policy_id': TEST_POLICY2.id} FAKE_RULE_ID = uuidutils.generate_uuid() FAKE_RULE_ID_2 = uuidutils.generate_uuid() REALLY_FAKE_RULE_ID = uuidutils.generate_uuid() class FakeDriver(qos_linux.QosLinuxAgentDriver): SUPPORTED_RULES = { qos_consts.RULE_TYPE_BANDWIDTH_LIMIT: { qos_consts.MAX_KBPS: { 'type:range': [0, db_consts.DB_INTEGER_MAX_VALUE]}, qos_consts.MAX_BURST: { 'type:range': [0, db_consts.DB_INTEGER_MAX_VALUE]}, qos_consts.DIRECTION: { 'type:values': [common_constants.EGRESS_DIRECTION, common_constants.INGRESS_DIRECTION]} }, } def __init__(self): super(FakeDriver, self).__init__() self.create_bandwidth_limit = mock.Mock() self.update_bandwidth_limit = mock.Mock() self.delete_bandwidth_limit = mock.Mock() self.delete_bandwidth_limit_ingress = mock.Mock() def initialize(self): pass class QosFakeRule(rule.QosRule): rule_type = 'fake_type' class QosAgentDriverTestCase(base.BaseTestCase): def setUp(self): super(QosAgentDriverTestCase, self).setUp() self.driver = FakeDriver() self.policy = TEST_POLICY self.egress_bandwidth_limit_rule = ( rule.QosBandwidthLimitRule( context=None, id=FAKE_RULE_ID, qos_policy_id=self.policy.id, max_kbps=100, max_burst_kbps=200, direction=common_constants.EGRESS_DIRECTION)) self.ingress_bandwidth_limit_rule = ( rule.QosBandwidthLimitRule( context=None, id=FAKE_RULE_ID_2, qos_policy_id=self.policy.id, max_kbps=100, max_burst_kbps=200, direction=common_constants.INGRESS_DIRECTION)) self.policy.rules = [self.egress_bandwidth_limit_rule, self.ingress_bandwidth_limit_rule] self.port = {'qos_policy_id': None, 
'network_qos_policy_id': None, 'device_owner': 'random-device-owner'} self.fake_rule = QosFakeRule(context=None, id=REALLY_FAKE_RULE_ID, qos_policy_id=self.policy.id) def test_create(self): self.driver.create(self.port, self.policy) self.driver.create_bandwidth_limit.assert_has_calls([ mock.call(self.port, self.egress_bandwidth_limit_rule), mock.call(self.port, self.ingress_bandwidth_limit_rule) ]) def test_update(self): self.driver.update(self.port, self.policy) self.driver.update_bandwidth_limit.assert_has_calls([ mock.call(self.port, self.egress_bandwidth_limit_rule), mock.call(self.port, self.ingress_bandwidth_limit_rule) ]) def test_delete(self): self.driver.delete(self.port, self.policy) self.driver.delete_bandwidth_limit.assert_called_with(self.port) self.driver.delete_bandwidth_limit_ingress.assert_called_with( self.port) def test_delete_no_policy(self): self.driver.delete(self.port, qos_policy=None) self.driver.delete_bandwidth_limit.assert_called_with(self.port) self.driver.delete_bandwidth_limit_ingress.assert_called_with( self.port) def test__iterate_rules_with_unknown_rule_type(self): self.policy.rules.append(self.fake_rule) rules = list(self.driver._iterate_rules(self.policy.rules)) self.assertEqual(2, len(rules)) self.assertIsInstance(rules[0], rule.QosBandwidthLimitRule) self.assertIsInstance(rules[1], rule.QosBandwidthLimitRule) def test__handle_update_create_rules_checks_should_apply_to_port(self): self.egress_bandwidth_limit_rule.should_apply_to_port = mock.Mock( return_value=False) self.ingress_bandwidth_limit_rule.should_apply_to_port = mock.Mock( return_value=False) self.driver.create(self.port, self.policy) self.assertFalse(self.driver.create_bandwidth_limit.called) self.egress_bandwidth_limit_rule.should_apply_to_port = mock.Mock( return_value=True) self.ingress_bandwidth_limit_rule.should_apply_to_port = mock.Mock( return_value=True) self.driver.create(self.port, self.policy) self.assertTrue(self.driver.create_bandwidth_limit.called) def test__get_max_burst_value(self): rule = self.egress_bandwidth_limit_rule rule.max_burst_kbps = 0 expected_burst = rule.max_kbps * qos_consts.DEFAULT_BURST_RATE self.assertEqual( expected_burst, self.driver._get_egress_burst_value(rule) ) def test__rule_type_has_ingress_direction(self): self.assertTrue( self.driver._rule_type_has_ingress_direction( qos_consts.RULE_TYPE_BANDWIDTH_LIMIT)) # Should return False for rule type other than # RULE_TYPE_BANDWIDTH_LIMIT supported_rules = { qos_consts.RULE_TYPE_DSCP_MARKING: { qos_consts.DSCP_MARK: { 'type:values': common_constants.VALID_DSCP_MARKS} } } with mock.patch.dict(self.driver.SUPPORTED_RULES, supported_rules): self.assertFalse( self.driver._rule_type_has_ingress_direction( qos_consts.RULE_TYPE_DSCP_MARKING)) # Should return False for rule type RULE_TYPE_BANDWIDTH_LIMIT but # without INGRESS_DIRECTION in supported values supported_rules = { qos_consts.RULE_TYPE_BANDWIDTH_LIMIT: { 'type:values': [common_constants.EGRESS_DIRECTION] } } with mock.patch.dict(self.driver.SUPPORTED_RULES, supported_rules): self.assertFalse( self.driver._rule_type_has_ingress_direction( qos_consts.RULE_TYPE_BANDWIDTH_LIMIT)) def test__rule_is_ingress_direction(self): self.assertFalse( self.driver._rule_is_ingress_direction( self.egress_bandwidth_limit_rule)) self.assertFalse( self.driver._rule_is_ingress_direction( self.fake_rule)) self.assertTrue( self.driver._rule_is_ingress_direction( self.ingress_bandwidth_limit_rule)) class QosExtensionBaseTestCase(base.BaseTestCase): def setUp(self): 
super(QosExtensionBaseTestCase, self).setUp() conn_patcher = mock.patch( 'neutron.agent.ovsdb.impl_idl._connection') conn_patcher.start() self.addCleanup(conn_patcher.stop) self.qos_ext = qos.QosAgentExtension() self.context = context.get_admin_context() self.connection = mock.Mock() self.agent_api = ovs_ext_api.OVSAgentExtensionAPI( ovs_bridge.OVSAgentBridge('br-int'), ovs_bridge.OVSAgentBridge('br-tun')) self.qos_ext.consume_api(self.agent_api) # Don't rely on used driver mock.patch.object( manager.NeutronManager, 'load_class_for_provider', return_value=lambda: mock.Mock( spec=qos_linux.QosLinuxAgentDriver)).start() class QosExtensionRpcTestCase(QosExtensionBaseTestCase): def setUp(self): super(QosExtensionRpcTestCase, self).setUp() self.qos_ext.initialize( self.connection, constants.EXTENSION_DRIVER_TYPE) self.pull_mock = mock.patch.object( self.qos_ext.resource_rpc, 'pull', return_value=TEST_POLICY).start() def _create_test_port_dict(self, qos_policy_id=None): return {'port_id': uuidutils.generate_uuid(), 'qos_policy_id': qos_policy_id or TEST_POLICY.id} def test_handle_port_with_no_policy(self): port = self._create_test_port_dict() del port['qos_policy_id'] self.qos_ext._process_reset_port = mock.Mock() self.qos_ext.handle_port(self.context, port) self.qos_ext._process_reset_port.assert_called_with(port) def test_handle_unknown_port(self): port = self._create_test_port_dict() qos_policy_id = port['qos_policy_id'] port_id = port['port_id'] self.qos_ext.handle_port(self.context, port) # we make sure the underlying qos driver is called with the # right parameters self.qos_ext.qos_driver.create.assert_called_once_with( port, TEST_POLICY) self.assertEqual(port, self.qos_ext.policy_map.qos_policy_ports[qos_policy_id][port_id]) self.assertIn(port_id, self.qos_ext.policy_map.port_policies) self.assertEqual(TEST_POLICY, self.qos_ext.policy_map.known_policies[qos_policy_id]) def test_handle_known_port(self): port_obj1 = self._create_test_port_dict() port_obj2 = dict(port_obj1) self.qos_ext.handle_port(self.context, port_obj1) self.qos_ext.qos_driver.reset_mock() self.qos_ext.handle_port(self.context, port_obj2) self.assertFalse(self.qos_ext.qos_driver.create.called) def test_handle_known_port_change_policy_id(self): port = self._create_test_port_dict() self.qos_ext.handle_port(self.context, port) self.qos_ext.resource_rpc.pull.reset_mock() port['qos_policy_id'] = uuidutils.generate_uuid() self.qos_ext.handle_port(self.context, port) self.pull_mock.assert_called_once_with( self.context, resources.QOS_POLICY, port['qos_policy_id']) def test_handle_diff_ports_same_policy_id(self): port_obj1 = self._create_test_port_dict() port_obj2 = self._create_test_port_dict() self.qos_ext.handle_port(self.context, port_obj1) self.pull_mock.assert_called_once_with( self.context, resources.QOS_POLICY, port_obj1['qos_policy_id']) self.assertIsNotNone( self.qos_ext.policy_map.get_port_policy(port_obj1)) self.assertIsNone( self.qos_ext.policy_map.get_port_policy(port_obj2)) self.qos_ext.resource_rpc.pull.reset_mock() self.qos_ext.handle_port(self.context, port_obj2) self.assertFalse(self.pull_mock.called) self.assertIsNotNone( self.qos_ext.policy_map.get_port_policy(port_obj2)) self.assertEqual( self.qos_ext.policy_map.get_port_policy(port_obj1), self.qos_ext.policy_map.get_port_policy(port_obj2)) def test_delete_known_port(self): port = self._create_test_port_dict() self.qos_ext.handle_port(self.context, port) self.qos_ext.qos_driver.reset_mock() self.qos_ext.delete_port(self.context, port) 
self.qos_ext.qos_driver.delete.assert_called_with(port) self.assertIsNone(self.qos_ext.policy_map.get_port_policy(port)) def test_delete_unknown_port(self): port = self._create_test_port_dict() self.qos_ext.delete_port(self.context, port) self.assertTrue(self.qos_ext.qos_driver.delete.called) self.assertIsNone(self.qos_ext.policy_map.get_port_policy(port)) def test__handle_notification_ignores_all_event_types_except_updated(self): with mock.patch.object( self.qos_ext, '_process_update_policy') as update_mock: for event_type in set(events.VALID) - {events.UPDATED}: self.qos_ext._handle_notification(mock.Mock(), 'QOS', object(), event_type) self.assertFalse(update_mock.called) def test__handle_notification_passes_update_events(self): with mock.patch.object( self.qos_ext, '_process_update_policy') as update_mock: policy_obj = mock.Mock() self.qos_ext._handle_notification(mock.Mock(), 'QOS', [policy_obj], events.UPDATED) update_mock.assert_called_with(policy_obj) def test__process_update_policy(self): port1 = self._create_test_port_dict(qos_policy_id=TEST_POLICY.id) port2 = self._create_test_port_dict(qos_policy_id=TEST_POLICY2.id) self.qos_ext.policy_map.set_port_policy(port1, TEST_POLICY) self.qos_ext.policy_map.set_port_policy(port2, TEST_POLICY2) self.qos_ext._policy_rules_modified = mock.Mock(return_value=True) policy_obj = mock.Mock() policy_obj.id = port1['qos_policy_id'] self.qos_ext._process_update_policy(policy_obj) self.qos_ext.qos_driver.update.assert_called_with(port1, policy_obj) self.qos_ext.qos_driver.update.reset_mock() policy_obj.id = port2['qos_policy_id'] self.qos_ext._process_update_policy(policy_obj) self.qos_ext.qos_driver.update.assert_called_with(port2, policy_obj) def test__process_update_policy_descr_not_propagated_into_driver(self): port = self._create_test_port_dict(qos_policy_id=TEST_POLICY.id) self.qos_ext.policy_map.set_port_policy(port, TEST_POLICY) self.qos_ext._policy_rules_modified = mock.Mock(return_value=False) self.qos_ext._process_update_policy(TEST_POLICY_DESCR) self.qos_ext._policy_rules_modified.assert_called_with(TEST_POLICY, TEST_POLICY_DESCR) self.assertFalse(self.qos_ext.qos_driver.delete.called) self.assertFalse(self.qos_ext.qos_driver.update.called) self.assertEqual(TEST_POLICY_DESCR, self.qos_ext.policy_map.get_policy(TEST_POLICY.id)) def test__process_update_policy_not_known(self): self.qos_ext._policy_rules_modified = mock.Mock() self.qos_ext._process_update_policy(TEST_POLICY_DESCR) self.assertFalse(self.qos_ext._policy_rules_modified.called) self.assertFalse(self.qos_ext.qos_driver.delete.called) self.assertFalse(self.qos_ext.qos_driver.update.called) self.assertIsNone(self.qos_ext.policy_map.get_policy( TEST_POLICY_DESCR.id)) def test__process_reset_port(self): port1 = self._create_test_port_dict(qos_policy_id=TEST_POLICY.id) port2 = self._create_test_port_dict(qos_policy_id=TEST_POLICY2.id) self.qos_ext.policy_map.set_port_policy(port1, TEST_POLICY) self.qos_ext.policy_map.set_port_policy(port2, TEST_POLICY2) self.qos_ext._process_reset_port(port1) self.qos_ext.qos_driver.delete.assert_called_with(port1) self.assertIsNone(self.qos_ext.policy_map.get_port_policy(port1)) self.assertIsNotNone(self.qos_ext.policy_map.get_port_policy(port2)) self.qos_ext.qos_driver.delete.reset_mock() self.qos_ext._process_reset_port(port2) self.qos_ext.qos_driver.delete.assert_called_with(port2) self.assertIsNone(self.qos_ext.policy_map.get_port_policy(port2)) class QosExtensionInitializeTestCase(QosExtensionBaseTestCase): @mock.patch.object(registry, 
'register') @mock.patch.object(resources_rpc, 'ResourcesPushRpcCallback') def test_initialize_subscribed_to_rpc(self, rpc_mock, subscribe_mock): self.qos_ext.initialize( self.connection, constants.EXTENSION_DRIVER_TYPE) self.connection.create_consumer.assert_has_calls( [mock.call( resources_rpc.resource_type_versioned_topic(resource_type), [rpc_mock()], fanout=True) for resource_type in self.qos_ext.SUPPORTED_RESOURCE_TYPES] ) subscribe_mock.assert_called_with(mock.ANY, resources.QOS_POLICY) class QosExtensionReflushRulesTestCase(QosExtensionBaseTestCase): def setUp(self): super(QosExtensionReflushRulesTestCase, self).setUp() self.qos_ext.initialize( self.connection, constants.EXTENSION_DRIVER_TYPE) self.pull_mock = mock.patch.object( self.qos_ext.resource_rpc, 'pull', return_value=TEST_POLICY).start() self.policy = policy.QosPolicy(**BASE_TEST_POLICY) self.rule = ( rule.QosBandwidthLimitRule(context=None, id=FAKE_RULE_ID, qos_policy_id=self.policy.id, max_kbps=100, max_burst_kbps=10)) self.policy.rules = [self.rule] self.port = {'port_id': uuidutils.generate_uuid(), 'qos_policy_id': TEST_POLICY.id} self.new_policy = policy.QosPolicy(description='descr', **BASE_TEST_POLICY) def test_is_reflush_required_change_policy_descr(self): self.qos_ext.policy_map.set_port_policy(self.port, self.policy) self.new_policy.rules = [self.rule] self.assertFalse(self.qos_ext._policy_rules_modified(self.policy, self.new_policy)) def test_is_reflush_required_change_policy_rule(self): self.qos_ext.policy_map.set_port_policy(self.port, self.policy) updated_rule = (rule.QosBandwidthLimitRule(context=None, id=FAKE_RULE_ID, qos_policy_id=self.policy.id, max_kbps=200, max_burst_kbps=20)) self.new_policy.rules = [updated_rule] self.assertTrue(self.qos_ext._policy_rules_modified(self.policy, self.new_policy)) def test_is_reflush_required_remove_rules(self): self.qos_ext.policy_map.set_port_policy(self.port, self.policy) self.new_policy.rules = [] self.assertTrue(self.qos_ext._policy_rules_modified(self.policy, self.new_policy)) def test_is_reflush_required_add_rules(self): self.qos_ext.policy_map.set_port_policy(self.port, self.policy) self.new_policy.rules = [self.rule] fake_rule = QosFakeRule(context=None, id=REALLY_FAKE_RULE_ID, qos_policy_id=self.policy.id) self.new_policy.rules.append(fake_rule) self.assertTrue(self.qos_ext._policy_rules_modified(self.policy, self.new_policy)) class PortPolicyMapTestCase(base.BaseTestCase): def setUp(self): super(PortPolicyMapTestCase, self).setUp() self.policy_map = qos.PortPolicyMap() def test_update_policy(self): self.policy_map.update_policy(TEST_POLICY) self.assertEqual(TEST_POLICY, self.policy_map.known_policies[TEST_POLICY.id]) def _set_ports(self): self.policy_map.set_port_policy(TEST_PORT, TEST_POLICY) self.policy_map.set_port_policy(TEST_PORT2, TEST_POLICY2) def test_set_port_policy(self): self._set_ports() self.assertEqual(TEST_POLICY, self.policy_map.known_policies[TEST_POLICY.id]) self.assertIn(TEST_PORT['port_id'], self.policy_map.qos_policy_ports[TEST_POLICY.id]) def test_get_port_policy(self): self._set_ports() self.assertEqual(TEST_POLICY, self.policy_map.get_port_policy(TEST_PORT)) self.assertEqual(TEST_POLICY2, self.policy_map.get_port_policy(TEST_PORT2)) def test_get_ports(self): self._set_ports() self.assertEqual([TEST_PORT], list(self.policy_map.get_ports(TEST_POLICY))) self.assertEqual([TEST_PORT2], list(self.policy_map.get_ports(TEST_POLICY2))) def test_clean_by_port(self): self._set_ports() self.policy_map.clean_by_port(TEST_PORT) 
self.assertNotIn(TEST_POLICY.id, self.policy_map.known_policies) self.assertNotIn(TEST_PORT['port_id'], self.policy_map.port_policies) self.assertIn(TEST_POLICY2.id, self.policy_map.known_policies) def test_clean_by_port_for_unknown_port(self): self.policy_map._clean_policy_info = mock.Mock() self.policy_map.clean_by_port(TEST_PORT) self.policy_map._clean_policy_info.assert_not_called() def test_has_policy_changed(self): self._set_ports() self.assertTrue( self.policy_map.has_policy_changed(TEST_PORT, 'a_new_policy_id')) self.assertFalse( self.policy_map.has_policy_changed(TEST_PORT, TEST_POLICY.id))
noironetworks/neutron
neutron/tests/unit/agent/l2/extensions/test_qos.py
Python
apache-2.0
22,770
#!/usr/bin/env python # # Copyright 2007 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """Python DB-API (PEP 249) interface to SQL Service. http://www.python.org/dev/peps/pep-0249/ """ import collections import datetime import exceptions import os import time import types from google.storage.speckle.proto import client_error_code_pb2 from google.storage.speckle.proto import client_pb2 from google.storage.speckle.proto import jdbc_type from google.storage.speckle.proto import sql_pb2 from google.storage.speckle.python import api from google.storage.speckle.python.api import converters __path__ = api.__path__ OAUTH_CREDENTIALS_PATH = os.path.expanduser('~/.googlesql_oauth2.dat') apilevel = '2.0' threadsafety = 1 paramstyle = 'format' version_info = (1, 2, 2, 'final', 0) class Warning(StandardError, exceptions.Warning): pass class Error(StandardError): pass class InterfaceError(Error): pass class DatabaseError(Error): pass class DataError(DatabaseError): pass class OperationalError(DatabaseError): pass class IntegrityError(DatabaseError): pass class InternalError(DatabaseError): pass class ProgrammingError(DatabaseError): pass class NotSupportedError(DatabaseError): pass Blob = converters.Blob def Date(year, month, day): return datetime.date(year, month, day) def Time(hour, minute, second): return datetime.time(hour, minute, second) def Timestamp(year, month, day, hour, minute, second): return datetime.datetime(year, month, day, hour, minute, second) def DateFromTicks(ticks): return Date(*time.localtime(ticks)[:3]) def TimeFromTicks(ticks): return Time(*time.localtime(ticks)[3:6]) def TimestampFromTicks(ticks): return Timestamp(*time.localtime(ticks)[:6]) def Binary(string): return Blob(string) STRING = unicode BINARY = Blob NUMBER = float DATETIME = datetime.datetime ROWID = int _PYTHON_TYPE_TO_JDBC_TYPE = { types.IntType: jdbc_type.INTEGER, types.LongType: jdbc_type.INTEGER, types.FloatType: jdbc_type.DOUBLE, types.BooleanType: jdbc_type.BOOLEAN, types.StringType: jdbc_type.VARCHAR, types.UnicodeType: jdbc_type.VARCHAR, datetime.date: jdbc_type.DATE, datetime.datetime: jdbc_type.TIMESTAMP, datetime.time: jdbc_type.TIME, converters.Blob: jdbc_type.BLOB, } def _ConvertFormatToQmark(statement, args): """Replaces '%s' with '?'. The server actually supports '?' for bind parameters, but the MySQLdb implementation of PEP 249 uses '%s'. Most clients don't bother checking the paramstyle member and just hardcode '%s' in their statements. This function converts a format-style statement into a qmark-style statement. Args: statement: A string, a SQL statement. args: A sequence of arguments matching the statement's bind variables, if any. Returns: The converted string. """ if args: qmarks = tuple('?' * len(args)) return statement % qmarks return statement class Cursor(object): def __init__(self, conn, use_dict_cursor=False): """Initializer. Args: conn: A Connection object. use_dict_cursor: Optional boolean to convert each row of results into a dictionary. Defaults to False. 
""" self._conn = conn self._description = None self._rowcount = -1 self.arraysize = 1 self._open = True self.lastrowid = None self._use_dict_cursor = use_dict_cursor @property def description(self): return self._description @property def rowcount(self): return self._rowcount def close(self): """Marks the cursor as unusable for further operations.""" self._CheckOpen() self._open = False def _GetJdbcTypeForArg(self, arg): """Get the JDBC type which corresponds to the given Python object type.""" arg_jdbc_type = _PYTHON_TYPE_TO_JDBC_TYPE.get(type(arg)) if arg_jdbc_type: return arg_jdbc_type for python_t, jdbc_t in _PYTHON_TYPE_TO_JDBC_TYPE.items(): if isinstance(arg, python_t): return jdbc_t try: return self._GetJdbcTypeForArg(arg[0]) except TypeError: raise TypeError('unknown type') def _EncodeVariable(self, arg): """Converts a variable to a type and value. Args: arg: Any tuple, string, numeric, or datetime object. Returns: A (int, str) tuple, representing a JDBC type and encoded value. Raises: TypeError: The argument is not a recognized type. """ arg_jdbc_type = self._GetJdbcTypeForArg(arg) value = self._conn.encoders[type(arg)](arg, self._conn.encoders) return arg_jdbc_type, value def _DecodeVariable(self, datatype, value): """Converts a type and value to a variable. Args: datatype: An integer. value: A string. Returns: An object of some appropriate type. Raises: InterfaceError: datatype is not a recognized JDBC type. ValueError: The value could not be parsed. """ converter = self._conn.converter.get(datatype) if converter is None: raise InterfaceError('unknown JDBC type %d' % datatype) return converter(value) def execute(self, statement, args=None): """Prepares and executes a database operation (query or command). Args: statement: A string, a SQL statement. args: A sequence of arguments matching the statement's bind variables, if any. Raises: InterfaceError: Unknown type used as a bind variable. DatabaseError: A SQL exception occurred. OperationalError: RPC problem. """ self._CheckOpen() request = sql_pb2.ExecRequest() request.options.include_generated_keys = True if args is not None: if not hasattr(args, '__iter__'): args = [args] for i, arg in enumerate(args): bv = request.bind_variable.add() bv.position = i + 1 if arg is None: bv.type = jdbc_type.NULL else: try: bv.type, bv.value = self._EncodeVariable(arg) except TypeError: raise InterfaceError('unknown type %s for arg %d' % (type(arg), i)) request.statement = _ConvertFormatToQmark(statement, args) response = self._conn.MakeRequest('Exec', request) result = response.result if result.HasField('sql_exception'): raise DatabaseError('%d: %s' % (result.sql_exception.code, result.sql_exception.message)) self._rows = collections.deque() if result.rows.columns: self._description = [] for column in result.rows.columns: self._description.append( (column.label, column.type, column.display_size, None, column.precision, column.scale, column.nullable)) else: self._description = None if result.rows.tuples: assert self._description, 'Column descriptions do not exist.' 
column_names = [col[0] for col in self._description] self._rowcount = len(result.rows.tuples) for tuple_proto in result.rows.tuples: row = [] nulls = set(tuple_proto.nulls) value_index = 0 for i, column_descr in enumerate(self._description): if i in nulls: row.append(None) else: row.append(self._DecodeVariable(column_descr[1], tuple_proto.values[value_index])) value_index += 1 if self._use_dict_cursor: assert len(column_names) == len(row) row = dict(zip(column_names, row)) else: row = tuple(row) self._rows.append(row) else: self._rowcount = result.rows_updated if result.generated_keys: self.lastrowid = long(result.generated_keys[-1]) def executemany(self, statement, seq_of_args): """Calls execute() for each value of seq_of_args. Args: statement: A string, a SQL statement. seq_of_args: A sequence, each entry of which is a sequence of arguments matching the statement's bind variables, if any. """ self._CheckOpen() rowcount = 0 for args in seq_of_args: self.execute(statement, args) rowcount += self.rowcount self._rowcount = rowcount def fetchone(self): """Fetches the next row of a query result set. Returns: A sequence, or None when no more data is available. Raises: InternalError: The cursor has been closed, or no statement has been executed yet. """ self._CheckOpen() if self._rowcount == -1: raise InternalError('fetchone() called before execute') try: return self._rows.popleft() except IndexError: return None def fetchmany(self, size=None): """Fetches the next set of rows of a query result. Args: size: The maximum number of rows to return; by default, self.arraysize. Returns: A sequence of sequences, or an empty sequence when no more data is available. Raises: InternalError: The cursor has been closed, or no statement has been executed yet. """ self._CheckOpen() if self._rowcount == -1: raise InternalError('fetchmany() called before execute') if size is None: size = self.arraysize if size >= len(self._rows): return self.fetchall() else: result = [] for _ in xrange(size): result.append(self._rows.popleft()) return tuple(result) def fetchall(self): """Fetches all remaining rows of a query result. Returns: A sequence of sequences, or an empty sequence when no more data is available. Raises: InternalError: The cursor has been closed, or no statement has been executed yet. """ self._CheckOpen() if self._rowcount == -1: raise InternalError('fetchall() called before execute') rows = self._rows self._rows = collections.deque() return tuple(rows) def setinputsizes(self, unused_sizes): self._CheckOpen() def setoutputsize(self, unused_size, unused_column=None): self._CheckOpen() def _CheckOpen(self): self._conn.CheckOpen() if not self._open: raise InternalError('cursor has been closed') def __iter__(self): return iter(self.fetchone, None) class Connection(object): def __init__(self, dsn, instance, database=None, user='root', password=None, deadline_seconds=30.0, conv=None, query_deadline_seconds=86400.0, retry_interval_seconds=30.0): """Creates a new SQL Service connection. Args: dsn: A string, the SQL Service job path or host:port. instance: A string, the SQL Service instance name, often a username. database: A string, semantics defined by the backend. user: A string, database user name. password: A string, database password. deadline_seconds: A float, request deadline in seconds. conv: A dict, maps types to a conversion function. See converters.py. query_deadline_seconds: A float, query deadline in seconds. retry_interval_seconds: A float, seconds to wait between each retry. 
Raises: OperationalError: Transport failure. DatabaseError: Error from SQL Service server. """ self._dsn = dsn self._instance = instance self._database = database self._user = user self._password = password self._deadline_seconds = deadline_seconds self._connection_id = None self._idempotent_request_id = 0 if not conv: conv = converters.conversions self._query_deadline_seconds = query_deadline_seconds self._retry_interval_seconds = retry_interval_seconds self.converter = {} self.encoders = {} for key, value in conv.items(): if isinstance(key, int): self.converter[key] = value else: self.encoders[key] = value self.OpenConnection() def OpenConnection(self): """Opens a connection to SQL Service.""" request = sql_pb2.OpenConnectionRequest() request.client_type = client_pb2.CLIENT_TYPE_PYTHON_DBAPI prop = request.property.add() prop.key = 'autoCommit' prop.value = 'false' if self._user: prop = request.property.add() prop.key = 'user' prop.value = self._user if self._password: prop = request.property.add() prop.key = 'password' prop.value = self._password if self._database: prop = request.property.add() prop.key = 'database' prop.value = self._database self.SetupClient() response = self.MakeRequest('OpenConnection', request) self._connection_id = response.connection_id def SetupClient(self): """Setup a transport client to communicate with rdbms. This is a template method to provide subclasses with a hook to perform any necessary client initialization while opening a connection to rdbms. """ pass def close(self): """Makes the connection and all its cursors unusable. The connection will be unusable from this point forward; an Error (or subclass) exception will be raised if any operation is attempted with the connection. """ self.CheckOpen() request = sql_pb2.CloseConnectionRequest() self.MakeRequest('CloseConnection', request) self._connection_id = None def CheckOpen(self): if self._connection_id is None: raise InternalError('connection has been closed') def commit(self): """Commits any pending transaction to the database. Raises: DatabaseError: A SQL exception occurred. OperationalError: RPC problem. """ self.CheckOpen() request = sql_pb2.ExecOpRequest() request.op.type = client_pb2.OpProto.COMMIT self.MakeRequest('ExecOp', request) def rollback(self): """Rolls back any pending transaction to the database. Raises: DatabaseError: A SQL exception occurred. OperationalError: RPC problem. """ self.CheckOpen() request = sql_pb2.ExecOpRequest() request.op.type = client_pb2.OpProto.ROLLBACK self.MakeRequest('ExecOp', request) def autocommit(self, value): """Changes whether there is an implicit commit after each statement. By default, transactions must be explicitly committed. Args: value: A boolean. Raises: DatabaseError: A SQL exception occurred. OperationalError: RPC problem. """ self.CheckOpen() request = sql_pb2.ExecOpRequest() request.op.type = client_pb2.OpProto.SET_AUTO_COMMIT request.op.auto_commit = value self.MakeRequest('ExecOp', request) def cursor(self, **kwargs): """Returns a cursor for the current connection. Args: **kwargs: Optional keyword args to pass into cursor. Returns: A Cursor object. """ return Cursor(self, **kwargs) def MakeRequest(self, stub_method, request): """Makes an ApiProxy request, and possibly raises an appropriate exception. Args: stub_method: A string, the name of the method to call. request: A protobuf; 'instance' and 'connection_id' will be set when available. Returns: A protobuf. Raises: DatabaseError: Error from SQL Service server. 
""" if self._instance: request.instance = self._instance if self._connection_id is not None: request.connection_id = self._connection_id if stub_method in ('Exec', 'ExecOp', 'GetMetadata'): self._idempotent_request_id += 1 request.request_id = self._idempotent_request_id response = self._MakeRetriableRequest(stub_method, request) else: response = self.MakeRequestImpl(stub_method, request) if (hasattr(response, 'sql_exception') and response.HasField('sql_exception')): raise DatabaseError('%d: %s' % (response.sql_exception.code, response.sql_exception.message)) return response def _MakeRetriableRequest(self, stub_method, request): """Makes a retriable request. Args: stub_method: A string, the name of the method to call. request: A protobuf. Returns: A protobuf. Raises: DatabaseError: Error from SQL Service server. """ absolute_deadline_seconds = time.clock() + self._query_deadline_seconds response = self.MakeRequestImpl(stub_method, request) if not response.HasField('sql_exception'): return response sql_exception = response.sql_exception if (sql_exception.application_error_code != client_error_code_pb2.SqlServiceClientError.ERROR_TIMEOUT): raise DatabaseError('%d: %s' % (sql_exception.code, sql_exception.message)) if time.clock() >= absolute_deadline_seconds: raise DatabaseError('%d: %s' % (sql_exception.code, sql_exception.message)) return self._Retry(stub_method, request.request_id, absolute_deadline_seconds) def _Retry(self, stub_method, request_id, absolute_deadline_seconds): """Retries request with the given request id. Continues to retry until either the deadline has expired or the response has been received. Args: stub_method: A string, the name of the original method that triggered the retry. request_id: An integer, the request id used in the original request absolute_deadline_seconds: An integer, absolute deadline in seconds. Returns: A protobuf. Raises: DatabaseError: If the ExecOpResponse contains a SqlException that it not related to retry. InternalError: If the ExceOpResponse is not valid. """ request = sql_pb2.ExecOpRequest() request.op.type = client_pb2.OpProto.RETRY request.op.request_id = request_id request.connection_id = self._connection_id request.instance = self._instance while True: seconds_remaining = absolute_deadline_seconds - time.clock() if seconds_remaining <= 0: raise InternalError('Request [%d] timed out' % (request_id)) time.sleep(min(self._retry_interval_seconds, seconds_remaining)) self._idempotent_request_id += 1 request.request_id = self._idempotent_request_id response = self.MakeRequestImpl('ExecOp', request) if not response.HasField('sql_exception'): return self._ConvertCachedResponse(stub_method, response) sql_exception = response.sql_exception if (sql_exception.application_error_code != client_error_code_pb2.SqlServiceClientError.ERROR_RESPONSE_PENDING): raise DatabaseError('%d: %s' % (response.sql_exception.code, response.sql_exception.message)) def _ConvertCachedResponse(self, stub_method, exec_op_response): """Converts the cached response or RPC error. Args: stub_method: A string, the name of the original method that triggered the retry. exec_op_response: A protobuf, the retry response that contains either the RPC error or the cached response. Returns: A protobuf, the cached response. Raises: DatabaseError: If the cached response contains SqlException. InternalError: If a cached RpcErrorProto exists. 
""" if exec_op_response.HasField('cached_rpc_error'): raise InternalError('%d: %s' % ( exec_op_response.cached_rpc_error.error_code, exec_op_response.cached_rpc_error.error_message)) if not exec_op_response.HasField('cached_payload'): raise InternalError('Invalid exec op response for retry request') if stub_method == 'Exec': response = sql_pb2.ExecResponse() elif stub_method == 'ExecOp': response = sql_pb2.ExecOpResponse() elif stub_method == 'GetMetadata': response = sql_pb2.MetadataResponse() else: raise InternalError('Found unexpected stub_method: %s' % (stub_method)) response.ParseFromString(exec_op_response.cached_payload) if response.HasField('sql_exception'): raise DatabaseError('%d: %s' % (response.sql_exception.code, response.sql_exception.message)) return response def MakeRequestImpl(self, stub_method, request): raise InternalError('No transport defined. Try using rdbms_[transport]') def get_server_info(self): """Returns a string that represents the server version number. Non-standard; Provided for API compatibility with MySQLdb. Returns: The server version number string. """ self.CheckOpen() request = sql_pb2.MetadataRequest() request.metadata = client_pb2.METADATATYPE_DATABASE_METADATA_BASIC response = self.MakeRequest('GetMetadata', request) return response.jdbc_database_metadata.database_product_version def ping(self, reconnect=False): """Checks whether or not the connection to the server is working. If it has gone down, an automatic reconnection is attempted. This function can be used by clients that remain idle for a long while, to check whether or not the server has closed the connection and reconnect if necessary. Non-standard. You should assume that ping() performs an implicit rollback; use only when starting a new transaction. You have been warned. Args: reconnect: Whether to perform an automatic reconnection. Raises: DatabaseError: The connection to the server is not working. """ self.CheckOpen() request = sql_pb2.ExecOpRequest() request.op.type = client_pb2.OpProto.PING try: self.MakeRequest('ExecOp', request) except DatabaseError: if not reconnect: raise self._connection_id = None self.OpenConnection() Warning = Warning Error = Error InterfaceError = InterfaceError DatabaseError = DatabaseError DataError = DataError OperationalError = OperationalError IntegrityError = IntegrityError InternalError = InternalError ProgrammingError = ProgrammingError NotSupportedError = NotSupportedError connect = Connection
adviti/melange
thirdparty/google_appengine/google/storage/speckle/python/api/rdbms.py
Python
apache-2.0
22,582
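The rdbms module above documents a format-to-qmark conversion for bind parameters. A tiny self-contained sketch of the same idea (the function name here is illustrative, not the module's API):

def convert_format_to_qmark(statement, args):
    # Replace each '%s' placeholder with '?' when bind args are supplied.
    if args:
        return statement % tuple('?' * len(args))
    return statement

print(convert_format_to_qmark('SELECT * FROM t WHERE a = %s AND b = %s', (1, 2)))
# -> SELECT * FROM t WHERE a = ? AND b = ?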
# Copyright 2020 Google LLC. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # [START gae_detected_tag] # [END gae_detected_tag] """ [START gae_block_comment_tag] [END gae_block_comment_tag] """
GoogleCloudPlatform/repo-automation-playground
xunit-autolabeler-v2/ast_parser/lib/test_data/appengine/gae_sample.py
Python
apache-2.0
695
#!/usr/bin/env python # Licensed to Cloudera, Inc. under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. Cloudera, Inc. licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from future import standard_library standard_library.install_aliases() import logging import sys from thrift.transport.TTransport import * from desktop.lib.rest.http_client import HttpClient from desktop.lib.rest.resource import Resource if sys.version_info[0] > 2: from io import BytesIO as buffer_writer else: from cStringIO import StringIO as buffer_writer LOG = logging.getLogger(__name__) class THttpClient(TTransportBase): """ HTTP transport mode for Thrift. HTTPS and Kerberos support with Request. e.g. mode = THttpClient('http://hbase-thrift-v1.com:9090') mode = THttpClient('http://hive-localhost:10001/cliservice') """ def __init__(self, base_url): self._base_url = base_url self._client = HttpClient(self._base_url, logger=LOG) self._data = None self._headers = None self._wbuf = buffer_writer() def open(self): pass def set_kerberos_auth(self, service="HTTP"): self._client.set_kerberos_auth(service=service) def set_basic_auth(self, username, password): self._client.set_basic_auth(username, password) def set_verify(self, verify=True): self._client.set_verify(verify) def close(self): self._headers = None # Close session too? def isOpen(self): return self._client is not None def setTimeout(self, ms): if not self._headers: self._headers = {} self._headers.update(timeout=str(int(ms / 1000))) def setCustomHeaders(self, headers): self._headers = headers def read(self, sz): return self._data def write(self, buf): self._wbuf.write(buf) def flush(self): data = self._wbuf.getvalue() self._wbuf = buffer_writer() # POST self._root = Resource(self._client) self._data = self._root.post('', data=data, headers=self._headers)
kawamon/hue
desktop/core/src/desktop/lib/thrift_/http_client.py
Python
apache-2.0
2,588
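A short usage sketch for the THttpClient transport above; the endpoint, credentials, and payload are illustrative only:

transport = THttpClient('http://hive-localhost:10001/cliservice')
transport.set_basic_auth('hue', 'secret')        # or transport.set_kerberos_auth()
transport.setTimeout(30000)                      # timeout header, in milliseconds
transport.write(b'<serialized thrift payload>')  # buffer the outgoing message
transport.flush()                                # POSTs the buffered payload
response_body = transport.read(0)                # body returned by the last POST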
# # Copyright 2015 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """Python wrapper for gcd.sh.""" __author__ = '[email protected] (Ed Davisson)' import logging import os import shutil import socket import subprocess import tempfile import time import urllib import zipfile import httplib2 import portpicker from googledatastore import connection _DEFAULT_GCD_OPTIONS = ['--allow_remote_shutdown', '--testing'] class LocalCloudDatastoreFactory(object): """A factory for constructing LocalCloudDatastore objects.""" def __init__(self, working_directory, gcd_zip, java=None): """Constructs a factory for building local datastore instances. Args: working_directory: path to a directory where temporary files will be stored gcd_zip: path to the gcd zip file java: path to a java executable Raises: ValueError: if gcd.sh cannot be located in the gcd zip file """ self._working_directory = working_directory self._remote_datastores = {} # Extract GCD. zipped_file = zipfile.ZipFile(gcd_zip) self._gcd_dir = os.path.join(self._working_directory, 'gcd') os.mkdir(self._gcd_dir) zipped_file.extractall(self._gcd_dir) # Locate gcd.sh in the unzipped directory (it may be in a directory which # contains a version string). gcd_dirs = [d for d in os.listdir(self._gcd_dir) if os.path.isdir(os.path.join(self._gcd_dir, d))] for d in gcd_dirs: if d.startswith('gcd'): self._gcd_sh = os.path.join(self._gcd_dir, d, 'gcd.sh') break else: raise ValueError('could not find gcd.sh in zip file') os.chmod(self._gcd_sh, 0700) # executable # Make GCD use our copy of Java. if java: os.environ['JAVA'] = java def Get(self, project_id): """Returns an existing local datastore instance for the provided project_id. If a local datastore instance doesn't yet exist, it creates one. """ if project_id in self._remote_datastores: return self._remote_datastores[project_id] datastore = self.Create(project_id) self._remote_datastores[project_id] = datastore return datastore def Create(self, project_id, start_options=None, deadline=10): """Creates a local datastore instance. This method will wait for up to 'deadline' seconds for the datastore to start. Args: project_id: project ID start_options: a list of additional command-line options to pass to the gcd.sh start command deadline: number of seconds to wait for the datastore to respond Returns: a LocalCloudDatastore Raises: IOError: if the local datastore could not be started within the deadline """ return LocalCloudDatastore(self._gcd_sh, self._working_directory, project_id, deadline, start_options) def __del__(self): # Delete temp files. shutil.rmtree(self._gcd_dir) class LocalCloudDatastore(object): """A local datastore (based on gcd.sh).""" def __init__(self, gcd_sh, working_directory, project_id, deadline, start_options): """Constructs a local datastore. Clients should use LocalCloudDatastoreFactory to construct LocalCloudDatastore instances. 
Args: gcd_sh: path to gcd.sh working_directory: directory file where temporary files will be stored project_id: project ID deadline: number of seconds to wait for the datastore to start start_options: a list of additional command-line options to pass to the gcd.sh start command Raises: IOError: if the datastore failed to start within the deadline """ self._project_id = project_id self._gcd_sh = gcd_sh self._http = httplib2.Http() self.__running = False self._tmp_dir = tempfile.mkdtemp(dir=working_directory) self._project_directory = os.path.join(self._tmp_dir, self._project_id) p = subprocess.Popen([gcd_sh, 'create', '--project_id=%s' % self._project_id, self._project_directory]) if p.wait() != 0: raise IOError('could not create project in directory: %s' % self._project_directory) # Start GCD and wait for it to start responding to requests. port = portpicker.PickUnusedPort() self._host = 'http://localhost:%d' % port cmd = [self._gcd_sh, 'start', '--port=%d' % port] cmd.extend(_DEFAULT_GCD_OPTIONS) if start_options: cmd.extend(start_options) cmd.append(self._project_directory) subprocess.Popen(cmd) if not self._WaitForStartup(deadline): raise IOError('datastore did not respond within %ds' % deadline) endpoint = '%s/datastore/v1beta3/projects/%s' % (self._host, self._project_id) self.__datastore = connection.Datastore(project_endpoint=endpoint) self.__running = True def GetDatastore(self): """Returns a googledatatsore.Datastore that is connected to the gcd tool.""" return self.__datastore def _WaitForStartup(self, deadline): """Waits for the datastore to start. Args: deadline: deadline in seconds Returns: True if the instance responds within the deadline, False otherwise. """ start = time.time() sleep = 0.05 def Elapsed(): return time.time() - start while True: try: response, _ = self._http.request(self._host) if response.status == 200: logging.info('local server responded after %f seconds', Elapsed()) return True except socket.error: pass if Elapsed() >= deadline: # Out of time; give up. return False else: time.sleep(sleep) sleep *= 2 def Clear(self): """Clears all data from the local datastore instance. Returns: True if the data was successfully cleared, False otherwise. """ body = urllib.urlencode({'action': 'Clear Datastore'}) headers = {'Content-type': 'application/x-www-form-urlencoded', 'Content-length': str(len(body))} response, _ = self._http.request('%s/_ah/admin/datastore' % self._host, method='POST', headers=headers, body=body) if response.status == 200: return True else: logging.warning('failed to clear datastore; response was: %s', response) def Stop(self): if not self.__running: return logging.info('shutting down the datastore running at %s', self._host) # Shut down the datastore. headers = {'Content-length': '0'} response, _ = self._http.request('%s/_ah/admin/quit' % self._host, method='POST', headers=headers) if response.status != 200: logging.warning('failed to shut down datastore; response: %s', response) self.__running = False # Delete temp files. shutil.rmtree(self._tmp_dir) def __del__(self): # If the user forgets to call Stop() logging.warning('datastore shutting down due to ' 'LocalCloudDatastore object deletion') self.Stop()
pcostell/google-cloud-datastore
python/googledatastore/local_cloud_datastore.py
Python
apache-2.0
7,713
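The local datastore wrapper above polls the emulator until it answers, doubling the sleep between probes. A minimal sketch of that wait-with-backoff loop, assuming a probe() callable supplied by the caller:

import time

def wait_for_startup(probe, deadline):
    # Poll `probe` until it returns True or `deadline` seconds have elapsed.
    start = time.time()
    sleep = 0.05
    while True:
        if probe():
            return True
        if time.time() - start >= deadline:
            return False        # out of time; give up
        time.sleep(sleep)
        sleep *= 2              # exponential backoff between probes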
# coding=utf-8 # Copyright 2016 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). from __future__ import (absolute_import, division, generators, nested_scopes, print_function, unicode_literals, with_statement) import logging from pants.engine.fs import EMPTY_SNAPSHOT from pants.engine.rules import RootRule, rule from pants.engine.selectors import Select from pants.util.objects import datatype logger = logging.getLogger(__name__) class ExecuteProcessRequest(datatype('ExecuteProcessRequest', ['argv', 'env', 'input_files_digest', 'digest_length'])): """Request for execution with args and snapshots to extract.""" @classmethod def create_from_snapshot(cls, argv, env, snapshot): return ExecuteProcessRequest( argv=argv, env=env, input_files_digest=snapshot.fingerprint, digest_length=snapshot.digest_length, ) @classmethod def create_with_empty_snapshot(cls, argv, env): return cls.create_from_snapshot(argv, env, EMPTY_SNAPSHOT) def __new__(cls, argv, env, input_files_digest, digest_length): """ :param args: Arguments to the process being run. :param env: A tuple of environment variables and values. """ if not isinstance(argv, tuple): raise ValueError('argv must be a tuple.') if not isinstance(env, tuple): raise ValueError('env must be a tuple.') if not isinstance(input_files_digest, str): raise ValueError('input_files_digest must be a str.') if not isinstance(digest_length, int): raise ValueError('digest_length must be an int.') if digest_length < 0: raise ValueError('digest_length must be >= 0.') return super(ExecuteProcessRequest, cls).__new__(cls, argv, env, input_files_digest, digest_length) class ExecuteProcessResult(datatype('ExecuteProcessResult', ['stdout', 'stderr', 'exit_code'])): pass def create_process_rules(): """Intrinsically replaced on the rust side.""" return [execute_process_noop, RootRule(ExecuteProcessRequest)] @rule(ExecuteProcessResult, [Select(ExecuteProcessRequest)]) def execute_process_noop(*args): raise Exception('This task is replaced intrinsically, and should never run.')
UnrememberMe/pants
src/python/pants/engine/isolated_process.py
Python
apache-2.0
2,261
## Adding ./getresults to the Python path so that the modules in that folder can be imported
import sys
sys.path.insert(0, './getresults')

import datetime
from flightsearch import flightsearch, flightresult
import os
import uuid
import time
from pprint import pprint


def main():
    flyfrom = 'YYZ'          # input("Enter departure city or airport code, e.g. Toronto or YYZ:\n")
    datefrom = '2017-04-26'  # input("Enter departure date and time, e.g. 2017-03-31 12:00:\n")
    flyto = 'LHR'            # input("Enter arrival city or airport code, e.g. London or LHR:\n")
    dateto = '2017-05-26'    # input("Enter arrival date and time, e.g. 2017-03-31 20:00:\n")

    searchuuid = uuid.uuid4()
    searchbegintime = time.time()
    search = flightsearch(searchuuid=searchuuid, searchbegintime=searchbegintime, flyfrom=flyfrom, datefrom=datefrom, flyto=flyto, dateto=dateto)
    results = aggregatedflights(search)
    search.searchendtime = time.time()
    for key, value in results.items():
        for item in value:
            pprint(vars(item))


## This function aggregates the various results obtained from the modules in the ./getresults folder
def aggregatedflights(flightsearch):
    getresultsdir = './getresults'
    resultdict = {}
    for filename in os.listdir(getresultsdir):
        if filename.startswith("get") and filename.endswith(".py"):
            modulename = filename.split('.')[0]
            mod = __import__(modulename)
            resultdict[modulename] = mod.getresult(flightsearch)
        else:
            continue
    return sortbyprice(resultdict)


def sortbyprice(flightresult):
    ## Coming soon
    return flightresult


if __name__ == '__main__':
    main()
brahul90/cheapflights
cheapflights/main.py
Python
apache-2.0
1,721
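aggregatedflights() above discovers every get*.py module in ./getresults and calls its getresult() function. A hypothetical provider module matching that contract might look like this (the file name and return value are illustrative):

# ./getresults/getexampleair.py  (hypothetical provider)
def getresult(search):
    # `search` is the flightsearch object built in main(); query a flight API
    # here and return a list of flightresult objects. This stub returns none.
    return []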
# pylint: disable=no-self-use,invalid-name import pytest import torch from torch.autograd import Variable from allennlp.common import Params from allennlp.common.checks import ConfigurationError from allennlp.data import Vocabulary from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder from allennlp.common.testing import AllenNlpTestCase class TestBasicTextFieldEmbedder(AllenNlpTestCase): def setUp(self): super(TestBasicTextFieldEmbedder, self).setUp() self.vocab = Vocabulary() self.vocab.add_token_to_namespace("1") self.vocab.add_token_to_namespace("2") self.vocab.add_token_to_namespace("3") self.vocab.add_token_to_namespace("4") params = Params({ "words1": { "type": "embedding", "embedding_dim": 2 }, "words2": { "type": "embedding", "embedding_dim": 5 }, "words3": { "type": "embedding", "embedding_dim": 3 } }) self.token_embedder = BasicTextFieldEmbedder.from_params(self.vocab, params) self.inputs = { "words1": Variable(torch.LongTensor([[0, 2, 3, 5]])), "words2": Variable(torch.LongTensor([[1, 4, 3, 2]])), "words3": Variable(torch.LongTensor([[1, 5, 1, 2]])) } def test_get_output_dim_aggregates_dimension_from_each_embedding(self): assert self.token_embedder.get_output_dim() == 10 def test_forward_asserts_input_field_match(self): self.inputs['words4'] = self.inputs['words3'] del self.inputs['words3'] with pytest.raises(ConfigurationError): self.token_embedder(self.inputs) self.inputs['words3'] = self.inputs['words4'] del self.inputs['words4'] def test_forward_concats_resultant_embeddings(self): assert self.token_embedder(self.inputs).size() == (1, 4, 10)
nafitzgerald/allennlp
tests/modules/text_field_embedders/basic_token_embedder_test.py
Python
apache-2.0
2,108
#! /home/nsanthony/miniconda3/bin/python
import inventory.inventory_class as inv
import weapons.weapon_class as wp


class people:
    """This is the people class with attributes:"""
    def name():
        n = ''
        return n

    def health():
        hp = 0
        return hp

    def descript():
        d = 'Description of the person or creature'
        return d

    def equiped():
        e = inv.inventory()
        e.weapon = wp.weapon()
        e.armor = 0
        return e

    def bag():
        b = {}
        return b

    def hostile():
        h = 0
        return h
nsanthony/super-fortnight
wwk/py/people/people_class.py
Python
apache-2.0
583
az.plot_dist(b, rug=True, quantiles=[.25, .5, .75], cumulative=True)
mcmcplotlib/mcmcplotlib
api/generated/arviz-plot_dist-5.py
Python
apache-2.0
69
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for parser and parser plugin presets."""

from __future__ import unicode_literals

import unittest

from plaso.containers import artifacts
from plaso.parsers import presets

from tests import test_lib as shared_test_lib


class ParserPresetTest(shared_test_lib.BaseTestCase):
  """Tests for the parser and parser plugin preset."""

  def testInitialize(self):
    """Tests the __init__ function."""
    test_definition = presets.ParserPreset('test', ['parser1', 'parser2'])
    self.assertIsNotNone(test_definition)


class ParserPresetsManagerTest(shared_test_lib.BaseTestCase):
  """Tests for the parser and parser plugin presets manager."""

  _LINUX_PARSERS = [
      'bash_history',
      'bencode',
      'czip/oxml',
      'dockerjson',
      'dpkg',
      'filestat',
      'gdrive_synclog',
      'olecf',
      'pls_recall',
      'popularity_contest',
      'selinux',
      'sqlite/google_drive',
      'sqlite/skype',
      'sqlite/zeitgeist',
      'syslog',
      'systemd_journal',
      'utmp',
      'vsftpd',
      'webhist',
      'xchatlog',
      'xchatscrollback',
      'zsh_extended_history']

  _MACOS_PARSERS = [
      'asl_log',
      'bash_history',
      'bencode',
      'bsm_log',
      'cups_ipp',
      'czip/oxml',
      'filestat',
      'fseventsd',
      'gdrive_synclog',
      'mac_appfirewall_log',
      'mac_keychain',
      'mac_securityd',
      'macwifi',
      'olecf',
      'plist',
      'sqlite/appusage',
      'sqlite/google_drive',
      'sqlite/imessage',
      'sqlite/ls_quarantine',
      'sqlite/mac_document_versions',
      'sqlite/mackeeper_cache',
      'sqlite/skype',
      'syslog',
      'utmpx',
      'webhist',
      'zsh_extended_history']

  # TODO add tests for _ReadPresetDefinitionValues
  # TODO add tests for _ReadPresetsFromFileObject

  def testGetNames(self):
    """Tests the GetNames function."""
    test_file_path = self._GetTestFilePath(['presets.yaml'])
    self._SkipIfPathNotExists(test_file_path)

    test_manager = presets.ParserPresetsManager()
    test_manager.ReadFromFile(test_file_path)

    test_names = list(test_manager.GetNames())
    self.assertEqual(len(test_names), 7)

    expected_names = sorted([
        'android', 'linux', 'macos', 'webhist', 'win7', 'win_gen', 'winxp'])
    self.assertEqual(test_names, expected_names)

  def testGetParsersByPreset(self):
    """Tests the GetParsersByPreset function."""
    test_file_path = self._GetTestFilePath(['presets.yaml'])
    self._SkipIfPathNotExists(test_file_path)

    test_manager = presets.ParserPresetsManager()
    test_manager.ReadFromFile(test_file_path)

    parser_names = test_manager.GetParsersByPreset('linux')
    self.assertEqual(parser_names, self._LINUX_PARSERS)

    with self.assertRaises(KeyError):
      test_manager.GetParsersByPreset('bogus')

  def testGetPresetByName(self):
    """Tests the GetPresetByName function."""
    test_file_path = self._GetTestFilePath(['presets.yaml'])
    self._SkipIfPathNotExists(test_file_path)

    test_manager = presets.ParserPresetsManager()
    test_manager.ReadFromFile(test_file_path)

    test_preset = test_manager.GetPresetByName('linux')
    self.assertIsNotNone(test_preset)
    self.assertEqual(test_preset.name, 'linux')
    self.assertEqual(test_preset.parsers, self._LINUX_PARSERS)

    test_preset = test_manager.GetPresetByName('bogus')
    self.assertIsNone(test_preset)

  def testGetPresetsByOperatingSystem(self):
    """Tests the GetPresetsByOperatingSystem function."""
    test_file_path = self._GetTestFilePath(['presets.yaml'])
    self._SkipIfPathNotExists(test_file_path)

    test_manager = presets.ParserPresetsManager()
    test_manager.ReadFromFile(test_file_path)

    operating_system = artifacts.OperatingSystemArtifact(family='MacOS')

    test_presets = test_manager.GetPresetsByOperatingSystem(operating_system)
    self.assertEqual(len(test_presets), 1)
    self.assertEqual(test_presets[0].name, 'macos')
    self.assertEqual(test_presets[0].parsers, self._MACOS_PARSERS)

    operating_system = artifacts.OperatingSystemArtifact(family='bogus')

    test_presets = test_manager.GetPresetsByOperatingSystem(operating_system)
    self.assertEqual(len(test_presets), 0)

  def testGetPresetsInformation(self):
    """Tests the GetPresetsInformation function."""
    test_file_path = self._GetTestFilePath(['presets.yaml'])
    self._SkipIfPathNotExists(test_file_path)

    test_manager = presets.ParserPresetsManager()
    test_manager.ReadFromFile(test_file_path)

    parser_presets_information = test_manager.GetPresetsInformation()
    self.assertGreaterEqual(len(parser_presets_information), 1)

    available_parser_names = [name for name, _ in parser_presets_information]
    self.assertIn('linux', available_parser_names)

  # TODO add tests for ReadFromFile


if __name__ == '__main__':
  unittest.main()
rgayon/plaso
tests/parsers/presets.py
Python
apache-2.0
4,926
# _*_ coding: utf-8 _*_
# filename: pic.py

import csv
import numpy
import matplotlib.pyplot as plt

# Read the price and area columns from house.csv
price, size = numpy.loadtxt('house.csv', delimiter='|', usecols=(1, 2), unpack=True)
print price
print size

plt.figure()

plt.subplot(211)
# plt.title("price")
plt.title("/ 10000RMB")
plt.hist(price, bins=20)

plt.subplot(212)
# plt.title("area")
plt.xlabel("/ m**2")
plt.hist(size, bins=20)

plt.figure(2)
plt.title("price")
plt.plot(price)

plt.show()

# Compute the mean price and mean area
price_mean = numpy.mean(price)
size_mean = numpy.mean(size)

# Compute the variance of price and area
price_var = numpy.var(price)
size_var = numpy.var(size)

print "Variance of price:", price_var
print "Variance of area:", size_var
tongxindao/shiyanlou
shiyanlou_cs869/ershoufang_info/pic.py
Python
apache-2.0
766
#!python3 import requests import yaml import time import enum import sys import re import logging import ssl from requests.auth import HTTPDigestAuth from requests.auth import HTTPBasicAuth from lxml import etree as ET from requests.packages.urllib3.exceptions import InsecureRequestWarning from requests.adapters import HTTPAdapter from requests.packages.urllib3.poolmanager import PoolManager logger = logging.getLogger(__name__) with open("config/config.yaml") as f: config = yaml.load(f) # Silence self signed certificate security warning requests.packages.urllib3.disable_warnings(InsecureRequestWarning) # Specify better cipher. Default causes errors on some systems with outdated ssl libs requests.packages.urllib3.util.ssl_.DEFAULT_CIPHERS += 'HIGH:!DH:!aNULL' try: requests.packages.urllib3.contrib.pyopenssl.DEFAULT_SSL_CIPHER_LIST += 'HIGH:!DH:!aNULL' except AttributeError: # no pyopenssl support used / needed / available pass class Ssl3HttpAdapter(HTTPAdapter): """"Transport adapter" that allows us to use SSLv3.""" def init_poolmanager(self, connections, maxsize, block=False): self.poolmanager = PoolManager( num_pools=connections, maxsize=maxsize, block=block, ssl_version=ssl.PROTOCOL_SSLv3) session = requests.Session() session.mount('https://wbgrp-crawl',Ssl3HttpAdapter()) class Crawl_Status(): none = "None" unbuilt = "Unbuilt" ready = "Ready" paused = "Active: PAUSED" running = "Active: RUNNING" finished = "Finished: ABORTED" class Crawl_Actions(): build = "build" launch = "launch" unpause = "unpause" pause = "pause" checkpoint = "checkpoint" terminate = "terminate" teardown = "teardown" class Crawl_Reports(): summary = "CrawlSummaryReport" seeds = "SeedsReport" source = "SourceTagsReport" hosts = "HostsReport" mime = "MimetypesReport" response = "ResponseCodeReport" processors = "ProcessorsReport" frontier = "FrontierSummaryReport" thread = "ToeThreadsReport" def get_crawl_status(url): response = session.get(url,auth=HTTPDigestAuth(config["h3_settings"]["username"],config["h3_settings"]["password"]),verify=False, headers= {'accept':'application/xml'}) if (response.status_code & 200) == 200: root=ET.fromstring(response.text) if root.find('statusDescription') is not None: return root.find('statusDescription').text elif root.find('crawlControllerState') is not None: return root.find('crawlControllerState').text def get_available_actions(url): response = session.get(url,auth=HTTPDigestAuth(config["h3_settings"]["username"],config["h3_settings"]["password"]),verify=False, headers= {'accept':'application/xml'}) actions = [] if (response.status_code & 200) == 200: root=ET.fromstring(response.text) for action in root.find('availableActions'): actions.append(action.text) return actions def main(): url = 'https://localhost:6440/engine/job/monthly_test' test_full_cycle(url) def get_crawljob_page(url): response = session.get(url,auth=HTTPDigestAuth(config["h3_settings"]["username"],config["h3_settings"]["password"]),verify=False, headers= {'accept':'application/xml'}) if (response.status_code & 200) == 200: return response def get_crawljob_text_page(url): response = requests.get(url,auth=HTTPDigestAuth(config["h3_settings"]["username"],config["h3_settings"]["password"]),verify=False) if (response.status_code & 200) == 200: return response def get_config_path(url): response = get_crawljob_page(url) root=ET.fromstring(response.text) config_path = root.find('primaryConfig').text return config_path def increment_crawl_number(url, source_config_file, dest_config_file): parser = 
ET.XMLParser(remove_comments=False) config_tree = ET.parse(source_config_file,parser=parser) ns = {'beans': 'http://www.springframework.org/schema/beans'} properties = config_tree.getroot().findall("./beans:bean[@id='simpleOverrides']/beans:property/beans:value",ns)[0].text m = re.finditer('(?m)^[^\.]*[wW]arcWriter\.prefix=[^\d]*-(?P<warcid>\d{3})(-.*)?',properties) for i in m: warc_id=int(i.group('warcid')) warc_id=warc_id+1 properties_incremented = re.sub('(?m)^(?P<prefix>[^\.]*[wW]arcWriter\.prefix=[^\d]*-)(?P<warcid>\d{3})(?P<suffix>(-.*)?)','\g<prefix>'+str(warc_id).zfill(3)+'\g<suffix>',properties) config_tree.getroot().findall("./beans:bean[@id='simpleOverrides']/beans:property/beans:value",ns)[0].text = properties_incremented config_tree.write(dest_config_file,xml_declaration=True,encoding="utf-8") def find_replace_xpath(url, source_config_file, dest_config_file, xpath, regex, replacement): parser = ET.XMLParser(remove_comments=False) config_tree = ET.parse(source_config_file,parser=parser) ns = {'beans': 'http://www.springframework.org/schema/beans'} config_field = config_tree.getroot().findall(xpath,ns)[0].text #print(config_field) modified_field = re.sub(re.compile(regex,re.MULTILINE),replacement,config_field) #print(modified_field) config_tree.getroot().findall(xpath,ns)[0].text=modified_field config_tree.write(dest_config_file,xml_declaration=True,encoding="utf-8") def test_full_cycle(url): status = get_crawl_status(url) logger.info("Status: %s" %status) available_actions = get_available_actions(url) if status == Crawl_Status.unbuilt and "build" in available_actions: build(url) status = get_crawl_status(url) available_actions = get_available_actions(url) if status == Crawl_Status.ready and "launch" in available_actions: launch(url) status = get_crawl_status(url) available_actions = get_available_actions(url) if status == Crawl_Status.paused and "unpause" in available_actions: unpause(url) time.sleep(5) status = get_crawl_status(url) available_actions = get_available_actions(url) if status == Crawl_Status.running and "pause" in available_actions: pause(url) runScript(url,'rawOut.println("testing")') runScript(url,'htmlOut.println("testing")') status = get_crawl_status(url) available_actions = get_available_actions(url) if status == Crawl_Status.paused and "checkpoint" in available_actions: checkpoint(url) status = get_crawl_status(url) available_actions = get_available_actions(url) if status == Crawl_Status.paused and "terminate" in available_actions: terminate(url) status = get_crawl_status(url) available_actions = get_available_actions(url) if status == Crawl_Status.finished and "teardown" in available_actions: teardown(url) def do_crawl_action_until_status(url, action, expected_status): logger.info("-Doing action: %s" %action) response = send_command(url,{"action":action}) if (response.status_code & 200) == 200: retries=0 while get_crawl_status(url) != expected_status: if retries > config["max_retries"]: logger.info("Max retries exceeded while waiting for: %s" % expected_status) sys.exit() logger.info("...") time.sleep(config["retry_delay_seconds"]) retries+=1 logger.error("Status: %s" %expected_status) def build(url): do_crawl_action_until_status(url, Crawl_Actions.build, Crawl_Status.ready) def launch(url): do_crawl_action_until_status(url,Crawl_Actions.launch, Crawl_Status.paused) def unpause(url): do_crawl_action_until_status(url,Crawl_Actions.unpause,Crawl_Status.running) def pause(url): do_crawl_action_until_status(url, Crawl_Actions.pause, Crawl_Status.paused) def 
checkpoint(url): do_crawl_action_until_status(url, Crawl_Actions.checkpoint, Crawl_Status.paused) def terminate(url): do_crawl_action_until_status(url, Crawl_Actions.terminate, Crawl_Status.finished) def teardown(url): do_crawl_action_until_status(url, Crawl_Actions.teardown, Crawl_Status.unbuilt) def runScript(url, script): response = send_command(url + '/script',{'engine':'groovy','script':script}) if (response.status_code & 200) == 200: logger.debug(response.text) root = ET.fromstring(response.text) return_script = root.find('script') raw_out = root.find('rawOutput') html_out = root.find('htmlOutput') lines_executed = root.find('linesExecuted') if return_script is not None: logger.info("Script run: %s" % return_script.text) if lines_executed is not None: logger.info("%s lines executed" % lines_executed.text) if raw_out is not None: logger.info("Output:\n %s" % raw_out.text) if html_out is not None: logger.info("Output:\n %s" % html_out.text) def send_command(url, data): response = session.post(url,data=data,auth=HTTPDigestAuth(config["h3_settings"]["username"],config["h3_settings"]["password"]),verify=False, headers= {'accept':'application/xml'}) return response if __name__ == "__main__": main()
adam-miller/h3_py
h3_py/h3.py
Python
apache-2.0
8,597
import os
import subprocess
import sys

import pytest

sys.path.append("tests/python")
import testing as tm
import test_demos as td  # noqa


@pytest.mark.skipif(**tm.no_cupy())
def test_data_iterator():
    script = os.path.join(td.PYTHON_DEMO_DIR, 'quantile_data_iterator.py')
    cmd = ['python', script]
    subprocess.check_call(cmd)


def test_update_process_demo():
    script = os.path.join(td.PYTHON_DEMO_DIR, 'update_process.py')
    cmd = ['python', script]
    subprocess.check_call(cmd)


def test_categorical_demo():
    script = os.path.join(td.PYTHON_DEMO_DIR, 'categorical.py')
    cmd = ['python', script]
    subprocess.check_call(cmd)


@pytest.mark.skipif(**tm.no_dask())
@pytest.mark.skipif(**tm.no_dask_cuda())
@pytest.mark.skipif(**tm.no_cupy())
@pytest.mark.mgpu
def test_dask_training():
    script = os.path.join(tm.PROJECT_ROOT, 'demo', 'dask', 'gpu_training.py')
    cmd = ['python', script, '--ddqdm=1']
    subprocess.check_call(cmd)

    cmd = ['python', script, '--ddqdm=0']
    subprocess.check_call(cmd)
dmlc/xgboost
tests/python-gpu/test_gpu_demos.py
Python
apache-2.0
1,044
""" Tests for zipline.pipeline.loaders.frame.DataFrameLoader. """ from unittest import TestCase from mock import patch from numpy import arange, ones from numpy.testing import assert_array_equal from pandas import ( DataFrame, DatetimeIndex, Int64Index, ) from zipline.lib.adjustment import ( Float64Add, Float64Multiply, Float64Overwrite, ) from zipline.pipeline.data import USEquityPricing from zipline.pipeline.loaders.frame import ( ADD, DataFrameLoader, MULTIPLY, OVERWRITE, ) from zipline.utils.tradingcalendar import trading_day class DataFrameLoaderTestCase(TestCase): def setUp(self): self.nsids = 5 self.ndates = 20 self.sids = Int64Index(range(self.nsids)) self.dates = DatetimeIndex( start='2014-01-02', freq=trading_day, periods=self.ndates, ) self.mask = ones((len(self.dates), len(self.sids)), dtype=bool) def tearDown(self): pass def test_bad_input(self): data = arange(100).reshape(self.ndates, self.nsids) baseline = DataFrame(data, index=self.dates, columns=self.sids) loader = DataFrameLoader( USEquityPricing.close, baseline, ) with self.assertRaises(ValueError): # Wrong column. loader.load_adjusted_array( [USEquityPricing.open], self.dates, self.sids, self.mask ) with self.assertRaises(ValueError): # Too many columns. loader.load_adjusted_array( [USEquityPricing.open, USEquityPricing.close], self.dates, self.sids, self.mask, ) def test_baseline(self): data = arange(100).reshape(self.ndates, self.nsids) baseline = DataFrame(data, index=self.dates, columns=self.sids) loader = DataFrameLoader(USEquityPricing.close, baseline) dates_slice = slice(None, 10, None) sids_slice = slice(1, 3, None) [adj_array] = loader.load_adjusted_array( [USEquityPricing.close], self.dates[dates_slice], self.sids[sids_slice], self.mask[dates_slice, sids_slice], ) for idx, window in enumerate(adj_array.traverse(window_length=3)): expected = baseline.values[dates_slice, sids_slice][idx:idx + 3] assert_array_equal(window, expected) def test_adjustments(self): data = arange(100).reshape(self.ndates, self.nsids) baseline = DataFrame(data, index=self.dates, columns=self.sids) # Use the dates from index 10 on and sids 1-3. dates_slice = slice(10, None, None) sids_slice = slice(1, 4, None) # Adjustments that should actually affect the output. relevant_adjustments = [ { 'sid': 1, 'start_date': None, 'end_date': self.dates[15], 'apply_date': self.dates[16], 'value': 0.5, 'kind': MULTIPLY, }, { 'sid': 2, 'start_date': self.dates[5], 'end_date': self.dates[15], 'apply_date': self.dates[16], 'value': 1.0, 'kind': ADD, }, { 'sid': 2, 'start_date': self.dates[15], 'end_date': self.dates[16], 'apply_date': self.dates[17], 'value': 1.0, 'kind': ADD, }, { 'sid': 3, 'start_date': self.dates[16], 'end_date': self.dates[17], 'apply_date': self.dates[18], 'value': 99.0, 'kind': OVERWRITE, }, ] # These adjustments shouldn't affect the output. 
irrelevant_adjustments = [ { # Sid Not Requested 'sid': 0, 'start_date': self.dates[16], 'end_date': self.dates[17], 'apply_date': self.dates[18], 'value': -9999.0, 'kind': OVERWRITE, }, { # Sid Unknown 'sid': 9999, 'start_date': self.dates[16], 'end_date': self.dates[17], 'apply_date': self.dates[18], 'value': -9999.0, 'kind': OVERWRITE, }, { # Date Not Requested 'sid': 2, 'start_date': self.dates[1], 'end_date': self.dates[2], 'apply_date': self.dates[3], 'value': -9999.0, 'kind': OVERWRITE, }, { # Date Before Known Data 'sid': 2, 'start_date': self.dates[0] - (2 * trading_day), 'end_date': self.dates[0] - trading_day, 'apply_date': self.dates[0] - trading_day, 'value': -9999.0, 'kind': OVERWRITE, }, { # Date After Known Data 'sid': 2, 'start_date': self.dates[-1] + trading_day, 'end_date': self.dates[-1] + (2 * trading_day), 'apply_date': self.dates[-1] + (3 * trading_day), 'value': -9999.0, 'kind': OVERWRITE, }, ] adjustments = DataFrame(relevant_adjustments + irrelevant_adjustments) loader = DataFrameLoader( USEquityPricing.close, baseline, adjustments=adjustments, ) expected_baseline = baseline.iloc[dates_slice, sids_slice] formatted_adjustments = loader.format_adjustments( self.dates[dates_slice], self.sids[sids_slice], ) expected_formatted_adjustments = { 6: [ Float64Multiply( first_row=0, last_row=5, first_col=0, last_col=0, value=0.5, ), Float64Add( first_row=0, last_row=5, first_col=1, last_col=1, value=1.0, ), ], 7: [ Float64Add( first_row=5, last_row=6, first_col=1, last_col=1, value=1.0, ), ], 8: [ Float64Overwrite( first_row=6, last_row=7, first_col=2, last_col=2, value=99.0, ) ], } self.assertEqual(formatted_adjustments, expected_formatted_adjustments) mask = self.mask[dates_slice, sids_slice] with patch('zipline.pipeline.loaders.frame.adjusted_array') as m: loader.load_adjusted_array( columns=[USEquityPricing.close], dates=self.dates[dates_slice], assets=self.sids[sids_slice], mask=mask, ) self.assertEqual(m.call_count, 1) args, kwargs = m.call_args assert_array_equal(kwargs['data'], expected_baseline.values) assert_array_equal(kwargs['mask'], mask) self.assertEqual(kwargs['adjustments'], expected_formatted_adjustments)
ChinaQuants/zipline
tests/pipeline/test_frameload.py
Python
apache-2.0
7,612
''' Implements the RTS ALUA Target Port Group class. This file is part of RTSLib. Copyright (c) 2016 by Red Hat, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ''' from .node import CFSNode from .utils import RTSLibError, RTSLibALUANotSupported, fread, fwrite import six alua_rw_params = ['alua_access_state', 'alua_access_status', 'alua_write_metadata', 'alua_access_type', 'preferred', 'nonop_delay_msecs', 'trans_delay_msecs', 'implicit_trans_secs', 'alua_support_offline', 'alua_support_standby', 'alua_support_transitioning', 'alua_support_active_nonoptimized', 'alua_support_unavailable', 'alua_support_active_optimized'] alua_ro_params = ['tg_pt_gp_id', 'members', 'alua_support_lba_dependent'] alua_types = ['None', 'Implicit', 'Explicit', 'Implicit and Explicit'] alua_statuses = ['None', 'Altered by Explicit STPG', 'Altered by Implicit ALUA'] class ALUATargetPortGroup(CFSNode): """ ALUA Target Port Group interface """ def __repr__(self): return "<ALUA TPG %s>" % self.name def __init__(self, storage_object, name, tag=None): """ @param storage_object: backstore storage object to create ALUA group for @param name: name of ALUA group @param tag: target port group id. If not passed in, try to look up existing ALUA TPG with the same name """ if storage_object.alua_supported is False: raise RTSLibALUANotSupported("Backend does not support ALUA setup") # default_tg_pt_gp takes tag 1 if tag is not None and (tag > 65535 or tag < 1): raise RTSLibError("The TPG Tag must be between 1 and 65535") super(ALUATargetPortGroup, self).__init__() self.name = name self.storage_object = storage_object self._path = "%s/alua/%s" % (storage_object.path, name) if tag is not None: try: self._create_in_cfs_ine('create') except OSError as msg: raise RTSLibError(msg) try: fwrite("%s/tg_pt_gp_id" % self._path, tag) except IOError as msg: self.delete() raise RTSLibError("Cannot set id to %d: %s" % (tag, str(msg))) else: try: self._create_in_cfs_ine('lookup') except OSError as msg: raise RTSLibError(msg) # Public def delete(self): """ Delete ALUA TPG and unmap from LUNs """ self._check_self() # default_tg_pt_gp created by the kernel and cannot be deleted if self.name == "default_tg_pt_gp": raise RTSLibError("Can not delete default_tg_pt_gp") # This will reset the ALUA tpg to default_tg_pt_gp super(ALUATargetPortGroup, self).delete() def _get_alua_access_state(self): self._check_self() path = "%s/alua_access_state" % self.path return int(fread(path)) def _set_alua_access_state(self, newstate): self._check_self() path = "%s/alua_access_state" % self.path try: fwrite(path, str(int(newstate))) except IOError as e: raise RTSLibError("Cannot change ALUA state: %s" % e) def _get_alua_access_status(self): self._check_self() path = "%s/alua_access_status" % self.path status = fread(path) return alua_statuses.index(status) def _set_alua_access_status(self, newstatus): self._check_self() path = "%s/alua_access_status" % self.path try: fwrite(path, str(int(newstatus))) except IOError as e: raise RTSLibError("Cannot change ALUA status: %s" % e) def 
_get_alua_access_type(self): self._check_self() path = "%s/alua_access_type" % self.path alua_type = fread(path) return alua_types.index(alua_type) def _set_alua_access_type(self, access_type): self._check_self() path = "%s/alua_access_type" % self.path try: fwrite(path, str(int(access_type))) except IOError as e: raise RTSLibError("Cannot change ALUA access type: %s" % e) def _get_preferred(self): self._check_self() path = "%s/preferred" % self.path return int(fread(path)) def _set_preferred(self, pref): self._check_self() path = "%s/preferred" % self.path try: fwrite(path, str(int(pref))) except IOError as e: raise RTSLibError("Cannot set preferred: %s" % e) def _get_alua_write_metadata(self): self._check_self() path = "%s/alua_write_metadata" % self.path return int(fread(path)) def _set_alua_write_metadata(self, pref): self._check_self() path = "%s/alua_write_metadata" % self.path try: fwrite(path, str(int(pref))) except IOError as e: raise RTSLibError("Cannot set alua_write_metadata: %s" % e) def _get_alua_support_active_nonoptimized(self): self._check_self() path = "%s/alua_support_active_nonoptimized" % self.path return int(fread(path)) def _set_alua_support_active_nonoptimized(self, enabled): self._check_self() path = "%s/alua_support_active_nonoptimized" % self.path try: fwrite(path, str(int(enabled))) except IOError as e: raise RTSLibError("Cannot set alua_support_active_nonoptimized: %s" % e) def _get_alua_support_active_optimized(self): self._check_self() path = "%s/alua_support_active_optimized" % self.path return int(fread(path)) def _set_alua_support_active_optimized(self, enabled): self._check_self() path = "%s/alua_support_active_optimized" % self.path try: fwrite(path, str(int(enabled))) except IOError as e: raise RTSLibError("Cannot set alua_support_active_optimized: %s" % e) def _get_alua_support_offline(self): self._check_self() path = "%s/alua_support_offline" % self.path return int(fread(path)) def _set_alua_support_offline(self, enabled): self._check_self() path = "%s/alua_support_offline" % self.path try: fwrite(path, str(int(enabled))) except IOError as e: raise RTSLibError("Cannot set alua_support_offline: %s" % e) def _get_alua_support_unavailable(self): self._check_self() path = "%s/alua_support_unavailable" % self.path return int(fread(path)) def _set_alua_support_unavailable(self, enabled): self._check_self() path = "%s/alua_support_unavailable" % self.path try: fwrite(path, str(int(enabled))) except IOError as e: raise RTSLibError("Cannot set alua_support_unavailable: %s" % e) def _get_alua_support_standby(self): self._check_self() path = "%s/alua_support_standby" % self.path return int(fread(path)) def _set_alua_support_standby(self, enabled): self._check_self() path = "%s/alua_support_standby" % self.path try: fwrite(path, str(int(enabled))) except IOError as e: raise RTSLibError("Cannot set alua_support_standby: %s" % e) def _get_alua_support_transitioning(self): self._check_self() path = "%s/alua_support_transitioning" % self.path return int(fread(path)) def _set_alua_support_transitioning(self, enabled): self._check_self() path = "%s/alua_support_transitioning" % self.path try: fwrite(path, str(int(enabled))) except IOError as e: raise RTSLibError("Cannot set alua_support_transitioning: %s" % e) def _get_alua_support_lba_dependent(self): self._check_self() path = "%s/alua_support_lba_dependent" % self.path return int(fread(path)) def _get_members(self): self._check_self() path = "%s/members" % self.path member_list = [] for member in 
fread(path).splitlines(): lun_path = member.split("/") if len(lun_path) != 4: continue member_list.append({ 'driver': lun_path[0], 'target': lun_path[1], 'tpgt': int(lun_path[2].split("_", 1)[1]), 'lun': int(lun_path[3].split("_", 1)[1]) }) return member_list def _get_tg_pt_gp_id(self): self._check_self() path = "%s/tg_pt_gp_id" % self.path return int(fread(path)) def _get_trans_delay_msecs(self): self._check_self() path = "%s/trans_delay_msecs" % self.path return int(fread(path)) def _set_trans_delay_msecs(self, secs): self._check_self() path = "%s/trans_delay_msecs" % self.path try: fwrite(path, str(int(secs))) except IOError as e: raise RTSLibError("Cannot set trans_delay_msecs: %s" % e) def _get_implicit_trans_secs(self): self._check_self() path = "%s/implicit_trans_secs" % self.path return int(fread(path)) def _set_implicit_trans_secs(self, secs): self._check_self() path = "%s/implicit_trans_secs" % self.path try: fwrite(path, str(int(secs))) except IOError as e: raise RTSLibError("Cannot set implicit_trans_secs: %s" % e) def _get_nonop_delay_msecs(self): self._check_self() path = "%s/nonop_delay_msecs" % self.path return int(fread(path)) def _set_nonop_delay_msecs(self, delay): self._check_self() path = "%s/nonop_delay_msecs" % self.path try: fwrite(path, str(int(delay))) except IOError as e: raise RTSLibError("Cannot set nonop_delay_msecs: %s" % e) def dump(self): d = super(ALUATargetPortGroup, self).dump() d['name'] = self.name d['tg_pt_gp_id'] = self.tg_pt_gp_id for param in alua_rw_params: d[param] = getattr(self, param, None) return d alua_access_state = property(_get_alua_access_state, _set_alua_access_state, doc="Get or set ALUA state. " "0 = Active/optimized, " "1 = Active/non-optimized, " "2 = Standby, " "3 = Unavailable, " "4 = LBA Dependent, " "14 = Offline, " "15 = Transitioning") alua_access_type = property(_get_alua_access_type, _set_alua_access_type, doc="Get or set ALUA access type. " "1 = Implicit, 2 = Explicit, 3 = Both") alua_access_status = property(_get_alua_access_status, _set_alua_access_status, doc="Get or set ALUA access status. " "0 = None, " "1 = Altered by Explicit STPG, " "2 = Altered by Implicit ALUA") preferred = property(_get_preferred, _set_preferred, doc="Get or set preferred bit. 1 = Pref, 0 Not-Pre") alua_write_metadata = property(_get_alua_write_metadata, _set_alua_write_metadata, doc="Get or set alua_write_metadata flag. 
" "enable (1) or disable (0)") tg_pt_gp_id = property(_get_tg_pt_gp_id, doc="Get ALUA Target Port Group ID") members = property(_get_members, doc="Get LUNs in Target Port Group") alua_support_active_nonoptimized = property(_get_alua_support_active_nonoptimized, _set_alua_support_active_nonoptimized, doc="Enable (1) or disable (0) " "Active/non-optimized support") alua_support_active_optimized = property(_get_alua_support_active_optimized, _set_alua_support_active_optimized, doc="Enable (1) or disable (0) " "Active/optimized support") alua_support_offline = property(_get_alua_support_offline, _set_alua_support_offline, doc="Enable (1) or disable (0) " "offline support") alua_support_unavailable = property(_get_alua_support_unavailable, _set_alua_support_unavailable, doc="enable (1) or disable (0) " "unavailable support") alua_support_standby = property(_get_alua_support_standby, _set_alua_support_standby, doc="enable (1) or disable (0) " "standby support") alua_support_lba_dependent = property(_get_alua_support_lba_dependent, doc="show lba_dependent support " "enabled (1) or disabled (0)") alua_support_transitioning = property(_get_alua_support_transitioning, _set_alua_support_transitioning, doc="enable (1) or disable (0) " "transitioning support") trans_delay_msecs = property(_get_trans_delay_msecs, _set_trans_delay_msecs, doc="msecs to delay state transition") implicit_trans_secs = property(_get_implicit_trans_secs, _set_implicit_trans_secs, doc="implicit transition time limit") nonop_delay_msecs = property(_get_nonop_delay_msecs, _set_nonop_delay_msecs, doc="msecs to delay IO when non-optimized") @classmethod def setup(cls, storage_obj, alua_tpg, err_func): name = alua_tpg['name'] if name == 'default_tg_pt_gp': return alua_tpg_obj = cls(storage_obj, name, alua_tpg['tg_pt_gp_id']) for param, value in six.iteritems(alua_tpg): if param != 'name' and param != 'tg_pt_gp_id': try: setattr(alua_tpg_obj, param, value) except: raise RTSLibError("Could not set attribute '%s' for alua tpg '%s'" % (param, alua_tpg['name']))
cvubrugier/rtslib-fb
rtslib/alua.py
Python
apache-2.0
15,637
# Copyright (c) 2019, NVIDIA Corporation. All rights reserved.
#
# This work is made available under the Nvidia Source Code License-NC.
# To view a copy of this license, visit
# https://nvlabs.github.io/stylegan2/license.html

from . import submission

from .submission.run_context import RunContext

from .submission.submit import SubmitTarget
from .submission.submit import PathType
from .submission.submit import SubmitConfig
from .submission.submit import submit_run
from .submission.submit import get_path_from_template
from .submission.submit import convert_path
from .submission.submit import make_run_dir_path

from .util import EasyDict

submit_config: SubmitConfig = None  # Package level variable for SubmitConfig which is only valid when inside the run function.
google/lecam-gan
third_party/dnnlib/__init__.py
Python
apache-2.0
774
# Copyright 2019 The dm_control Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """A soccer ball that keeps track of ball-player contacts.""" import os from dm_control import mjcf from dm_control.entities import props import numpy as np from dm_control.utils import io as resources _ASSETS_PATH = os.path.join(os.path.dirname(__file__), 'assets', 'soccer_ball') # FIFA regulation parameters for a size 5 ball. _REGULATION_RADIUS = 0.117 # Meters. _REGULATION_MASS = 0.45 # Kilograms. _DEFAULT_FRICTION = (0.7, 0.05, 0.04) # (slide, spin, roll). _DEFAULT_DAMP_RATIO = 0.4 def _get_texture(name): contents = resources.GetResource( os.path.join(_ASSETS_PATH, '{}.png'.format(name))) return mjcf.Asset(contents, '.png') def regulation_soccer_ball(): return SoccerBall( radius=_REGULATION_RADIUS, mass=_REGULATION_MASS, friction=_DEFAULT_FRICTION, damp_ratio=_DEFAULT_DAMP_RATIO) class SoccerBall(props.Primitive): """A soccer ball that keeps track of entities that come into contact.""" def _build(self, radius=0.35, mass=0.045, friction=(0.7, 0.075, 0.075), damp_ratio=1.0, name='soccer_ball'): """Builds this soccer ball. Args: radius: The radius (in meters) of this target sphere. mass: Mass (in kilograms) of the ball. friction: Friction parameters of the ball geom with the three dimensions corresponding to (slide, spin, roll) frictions. damp_ratio: A real positive number. Lower implies less dampening upon contacts. name: The name of this entity. """ super()._build(geom_type='sphere', size=(radius,), name=name) texture = self._mjcf_root.asset.add( 'texture', name='soccer_ball', type='cube', fileup=_get_texture('up'), filedown=_get_texture('down'), filefront=_get_texture('front'), fileback=_get_texture('back'), fileleft=_get_texture('left'), fileright=_get_texture('right')) material = self._mjcf_root.asset.add( 'material', name='soccer_ball', texture=texture) if damp_ratio < 0.0: raise ValueError( f'Invalid `damp_ratio` parameter ({damp_ratio} is not positive).') self._geom.set_attributes( pos=[0, 0, radius], size=[radius], condim=6, priority=1, mass=mass, friction=friction, solref=[0.02, damp_ratio], material=material) # Add some tracking cameras for visualization and logging. self._mjcf_root.worldbody.add( 'camera', name='ball_cam_near', pos=[0, -2, 2], zaxis=[0, -1, 1], fovy=70, mode='trackcom') self._mjcf_root.worldbody.add( 'camera', name='ball_cam', pos=[0, -7, 7], zaxis=[0, -1, 1], fovy=70, mode='trackcom') self._mjcf_root.worldbody.add( 'camera', name='ball_cam_far', pos=[0, -10, 10], zaxis=[0, -1, 1], fovy=70, mode='trackcom') # Keep track of entities to team mapping. self._players = [] # Initialize tracker attributes. self.initialize_entity_trackers() def register_player(self, player): self._players.append(player) def initialize_entity_trackers(self): self._last_hit = None self._hit = False self._repossessed = False self._intercepted = False # Tracks distance traveled by the ball in between consecutive hits. 
self._pos_at_last_step = None self._dist_since_last_hit = None self._dist_between_last_hits = None def initialize_episode(self, physics, unused_random_state): self._geom_id = physics.model.name2id(self._geom.full_identifier, 'geom') self._geom_id_to_player = {} for player in self._players: geoms = player.walker.mjcf_model.find_all('geom') for geom in geoms: geom_id = physics.model.name2id(geom.full_identifier, 'geom') self._geom_id_to_player[geom_id] = player self.initialize_entity_trackers() def after_substep(self, physics, unused_random_state): """Resolve contacts and update ball-player contact trackers.""" if self._hit: # Ball has already registered a valid contact within step (during one of # previous after_substep calls). return # Iterate through all contacts to find the first contact between the ball # and one of the registered entities. for contact in physics.data.contact: # Keep contacts that involve the ball and one of the registered entities. has_self = False for geom_id in (contact.geom1, contact.geom2): if geom_id == self._geom_id: has_self = True else: player = self._geom_id_to_player.get(geom_id) if has_self and player: # Detected a contact between the ball and an registered player. if self._last_hit is not None: self._intercepted = player.team != self._last_hit.team else: self._intercepted = True # Register repossessed before updating last_hit player. self._repossessed = player is not self._last_hit self._last_hit = player # Register hit event. self._hit = True break def before_step(self, physics, random_state): super().before_step(physics, random_state) # Reset per simulation step indicator. self._hit = False self._repossessed = False self._intercepted = False def after_step(self, physics, random_state): super().after_step(physics, random_state) pos = physics.bind(self._geom).xpos if self._hit: # SoccerBall is hit on this step. Update dist_between_last_hits # to dist_since_last_hit before resetting dist_since_last_hit. self._dist_between_last_hits = self._dist_since_last_hit self._dist_since_last_hit = 0. self._pos_at_last_step = pos.copy() if self._dist_since_last_hit is not None: # Accumulate distance traveled since last hit event. self._dist_since_last_hit += np.linalg.norm(pos - self._pos_at_last_step) self._pos_at_last_step = pos.copy() @property def last_hit(self): """The player that last came in contact with the ball or `None`.""" return self._last_hit @property def hit(self): """Indicates if the ball is hit during the last simulation step. For a timeline shown below: ..., agent.step, simulation, agent.step, ... Returns: True: if the ball is hit by a registered player during simulation step. False: if not. """ return self._hit @property def repossessed(self): """Indicates if the ball has been repossessed by a different player. For a timeline shown below: ..., agent.step, simulation, agent.step, ... Returns: True if the ball is hit by a registered player during simulation step and that player is different from `last_hit`. False: if the ball is not hit, or the ball is hit by `last_hit` player. """ return self._repossessed @property def intercepted(self): """Indicates if the ball has been intercepted by a different team. For a timeline shown below: ..., agent.step, simulation, agent.step, ... Returns: True: if the ball is hit for the first time, or repossessed by an player from a different team. False: if the ball is not hit, not repossessed, or repossessed by a teammate to `last_hit`. 
""" return self._intercepted @property def dist_between_last_hits(self): """Distance between last consecutive hits. Returns: Distance between last two consecutive hit events or `None` if there has not been two consecutive hits on the ball. """ return self._dist_between_last_hits
deepmind/dm_control
dm_control/locomotion/soccer/soccer_ball.py
Python
apache-2.0
8,413
# http://remotescripts.blogspot.com """ Track Control User Modes component originally designed for use with the APC40. Copyright (C) 2010 Hanz Petrov <[email protected]> This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. """ import Live from _Framework.ModeSelectorComponent import ModeSelectorComponent from _Framework.ButtonElement import ButtonElement from _Framework.DeviceComponent import DeviceComponent class EncoderUserModesComponent(ModeSelectorComponent): ' SelectorComponent that assigns encoders to different user functions ' __module__ = __name__ def __init__(self, parent, encoder_modes, param_controls, bank_buttons, mixer, device, encoder_device_modes, encoder_eq_modes): #, mixer, sliders): assert (len(bank_buttons) == 4) ModeSelectorComponent.__init__(self) self._parent = parent self._encoder_modes = encoder_modes self._param_controls = param_controls self._bank_buttons = bank_buttons self._mixer = mixer self._device = device self._encoder_device_modes = encoder_device_modes self._encoder_eq_modes = encoder_eq_modes self._mode_index = 0 self._modes_buttons = [] self._user_buttons = [] self._last_mode = 0 def disconnect(self): ModeSelectorComponent.disconnect(self) self._parent = None self._encoder_modes = None self._param_controls = None self._bank_buttons = None self._mixer = None self._device = None self._encoder_device_modes = None self._encoder_eq_modes = None self._modes_buttons = None self._user_buttons = None def on_enabled_changed(self): pass def set_mode(self, mode): assert isinstance(mode, int) assert (mode in range(self.number_of_modes())) if (self._mode_index != mode): self._last_mode = self._mode_index # keep track of previous mode, to allow conditional actions self._mode_index = mode self._set_modes() def set_mode_buttons(self, buttons): assert isinstance(buttons, (tuple, type(None))) for button in self._modes_buttons: button.remove_value_listener(self._mode_value) self._modes_buttons = [] if (buttons != None): for button in buttons: assert isinstance(button, ButtonElement) identify_sender = True button.add_value_listener(self._mode_value, identify_sender) self._modes_buttons.append(button) assert (self._mode_index in range(self.number_of_modes())) def number_of_modes(self): return 4 def update(self): pass def _mode_value(self, value, sender): assert (len(self._modes_buttons) > 0) assert isinstance(value, int) assert isinstance(sender, ButtonElement) assert (self._modes_buttons.count(sender) == 1) if ((value is not 0) or (not sender.is_momentary())): self.set_mode(self._modes_buttons.index(sender)) def _set_modes(self): if self.is_enabled(): assert (self._mode_index in range(self.number_of_modes())) for index in range(len(self._modes_buttons)): if (index <= self._mode_index): self._modes_buttons[index].turn_on() else: self._modes_buttons[index].turn_off() for button in self._modes_buttons: button.release_parameter() button.use_default_message() for control in self._param_controls: control.release_parameter() 
control.use_default_message() #control.set_needs_takeover(False) self._encoder_modes.set_enabled(False) self._encoder_device_modes.set_lock_button(None) self._encoder_device_modes._alt_device.set_bank_nav_buttons(None, None) self._encoder_device_modes._alt_device.set_on_off_button(None) if self._encoder_device_modes._alt_device._parameter_controls != None: for control in self._encoder_device_modes._alt_device._parameter_controls: control.release_parameter() self._encoder_device_modes.set_enabled(False) self._encoder_eq_modes.set_enabled(False) self._encoder_eq_modes.set_lock_button(None) if self._encoder_eq_modes._track_eq != None: self._encoder_eq_modes._track_eq.set_cut_buttons(None) if self._encoder_eq_modes._track_eq._gain_controls != None: for control in self._encoder_eq_modes._track_eq._gain_controls: control.release_parameter() if self._encoder_eq_modes._strip != None: self._encoder_eq_modes._strip.set_send_controls(None) self._user_buttons = [] if (self._mode_index == 0): self._encoder_modes.set_enabled(True) elif (self._mode_index == 1): self._encoder_device_modes.set_enabled(True) self._encoder_device_modes.set_controls_and_buttons(self._param_controls, self._modes_buttons) elif (self._mode_index == 2): self._encoder_eq_modes.set_enabled(True) self._encoder_eq_modes.set_controls_and_buttons(self._param_controls, self._modes_buttons) elif (self._mode_index == 3): self._encoder_eq_modes._ignore_buttons = True if self._encoder_eq_modes._track_eq != None: self._encoder_eq_modes._track_eq._ignore_cut_buttons = True self._encoder_device_modes._ignore_buttons = True for button in self._modes_buttons: self._user_buttons.append(button) for control in self._param_controls: control.set_identifier((control.message_identifier() - 9)) control._ring_mode_button.send_value(0) else: pass #self._rebuild_callback() # local variables: # tab-width: 4
jim-cooley/abletonremotescripts
remote-scripts/samples/APC_64_40_r1b/APC_64_40/EncoderUserModesComponent.py
Python
apache-2.0
6,872
# Copyright 2019 The dm_control Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Tests for the Jaco arm class.""" import itertools import unittest from absl.testing import absltest from absl.testing import parameterized from dm_control import composer from dm_control import mjcf from dm_control.entities.manipulators import kinova from dm_control.entities.manipulators.kinova import jaco_arm from dm_control.mujoco.wrapper import mjbindings import numpy as np mjlib = mjbindings.mjlib class JacoArmTest(parameterized.TestCase): def test_can_compile_and_step_model(self): arm = kinova.JacoArm() physics = mjcf.Physics.from_mjcf_model(arm.mjcf_model) physics.step() def test_can_attach_hand(self): arm = kinova.JacoArm() hand = kinova.JacoHand() arm.attach(hand) physics = mjcf.Physics.from_mjcf_model(arm.mjcf_model) physics.step() # TODO(b/159974149): Investigate why the mass does not match the datasheet. @unittest.expectedFailure def test_mass(self): arm = kinova.JacoArm() physics = mjcf.Physics.from_mjcf_model(arm.mjcf_model) mass = physics.bind(arm.mjcf_model.worldbody).subtreemass expected_mass = 4.4 self.assertAlmostEqual(mass, expected_mass) @parameterized.parameters([ dict(actuator_index=0, control_input=0, expected_velocity=0.), dict(actuator_index=0, control_input=jaco_arm._LARGE_JOINT_MAX_VELOCITY, expected_velocity=jaco_arm._LARGE_JOINT_MAX_VELOCITY), dict(actuator_index=4, control_input=jaco_arm._SMALL_JOINT_MAX_VELOCITY, expected_velocity=jaco_arm._SMALL_JOINT_MAX_VELOCITY), dict(actuator_index=0, control_input=-jaco_arm._LARGE_JOINT_MAX_VELOCITY, expected_velocity=-jaco_arm._LARGE_JOINT_MAX_VELOCITY), dict(actuator_index=0, control_input=2*jaco_arm._LARGE_JOINT_MAX_VELOCITY, # Test clipping expected_velocity=jaco_arm._LARGE_JOINT_MAX_VELOCITY), ]) def test_velocity_actuation( self, actuator_index, control_input, expected_velocity): arm = kinova.JacoArm() physics = mjcf.Physics.from_mjcf_model(arm.mjcf_model) actuator = arm.actuators[actuator_index] bound_actuator = physics.bind(actuator) bound_joint = physics.bind(actuator.joint) acceleration_threshold = 1e-6 with physics.model.disable('contact', 'gravity'): bound_actuator.ctrl = control_input # Step until the joint has stopped accelerating. while abs(bound_joint.qacc) > acceleration_threshold: physics.step() self.assertAlmostEqual(bound_joint.qvel[0], expected_velocity, delta=0.01) @parameterized.parameters([ dict(joint_index=0, min_expected_torque=1.7, max_expected_torque=5.2), dict(joint_index=5, min_expected_torque=0.8, max_expected_torque=7.0)]) def test_backdriving_torque( self, joint_index, min_expected_torque, max_expected_torque): arm = kinova.JacoArm() physics = mjcf.Physics.from_mjcf_model(arm.mjcf_model) bound_joint = physics.bind(arm.joints[joint_index]) torque = min_expected_torque * 0.8 velocity_threshold = 0.1*2*np.pi/60. # 0.1 RPM torque_increment = 0.01 seconds_per_torque_increment = 1. 
max_torque = max_expected_torque * 1.1 while torque < max_torque: # Ensure that no other forces are acting on the arm. with physics.model.disable('gravity', 'contact', 'actuation'): # Reset the simulation so that the initial velocity is zero. physics.reset() bound_joint.qfrc_applied = torque while physics.time() < seconds_per_torque_increment: physics.step() if bound_joint.qvel[0] >= velocity_threshold: self.assertBetween(torque, min_expected_torque, max_expected_torque) return # If we failed to accelerate the joint to the target velocity within the # time limit we'll reset the simulation and increase the torque. torque += torque_increment self.fail('Torque of {} Nm insufficient to backdrive joint.'.format(torque)) @parameterized.parameters([ dict(joint_pos=0., expected_obs=[0., 1.]), dict(joint_pos=-0.5*np.pi, expected_obs=[-1., 0.]), dict(joint_pos=np.pi, expected_obs=[0., -1.]), dict(joint_pos=10*np.pi, expected_obs=[0., 1.])]) def test_joints_pos_observables(self, joint_pos, expected_obs): joint_index = 0 arm = kinova.JacoArm() physics = mjcf.Physics.from_mjcf_model(arm.mjcf_model) physics.bind(arm.joints).qpos[joint_index] = joint_pos actual_obs = arm.observables.joints_pos(physics)[joint_index] np.testing.assert_array_almost_equal(expected_obs, actual_obs) @parameterized.parameters( dict(joint_index=idx, applied_torque=t) for idx, t in itertools.product([0, 2, 4], [0., -6.8, 30.5])) def test_joints_torque_observables(self, joint_index, applied_torque): arm = kinova.JacoArm() joint = arm.joints[joint_index] physics = mjcf.Physics.from_mjcf_model(arm.mjcf_model) with physics.model.disable('gravity', 'limit', 'contact', 'actuation'): # Apply a cartesian torque to the body containing the joint. We use # `xfrc_applied` rather than `qfrc_applied` because forces in # `qfrc_applied` are not measured by the torque sensor). physics.bind(joint.parent).xfrc_applied[3:] = ( applied_torque * physics.bind(joint).xaxis) observed_torque = arm.observables.joints_torque(physics)[joint_index] # Note the change in sign, since the sensor measures torques in the # child->parent direction. self.assertAlmostEqual(observed_torque, -applied_torque, delta=0.1) class JacoHandTest(parameterized.TestCase): def test_can_compile_and_step_model(self): hand = kinova.JacoHand() physics = mjcf.Physics.from_mjcf_model(hand.mjcf_model) physics.step() # TODO(b/159974149): Investigate why the mass does not match the datasheet. @unittest.expectedFailure def test_hand_mass(self): hand = kinova.JacoHand() physics = mjcf.Physics.from_mjcf_model(hand.mjcf_model) mass = physics.bind(hand.mjcf_model.worldbody).subtreemass expected_mass = 0.727 self.assertAlmostEqual(mass, expected_mass) def test_grip_force(self): arena = composer.Arena() hand = kinova.JacoHand() arena.attach(hand) # A sphere with a touch sensor for measuring grip force. prop_model = mjcf.RootElement(model='grip_target') prop_model.worldbody.add('geom', type='sphere', size=[0.02]) touch_site = prop_model.worldbody.add('site', type='sphere', size=[0.025]) touch_sensor = prop_model.sensor.add('touch', site=touch_site) prop = composer.ModelWrapperEntity(prop_model) # Add some slide joints to allow movement of the target in the XY plane. # This helps the contact solver to converge more reliably. 
prop_frame = arena.attach(prop) prop_frame.add('joint', name='slide_x', type='slide', axis=(1, 0, 0)) prop_frame.add('joint', name='slide_y', type='slide', axis=(0, 1, 0)) physics = mjcf.Physics.from_mjcf_model(arena.mjcf_model) bound_pinch_site = physics.bind(hand.pinch_site) bound_actuators = physics.bind(hand.actuators) bound_joints = physics.bind(hand.joints) bound_touch = physics.bind(touch_sensor) # Position the grip target at the pinch site. prop.set_pose(physics, position=bound_pinch_site.xpos) # Close the fingers with as much force as the actuators will allow. bound_actuators.ctrl = bound_actuators.ctrlrange[:, 1] # Run the simulation forward until the joints stop moving. physics.step() qvel_thresh = 1e-3 # radians / s while max(abs(bound_joints.qvel)) > qvel_thresh: physics.step() expected_min_grip_force = 20. expected_max_grip_force = 30. grip_force = bound_touch.sensordata self.assertBetween( grip_force, expected_min_grip_force, expected_max_grip_force, msg='Expected grip force to be between {} and {} N, got {} N.'.format( expected_min_grip_force, expected_max_grip_force, grip_force)) @parameterized.parameters([dict(opening=True), dict(opening=False)]) def test_finger_travel_time(self, opening): hand = kinova.JacoHand() physics = mjcf.Physics.from_mjcf_model(hand.mjcf_model) bound_actuators = physics.bind(hand.actuators) bound_joints = physics.bind(hand.joints) min_ctrl, max_ctrl = bound_actuators.ctrlrange.T min_qpos, max_qpos = bound_joints.range.T # Measure the time taken for the finger joints to traverse 99.9% of their # total range. qpos_tol = 1e-3 * (max_qpos - min_qpos) if opening: hand.set_grasp(physics=physics, close_factors=1.) # Fully closed. np.testing.assert_array_almost_equal(bound_joints.qpos, max_qpos) target_pos = min_qpos # Fully open. ctrl = min_ctrl # Open the fingers as fast as the actuators will allow. else: hand.set_grasp(physics=physics, close_factors=0.) # Fully open. np.testing.assert_array_almost_equal(bound_joints.qpos, min_qpos) target_pos = max_qpos # Fully closed. ctrl = max_ctrl # Close the fingers as fast as the actuators will allow. # Run the simulation until all joints have reached their target positions. bound_actuators.ctrl = ctrl while np.any(abs(bound_joints.qpos - target_pos) > qpos_tol): with physics.model.disable('gravity'): physics.step() expected_travel_time = 1.2 # Seconds. self.assertAlmostEqual(physics.time(), expected_travel_time, delta=0.1) @parameterized.parameters([ dict(pos=np.r_[0., 0., 0.3], quat=np.r_[0., 1., 0., 1.]), dict(pos=np.r_[0., -0.1, 0.5], quat=np.r_[1., 1., 0., 0.]), ]) def test_pinch_site_observables(self, pos, quat): arm = kinova.JacoArm() hand = kinova.JacoHand() arena = composer.Arena() arm.attach(hand) arena.attach(arm) physics = mjcf.Physics.from_mjcf_model(arena.mjcf_model) # Normalize the quaternion. quat /= np.linalg.norm(quat) # Drive the arm so that the pinch site is at the desired position and # orientation. success = arm.set_site_to_xpos( physics=physics, random_state=np.random.RandomState(0), site=hand.pinch_site, target_pos=pos, target_quat=quat) self.assertTrue(success) # Check that the observations are as expected. 
observed_pos = hand.observables.pinch_site_pos(physics) np.testing.assert_allclose(observed_pos, pos, atol=1e-3) observed_rmat = hand.observables.pinch_site_rmat(physics).reshape(3, 3) expected_rmat = np.empty((3, 3), np.double) mjlib.mju_quat2Mat(expected_rmat.ravel(), quat) difference_rmat = observed_rmat.dot(expected_rmat.T) # `difference_rmat` might not be perfectly orthonormal, which could lead to # an invalid value being passed to arccos. u, _, vt = np.linalg.svd(difference_rmat, full_matrices=False) ortho_difference_rmat = u.dot(vt) angular_difference = np.arccos((np.trace(ortho_difference_rmat) - 1) / 2) self.assertLess(angular_difference, 1e-3) if __name__ == '__main__': absltest.main()
deepmind/dm_control
dm_control/entities/manipulators/kinova/kinova_test.py
Python
apache-2.0
11,745
# -*- coding: utf-8 -*-

# Copyright 2014 Foxdog Studios
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import unittest

from ddp.messages.client import MethodMessage
from ddp.messages.client import MethodMessageParser


class MethodMessageParserTestCase(unittest.TestCase):
    def setUp(self):
        self.parser = MethodMessageParser()

    def test_parse(self):
        id = 'id'
        method = 'method'
        params = [True, 1.0]
        message = self.parser.parse({'msg': 'method', 'id': id,
                                     'method': method, 'params': params})
        self.assertEqual(message, MethodMessage(id, method, params))
foxdog-studios/pyddp
tests/messages/client/test_method_message_parser.py
Python
apache-2.0
1,258