code
stringlengths
2
1.05M
repo_name
stringlengths
5
104
path
stringlengths
4
251
language
stringclasses
1 value
license
stringclasses
15 values
size
int32
2
1.05M
import subprocess from constants import get_migration_directory, JobStatus from models import ConvertConfigJob from multi_process import WorkUnit import os import re NOX_64_BINARY = "nox-linux-64.bin" NOX_64_MAC = "nox-mac64.bin" class ConvertConfigWorkUnit(WorkUnit): def __init__(self, job_id): WorkUnit.__init__(self) self.job_id = job_id def start(self, db_session, logger, process_name): self.db_session = db_session try: self.convert_config_job = self.db_session.query(ConvertConfigJob).filter(ConvertConfigJob.id == self.job_id).first() if self.convert_config_job is None: logger.error('Unable to retrieve convert config job: %s' % self.job_id) return self.convert_config_job.set_status("Converting the configurations") self.db_session.commit() file_path = self.convert_config_job.file_path nox_to_use = get_migration_directory() + NOX_64_BINARY # nox_to_use = get_migration_directory() + NOX_64_MAC print "start executing nox conversion..." try: commands = [subprocess.Popen(["chmod", "+x", nox_to_use]), subprocess.Popen([nox_to_use, "-f", file_path], stdout=subprocess.PIPE, stderr=subprocess.PIPE) ] nox_output, nox_error = commands[1].communicate() print "the nox finished its job." except OSError: self.convert_config_job.set_status(JobStatus.FAILED) self.db_session.commit() logger.exception("Running the configuration migration tool " + "{} on config file {} hit OSError.".format(nox_to_use, file_path)) conversion_successful = False if nox_error: self.convert_config_job.set_status(JobStatus.FAILED) self.db_session.commit() logger.exception("Running the configuration migration tool {} ".format(nox_to_use) + "on config file {} hit error:\n {}".format(file_path, nox_error)) if re.search("Done \[.*\]", nox_output): path = "" filename = file_path if file_path.count("/") > 0: path_filename = file_path.rsplit("/", 1) path = path_filename[0] filename = path_filename[1] converted_filename = filename.rsplit('.', 1)[0] + ".csv" if os.path.isfile(os.path.join(path, converted_filename)): self.convert_config_job.set_status(JobStatus.COMPLETED) self.db_session.commit() conversion_successful = True if not conversion_successful: self.convert_config_job.set_status(JobStatus.FAILED) self.db_session.commit() logger.exception("Configuration migration tool failed to convert {}".format(file_path) + ": {}".format(nox_output)) finally: self.db_session.close() def get_unique_key(self): return 'convert_config_job_{}'.format(self.job_id)
csm-aut/csm
csmserver/work_units/convert_config_work_unit.py
Python
apache-2.0
3,413
from mod_base import * class CmdPrefix(Command): """Check or set the command prefix that the bot will respond to.""" def run(self, win, user, data, caller=None): args = Args(data) if args.Empty(): cp = self.bot.config["cmd_prefix"] win.Send("current command prefix is: " + cp) return False self.bot.config["cmd_prefix"] = args[0] win.Send("done") module = { "class": CmdPrefix, "type": MOD_COMMAND, "level": 5, "zone": IRC_ZONE_BOTH }
richrd/bx
modules/cmdprefix.py
Python
apache-2.0
528
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright (c) 2012-2017 Snowflake Computing Inc. All right reserved. # """ Concurrent test module """ from logging import getLogger from multiprocessing.pool import ThreadPool import pytest from parameters import (CONNECTION_PARAMETERS_ADMIN) logger = getLogger(__name__) import snowflake.connector from snowflake.connector.compat import TO_UNICODE def _run_more_query(meta): logger.debug("running queries in %s%s", meta['user'], meta['idx']) cnx = meta['cnx'] try: cnx.cursor().execute(""" select count(*) from (select seq8() seq from table(generator(timelimit => 4))) """) logger.debug("completed queries in %s%s", meta['user'], meta['idx']) return {'user': meta['user'], 'result': 1} except snowflake.connector.errors.ProgrammingError: logger.exception('failed to select') return {'user': meta['user'], 'result': 0} @pytest.mark.skipif(True or not CONNECTION_PARAMETERS_ADMIN, reason=""" Flaky tests. To be fixed """) def test_concurrent_multiple_user_queries(conn_cnx, db_parameters): """ Multithreaded multiple users tests """ max_per_user = 10 max_per_account = 20 max_per_instance = 10 with conn_cnx(user=db_parameters['sf_user'], password=db_parameters['sf_password'], account=db_parameters['sf_account']) as cnx: cnx.cursor().execute( "alter system set QUERY_GATEWAY_ENABLED=true") cnx.cursor().execute( "alter system set QUERY_GATEWAY_MAX_PER_USER={0}".format( max_per_user)) cnx.cursor().execute( "alter system set QUERY_GATEWAY_MAX_PER_ACCOUNT={0}".format( max_per_account)) cnx.cursor().execute( "alter system set QUERY_GATEWAY_MAX_PER_INSTANCE={0}".format( max_per_instance)) try: with conn_cnx() as cnx: cnx.cursor().execute( "create or replace warehouse regress1 " "warehouse_type='medium' warehouse_size=small") cnx.cursor().execute( "create or replace warehouse regress2 " "warehouse_type='medium' warehouse_size=small") cnx.cursor().execute("use role securityadmin") cnx.cursor().execute("create or replace user snowwoman " "password='test'") cnx.cursor().execute("use role accountadmin") cnx.cursor().execute("grant role sysadmin to user snowwoman") cnx.cursor().execute("grant all on warehouse regress2 to sysadmin") cnx.cursor().execute( "alter user snowwoman set default_role=sysadmin") suc_cnt1 = 0 suc_cnt2 = 0 with conn_cnx() as cnx1: with conn_cnx(user='snowwoman', password='test') as cnx2: cnx1.cursor().execute('use warehouse regress1') cnx2.cursor().execute('use warehouse regress2') number_of_threads = 50 meta = [] for i in range(number_of_threads): cnx = cnx1 if i < number_of_threads / 2 else cnx2 user = 'A' if i < number_of_threads / 2 else 'B' idx = TO_UNICODE(i + 1) \ if i < number_of_threads / 2 \ else TO_UNICODE(i + 1) meta.append({'user': user, 'idx': idx, 'cnx': cnx}) pool = ThreadPool(processes=number_of_threads) all_results = pool.map(_run_more_query, meta) assert len(all_results) == number_of_threads, \ 'total number of jobs' for r in all_results: if r['user'] == 'A' and r['result'] > 0: suc_cnt1 += 1 elif r['user'] == 'B' and r['result'] > 0: suc_cnt2 += 1 logger.debug("A success: %s", suc_cnt1) logger.debug("B success: %s", suc_cnt2) # NOTE: if the previous test cancels a query, the incoming # query counter may not be reduced asynchrously, so # the maximum number of runnable queries can be one less assert suc_cnt1 + suc_cnt2 in (max_per_instance * 2, max_per_instance * 2 - 1), \ 'success queries for user A and B' finally: with conn_cnx() as cnx: cnx.cursor().execute("use role accountadmin") cnx.cursor().execute("drop warehouse if exists 
regress2") cnx.cursor().execute("drop warehouse if exists regress1") cnx.cursor().execute("use role securityadmin") cnx.cursor().execute("drop user if exists snowwoman") with conn_cnx(user=db_parameters['sf_user'], password=db_parameters['sf_password'], account=db_parameters['sf_account']) as cnx: cnx.cursor().execute( "alter system set QUERY_GATEWAY_MAX_PER_USER=default") cnx.cursor().execute( "alter system set QUERY_GATEWAY_MAX_PER_INSTANCE=default") cnx.cursor().execute( "alter system set QUERY_GATEWAY_MAX_PER_ACCOUNT=default")
mayfield/snowflake-connector-python
test/test_concurrent_multi_users.py
Python
apache-2.0
5,324
# -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import warnings from typing import Callable, Dict, Optional, Sequence, Tuple from google.api_core import grpc_helpers from google.api_core import gapic_v1 import google.auth # type: ignore from google.auth import credentials as ga_credentials # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore import grpc # type: ignore from google.ads.googleads.v9.resources.types import bidding_strategy from google.ads.googleads.v9.services.types import bidding_strategy_service from .base import BiddingStrategyServiceTransport, DEFAULT_CLIENT_INFO class BiddingStrategyServiceGrpcTransport(BiddingStrategyServiceTransport): """gRPC backend transport for BiddingStrategyService. Service to manage bidding strategies. This class defines the same methods as the primary client, so the primary client can load the underlying transport implementation and call it. It sends protocol buffers over the wire using gRPC (which is built on top of HTTP/2); the ``grpcio`` package must be installed. """ def __init__( self, *, host: str = "googleads.googleapis.com", credentials: ga_credentials.Credentials = None, credentials_file: str = None, scopes: Sequence[str] = None, channel: grpc.Channel = None, api_mtls_endpoint: str = None, client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, ssl_channel_credentials: grpc.ChannelCredentials = None, quota_project_id: Optional[str] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: """Instantiate the transport. Args: host (Optional[str]): The hostname to connect to. credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. This argument is ignored if ``channel`` is provided. credentials_file (Optional[str]): A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. This argument is ignored if ``channel`` is provided. scopes (Optional(Sequence[str])): A list of scopes. This argument is ignored if ``channel`` is provided. channel (Optional[grpc.Channel]): A ``Channel`` instance through which to make calls. api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. If provided, it overrides the ``host`` argument and tries to create a mutual TLS channel with client SSL credentials from ``client_cert_source`` or application default SSL credentials. client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): Deprecated. A callback to provide client SSL certificate bytes and private key bytes, both in PEM format. It is ignored if ``api_mtls_endpoint`` is None. ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials for grpc channel. It is ignored if ``channel`` is provided. quota_project_id (Optional[str]): An optional project to use for billing and quota. 
client_info (google.api_core.gapic_v1.client_info.ClientInfo): The client info used to send a user-agent string along with API requests. If ``None``, then default info will be used. Generally, you only need to set this if you're developing your own client library. Raises: google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport creation failed for any reason. """ self._ssl_channel_credentials = ssl_channel_credentials if channel: # Sanity check: Ensure that channel and credentials are not both # provided. credentials = False # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None elif api_mtls_endpoint: warnings.warn( "api_mtls_endpoint and client_cert_source are deprecated", DeprecationWarning, ) host = ( api_mtls_endpoint if ":" in api_mtls_endpoint else api_mtls_endpoint + ":443" ) if credentials is None: credentials, _ = google.auth.default( scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id ) # Create SSL credentials with client_cert_source or application # default SSL credentials. if client_cert_source: cert, key = client_cert_source() ssl_credentials = grpc.ssl_channel_credentials( certificate_chain=cert, private_key=key ) else: ssl_credentials = SslCredentials().ssl_credentials # create a new channel. The provided one is ignored. self._grpc_channel = type(self).create_channel( host, credentials=credentials, credentials_file=credentials_file, ssl_credentials=ssl_credentials, scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), ("grpc.max_receive_message_length", -1), ], ) self._ssl_channel_credentials = ssl_credentials else: host = host if ":" in host else host + ":443" if credentials is None: credentials, _ = google.auth.default(scopes=self.AUTH_SCOPES) # create a new channel. The provided one is ignored. self._grpc_channel = type(self).create_channel( host, credentials=credentials, ssl_credentials=ssl_channel_credentials, scopes=self.AUTH_SCOPES, options=[ ("grpc.max_send_message_length", -1), ("grpc.max_receive_message_length", -1), ], ) self._stubs = {} # type: Dict[str, Callable] # Run the base constructor. super().__init__( host=host, credentials=credentials, client_info=client_info, ) @classmethod def create_channel( cls, host: str = "googleads.googleapis.com", credentials: ga_credentials.Credentials = None, scopes: Optional[Sequence[str]] = None, **kwargs, ) -> grpc.Channel: """Create and return a gRPC channel object. Args: address (Optionsl[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. kwargs (Optional[dict]): Keyword arguments, which are passed to the channel creation. Returns: grpc.Channel: A gRPC channel object. """ return grpc_helpers.create_channel( host, credentials=credentials, scopes=scopes or cls.AUTH_SCOPES, **kwargs, ) def close(self): self.grpc_channel.close() @property def grpc_channel(self) -> grpc.Channel: """Return the channel designed to connect to this service. 
""" return self._grpc_channel @property def get_bidding_strategy( self, ) -> Callable[ [bidding_strategy_service.GetBiddingStrategyRequest], bidding_strategy.BiddingStrategy, ]: r"""Return a callable for the get bidding strategy method over gRPC. Returns the requested bidding strategy in full detail. List of thrown errors: `AuthenticationError <>`__ `AuthorizationError <>`__ `HeaderError <>`__ `InternalError <>`__ `QuotaError <>`__ `RequestError <>`__ Returns: Callable[[~.GetBiddingStrategyRequest], ~.BiddingStrategy]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_bidding_strategy" not in self._stubs: self._stubs["get_bidding_strategy"] = self.grpc_channel.unary_unary( "/google.ads.googleads.v9.services.BiddingStrategyService/GetBiddingStrategy", request_serializer=bidding_strategy_service.GetBiddingStrategyRequest.serialize, response_deserializer=bidding_strategy.BiddingStrategy.deserialize, ) return self._stubs["get_bidding_strategy"] @property def mutate_bidding_strategies( self, ) -> Callable[ [bidding_strategy_service.MutateBiddingStrategiesRequest], bidding_strategy_service.MutateBiddingStrategiesResponse, ]: r"""Return a callable for the mutate bidding strategies method over gRPC. Creates, updates, or removes bidding strategies. Operation statuses are returned. List of thrown errors: `AdxError <>`__ `AuthenticationError <>`__ `AuthorizationError <>`__ `BiddingError <>`__ `BiddingStrategyError <>`__ `ContextError <>`__ `DatabaseError <>`__ `DateError <>`__ `DistinctError <>`__ `FieldError <>`__ `FieldMaskError <>`__ `HeaderError <>`__ `IdError <>`__ `InternalError <>`__ `MutateError <>`__ `NewResourceCreationError <>`__ `NotEmptyError <>`__ `NullError <>`__ `OperationAccessDeniedError <>`__ `OperatorError <>`__ `QuotaError <>`__ `RangeError <>`__ `RequestError <>`__ `SizeLimitError <>`__ `StringFormatError <>`__ `StringLengthError <>`__ Returns: Callable[[~.MutateBiddingStrategiesRequest], ~.MutateBiddingStrategiesResponse]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "mutate_bidding_strategies" not in self._stubs: self._stubs[ "mutate_bidding_strategies" ] = self.grpc_channel.unary_unary( "/google.ads.googleads.v9.services.BiddingStrategyService/MutateBiddingStrategies", request_serializer=bidding_strategy_service.MutateBiddingStrategiesRequest.serialize, response_deserializer=bidding_strategy_service.MutateBiddingStrategiesResponse.deserialize, ) return self._stubs["mutate_bidding_strategies"] __all__ = ("BiddingStrategyServiceGrpcTransport",)
googleads/google-ads-python
google/ads/googleads/v9/services/services/bidding_strategy_service/transports/grpc.py
Python
apache-2.0
12,551
__author__ = 'wangxun' """def double(list): i=0 while i <len(list): print i list[i]=list[i]*2 i+=1 return(list) """ def doulbe(list): i=0 [list[i]=list[i]*2 while i<len(list)] return(list) print double([1,2,3,4])
wonstonx/wxgittest
day2 test1.py
Python
apache-2.0
266
''' Created on 04/11/2015 @author: S41nz ''' class TBTAFMetadataType(object): ''' Simple enumeration class that describes the types of metadata that can be discovered within a source code asset ''' #Verdict enumeration types #The verdict for a passing test TEST_CODE="Test Code" #The verdict for a failed test PRODUCT_CODE="Product Code"
S41nz/TBTAF
tbtaf/common/enums/metadata_type.py
Python
apache-2.0
379
input = """ f(1). :- f(X). """ output = """ f(1). :- f(X). """
veltri/DLV2
tests/parser/grounding_only.2.test.py
Python
apache-2.0
71
# Copyright 2015-2018 Capital One Services, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import, division, print_function, unicode_literals from azure_common import BaseTest, arm_template class NetworkSecurityGroupTest(BaseTest): def setUp(self): super(NetworkSecurityGroupTest, self).setUp() @arm_template('networksecuritygroup.json') def test_find_by_name(self): p = self.load_policy({ 'name': 'test-azure-nsg', 'resource': 'azure.networksecuritygroup', 'filters': [ {'type': 'value', 'key': 'name', 'op': 'eq', 'value_type': 'normalize', 'value': 'c7n-nsg'}], }) resources = p.run() self.assertEqual(len(resources), 1) @arm_template('networksecuritygroup.json') def test_allow_single_port(self): p = self.load_policy({ 'name': 'test-azure-nsg', 'resource': 'azure.networksecuritygroup', 'filters': [ {'type': 'value', 'key': 'name', 'op': 'eq', 'value_type': 'normalize', 'value': 'c7n-nsg'}, {'type': 'ingress', 'ports': '80', 'access': 'Allow'}], }) resources = p.run() self.assertEqual(len(resources), 1) @arm_template('networksecuritygroup.json') def test_allow_multiple_ports(self): p = self.load_policy({ 'name': 'test-azure-nsg', 'resource': 'azure.networksecuritygroup', 'filters': [ {'type': 'value', 'key': 'name', 'op': 'eq', 'value_type': 'normalize', 'value': 'c7n-nsg'}, {'type': 'ingress', 'ports': '80,8080-8084,88-90', 'match': 'all', 'access': 'Allow'}], }) resources = p.run() self.assertEqual(len(resources), 1) @arm_template('networksecuritygroup.json') def test_allow_ports_range_any(self): p = self.load_policy({ 'name': 'test-azure-nsg', 'resource': 'azure.networksecuritygroup', 'filters': [ {'type': 'value', 'key': 'name', 'op': 'eq', 'value_type': 'normalize', 'value': 'c7n-nsg'}, {'type': 'ingress', 'ports': '40-100', 'match': 'any', 'access': 'Allow'}] }) resources = p.run() self.assertEqual(len(resources), 1) @arm_template('networksecuritygroup.json') def test_deny_port(self): p = self.load_policy({ 'name': 'test-azure-nsg', 'resource': 'azure.networksecuritygroup', 'filters': [ {'type': 'value', 'key': 'name', 'op': 'eq', 'value_type': 'normalize', 'value': 'c7n-nsg'}, {'type': 'ingress', 'ports': '8086', 'access': 'Deny'}], }) resources = p.run() self.assertEqual(len(resources), 1) @arm_template('networksecuritygroup.json') def test_egress_policy_protocols(self): p = self.load_policy({ 'name': 'test-azure-nsg', 'resource': 'azure.networksecuritygroup', 'filters': [ {'type': 'value', 'key': 'name', 'op': 'eq', 'value_type': 'normalize', 'value': 'c7n-nsg'}, {'type': 'egress', 'ports': '22', 'ipProtocol': 'TCP', 'access': 'Allow'}], }) resources = p.run() self.assertEqual(len(resources), 1) p = self.load_policy({ 'name': 'test-azure-nsg', 'resource': 'azure.networksecuritygroup', 'filters': [ {'type': 'value', 'key': 'name', 'op': 'eq', 'value_type': 'normalize', 'value': 'c7n-nsg'}, {'type': 'egress', 'ports': '22', 'ipProtocol': 'UDP', 'access': 'Allow'}], }) resources = p.run() self.assertEqual(len(resources), 0) @arm_template('networksecuritygroup.json') 
def test_open_ports(self): p = self.load_policy({ 'name': 'test-azure-nsg', 'resource': 'azure.networksecuritygroup', 'filters': [ {'type': 'value', 'key': 'name', 'op': 'eq', 'value_type': 'normalize', 'value': 'c7n-nsg'}, {'type': 'ingress', 'ports': '1000-1100', 'match': 'any', 'access': 'Deny'}], 'actions': [ { 'type': 'open', 'ports': '1000-1100', 'direction': 'Inbound'} ] }) resources = p.run() self.assertEqual(len(resources), 1) p = self.load_policy({ 'name': 'test-azure-nsg', 'resource': 'azure.networksecuritygroup', 'filters': [ {'type': 'value', 'key': 'name', 'op': 'eq', 'value_type': 'normalize', 'value': 'c7n-nsg'}, {'type': 'ingress', 'ports': '1000-1100', 'match': 'any', 'access': 'Deny'}], 'actions': [ {'type': 'open', 'ports': '1000-1100', 'direction': 'Inbound'}] }) resources = p.run() self.assertEqual(len(resources), 0)
taohungyang/cloud-custodian
tools/c7n_azure/tests/test_networksecuritygroup.py
Python
apache-2.0
6,524
# Copyright 2021 The Cirq Developers # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Optional, Sequence from pyquil import get_qc from pyquil.api import QuantumComputer import cirq from cirq_rigetti import circuit_transformers as transformers from cirq_rigetti import circuit_sweep_executors as executors _default_executor = executors.with_quilc_compilation_and_cirq_parameter_resolution class RigettiQCSSampler(cirq.Sampler): """This class supports running circuits on QCS quantum hardware as well as pyQuil's quantum virtual machine (QVM). It implements the `cirq.Sampler` interface and thereby supports sampling parameterized circuits across parameter sweeps. """ def __init__( self, quantum_computer: QuantumComputer, executor: executors.CircuitSweepExecutor = _default_executor, transformer: transformers.CircuitTransformer = transformers.default, ): """Initializes a `RigettiQCSSampler`. Args: quantum_computer: A `pyquil.api.QuantumComputer` against which to run the `cirq.Circuit`s. executor: A callable that first uses the below `transformer` on `cirq.Circuit` s and then executes the transformed circuit on the `quantum_computer`. You may pass your own callable or any static method on `CircuitSweepExecutors`. transformer: A callable that transforms the `cirq.Circuit` into a `pyquil.Program`. You may pass your own callable or any static method on `CircuitTransformers`. """ self._quantum_computer = quantum_computer self.executor = executor self.transformer = transformer def run_sweep( self, program: cirq.AbstractCircuit, params: cirq.Sweepable, repetitions: int = 1, ) -> Sequence[cirq.Result]: """This will evaluate results on the circuit for every set of parameters in `params`. Args: program: Circuit to evaluate for each set of parameters in `params`. params: `cirq.Sweepable` of parameters which this function passes to `cirq.protocols.resolve_parameters` for evaluating the circuit. repetitions: Number of times to run each iteration through the `params`. For a given set of parameters, the `cirq.Result` will include a measurement for each repetition. Returns: A list of `cirq.Result` s. """ resolvers = [r for r in cirq.to_resolvers(params)] return self.executor( quantum_computer=self._quantum_computer, circuit=program.unfreeze(copy=False), resolvers=resolvers, repetitions=repetitions, transformer=self.transformer, ) def get_rigetti_qcs_sampler( quantum_processor_id: str, *, as_qvm: Optional[bool] = None, noisy: Optional[bool] = None, executor: executors.CircuitSweepExecutor = _default_executor, transformer: transformers.CircuitTransformer = transformers.default, ) -> RigettiQCSSampler: """Calls `pyquil.get_qc` to initialize a `pyquil.api.QuantumComputer` and uses this to initialize `RigettiQCSSampler`. Args: quantum_processor_id: The name of the desired quantum computer. This should correspond to a name returned by `pyquil.api.list_quantum_computers`. Names ending in "-qvm" will return a QVM. Names ending in "-pyqvm" will return a `pyquil.PyQVM`. 
Otherwise, we will return a Rigetti QCS QPU if one exists with the requested name. as_qvm: An optional flag to force construction of a QVM (instead of a QPU). If specified and set to `True`, a QVM-backed quantum computer will be returned regardless of the name's suffix noisy: An optional flag to force inclusion of a noise model. If specified and set to `True`, a quantum computer with a noise model will be returned. The generic QVM noise model is simple T1 and T2 noise plus readout error. At the time of this writing, this has no effect on a QVM initialized based on a Rigetti QCS `qcs_api_client.models.InstructionSetArchitecture`. executor: A callable that first uses the below transformer on cirq.Circuit s and then executes the transformed circuit on the quantum_computer. You may pass your own callable or any static method on CircuitSweepExecutors. transformer: A callable that transforms the cirq.Circuit into a pyquil.Program. You may pass your own callable or any static method on CircuitTransformers. Returns: A `RigettiQCSSampler` with the specified quantum processor, executor, and transformer. """ qc = get_qc( quantum_processor_id, as_qvm=as_qvm, noisy=noisy, ) return RigettiQCSSampler( quantum_computer=qc, executor=executor, transformer=transformer, )
quantumlib/Cirq
cirq-rigetti/cirq_rigetti/sampler.py
Python
apache-2.0
5,548
"""Reverse complement reads with Seqtk.""" import os from plumbum import TEE from resolwe.process import ( Cmd, DataField, FileField, FileHtmlField, ListField, Process, StringField, ) class ReverseComplementSingle(Process): """Reverse complement single-end FASTQ reads file using Seqtk.""" slug = "seqtk-rev-complement-single" process_type = "data:reads:fastq:single:seqtk" name = "Reverse complement FASTQ (single-end)" requirements = { "expression-engine": "jinja", "executor": { "docker": {"image": "public.ecr.aws/s4q6j6e8/resolwebio/common:3.0.0"}, }, "resources": { "cores": 1, "memory": 16384, }, } entity = { "type": "sample", } data_name = '{{ reads|sample_name|default("?") }}' version = "1.2.0" class Input: """Input fields to process ReverseComplementSingle.""" reads = DataField("reads:fastq:single", label="Reads") class Output: """Output fields.""" fastq = ListField(FileField(), label="Reverse complemented FASTQ file") fastqc_url = ListField(FileHtmlField(), label="Quality control with FastQC") fastqc_archive = ListField(FileField(), label="Download FastQC archive") def run(self, inputs, outputs): """Run the analysis.""" basename = os.path.basename(inputs.reads.output.fastq[0].path) assert basename.endswith(".fastq.gz") name = basename[:-9] complemented_name = f"{name}_complemented.fastq" # Concatenate multilane reads ( Cmd["cat"][[reads.path for reads in inputs.reads.output.fastq]] > "input_reads.fastq.gz" )() # Reverse complement reads (Cmd["seqtk"]["seq", "-r", "input_reads.fastq.gz"] > complemented_name)() _, _, stderr = ( Cmd["fastqc"][complemented_name, "--extract", "--outdir=./"] & TEE ) if "Failed to process" in stderr or "Skipping" in stderr: self.error("Failed while processing with FastQC.") (Cmd["gzip"][complemented_name])() outputs.fastq = [f"{complemented_name}.gz"] outputs.fastqc_url = [f"{name}_complemented_fastqc.html"] outputs.fastqc_archive = [f"{name}_complemented_fastqc.zip"] class ReverseComplementPaired(Process): """Reverse complement paired-end FASTQ reads file using Seqtk.""" slug = "seqtk-rev-complement-paired" process_type = "data:reads:fastq:paired:seqtk" name = "Reverse complement FASTQ (paired-end)" requirements = { "expression-engine": "jinja", "executor": { "docker": {"image": "public.ecr.aws/s4q6j6e8/resolwebio/common:3.0.0"}, }, "resources": { "cores": 1, "memory": 16384, }, } entity = { "type": "sample", } data_name = '{{ reads|sample_name|default("?") }}' version = "1.1.0" class Input: """Input fields to process ReverseComplementPaired.""" reads = DataField("reads:fastq:paired", label="Reads") select_mate = StringField( label="Select mate", description="Select the which mate should be reverse complemented.", choices=[("Mate 1", "Mate 1"), ("Mate 2", "Mate 2"), ("Both", "Both")], default="Mate 1", ) class Output: """Output fields.""" fastq = ListField(FileField(), label="Reverse complemented FASTQ file") fastq2 = ListField(FileField(), label="Remaining mate") fastqc_url = ListField( FileHtmlField(), label="Quality control with FastQC (Mate 1)" ) fastqc_archive = ListField( FileField(), label="Download FastQC archive (Mate 1)" ) fastqc_url2 = ListField( FileHtmlField(), label="Quality control with FastQC (Mate 2)" ) fastqc_archive2 = ListField( FileField(), label="Download FastQC archive (Mate 2)" ) def run(self, inputs, outputs): """Run the analysis.""" basename_mate1 = os.path.basename(inputs.reads.output.fastq[0].path) basename_mate2 = os.path.basename(inputs.reads.output.fastq2[0].path) assert basename_mate1.endswith(".fastq.gz") assert 
basename_mate2.endswith(".fastq.gz") name_mate1 = basename_mate1[:-9] name_mate2 = basename_mate2[:-9] original_mate1 = f"{name_mate1}_original.fastq.gz" original_mate2 = f"{name_mate2}_original.fastq.gz" ( Cmd["cat"][[reads.path for reads in inputs.reads.output.fastq]] > original_mate1 )() ( Cmd["cat"][[reads.path for reads in inputs.reads.output.fastq2]] > original_mate2 )() if inputs.select_mate == "Mate 1": complemented_mate1 = f"{name_mate1}_complemented.fastq" (Cmd["seqtk"]["seq", "-r", original_mate1] > complemented_mate1)() _, _, stderr = ( Cmd["fastqc"][complemented_mate1, "--extract", "--outdir=./"] & TEE ) if "Failed to process" in stderr or "Skipping" in stderr: self.error("Failed while processing with FastQC.") _, _, stderr2 = ( Cmd["fastqc"][original_mate2, "--extract", "--outdir=./"] & TEE ) if "Failed to process" in stderr2 or "Skipping" in stderr2: self.error("Failed while processing with FastQC.") (Cmd["gzip"][complemented_mate1])() outputs.fastq = [f"{complemented_mate1}.gz"] outputs.fastq2 = [original_mate2] outputs.fastqc_url = [f"{name_mate1}_complemented_fastqc.html"] outputs.fastqc_archive = [f"{name_mate1}_complemented_fastqc.zip"] outputs.fastqc_url2 = [f"{name_mate2}_original_fastqc.html"] outputs.fastqc_archive2 = [f"{name_mate2}_original_fastqc.zip"] elif inputs.select_mate == "Mate 2": complemented_mate2 = f"{name_mate2}_complemented.fastq" ( Cmd["seqtk"]["seq", "-r", f"{name_mate2}_original.fastq.gz"] > complemented_mate2 )() _, _, stderr = ( Cmd["fastqc"][original_mate1, "--extract", "--outdir=./"] & TEE ) if "Failed to process" in stderr or "Skipping" in stderr: self.error("Failed while processing with FastQC.") _, _, stderr2 = ( Cmd["fastqc"][complemented_mate2, "--extract", "--outdir=./"] & TEE ) if "Failed to process" in stderr2 or "Skipping" in stderr2: self.error("Failed while processing with FastQC.") (Cmd["gzip"][complemented_mate2])() outputs.fastq = [original_mate1] outputs.fastq2 = [f"{complemented_mate2}.gz"] outputs.fastqc_url = [f"{name_mate1}_original_fastqc.html"] outputs.fastqc_archive = [f"{name_mate1}_original_fastqc.zip"] outputs.fastqc_url2 = [f"{name_mate2}_complemented_fastqc.html"] outputs.fastqc_archive2 = [f"{name_mate2}_complemented_fastqc.zip"] else: complemented_mate1 = f"{name_mate1}_complemented.fastq" complemented_mate2 = f"{name_mate2}_complemented.fastq" ( Cmd["seqtk"]["seq", "-r", f"{name_mate1}_original.fastq.gz"] > complemented_mate1 )() _, _, stderr = ( Cmd["fastqc"][complemented_mate1, "--extract", "--outdir=./"] & TEE ) if "Failed to process" in stderr or "Skipping" in stderr: self.error("Failed while processing with FastQC.") (Cmd["gzip"][complemented_mate1])() ( Cmd["seqtk"]["seq", "-r", f"{name_mate2}_original.fastq.gz"] > complemented_mate2 )() _, _, stderr2 = ( Cmd["fastqc"][complemented_mate2, "--extract", "--outdir=./"] & TEE ) if "Failed to process" in stderr2 or "Skipping" in stderr2: self.error("Failed while processing with FastQC.") (Cmd["gzip"][complemented_mate2])() outputs.fastq = [f"{complemented_mate1}.gz"] outputs.fastq2 = [f"{complemented_mate2}.gz"] outputs.fastqc_url = [f"{name_mate1}_complemented_fastqc.html"] outputs.fastqc_archive = [f"{name_mate1}_complemented_fastqc.zip"] outputs.fastqc_url2 = [f"{name_mate2}_complemented_fastqc.html"] outputs.fastqc_archive2 = [f"{name_mate2}_complemented_fastqc.zip"]
genialis/resolwe-bio
resolwe_bio/processes/support_processors/seqtk_reverse_complement.py
Python
apache-2.0
8,764
"""TcEx Framework Service Common module""" # standard library import json import threading import time import traceback import uuid from datetime import datetime from typing import Callable, Optional, Union from .mqtt_message_broker import MqttMessageBroker class CommonService: """TcEx Framework Service Common module Shared service logic between the supported service types: * API Service * Custom Trigger Service * Webhook Trigger Service """ def __init__(self, tcex: object): """Initialize the Class properties. Args: tcex: Instance of TcEx. """ self.tcex = tcex # properties self._ready = False self._start_time = datetime.now() self.args: object = tcex.default_args self.configs = {} self.heartbeat_max_misses = 3 self.heartbeat_sleep_time = 1 self.heartbeat_watchdog = 0 self.ij = tcex.ij self.key_value_store = self.tcex.key_value_store self.log = tcex.log self.logger = tcex.logger self.message_broker = MqttMessageBroker( broker_host=self.args.tc_svc_broker_host, broker_port=self.args.tc_svc_broker_port, broker_timeout=self.args.tc_svc_broker_conn_timeout, broker_token=self.args.tc_svc_broker_token, broker_cacert=self.args.tc_svc_broker_cacert_file, logger=tcex.log, ) self.ready = False self.redis_client = self.tcex.redis_client self.token = tcex.token # config callbacks self.shutdown_callback = None def _create_logging_handler(self): """Create a logging handler.""" if self.logger.handler_exist(self.thread_name): return # create trigger id logging filehandler self.logger.add_pattern_file_handler( name=self.thread_name, filename=f'''{datetime.today().strftime('%Y%m%d')}/{self.session_id}.log''', level=self.args.tc_log_level, path=self.args.tc_log_path, # uuid4 pattern for session_id pattern=r'^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}.log$', handler_key=self.session_id, thread_key='session_id', ) def add_metric(self, label: str, value: Union[int, str]) -> None: """Add a metric. Metrics are reported in heartbeat message. Args: label: The metric label (e.g., hits) to add. value: The value for the metric. """ self._metrics[label] = value @property def command_map(self) -> dict: """Return the command map for the current Service type.""" return { 'heartbeat': self.process_heartbeat_command, 'loggingchange': self.process_logging_change_command, 'shutdown': self.process_shutdown_command, } @staticmethod def create_session_id() -> str: # pylint: disable=unused-argument """Return a uuid4 session id. Returns: str: A unique UUID string value. """ return str(uuid.uuid4()) def heartbeat(self) -> None: """Start heartbeat process.""" self.service_thread(name='heartbeat', target=self.heartbeat_monitor) def heartbeat_monitor(self) -> None: """Publish heartbeat on timer.""" self.log.info('feature=service, event=heartbeat-monitor-started') while True: if self.heartbeat_watchdog > ( int(self.args.tc_svc_hb_timeout_seconds) / int(self.heartbeat_sleep_time) ): self.log.error( 'feature=service, event=missed-heartbeat, action=shutting-service-down' ) self.process_shutdown_command({'reason': 'Missed heartbeat commands.'}) break time.sleep(self.heartbeat_sleep_time) self.heartbeat_watchdog += 1 def increment_metric(self, label: str, value: Optional[int] = 1) -> None: """Increment a metric if already exists. Args: label: The metric label (e.g., hits) to increment. value: The increment value. Defaults to 1. 
""" if self._metrics.get(label) is not None: self._metrics[label] += value def listen(self) -> None: """List for message coming from broker.""" self.message_broker.add_on_connect_callback(self.on_connect_handler) self.message_broker.add_on_message_callback( self.on_message_handler, topics=[self.args.tc_svc_server_topic] ) self.message_broker.register_callbacks() # start listener thread self.service_thread(name='broker-listener', target=self.message_broker.connect) def loop_forever(self, sleep: Optional[int] = 1) -> bool: """Block and wait for shutdown. Args: sleep: The amount of time to sleep between iterations. Defaults to 1. Returns: Bool: Returns True until shutdown received. """ while True: deadline = time.time() + sleep while time.time() < deadline: if self.message_broker.shutdown: return False time.sleep(1) return True @property def metrics(self) -> dict: """Return current metrics.""" # TODO: move to trigger command and handle API Service if self._metrics.get('Active Playbooks') is not None: self.update_metric('Active Playbooks', len(self.configs)) return self._metrics @metrics.setter def metrics(self, metrics: dict): """Return current metrics.""" if isinstance(metrics, dict): self._metrics = metrics else: self.log.error('feature=service, event=invalid-metrics') def on_connect_handler( self, client, userdata, flags, rc # pylint: disable=unused-argument ) -> None: """On connect method for mqtt broker.""" self.log.info( f'feature=service, event=topic-subscription, topic={self.args.tc_svc_server_topic}' ) self.message_broker.client.subscribe(self.args.tc_svc_server_topic) self.message_broker.client.disable_logger() def on_message_handler( self, client, userdata, message # pylint: disable=unused-argument ) -> None: """On message for mqtt.""" try: # messages on server topic must be json objects m = json.loads(message.payload) except ValueError: self.log.warning( f'feature=service, event=parsing-issue, message="""{message.payload}"""' ) return # use the command to call the appropriate method defined in command_map command: str = m.get('command', 'invalid').lower() trigger_id: Optional[int] = m.get('triggerId') if trigger_id is not None: # coerce trigger_id to int in case a string was provided (testing framework) trigger_id = int(trigger_id) self.log.info(f'feature=service, event=command-received, command="{command}"') # create unique session id to be used as thread name # and stored as property of thread for logging emit session_id = self.create_session_id() # get the target method from command_map for the current command thread_method = self.command_map.get(command, self.process_invalid_command) self.service_thread( # use session_id as thread name to provide easy debugging per thread name=session_id, target=thread_method, args=(m,), session_id=session_id, trigger_id=trigger_id, ) def process_heartbeat_command(self, message: dict) -> None: # pylint: disable=unused-argument """Process the HeartBeat command. .. code-block:: python :linenos: :lineno-start: 1 { "command": "Heartbeat", "metric": {}, "memoryPercent": 0, "cpuPercent": 0 } Args: message: The message payload from the server topic. 
""" self.heartbeat_watchdog = 0 # send heartbeat -acknowledge- command response = {'command': 'Heartbeat', 'metric': self.metrics} self.message_broker.publish( message=json.dumps(response), topic=self.args.tc_svc_client_topic ) self.log.info(f'feature=service, event=heartbeat-sent, metrics={self.metrics}') def process_logging_change_command(self, message: dict) -> None: """Process the LoggingChange command. .. code-block:: python :linenos: :lineno-start: 1 { "command": "LoggingChange", "level": "DEBUG" } Args: message: The message payload from the server topic. """ level: str = message.get('level') self.log.info(f'feature=service, event=logging-change, level={level}') self.logger.update_handler_level(level) def process_invalid_command(self, message: dict) -> None: """Process all invalid commands. Args: message: The message payload from the server topic. """ self.log.warning( f'feature=service, event=invalid-command-received, message="""({message})""".' ) def process_shutdown_command(self, message: dict) -> None: """Implement parent method to process the shutdown command. .. code-block:: python :linenos: :lineno-start: 1 { "command": "Shutdown", "reason": "Service disabled by user." } Args: message: The message payload from the server topic. """ reason = message.get('reason') or ( 'A shutdown command was received on server topic. Service is shutting down.' ) self.log.info(f'feature=service, event=shutdown, reason={reason}') # acknowledge shutdown command self.message_broker.publish( json.dumps({'command': 'Acknowledged', 'type': 'Shutdown'}), self.args.tc_svc_client_topic, ) # call App shutdown callback if callable(self.shutdown_callback): try: # call callback for shutdown and handle exceptions to protect thread self.shutdown_callback() # pylint: disable=not-callable except Exception as e: self.log.error( f'feature=service, event=shutdown-callback-error, error="""({e})""".' ) self.log.trace(traceback.format_exc()) # unsubscribe and disconnect from the broker self.message_broker.client.unsubscribe(self.args.tc_svc_server_topic) self.message_broker.client.disconnect() # update shutdown flag self.message_broker.shutdown = True # delay shutdown to give App time to cleanup time.sleep(5) self.tcex.exit(0) # final shutdown in case App did not @property def ready(self) -> bool: """Return ready boolean.""" return self._ready @ready.setter def ready(self, bool_val: bool): """Set ready boolean.""" if isinstance(bool_val, bool) and bool_val is True: # wait until connected to send ready command while not self.message_broker._connected: if self.message_broker.shutdown: break time.sleep(1) else: # pylint: disable=useless-else-on-loop self.log.info('feature=service, event=service-ready') ready_command = {'command': 'Ready'} if self.ij.runtime_level.lower() in ['apiservice']: ready_command['discoveryTypes'] = self.ij.service_discovery_types self.message_broker.publish( json.dumps(ready_command), self.args.tc_svc_client_topic ) self._ready = True def service_thread( self, name: str, target: Callable[[], bool], args: Optional[tuple] = None, kwargs: Optional[dict] = None, session_id: Optional[str] = None, trigger_id: Optional[int] = None, ) -> None: """Start a message thread. Args: name: The name of the thread. target: The method to call for the thread. args: The args to pass to the target method. kwargs: Additional args. session_id: The current session id. trigger_id: The current trigger id. 
""" self.log.info(f'feature=service, event=service-thread-creation, name={name}') args = args or () try: t = threading.Thread(name=name, target=target, args=args, kwargs=kwargs, daemon=True) # add session_id to thread to use in logger emit t.session_id = session_id # add trigger_id to thread to use in logger emit t.trigger_id = trigger_id t.start() except Exception: self.log.trace(traceback.format_exc()) @property def session_id(self) -> Optional[str]: """Return the current session_id.""" if not hasattr(threading.current_thread(), 'session_id'): threading.current_thread().session_id = self.create_session_id() return threading.current_thread().session_id @property def thread_name(self) -> str: """Return a uuid4 session id.""" return threading.current_thread().name @property def trigger_id(self) -> Optional[int]: """Return the current trigger_id.""" trigger_id = None if hasattr(threading.current_thread(), 'trigger_id'): trigger_id = threading.current_thread().trigger_id if trigger_id is not None: trigger_id = int(trigger_id) return trigger_id def update_metric(self, label: str, value: Union[int, str]) -> None: """Update a metric if already exists. Args: label: The metric label (e.g., hits) to update. value: The updated value for the metric. """ if self._metrics.get(label) is not None: self._metrics[label] = value
kstilwell/tcex
tcex/services/common_service.py
Python
apache-2.0
14,584
# Copyright (c) 2014 Rackspace, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """ Some useful getters for thread local request style validation. """ def pecan_getter(parm): """pecan getter.""" pecan_module = __import__('pecan', globals(), locals(), ['request']) return getattr(pecan_module, 'request')
obulpathi/poppy
poppy/transport/validators/stoplight/helpers.py
Python
apache-2.0
823
""" * Copyright 2007 Fred Sauer * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http:#www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. """ import AbstractLocation """* * A position represented by a left (x) and top (y) coordinate. """ class CoordinateLocation(AbstractLocation): def __init__(self, left, top): self.left = left self.top = top """ * (non-Javadoc) * * @see com.allen_sauer.gwt.dnd.client.util.Location#getLeft() """ def getLeft(self): return left """ * (non-Javadoc) * * @see com.allen_sauer.gwt.dnd.client.util.Location#getTop() """ def getTop(self): return top
jaredly/pyjamas
library/pyjamas/dnd/util/CoordinateLocation.py
Python
apache-2.0
1,133
import torch from torch import nn, Tensor from typing import Iterable, Dict from sentence_transformers import SentenceTransformer from transformers import AutoConfig, AutoTokenizer, AutoModelForCausalLM, PreTrainedModel import logging logger = logging.getLogger(__name__) class DenoisingAutoEncoderLoss(nn.Module): """ This loss expects as input a batch consisting of damaged sentences and the corresponding original ones. The data generation process has already been implemented in readers/DenoisingAutoEncoderReader.py During training, the decoder reconstructs the original sentences from the encoded sentence embeddings. Here the argument 'decoder_name_or_path' indicates the pretrained model (supported by Huggingface) to be used as the decoder. Since decoding process is included, here the decoder should have a class called XXXLMHead (in the context of Huggingface's Transformers). Flag 'tie_encoder_decoder' indicates whether to tie the trainable parameters of encoder and decoder, which is shown beneficial to model performance while limiting the amount of required memory. Only when the encoder and decoder are from the same architecture, can the flag 'tie_encoder_decoder' works. For more information, please refer to the TSDAE paper. """ def __init__( self, model: SentenceTransformer, decoder_name_or_path: str = None, tie_encoder_decoder: bool = True ): """ :param model: SentenceTransformer model :param decoder_name_or_path: Model name or path for initializing a decoder (compatible with Huggingface's Transformers) :param tie_encoder_decoder: whether to tie the trainable parameters of encoder and decoder """ super(DenoisingAutoEncoderLoss, self).__init__() self.encoder = model # This will be the final model used during the inference time. self.tokenizer_encoder = model.tokenizer encoder_name_or_path = model[0].auto_model.config._name_or_path if decoder_name_or_path is None: assert tie_encoder_decoder, "Must indicate the decoder_name_or_path argument when tie_encoder_decoder=False!" if tie_encoder_decoder: if decoder_name_or_path: logger.warning('When tie_encoder_decoder=True, the decoder_name_or_path will be invalid.') decoder_name_or_path = encoder_name_or_path self.tokenizer_decoder = AutoTokenizer.from_pretrained(decoder_name_or_path) self.need_retokenization = not (type(self.tokenizer_encoder) == type(self.tokenizer_decoder)) decoder_config = AutoConfig.from_pretrained(decoder_name_or_path) decoder_config.is_decoder = True decoder_config.add_cross_attention = True kwargs_decoder = {'config': decoder_config} try: self.decoder = AutoModelForCausalLM.from_pretrained(decoder_name_or_path, **kwargs_decoder) except ValueError as e: logger.error(f'Model name or path "{decoder_name_or_path}" does not support being as a decoder. Please make sure the decoder model has an "XXXLMHead" class.') raise e assert model[0].auto_model.config.hidden_size == decoder_config.hidden_size, 'Hidden sizes do not match!' if self.tokenizer_decoder.pad_token is None: # Needed by GPT-2, etc. self.tokenizer_decoder.pad_token = self.tokenizer_decoder.eos_token self.decoder.config.pad_token_id = self.decoder.config.eos_token_id if len(AutoTokenizer.from_pretrained(encoder_name_or_path)) != len(self.tokenizer_encoder): logger.warning('WARNING: The vocabulary of the encoder has been changed. One might need to change the decoder vocabulary, too.') if tie_encoder_decoder: assert not self.need_retokenization, "The tokenizers should be the same when tie_encoder_decoder=True." 
if len(self.tokenizer_encoder) != len(self.tokenizer_decoder): # The vocabulary has been changed. self.tokenizer_decoder = self.tokenizer_encoder self.decoder.resize_token_embeddings(len(self.tokenizer_decoder)) logger.warning('Since the encoder vocabulary has been changed and --tie_encoder_decoder=True, now the new vocabulary has also been used for the decoder.') decoder_base_model_prefix = self.decoder.base_model_prefix PreTrainedModel._tie_encoder_decoder_weights( model[0].auto_model, self.decoder._modules[decoder_base_model_prefix], self.decoder.base_model_prefix ) def retokenize(self, sentence_features): input_ids = sentence_features['input_ids'] device = input_ids.device sentences_decoded = self.tokenizer_decoder.batch_decode( input_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True ) retokenized = self.tokenizer_decoder( sentences_decoded, padding=True, truncation='longest_first', return_tensors="pt", max_length=None ).to(device) return retokenized def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor): source_features, target_features = tuple(sentence_features) if self.need_retokenization: # since the sentence_features here are all tokenized by encoder's tokenizer, # retokenization by the decoder's one is needed if different tokenizers used target_features = self.retokenize(target_features) reps = self.encoder(source_features)['sentence_embedding'] # (bsz, hdim) # Prepare input and output target_length = target_features['input_ids'].shape[1] decoder_input_ids = target_features['input_ids'].clone()[:, :target_length - 1] label_ids = target_features['input_ids'][:, 1:] # Decode decoder_outputs = self.decoder( input_ids=decoder_input_ids, inputs_embeds=None, attention_mask=None, encoder_hidden_states=reps[:, None], # (bsz, hdim) -> (bsz, 1, hdim) encoder_attention_mask=source_features['attention_mask'][:, 0:1], labels=None, return_dict=None, use_cache=False ) # Calculate loss lm_logits = decoder_outputs[0] ce_loss_fct = nn.CrossEntropyLoss(ignore_index=self.tokenizer_decoder.pad_token_id) loss = ce_loss_fct(lm_logits.view(-1, lm_logits.shape[-1]), label_ids.reshape(-1)) return loss
UKPLab/sentence-transformers
sentence_transformers/losses/DenoisingAutoEncoderLoss.py
Python
apache-2.0
6,700
import sys from typing import Optional from bowler import Query, LN, Capture, Filename, TOKEN, SYMBOL from fissix.pytree import Node, Leaf from lib2to3.fixer_util import Name, KeywordArg, Dot, Comma, Newline, ArgList def filter_print_string(node, capture, filename) -> bool: function_name = capture.get("function_name") from pprint import pprint pprint(node) pprint(capture) return True def filter_has_no_on_delete(node: LN, capture: Capture, filename: Filename) -> bool: arguments = capture.get("function_arguments")[0].children for arg in arguments: if arg.type == SYMBOL.argument and arg.children[0].type == TOKEN.NAME: arg_name = arg.children[0].value if arg_name == "on_delete": return False # this call already has an on_delete argument. return True def add_on_delete_cascade( node: LN, capture: Capture, filename: Filename ) -> Optional[LN]: arguments = capture.get("function_arguments")[0] new_on_delete_node = KeywordArg(Name(" on_delete"), Name("models.CASCADE")) if isinstance(arguments, Leaf): # Node is a leaf and so we need to replace it with a list of things we want instead. arguments.replace([arguments.clone(),Comma(),new_on_delete_node]) else: arguments.append_child(Comma()) arguments.append_child(new_on_delete_node) return node ( Query(sys.argv[1]) .select_method("ForeignKey") .is_call() .filter(filter_has_no_on_delete) .modify(add_on_delete_cascade) .idiff() ), ( Query(sys.argv[1]) .select_method("OneToOneField") .is_call() .filter(filter_has_no_on_delete) .modify(add_on_delete_cascade) .idiff() )
edx/repo-tools
edx_repo_tools/codemods/django2/foreignkey_on_delete_mod.py
Python
apache-2.0
1,718
#!/usr/bin/env python ''' Script to ingest GCP billing data into a DB ''' import logging import os import re import sys from datetime import datetime from dateutil.relativedelta import relativedelta from dateutil.parser import parse as parse_date from httplib2 import Http import transaction from apiclient.discovery import build from oauth2client.service_account import ServiceAccountCredentials from sqlalchemy import engine_from_config from sqlalchemy.sql import functions from pyramid.paster import get_appsettings, setup_logging from pyramid.scripts.common import parse_vars from ..models import (DBSession, GcpLineItem) from ..util.fileloader import load_json, save_json COMMIT_THRESHOLD = 10000 LOG = None def usage(argv): ''' cli usage ''' cmd = os.path.basename(argv[0]) print('usage: %s <config_uri> [rundate=YYYY-MM-DD]\n' '(example: "%s development.ini")' % (cmd, cmd)) sys.exit(1) def run(settings, options): ''' do things ''' os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = settings['creds.dir'] + \ "/" + \ settings['creds.gcp.json'] scopes = ['https://www.googleapis.com/auth/cloud-platform'] credentials = ServiceAccountCredentials.from_json_keyfile_name(settings['creds.dir'] + \ "/" + \ settings['creds.gcp.json'], scopes) http_auth = credentials.authorize(Http()) # The apiclient.discovery.build() function returns an instance of an API service # object that can be used to make API calls. The object is constructed with # methods specific to the books API. The arguments provided are: # name of the API ('cloudbilling') # version of the API you are using ('v1') # API key service = build('cloudbilling', 'v1', http=http_auth, cache_discovery=False) request = service.billingAccounts().projects().list(name='billingAccounts/0085BB-6B96B9-89FD9F') response = request.execute() LOG.debug(response) def main(argv): ''' main script entry point ''' if len(argv) < 2: usage(argv) config_uri = argv[1] options = parse_vars(argv[2:]) setup_logging(config_uri) global LOG LOG = logging.getLogger(__name__) settings = get_appsettings(config_uri, options=options) engine = engine_from_config(settings, 'sqlalchemy.') DBSession.configure(bind=engine) run(settings, options) if '__main__' in __name__: try: main(sys.argv) except KeyboardInterrupt: print "Ctrl+C detected. Exiting..."
blentz/cloud-costs
budget/scripts/gcp_test.py
Python
apache-2.0
2,730
class Config(object):
    SERVER_URL = "BACKEND_SERVER_URL"
    CAMERA_NAME = "CAM_NAME"
    API_KEY = "API_KEY"


class Backend(object):
    URL_PREFIX = "http://"
    API_PREFIX = "/api/v1/"
    AUTH_URL = "%susers/auth" % API_PREFIX
    REGISTER_CAM_URL = "%scam" % API_PREFIX
    UPLOAD_URL = "%scam/upload" % API_PREFIX
    CHECK_STATE = "%scam/state" % API_PREFIX
SillentTroll/rascam_client
wsgi/common/constants.py
Python
apache-2.0
370
# -*- coding: utf-8 -*-
#
# Copyright 2014-2020 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""BigMLer - Resources processing: creation, update and retrieval of ensembles

"""

import bigmler.utils as u
import bigmler.resourcesapi.ensembles as r
import bigmler.checkpoint as c

MONTECARLO_FACTOR = 200


def ensemble_processing(datasets, api, args, resume,
                        fields=None, session_file=None,
                        path=None, log=None):
    """Creates an ensemble of models from the input data

    """
    ensembles = []
    ensemble_ids = []
    models = []
    model_ids = []
    number_of_ensembles = len(datasets)

    if resume:
        resume, ensemble_ids = c.checkpoint(
            c.are_ensembles_created, path, number_of_ensembles,
            debug=args.debug)
        if args.number_of_models > 1:
            _, model_ids = c.checkpoint(c.are_models_created, path, \
                number_of_ensembles * args.number_of_models)
            models = model_ids
        if not resume:
            message = u.dated("Found %s ensembles out of %s. Resuming.\n"
                              % (len(ensemble_ids),
                                 number_of_ensembles))
            u.log_message(message, log_file=session_file,
                          console=args.verbosity)
        ensembles = ensemble_ids
        number_of_ensembles -= len(ensemble_ids)

    if number_of_ensembles > 0:
        ensemble_args = r.set_ensemble_args(args, fields=fields)
        ensembles, ensemble_ids, models, model_ids = r.create_ensembles(
            datasets, ensembles, ensemble_args, args, api=api, path=path,
            number_of_ensembles=number_of_ensembles,
            session_file=session_file, log=log)
    return ensembles, ensemble_ids, models, model_ids, resume


def ensemble_per_label(labels, dataset, api, args, resume, fields=None,
                       multi_label_data=None,
                       session_file=None, path=None, log=None):
    """Creates an ensemble per label for multi-label datasets

    """
    ensemble_ids = []
    ensembles = []
    model_ids = []
    models = []
    number_of_ensembles = len(labels)
    if resume:
        resume, ensemble_ids = c.checkpoint(
            c.are_ensembles_created, path, number_of_ensembles,
            debug=args.debug)
        ensembles = ensemble_ids
        if not resume:
            message = u.dated("Found %s ensembles out of %s."
                              " Resuming.\n"
                              % (len(ensemble_ids),
                                 number_of_ensembles))
            u.log_message(message, log_file=session_file,
                          console=args.verbosity)
            # erase models' info that will be rebuilt
            u.log_created_resources("models", path, None, mode='w')
    number_of_ensembles = len(labels) - len(ensemble_ids)
    ensemble_args_list = r.set_label_ensemble_args(
        args, labels, multi_label_data, number_of_ensembles, fields)

    # create ensembles changing the input_field to select
    # only one label at a time
    (ensembles, ensemble_ids,
     models, model_ids) = r.create_ensembles(
         dataset, ensemble_ids, ensemble_args_list, args,
         number_of_ensembles, api, path, session_file, log)
    return ensembles, ensemble_ids, models, model_ids, resume
jaor/bigmler
bigmler/processing/ensembles.py
Python
apache-2.0
3,944
import operator import pandas as pd import pandas.util.testing as tm import pytest import ibis from ibis.common import IbisTypeError def test_array_length(t, df): expr = t.projection([ t.array_of_float64.length().name('array_of_float64_length'), t.array_of_int64.length().name('array_of_int64_length'), t.array_of_strings.length().name('array_of_strings_length'), ]) result = expr.execute() expected = pd.DataFrame({ 'array_of_float64_length': [2, 1, 0], 'array_of_int64_length': [2, 0, 1], 'array_of_strings_length': [2, 0, 1], }) tm.assert_frame_equal(result, expected) def test_array_length_scalar(client): raw_value = [1, 2, 4] value = ibis.literal(raw_value) expr = value.length() result = client.execute(expr) expected = len(raw_value) assert result == expected def test_array_collect(t, df): expr = t.group_by( t.dup_strings ).aggregate(collected=t.float64_with_zeros.collect()) result = expr.execute().sort_values('dup_strings').reset_index(drop=True) expected = df.groupby( 'dup_strings' ).float64_with_zeros.apply(list).reset_index().rename( columns={'float64_with_zeros': 'collected'} ) tm.assert_frame_equal(result, expected) @pytest.mark.xfail( raises=TypeError, reason=( 'Pandas does not implement rolling for functions that do not return ' 'numbers' ) ) def test_array_collect_rolling_partitioned(t, df): window = ibis.trailing_window(2, order_by=t.plain_int64) colexpr = t.plain_float64.collect().over(window) expr = t['dup_strings', 'plain_int64', colexpr.name('collected')] result = expr.execute() expected = pd.DataFrame({ 'dup_strings': ['d', 'a', 'd'], 'plain_int64': [1, 2, 3], 'collected': [[4.0], [4.0, 5.0], [5.0, 6.0]], })[expr.columns] tm.assert_frame_equal(result, expected) @pytest.mark.xfail(raises=IbisTypeError, reason='Not sure if this should work') def test_array_collect_scalar(client): raw_value = 'abcd' value = ibis.literal(raw_value) expr = value.collect() result = client.execute(expr) expected = [raw_value] assert result == expected @pytest.mark.parametrize( ['start', 'stop'], [ (1, 3), (1, 1), (2, 3), (2, 5), (None, 3), (None, None), (3, None), # negative slices are not supported pytest.mark.xfail( (-3, None), raises=ValueError, reason='Negative slicing not supported' ), pytest.mark.xfail( (None, -3), raises=ValueError, reason='Negative slicing not supported' ), pytest.mark.xfail( (-3, -1), raises=ValueError, reason='Negative slicing not supported' ), ] ) def test_array_slice(t, df, start, stop): expr = t.array_of_strings[start:stop] result = expr.execute() slicer = operator.itemgetter(slice(start, stop)) expected = df.array_of_strings.apply(slicer) tm.assert_series_equal(result, expected) @pytest.mark.parametrize( ['start', 'stop'], [ (1, 3), (1, 1), (2, 3), (2, 5), (None, 3), (None, None), (3, None), # negative slices are not supported pytest.mark.xfail( (-3, None), raises=ValueError, reason='Negative slicing not supported' ), pytest.mark.xfail( (None, -3), raises=ValueError, reason='Negative slicing not supported' ), pytest.mark.xfail( (-3, -1), raises=ValueError, reason='Negative slicing not supported' ), ] ) def test_array_slice_scalar(client, start, stop): raw_value = [-11, 42, 10] value = ibis.literal(raw_value) expr = value[start:stop] result = client.execute(expr) expected = raw_value[start:stop] assert result == expected @pytest.mark.parametrize('index', [1, 3, 4, 11, -11]) def test_array_index(t, df, index): expr = t[t.array_of_float64[index].name('indexed')] result = expr.execute() expected = pd.DataFrame({ 'indexed': df.array_of_float64.apply( lambda x: x[index] if 
-len(x) <= index < len(x) else None ) }) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize('index', [1, 3, 4, 11]) def test_array_index_scalar(client, index): raw_value = [-10, 1, 2, 42] value = ibis.literal(raw_value) expr = value[index] result = client.execute(expr) expected = raw_value[index] if index < len(raw_value) else None assert result == expected @pytest.mark.parametrize('n', [1, 3, 4, 7, -2]) # negative returns empty list @pytest.mark.parametrize('mul', [lambda x, n: x * n, lambda x, n: n * x]) def test_array_repeat(t, df, n, mul): expr = t.projection([mul(t.array_of_strings, n).name('repeated')]) result = expr.execute() expected = pd.DataFrame({'repeated': df.array_of_strings * n}) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize('n', [1, 3, 4, 7, -2]) # negative returns empty list @pytest.mark.parametrize('mul', [lambda x, n: x * n, lambda x, n: n * x]) def test_array_repeat_scalar(client, n, mul): raw_array = [1, 2] array = ibis.literal(raw_array) expr = mul(array, n) result = client.execute(expr) expected = mul(raw_array, n) assert result == expected @pytest.mark.parametrize('op', [lambda x, y: x + y, lambda x, y: y + x]) def test_array_concat(t, df, op): x = t.array_of_float64.cast('array<string>') y = t.array_of_strings expr = op(x, y) result = expr.execute() expected = op( df.array_of_float64.apply(lambda x: list(map(str, x))), df.array_of_strings ) tm.assert_series_equal(result, expected) @pytest.mark.parametrize('op', [lambda x, y: x + y, lambda x, y: y + x]) def test_array_concat_scalar(client, op): raw_left = [1, 2, 3] raw_right = [3, 4] left = ibis.literal(raw_left) right = ibis.literal(raw_right) expr = op(left, right) result = client.execute(expr) assert result == op(raw_left, raw_right)
deepfield/ibis
ibis/pandas/execution/tests/test_arrays.py
Python
apache-2.0
6,305
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for UpdateTensorboard
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.

# To install the latest published package dependency, execute the following:
#   python3 -m pip install google-cloud-aiplatform


# [START aiplatform_generated_aiplatform_v1_TensorboardService_UpdateTensorboard_async]
from google.cloud import aiplatform_v1


async def sample_update_tensorboard():
    # Create a client
    client = aiplatform_v1.TensorboardServiceAsyncClient()

    # Initialize request argument(s)
    tensorboard = aiplatform_v1.Tensorboard()
    tensorboard.display_name = "display_name_value"

    request = aiplatform_v1.UpdateTensorboardRequest(
        tensorboard=tensorboard,
    )

    # Make the request
    operation = client.update_tensorboard(request=request)

    print("Waiting for operation to complete...")

    response = await operation.result()

    # Handle the response
    print(response)

# [END aiplatform_generated_aiplatform_v1_TensorboardService_UpdateTensorboard_async]
googleapis/python-aiplatform
samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_update_tensorboard_async.py
Python
apache-2.0
1,730
# This file is for dev branch
umitmertcakmak/PySpyder
scrappers/devTest.py
Python
apache-2.0
31
# Copyright 2016 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


# Config Drive types and possible locations.
CD_TYPES = {
    "vfat",    # Visible device (with partition table).
    "iso",     # "Raw" format containing ISO bytes.
}

CD_LOCATIONS = {
    # Look into optical devices. Only an ISO format could be
    # used here (vfat ignored).
    "cdrom",
    # Search through physical disks for raw ISO content or vfat filesystems
    # containing configuration drive's content.
    "hdd",
    # Search through partitions for raw ISO content or through volumes
    # containing configuration drive's content.
    "partition",
}

POLICY_IGNORE_ALL_FAILURES = "ignoreallfailures"

SAN_POLICY_ONLINE_STR = 'OnlineAll'
SAN_POLICY_OFFLINE_STR = 'OfflineAll'
SAN_POLICY_OFFLINE_SHARED_STR = 'OfflineShared'

CLEAR_TEXT_INJECTED_ONLY = 'clear_text_injected_only'
ALWAYS_CHANGE = 'always'
NEVER_CHANGE = 'no'
LOGON_PASSWORD_CHANGE_OPTIONS = [CLEAR_TEXT_INJECTED_ONLY, NEVER_CHANGE,
                                 ALWAYS_CHANGE]

VOL_ACT_KMS = "KMS"
VOL_ACT_AVMA = "AVMA"

CERT_LOCATION_LOCAL_MACHINE = "LocalMachine"
CERT_LOCATION_CURRENT_USER = "CurrentUser"

SCRIPT_HEADER_CMD = 'rem cmd'
stackforge/cloudbase-init
cloudbaseinit/constant.py
Python
apache-2.0
1,737
import web

urls = (
    '/wx', 'Handle',
)


class Handle(object):
    def GET(self):
        return "hello, this is a test"


if __name__ == '__main__':
    app = web.application(urls, globals())
    app.run()
tanjoc/MyRespository
oauth/weixindemo/__init__.py
Python
apache-2.0
210
# -*- encoding: utf-8 -*-

from django.core.paginator import Paginator, EmptyPage, InvalidPage
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from functools import wraps
from django.template import RequestContext
from django.shortcuts import render_to_response
from django.core.exceptions import PermissionDenied
from datetime import datetime


def paginate(template_name=None, list_name='default', objects_per_page=10):
    def inner_p(fn):
        def wrapped(request, *args, **kwargs):
            fn_res = fn(request, *args, **kwargs)
            objects = fn_res[list_name]
            paginator = Paginator(objects, objects_per_page)
            try:
                page = int(request.GET.get('page', '1'))
            except ValueError:
                page = 1

            # If page request (9999) is out of range,
            # deliver last page of results.
            try:
                loo = paginator.page(page)
            except (EmptyPage, InvalidPage):
                loo = paginator.page(paginator.num_pages)

            fn_res.update({list_name: loo})
            if 'template_name' in fn_res:
                return render_to_response(fn_res['template_name'], fn_res,
                                          context_instance=RequestContext(request))
            else:
                return render_to_response(template_name, fn_res,
                                          context_instance=RequestContext(request))
        return wraps(fn)(wrapped)
    return inner_p


def only_doctor(func):
    def _fn(request, *args, **kwargs):
        if request.user.get_profile().is_doctor():
            return func(request, *args, **kwargs)
        else:
            return HttpResponseRedirect(reverse('cal.index'))
    return _fn


def only_doctor_consulting(func):
    def _fn(request, *args, **kwargs):
        if request.user.get_profile().is_doctor():
            if 'patient_user_id' in kwargs and not request.user.doctor.filter(user__id=kwargs['patient_user_id']):
                raise PermissionDenied
            if 'id_patient' in kwargs and not request.user.doctor.filter(user__id=kwargs['id_patient']):
                raise PermissionDenied
            if 'id_task' in kwargs and not request.user.doctor.filter(user__patient_tasks__id=kwargs['id_task']):
                raise PermissionDenied
            if 'id_appointment' in kwargs and not request.user.doctor.filter(user__appointment_patient__id=kwargs['id_appointment']):
                raise PermissionDenied
            return func(request, *args, **kwargs)
        else:
            return HttpResponseRedirect(reverse('consulting_index'))
    return _fn


def only_patient_consulting(func):
    def _fn(request, *args, **kwargs):
        if request.user.get_profile().is_patient():
            return func(request, *args, **kwargs)
        else:
            return HttpResponseRedirect(reverse('consulting_index'))
    return _fn


def only_doctor_administrative(func):
    def _fn(request, *args, **kwargs):
        if request.user.get_profile().is_administrative() or\
                request.user.get_profile().is_doctor():
            return func(request, *args, **kwargs)
        else:
            return HttpResponseRedirect(reverse('consulting_index'))
    return _fn


def update_password_date(func):
    def _fn(request, *args, **kwargs):
        prof = request.user.get_profile()
        prof.updated_password_at = datetime.today()
        prof.save()
        return func(request, *args, **kwargs)
    return _fn
frhumanes/consulting
web/src/decorators.py
Python
apache-2.0
3,520
#!/usr/bin/env python
import os
import sys

if __name__ == "__main__":
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "hubblemon.settings")

    from django.core.management import execute_from_command_line

    execute_from_command_line(sys.argv)
naver/hubblemon
manage.py
Python
apache-2.0
252
# Copyright 2021 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import annotations

import unittest

import kfp.deprecated as kfp
import kfp_server_api
from ml_metadata.proto import Execution

from .condition import condition
from .condition_v2 import condition as condition_v2
from kfp.samples.test.utils import KfpTask, debug_verify, run_pipeline_func, TestCase


def verify_heads(t: unittest.TestCase, run: kfp_server_api.ApiRun,
                 tasks: dict[str, KfpTask], **kwargs):
    t.assertEqual(run.status, 'Succeeded')
    t.assertCountEqual(['print-msg', 'condition-1', 'flip-coin'], tasks.keys())
    t.assertCountEqual(['print-msg-2', 'print-msg-3', 'flip-coin-2'],
                       tasks['condition-1'].children.keys())


def verify_tails(t: unittest.TestCase, run: kfp_server_api.ApiRun,
                 tasks: dict[str, KfpTask], **kwargs):
    t.assertEqual(run.status, 'Succeeded')
    t.assertCountEqual(['print-msg', 'condition-1', 'flip-coin'], tasks.keys())
    t.assertIsNone(tasks['condition-1'].children)
    # MLMD canceled state means NotTriggered state for KFP.
    t.assertEqual(Execution.State.CANCELED, tasks['condition-1'].state)


run_pipeline_func([
    TestCase(
        pipeline_func=condition_v2,
        mode=kfp.dsl.PipelineExecutionMode.V2_ENGINE,
        arguments={"force_flip_result": "heads"},
        verify_func=verify_heads,
    ),
    TestCase(
        pipeline_func=condition_v2,
        mode=kfp.dsl.PipelineExecutionMode.V2_ENGINE,
        arguments={"force_flip_result": "tails"},
        verify_func=verify_tails,
    ),
    TestCase(
        pipeline_func=condition,
        mode=kfp.dsl.PipelineExecutionMode.V1_LEGACY,
        arguments={"force_flip_result": "heads"},
    ),
    TestCase(
        pipeline_func=condition,
        mode=kfp.dsl.PipelineExecutionMode.V1_LEGACY,
        arguments={"force_flip_result": "tails"},
    ),
])
kubeflow/pipelines
samples/core/condition/condition_test.py
Python
apache-2.0
2,438
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for UpdateIssue
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.

# To install the latest published package dependency, execute the following:
#   python3 -m pip install google-cloud-contact-center-insights


# [START contactcenterinsights_v1_generated_ContactCenterInsights_UpdateIssue_sync]
from google.cloud import contact_center_insights_v1


def sample_update_issue():
    # Create a client
    client = contact_center_insights_v1.ContactCenterInsightsClient()

    # Initialize request argument(s)
    request = contact_center_insights_v1.UpdateIssueRequest(
    )

    # Make the request
    response = client.update_issue(request=request)

    # Handle the response
    print(response)

# [END contactcenterinsights_v1_generated_ContactCenterInsights_UpdateIssue_sync]
googleapis/python-contact-center-insights
samples/generated_samples/contactcenterinsights_v1_generated_contact_center_insights_update_issue_sync.py
Python
apache-2.0
1,517
#!/usr/bin/python
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Reusable utility functions.

This file is generic and can be reused by other models without modification.
"""

from apache_beam.transforms import core
import tensorflow as tf


def int64_feature(value):
  """Create a multi-valued int64 feature from a single value."""
  return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))


def bytes_feature(value):
  """Create a multi-valued bytes feature from a single value."""
  return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))


def float_feature(value):
  """Create a multi-valued float feature from a single value."""
  return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))


class DefaultToKeyDict(dict):
  """Custom dictionary to use the key as the value for any missing entries."""

  def __missing__(self, key):
    return str(key)


class TableToDictCombineFn(core.CombineFn):
  """Beam transform to create a python dictionary from a BigQuery table.

  This CombineFn reshapes rows from a BigQuery table using the specified key
  column to a Python dictionary.
  """

  def __init__(self, key_column):
    self.key_column = key_column

  def create_accumulator(self):
    return dict()

  def add_input(self, accumulator, element):
    accumulator[element[self.key_column]] = element
    return accumulator

  def add_inputs(self, accumulator, elements):
    for element in elements:
      self.add_input(accumulator, element)
    return accumulator

  def merge_accumulators(self, accumulators):
    final_accumulator = {}
    for accumulator in accumulators:
      final_accumulator.update(accumulator)
    return final_accumulator

  def extract_output(self, accumulator):
    return accumulator
googlegenomics/cloudml-examples
trainer/util.py
Python
apache-2.0
2,296
#Copyright 2008 Orbitz WorldWide # #Licensed under the Apache License, Version 2.0 (the "License"); #you may not use this file except in compliance with the License. #You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #Unless required by applicable law or agreed to in writing, software #distributed under the License is distributed on an "AS IS" BASIS, #WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #See the License for the specific language governing permissions and #limitations under the License. from datetime import date, datetime, timedelta from functools import partial from itertools import izip, imap import math import re import random import time from graphite.logger import log from graphite.render.attime import parseTimeOffset from graphite.events import models #XXX format_units() should go somewhere else from os import environ if environ.get('READTHEDOCS'): format_units = lambda *args, **kwargs: (0,'') else: from graphite.render.glyph import format_units from graphite.render.datalib import TimeSeries from graphite.util import timestamp NAN = float('NaN') INF = float('inf') DAY = 86400 HOUR = 3600 MINUTE = 60 #Utility functions def safeSum(values): safeValues = [v for v in values if v is not None] if safeValues: return sum(safeValues) def safeDiff(values): safeValues = [v for v in values if v is not None] if safeValues: values = map(lambda x: x*-1, safeValues[1:]) values.insert(0, safeValues[0]) return sum(values) def safeLen(values): return len([v for v in values if v is not None]) def safeDiv(a, b): if a is None: return None if b in (0,None): return None return float(a) / float(b) def safeMul(*factors): if None in factors: return None factors = [float(x) for x in factors] product = reduce(lambda x,y: x*y, factors) return product def safeSubtract(a,b): if a is None or b is None: return None return float(a) - float(b) def safeAvg(a): return safeDiv(safeSum(a),safeLen(a)) def safeStdDev(a): sm = safeSum(a) ln = safeLen(a) avg = safeDiv(sm,ln) sum = 0 safeValues = [v for v in a if v is not None] for val in safeValues: sum = sum + (val - avg) * (val - avg) return math.sqrt(sum/ln) def safeLast(values): for v in reversed(values): if v is not None: return v def safeMin(values): safeValues = [v for v in values if v is not None] if safeValues: return min(safeValues) def safeMax(values): safeValues = [v for v in values if v is not None] if safeValues: return max(safeValues) def safeMap(function, values): safeValues = [v for v in values if v is not None] if safeValues: return [function(x) for x in values] def safeAbs(value): if value is None: return None return abs(value) # Greatest common divisor def gcd(a, b): if b == 0: return a return gcd(b, a%b) # Least common multiple def lcm(a, b): if a == b: return a if a < b: (a, b) = (b, a) #ensure a > b return a / gcd(a,b) * b def normalize(seriesLists): seriesList = reduce(lambda L1,L2: L1+L2,seriesLists) step = reduce(lcm,[s.step for s in seriesList]) for s in seriesList: s.consolidate( step / s.step ) start = min([s.start for s in seriesList]) end = max([s.end for s in seriesList]) end -= (end - start) % step return (seriesList,start,end,step) def formatPathExpressions(seriesList): # remove duplicates pathExpressions = [] [pathExpressions.append(s.pathExpression) for s in seriesList if not pathExpressions.count(s.pathExpression)] return ','.join(pathExpressions) # Series Functions #NOTE: Some of the functions below use izip, which may be problematic. 
#izip stops when it hits the end of the shortest series #in practice this *shouldn't* matter because all series will cover #the same interval, despite having possibly different steps... def sumSeries(requestContext, *seriesLists): """ Short form: sum() This will add metrics together and return the sum at each datapoint. (See integral for a sum over time) Example: .. code-block:: none &target=sum(company.server.application*.requestsHandled) This would show the sum of all requests handled per minute (provided requestsHandled are collected once a minute). If metrics with different retention rates are combined, the coarsest metric is graphed, and the sum of the other metrics is averaged for the metrics with finer retention rates. """ try: (seriesList,start,end,step) = normalize(seriesLists) except: return [] name = "sumSeries(%s)" % formatPathExpressions(seriesList) values = ( safeSum(row) for row in izip(*seriesList) ) series = TimeSeries(name,start,end,step,values) series.pathExpression = name return [series] def sumSeriesWithWildcards(requestContext, seriesList, *position): #XXX """ Call sumSeries after inserting wildcards at the given position(s). Example: .. code-block:: none &target=sumSeriesWithWildcards(host.cpu-[0-7].cpu-{user,system}.value, 1) This would be the equivalent of ``target=sumSeries(host.*.cpu-user.value)&target=sumSeries(host.*.cpu-system.value)`` """ if isinstance(position, int): positions = [position] else: positions = position newSeries = {} newNames = list() for series in seriesList: newname = '.'.join(map(lambda x: x[1], filter(lambda i: i[0] not in positions, enumerate(series.name.split('.'))))) if newname in newSeries.keys(): newSeries[newname] = sumSeries(requestContext, (series, newSeries[newname]))[0] else: newSeries[newname] = series newNames.append(newname) newSeries[newname].name = newname return [newSeries[name] for name in newNames] def averageSeriesWithWildcards(requestContext, seriesList, *position): #XXX """ Call averageSeries after inserting wildcards at the given position(s). Example: .. code-block:: none &target=averageSeriesWithWildcards(host.cpu-[0-7].cpu-{user,system}.value, 1) This would be the equivalent of ``target=averageSeries(host.*.cpu-user.value)&target=averageSeries(host.*.cpu-system.value)`` """ if isinstance(position, int): positions = [position] else: positions = position result = [] matchedList = {} for series in seriesList: newname = '.'.join(map(lambda x: x[1], filter(lambda i: i[0] not in positions, enumerate(series.name.split('.'))))) if newname not in matchedList: matchedList[newname] = [] matchedList[newname].append(series) for name in matchedList.keys(): result.append( averageSeries(requestContext, (matchedList[name]))[0] ) result[-1].name = name return result def diffSeries(requestContext, *seriesLists): """ Can take two or more metrics, or a single metric and a constant. Subtracts parameters 2 through n from parameter 1. Example: .. code-block:: none &target=diffSeries(service.connections.total,service.connections.failed) &target=diffSeries(service.connections.total,5) """ (seriesList,start,end,step) = normalize(seriesLists) name = "diffSeries(%s)" % formatPathExpressions(seriesList) values = ( safeDiff(row) for row in izip(*seriesList) ) series = TimeSeries(name,start,end,step,values) series.pathExpression = name return [series] def averageSeries(requestContext, *seriesLists): """ Short Alias: avg() Takes one metric or a wildcard seriesList. Draws the average value of all metrics passed at each time. Example: .. 
code-block:: none &target=averageSeries(company.server.*.threads.busy) """ (seriesList,start,end,step) = normalize(seriesLists) name = "averageSeries(%s)" % formatPathExpressions(seriesList) values = ( safeDiv(safeSum(row),safeLen(row)) for row in izip(*seriesList) ) series = TimeSeries(name,start,end,step,values) series.pathExpression = name return [series] def stddevSeries(requestContext, *seriesLists): """ Takes one metric or a wildcard seriesList. Draws the standard deviation of all metrics passed at each time. Example: .. code-block:: none &target=stddevSeries(company.server.*.threads.busy) """ (seriesList,start,end,step) = normalize(seriesLists) name = "stddevSeries(%s)" % formatPathExpressions(seriesList) values = ( safeStdDev(row) for row in izip(*seriesList) ) series = TimeSeries(name,start,end,step,values) series.pathExpression = name return [series] def minSeries(requestContext, *seriesLists): """ Takes one metric or a wildcard seriesList. For each datapoint from each metric passed in, pick the minimum value and graph it. Example: .. code-block:: none &target=minSeries(Server*.connections.total) """ (seriesList, start, end, step) = normalize(seriesLists) name = "minSeries(%s)" % formatPathExpressions(seriesList) values = ( safeMin(row) for row in izip(*seriesList) ) series = TimeSeries(name, start, end, step, values) series.pathExpression = name return [series] def maxSeries(requestContext, *seriesLists): """ Takes one metric or a wildcard seriesList. For each datapoint from each metric passed in, pick the maximum value and graph it. Example: .. code-block:: none &target=maxSeries(Server*.connections.total) """ (seriesList, start, end, step) = normalize(seriesLists) name = "maxSeries(%s)" % formatPathExpressions(seriesList) values = ( safeMax(row) for row in izip(*seriesList) ) series = TimeSeries(name, start, end, step, values) series.pathExpression = name return [series] def rangeOfSeries(requestContext, *seriesLists): """ Takes a wildcard seriesList. Distills down a set of inputs into the range of the series Example: .. code-block:: none &target=rangeOfSeries(Server*.connections.total) """ (seriesList,start,end,step) = normalize(seriesLists) name = "rangeOfSeries(%s)" % formatPathExpressions(seriesList) values = ( safeSubtract(max(row), min(row)) for row in izip(*seriesList) ) series = TimeSeries(name,start,end,step,values) series.pathExpression = name return [series] def percentileOfSeries(requestContext, seriesList, n, interpolate=False): """ percentileOfSeries returns a single series which is composed of the n-percentile values taken across a wildcard series at each point. Unless `interpolate` is set to True, percentile values are actual values contained in one of the supplied series. """ if n <= 0: raise ValueError('The requested percent is required to be greater than 0') name = 'percentilesOfSeries(%s,%g)' % (seriesList[0].pathExpression, n) (start, end, step) = normalize([seriesList])[1:] values = [ _getPercentile(row, n, interpolate) for row in izip(*seriesList) ] resultSeries = TimeSeries(name, start, end, step, values) resultSeries.pathExpression = name return [resultSeries] def keepLastValue(requestContext, seriesList, limit = INF): """ Takes one metric or a wildcard seriesList, and optionally a limit to the number of 'None' values to skip over. Continues the line with the last received value when gaps ('None' values) appear in your data, rather than breaking your line. Example: .. 
code-block:: none &target=keepLastValue(Server01.connections.handled) &target=keepLastValue(Server01.connections.handled, 10) """ for series in seriesList: series.name = "keepLastValue(%s)" % (series.name) series.pathExpression = series.name consecutiveNones = 0 for i,value in enumerate(series): series[i] = value # No 'keeping' can be done on the first value because we have no idea # what came before it. if i == 0: continue if value is None: consecutiveNones += 1 else: if 0 < consecutiveNones <= limit: # If a non-None value is seen before the limit of Nones is hit, # backfill all the missing datapoints with the last known value. for index in xrange(i - consecutiveNones, i): series[index] = series[i - consecutiveNones - 1] consecutiveNones = 0 # If the series ends with some None values, try to backfill a bit to cover it. if 0 < consecutiveNones < limit: for index in xrange(len(series) - consecutiveNones, len(series)): series[index] = series[len(series) - consecutiveNones - 1] return seriesList def asPercent(requestContext, seriesList, total=None): """ Calculates a percentage of the total of a wildcard series. If `total` is specified, each series will be calculated as a percentage of that total. If `total` is not specified, the sum of all points in the wildcard series will be used instead. The `total` parameter may be a single series or a numeric value. Example: .. code-block:: none &target=asPercent(Server01.connections.{failed,succeeded}, Server01.connections.attempted) &target=asPercent(apache01.threads.busy,1500) &target=asPercent(Server01.cpu.*.jiffies) """ normalize([seriesList]) if total is None: totalValues = [ safeSum(row) for row in izip(*seriesList) ] totalText = None # series.pathExpression elif isinstance(total, list): if len(total) != 1: raise ValueError("asPercent second argument must reference exactly 1 series") normalize([seriesList, total]) totalValues = total[0] totalText = totalValues.name else: totalValues = [total] * len(seriesList[0]) totalText = str(total) resultList = [] for series in seriesList: resultValues = [ safeMul(safeDiv(val, totalVal), 100.0) for val,totalVal in izip(series,totalValues) ] name = "asPercent(%s, %s)" % (series.name, totalText or series.pathExpression) resultSeries = TimeSeries(name,series.start,series.end,series.step,resultValues) resultSeries.pathExpression = name resultList.append(resultSeries) return resultList def divideSeries(requestContext, dividendSeriesList, divisorSeriesList): """ Takes a dividend metric and a divisor metric and draws the division result. A constant may *not* be passed. To divide by a constant, use the scale() function (which is essentially a multiplication operation) and use the inverse of the dividend. (Division by 8 = multiplication by 1/8 or 0.125) Example: .. 
code-block:: none &target=divideSeries(Series.dividends,Series.divisors) """ if len(divisorSeriesList) != 1: raise ValueError("divideSeries second argument must reference exactly 1 series") divisorSeries = divisorSeriesList[0] results = [] for dividendSeries in dividendSeriesList: name = "divideSeries(%s,%s)" % (dividendSeries.name, divisorSeries.name) bothSeries = (dividendSeries, divisorSeries) step = reduce(lcm,[s.step for s in bothSeries]) for s in bothSeries: s.consolidate( step / s.step ) start = min([s.start for s in bothSeries]) end = max([s.end for s in bothSeries]) end -= (end - start) % step values = ( safeDiv(v1,v2) for v1,v2 in izip(*bothSeries) ) quotientSeries = TimeSeries(name, start, end, step, values) quotientSeries.pathExpression = name results.append(quotientSeries) return results def multiplySeries(requestContext, *seriesLists): """ Takes two or more series and multiplies their points. A constant may not be used. To multiply by a constant, use the scale() function. Example: .. code-block:: none &target=multiplySeries(Series.dividends,Series.divisors) """ (seriesList,start,end,step) = normalize(seriesLists) if len(seriesList) == 1: return seriesList name = "multiplySeries(%s)" % ','.join([s.name for s in seriesList]) product = imap(lambda x: safeMul(*x), izip(*seriesList)) resultSeries = TimeSeries(name, start, end, step, product) resultSeries.pathExpression = name return [ resultSeries ] def weightedAverage(requestContext, seriesListAvg, seriesListWeight, node): """ Takes a series of average values and a series of weights and produces a weighted average for all values. The corresponding values should share a node as defined by the node parameter, 0-indexed. Example: .. code-block:: none &target=weightedAverage(*.transactions.mean,*.transactions.count,0) """ sortedSeries={} for seriesAvg, seriesWeight in izip(seriesListAvg , seriesListWeight): key = seriesAvg.name.split(".")[node] if key not in sortedSeries: sortedSeries[key]={} sortedSeries[key]['avg']=seriesAvg key = seriesWeight.name.split(".")[node] if key not in sortedSeries: sortedSeries[key]={} sortedSeries[key]['weight']=seriesWeight productList = [] for key in sortedSeries.keys(): if 'weight' not in sortedSeries[key]: continue if 'avg' not in sortedSeries[key]: continue seriesWeight = sortedSeries[key]['weight'] seriesAvg = sortedSeries[key]['avg'] productValues = [ safeMul(val1, val2) for val1,val2 in izip(seriesAvg,seriesWeight) ] name='product(%s,%s)' % (seriesWeight.name, seriesAvg.name) productSeries = TimeSeries(name,seriesAvg.start,seriesAvg.end,seriesAvg.step,productValues) productSeries.pathExpression=name productList.append(productSeries) sumProducts=sumSeries(requestContext, productList)[0] sumWeights=sumSeries(requestContext, seriesListWeight)[0] resultValues = [ safeDiv(val1, val2) for val1,val2 in izip(sumProducts,sumWeights) ] name = "weightedAverage(%s, %s)" % (','.join(set(s.pathExpression for s in seriesListAvg)) ,','.join(set(s.pathExpression for s in seriesListWeight))) resultSeries = TimeSeries(name,sumProducts.start,sumProducts.end,sumProducts.step,resultValues) resultSeries.pathExpression = name return resultSeries def movingMedian(requestContext, seriesList, windowSize): """ Graphs the moving median of a metric (or metrics) over a fixed number of past points, or a time interval. 
Takes one metric or a wildcard seriesList followed by a number N of datapoints or a quoted string with a length of time like '1hour' or '5min' (See ``from / until`` in the render\_api_ for examples of time formats). Graphs the median of the preceeding datapoints for each point on the graph. All previous datapoints are set to None at the beginning of the graph. Example: .. code-block:: none &target=movingMedian(Server.instance01.threads.busy,10) &target=movingMedian(Server.instance*.threads.idle,'5min') """ windowInterval = None if isinstance(windowSize, basestring): delta = parseTimeOffset(windowSize) windowInterval = abs(delta.seconds + (delta.days * 86400)) if windowInterval: bootstrapSeconds = windowInterval else: bootstrapSeconds = max([s.step for s in seriesList]) * int(windowSize) bootstrapList = _fetchWithBootstrap(requestContext, seriesList, seconds=bootstrapSeconds) result = [] for bootstrap, series in zip(bootstrapList, seriesList): if windowInterval: windowPoints = windowInterval / series.step else: windowPoints = int(windowSize) if isinstance(windowSize, basestring): newName = 'movingMedian(%s,"%s")' % (series.name, windowSize) else: newName = "movingMedian(%s,%d)" % (series.name, windowPoints) newSeries = TimeSeries(newName, series.start, series.end, series.step, []) newSeries.pathExpression = newName offset = len(bootstrap) - len(series) for i in range(len(series)): window = bootstrap[i + offset - windowPoints:i + offset] nonNull = [v for v in window if v is not None] if nonNull: m_index = len(nonNull) / 2 newSeries.append(sorted(nonNull)[m_index]) else: newSeries.append(None) result.append(newSeries) return result def scale(requestContext, seriesList, factor): """ Takes one metric or a wildcard seriesList followed by a constant, and multiplies the datapoint by the constant provided at each point. Example: .. code-block:: none &target=scale(Server.instance01.threads.busy,10) &target=scale(Server.instance*.threads.busy,10) """ for series in seriesList: series.name = "scale(%s,%g)" % (series.name,float(factor)) series.pathExpression = series.name for i,value in enumerate(series): series[i] = safeMul(value,factor) return seriesList def invert(requestContext, seriesList): """ Takes one metric or a wildcard seriesList, and inverts each datapoint (i.e. 1/x). Example: .. code-block:: none &target=invert(Server.instance01.threads.busy) """ for series in seriesList: series.name = "invert(%s)" % (series.name) for i,value in enumerate(series): series[i] = safeDiv(1,value) return seriesList def scaleToSeconds(requestContext, seriesList, seconds): """ Takes one metric or a wildcard seriesList and returns "value per seconds" where seconds is a last argument to this functions. Useful in conjunction with derivative or integral function if you want to normalize its result to a known resolution for arbitrary retentions """ for series in seriesList: series.name = "scaleToSeconds(%s,%d)" % (series.name,seconds) series.pathExpression = series.name for i,value in enumerate(series): factor = seconds * 1.0 / series.step series[i] = safeMul(value,factor) return seriesList def absolute(requestContext, seriesList): """ Takes one metric or a wildcard seriesList and applies the mathematical abs function to each datapoint transforming it to its absolute value. Example: .. 
code-block:: none &target=absolute(Server.instance01.threads.busy) &target=absolute(Server.instance*.threads.busy) """ for series in seriesList: series.name = "absolute(%s)" % (series.name) series.pathExpression = series.name for i,value in enumerate(series): series[i] = safeAbs(value) return seriesList def offset(requestContext, seriesList, factor): """ Takes one metric, a wildcard seriesList followed by a constant or single time serie, and adds the value to each datapoint. Example: .. code-block:: none &target=offset(Server.instance01.threads.busy,10) &target=scale(offset(Server.instance01.threads.*.last_change, scale(Server.instance01.uptime, -1)),-1) """ for series in seriesList: if isinstance(factor, list): if len(factor) != 1: raise ValueError("offset second argument must reference exactly 1 series") factor_serie = factor[0] series.name = "offset(%s,%s)" % (series.name,factor_serie.name) series.pathExpression = series.name for i,value in enumerate(series): if value is not None: series[i] = value + factor_serie[i] else: series.name = "offset(%s,%g)" % (series.name,float(factor)) series.pathExpression = series.name for i,value in enumerate(series): if value is not None: series[i] = value + factor return seriesList def offsetToZero(requestContext, seriesList): """ Offsets a metric or wildcard seriesList by subtracting the minimum value in the series from each datapoint. Useful to compare different series where the values in each series may be higher or lower on average but you're only interested in the relative difference. An example use case is for comparing different round trip time results. When measuring RTT (like pinging a server), different devices may come back with consistently different results due to network latency which will be different depending on how many network hops between the probe and the device. To compare different devices in the same graph, the network latency to each has to be factored out of the results. This is a shortcut that takes the fastest response (lowest number in the series) and sets that to zero and then offsets all of the other datapoints in that series by that amount. This makes the assumption that the lowest response is the fastest the device can respond, of course the more datapoints that are in the series the more accurate this assumption is. Example: .. code-block:: none &target=offsetToZero(Server.instance01.responseTime) &target=offsetToZero(Server.instance*.responseTime) """ for series in seriesList: series.name = "offsetToZero(%s)" % (series.name) minimum = safeMin(series) for i,value in enumerate(series): if value is not None: series[i] = value - minimum return seriesList def movingAverage(requestContext, seriesList, windowSize): """ Graphs the moving average of a metric (or metrics) over a fixed number of past points, or a time interval. Takes one metric or a wildcard seriesList followed by a number N of datapoints or a quoted string with a length of time like '1hour' or '5min' (See ``from / until`` in the render\_api_ for examples of time formats). Graphs the average of the preceeding datapoints for each point on the graph. All previous datapoints are set to None at the beginning of the graph. Example: .. 
code-block:: none &target=movingAverage(Server.instance01.threads.busy,10) &target=movingAverage(Server.instance*.threads.idle,'5min') """ windowInterval = None if isinstance(windowSize, basestring): delta = parseTimeOffset(windowSize) windowInterval = abs(delta.seconds + (delta.days * 86400)) if windowInterval: bootstrapSeconds = windowInterval else: bootstrapSeconds = max([s.step for s in seriesList]) * int(windowSize) bootstrapList = _fetchWithBootstrap(requestContext, seriesList, seconds=bootstrapSeconds) result = [] for bootstrap, series in zip(bootstrapList, seriesList): if windowInterval: windowPoints = windowInterval / series.step else: windowPoints = int(windowSize) if isinstance(windowSize, basestring): newName = 'movingAverage(%s,"%s")' % (series.name, windowSize) else: newName = "movingAverage(%s,%s)" % (series.name, windowSize) newSeries = TimeSeries(newName, series.start, series.end, series.step, []) newSeries.pathExpression = newName offset = len(bootstrap) - len(series) for i in range(len(series)): window = bootstrap[i + offset - windowPoints:i + offset] newSeries.append(safeAvg(window)) result.append(newSeries) return result def cumulative(requestContext, seriesList, consolidationFunc='sum'): """ Takes one metric or a wildcard seriesList, and an optional function. Valid functions are 'sum', 'average', 'min', and 'max' Sets the consolidation function to 'sum' for the given metric seriesList. Alias for :func:`consolidateBy(series, 'sum') <graphite.render.functions.consolidateBy>` .. code-block:: none &target=cumulative(Sales.widgets.largeBlue) """ return consolidateBy(requestContext, seriesList, 'sum') def consolidateBy(requestContext, seriesList, consolidationFunc): """ Takes one metric or a wildcard seriesList and a consolidation function name. Valid function names are 'sum', 'average', 'min', and 'max' When a graph is drawn where width of the graph size in pixels is smaller than the number of datapoints to be graphed, Graphite consolidates the values to to prevent line overlap. The consolidateBy() function changes the consolidation function from the default of 'average' to one of 'sum', 'max', or 'min'. This is especially useful in sales graphs, where fractional values make no sense and a 'sum' of consolidated values is appropriate. .. code-block:: none &target=consolidateBy(Sales.widgets.largeBlue, 'sum') &target=consolidateBy(Servers.web01.sda1.free_space, 'max') """ for series in seriesList: # datalib will throw an exception, so it's not necessary to validate here series.consolidationFunc = consolidationFunc series.name = 'consolidateBy(%s,"%s")' % (series.name, series.consolidationFunc) series.pathExpression = series.name return seriesList def derivative(requestContext, seriesList): """ This is the opposite of the integral function. This is useful for taking a running total metric and calculating the delta between subsequent data points. This function does not normalize for periods of time, as a true derivative would. Instead see the perSecond() function to calculate a rate of change over time. Example: .. code-block:: none &target=derivative(company.server.application01.ifconfig.TXPackets) Each time you run ifconfig, the RX and TXPackets are higher (assuming there is network traffic.) By applying the derivative function, you can get an idea of the packets per minute sent or received, even though you're only recording the total. 
""" results = [] for series in seriesList: newValues = [] prev = None for val in series: if None in (prev,val): newValues.append(None) prev = val continue newValues.append(val - prev) prev = val newName = "derivative(%s)" % series.name newSeries = TimeSeries(newName, series.start, series.end, series.step, newValues) newSeries.pathExpression = newName results.append(newSeries) return results def perSecond(requestContext, seriesList, maxValue=None): """ Derivative adjusted for the series time interval This is useful for taking a running total metric and showing how many requests per second were handled. Example: .. code-block:: none &target=perSecond(company.server.application01.ifconfig.TXPackets) Each time you run ifconfig, the RX and TXPackets are higher (assuming there is network traffic.) By applying the derivative function, you can get an idea of the packets per minute sent or received, even though you're only recording the total. """ results = [] for series in seriesList: newValues = [] prev = None for val in series: step = series.step if None in (prev,val): newValues.append(None) prev = val continue diff = val - prev if diff >= 0: newValues.append(diff / step) elif maxValue is not None and maxValue >= val: newValues.append( ((maxValue - prev) + val + 1) / step ) else: newValues.append(None) prev = val newName = "perSecond(%s)" % series.name newSeries = TimeSeries(newName, series.start, series.end, series.step, newValues) newSeries.pathExpression = newName results.append(newSeries) return results def integral(requestContext, seriesList): """ This will show the sum over time, sort of like a continuous addition function. Useful for finding totals or trends in metrics that are collected per minute. Example: .. code-block:: none &target=integral(company.sales.perMinute) This would start at zero on the left side of the graph, adding the sales each minute, and show the total sales for the time period selected at the right side, (time now, or the time specified by '&until='). """ results = [] for series in seriesList: newValues = [] current = 0.0 for val in series: if val is None: newValues.append(None) else: current += val newValues.append(current) newName = "integral(%s)" % series.name newSeries = TimeSeries(newName, series.start, series.end, series.step, newValues) newSeries.pathExpression = newName results.append(newSeries) return results def nonNegativeDerivative(requestContext, seriesList, maxValue=None): """ Same as the derivative function above, but ignores datapoints that trend down. Useful for counters that increase for a long time, then wrap or reset. (Such as if a network interface is destroyed and recreated by unloading and re-loading a kernel module, common with USB / WiFi cards. Example: .. code-block:: none &target=nonNegativederivative(company.server.application01.ifconfig.TXPackets) """ results = [] for series in seriesList: newValues = [] prev = None for val in series: if None in (prev, val): newValues.append(None) prev = val continue diff = val - prev if diff >= 0: newValues.append(diff) elif maxValue is not None and maxValue >= val: newValues.append( (maxValue - prev) + val + 1 ) else: newValues.append(None) prev = val newName = "nonNegativeDerivative(%s)" % series.name newSeries = TimeSeries(newName, series.start, series.end, series.step, newValues) newSeries.pathExpression = newName results.append(newSeries) return results def stacked(requestContext,seriesLists,stackName='__DEFAULT__'): """ Takes one metric or a wildcard seriesList and change them so they are stacked. 
This is a way of stacking just a couple of metrics without having to use the stacked area mode (that stacks everything). By means of this a mixed stacked and non stacked graph can be made It can also take an optional argument with a name of the stack, in case there is more than one, e.g. for input and output metrics. Example: .. code-block:: none &target=stacked(company.server.application01.ifconfig.TXPackets, 'tx') """ if 'totalStack' in requestContext: totalStack = requestContext['totalStack'].get(stackName, []) else: requestContext['totalStack'] = {} totalStack = []; results = [] for series in seriesLists: newValues = [] for i in range(len(series)): if len(totalStack) <= i: totalStack.append(0) if series[i] is not None: totalStack[i] += series[i] newValues.append(totalStack[i]) else: newValues.append(None) # Work-around for the case when legend is set if stackName=='__DEFAULT__': newName = "stacked(%s)" % series.name else: newName = series.name newSeries = TimeSeries(newName, series.start, series.end, series.step, newValues) newSeries.options['stacked'] = True newSeries.pathExpression = newName results.append(newSeries) requestContext['totalStack'][stackName] = totalStack return results def areaBetween(requestContext, seriesList): """ Draws the area in between the two series in seriesList """ assert len(seriesList) == 2, "areaBetween series argument must reference *exactly* 2 series" lower = seriesList[0] upper = seriesList[1] lower.options['stacked'] = True lower.options['invisible'] = True upper.options['stacked'] = True lower.name = upper.name = "areaBetween(%s)" % upper.pathExpression return seriesList def aliasSub(requestContext, seriesList, search, replace): """ Runs series names through a regex search/replace. .. code-block:: none &target=aliasSub(ip.*TCP*,"^.*TCP(\d+)","\\1") """ try: seriesList.name = re.sub(search, replace, seriesList.name) except AttributeError: for series in seriesList: series.name = re.sub(search, replace, series.name) return seriesList def alias(requestContext, seriesList, newName): """ Takes one metric or a wildcard seriesList and a string in quotes. Prints the string instead of the metric name in the legend. .. code-block:: none &target=alias(Sales.widgets.largeBlue,"Large Blue Widgets") """ try: seriesList.name = newName except AttributeError: for series in seriesList: series.name = newName return seriesList def cactiStyle(requestContext, seriesList, system=None): """ Takes a series list and modifies the aliases to provide column aligned output with Current, Max, and Min values in the style of cacti. Optonally takes a "system" value to apply unit formatting in the same style as the Y-axis. NOTE: column alignment only works with monospace fonts such as terminus. .. 
code-block:: none &target=cactiStyle(ganglia.*.net.bytes_out,"si") """ if 0 == len(seriesList): return seriesList if system: fmt = lambda x:"%.2f%s" % format_units(x,system=system) else: fmt = lambda x:"%.2f"%x nameLen = max([0] + [len(getattr(series,"name")) for series in seriesList]) lastLen = max([0] + [len(fmt(int(safeLast(series) or 3))) for series in seriesList]) + 3 maxLen = max([0] + [len(fmt(int(safeMax(series) or 3))) for series in seriesList]) + 3 minLen = max([0] + [len(fmt(int(safeMin(series) or 3))) for series in seriesList]) + 3 for series in seriesList: name = series.name last = safeLast(series) maximum = safeMax(series) minimum = safeMin(series) if last is None: last = NAN else: last = fmt(float(last)) if maximum is None: maximum = NAN else: maximum = fmt(float(maximum)) if minimum is None: minimum = NAN else: minimum = fmt(float(minimum)) series.name = "%*s Current:%*s Max:%*s Min:%*s " % \ (-nameLen, series.name, -lastLen, last, -maxLen, maximum, -minLen, minimum) return seriesList def aliasByNode(requestContext, seriesList, *nodes): """ Takes a seriesList and applies an alias derived from one or more "node" portion/s of the target name. Node indices are 0 indexed. .. code-block:: none &target=aliasByNode(ganglia.*.cpu.load5,1) """ if isinstance(nodes, int): nodes=[nodes] for series in seriesList: metric_pieces = re.search('(?:.*\()?(?P<name>[-\w*\.]+)(?:,|\)?.*)?',series.name).groups()[0].split('.') series.name = '.'.join(metric_pieces[n] for n in nodes) return seriesList def aliasByMetric(requestContext, seriesList): """ Takes a seriesList and applies an alias derived from the base metric name. .. code-block:: none &target=aliasByMetric(carbon.agents.graphite.creates) """ for series in seriesList: series.name = series.name.split('.')[-1] return seriesList def legendValue(requestContext, seriesList, *valueTypes): """ Takes one metric or a wildcard seriesList and a string in quotes. Appends a value to the metric name in the legend. Currently one or several of: `last`, `avg`, `total`, `min`, `max`. The last argument can be `si` (default) or `binary`, in that case values will be formatted in the corresponding system. .. code-block:: none &target=legendValue(Sales.widgets.largeBlue, 'avg', 'max', 'si') """ def last(s): "Work-around for the missing last point" v = s[-1] if v is None: return s[-2] return v valueFuncs = { 'avg': lambda s: safeDiv(safeSum(s), safeLen(s)), 'total': safeSum, 'min': safeMin, 'max': safeMax, 'last': last } system = None if valueTypes[-1] in ('si', 'binary'): system = valueTypes[-1] valueTypes = valueTypes[:-1] for valueType in valueTypes: valueFunc = valueFuncs.get(valueType, lambda s: '(?)') if system is None: for series in seriesList: series.name += " (%s: %s)" % (valueType, valueFunc(series)) else: for series in seriesList: value = valueFunc(series) formatted = None if value is not None: formatted = "%.2f%s" % format_units(abs(value), system=system) series.name = "%-20s%-5s%-10s" % (series.name, valueType, formatted) return seriesList def alpha(requestContext, seriesList, alpha): """ Assigns the given alpha transparency setting to the series. Takes a float value between 0 and 1. """ for series in seriesList: series.options['alpha'] = alpha return seriesList def color(requestContext, seriesList, theColor): """ Assigns the given color to the seriesList Example: .. 
code-block:: none &target=color(collectd.hostname.cpu.0.user, 'green') &target=color(collectd.hostname.cpu.0.system, 'ff0000') &target=color(collectd.hostname.cpu.0.idle, 'gray') &target=color(collectd.hostname.cpu.0.idle, '6464ffaa') """ for series in seriesList: series.color = theColor return seriesList def substr(requestContext, seriesList, start=0, stop=0): """ Takes one metric or a wildcard seriesList followed by 1 or 2 integers. Assume that the metric name is a list or array, with each element separated by dots. Prints n - length elements of the array (if only one integer n is passed) or n - m elements of the array (if two integers n and m are passed). The list starts with element 0 and ends with element (length - 1). Example: .. code-block:: none &target=substr(carbon.agents.hostname.avgUpdateTime,2,4) The label would be printed as "hostname.avgUpdateTime". """ for series in seriesList: left = series.name.rfind('(') + 1 right = series.name.find(')') if right < 0: right = len(series.name)+1 cleanName = series.name[left:right:] if int(stop) == 0: series.name = '.'.join(cleanName.split('.')[int(start)::]) else: series.name = '.'.join(cleanName.split('.')[int(start):int(stop):]) # substr(func(a.b,'c'),1) becomes b instead of b,'c' series.name = re.sub(',.*$', '', series.name) return seriesList def logarithm(requestContext, seriesList, base=10): """ Takes one metric or a wildcard seriesList, a base, and draws the y-axis in logarithmic format. If base is omitted, the function defaults to base 10. Example: .. code-block:: none &target=log(carbon.agents.hostname.avgUpdateTime,2) """ results = [] for series in seriesList: newValues = [] for val in series: if val is None: newValues.append(None) elif val <= 0: newValues.append(None) else: newValues.append(math.log(val, base)) newName = "log(%s, %s)" % (series.name, base) newSeries = TimeSeries(newName, series.start, series.end, series.step, newValues) newSeries.pathExpression = newName results.append(newSeries) return results def maximumAbove(requestContext, seriesList, n): """ Takes one metric or a wildcard seriesList followed by a constant n. Draws only the metrics with a maximum value above n. Example: .. code-block:: none &target=maximumAbove(system.interface.eth*.packetsSent,1000) This would only display interfaces which sent more than 1000 packets/min. """ results = [] for series in seriesList: if max(series) > n: results.append(series) return results def minimumAbove(requestContext, seriesList, n): """ Takes one metric or a wildcard seriesList followed by a constant n. Draws only the metrics with a minimum value above n. Example: .. code-block:: none &target=minimumAbove(system.interface.eth*.packetsSent,1000) This would only display interfaces which sent more than 1000 packets/min. """ results = [] for series in seriesList: if min(series) > n: results.append(series) return results def maximumBelow(requestContext, seriesList, n): """ Takes one metric or a wildcard seriesList followed by a constant n. Draws only the metrics with a maximum value below n. Example: .. code-block:: none &target=maximumBelow(system.interface.eth*.packetsSent,1000) This would only display interfaces which sent less than 1000 packets/min. """ result = [] for series in seriesList: if max(series) <= n: result.append(series) return result def highestCurrent(requestContext, seriesList, n): """ Takes one metric or a wildcard seriesList followed by an integer N. 
Out of all metrics passed, draws only the N metrics with the highest value at the end of the time period specified. Example: .. code-block:: none &target=highestCurrent(server*.instance*.threads.busy,5) Draws the 5 servers with the highest busy threads. """ return sorted( seriesList, key=safeLast )[-n:] def highestMax(requestContext, seriesList, n): """ Takes one metric or a wildcard seriesList followed by an integer N. Out of all metrics passed, draws only the N metrics with the highest maximum value in the time period specified. Example: .. code-block:: none &target=highestMax(server*.instance*.threads.busy,5) Draws the top 5 servers who have had the most busy threads during the time period specified. """ result_list = sorted( seriesList, key=lambda s: max(s) )[-n:] return sorted(result_list, key=lambda s: max(s), reverse=True) def lowestCurrent(requestContext, seriesList, n): """ Takes one metric or a wildcard seriesList followed by an integer N. Out of all metrics passed, draws only the N metrics with the lowest value at the end of the time period specified. Example: .. code-block:: none &target=lowestCurrent(server*.instance*.threads.busy,5) Draws the 5 servers with the least busy threads right now. """ return sorted( seriesList, key=safeLast )[:n] def currentAbove(requestContext, seriesList, n): """ Takes one metric or a wildcard seriesList followed by an integer N. Out of all metrics passed, draws only the metrics whose value is above N at the end of the time period specified. Example: .. code-block:: none &target=currentAbove(server*.instance*.threads.busy,50) Draws the servers with more than 50 busy threads. """ return [ series for series in seriesList if safeLast(series) >= n ] def currentBelow(requestContext, seriesList, n): """ Takes one metric or a wildcard seriesList followed by an integer N. Out of all metrics passed, draws only the metrics whose value is below N at the end of the time period specified. Example: .. code-block:: none &target=currentBelow(server*.instance*.threads.busy,3) Draws the servers with less than 3 busy threads. """ return [ series for series in seriesList if safeLast(series) <= n ] def highestAverage(requestContext, seriesList, n): """ Takes one metric or a wildcard seriesList followed by an integer N. Out of all metrics passed, draws only the top N metrics with the highest average value for the time period specified. Example: .. code-block:: none &target=highestAverage(server*.instance*.threads.busy,5) Draws the top 5 servers with the highest average value. """ return sorted( seriesList, key=lambda s: safeDiv(safeSum(s),safeLen(s)) )[-n:] def lowestAverage(requestContext, seriesList, n): """ Takes one metric or a wildcard seriesList followed by an integer N. Out of all metrics passed, draws only the bottom N metrics with the lowest average value for the time period specified. Example: .. code-block:: none &target=lowestAverage(server*.instance*.threads.busy,5) Draws the bottom 5 servers with the lowest average value. """ return sorted( seriesList, key=lambda s: safeDiv(safeSum(s),safeLen(s)) )[:n] def averageAbove(requestContext, seriesList, n): """ Takes one metric or a wildcard seriesList followed by an integer N. Out of all metrics passed, draws only the metrics with an average value above N for the time period specified. Example: .. code-block:: none &target=averageAbove(server*.instance*.threads.busy,25) Draws the servers with average values above 25. 
""" return [ series for series in seriesList if safeDiv(safeSum(series),safeLen(series)) >= n ] def averageBelow(requestContext, seriesList, n): """ Takes one metric or a wildcard seriesList followed by an integer N. Out of all metrics passed, draws only the metrics with an average value below N for the time period specified. Example: .. code-block:: none &target=averageBelow(server*.instance*.threads.busy,25) Draws the servers with average values below 25. """ return [ series for series in seriesList if safeDiv(safeSum(series),safeLen(series)) <= n ] def _getPercentile(points, n, interpolate=False): """ Percentile is calculated using the method outlined in the NIST Engineering Statistics Handbook: http://www.itl.nist.gov/div898/handbook/prc/section2/prc252.htm """ sortedPoints = sorted([ p for p in points if p is not None]) if len(sortedPoints) == 0: return None fractionalRank = (n/100.0) * (len(sortedPoints) + 1) rank = int(fractionalRank) rankFraction = fractionalRank - rank if not interpolate: rank += int(math.ceil(rankFraction)) if rank == 0: percentile = sortedPoints[0] elif rank - 1 == len(sortedPoints): percentile = sortedPoints[-1] else: percentile = sortedPoints[rank - 1] # Adjust for 0-index if interpolate: if rank != len(sortedPoints): # if a next value exists nextValue = sortedPoints[rank] percentile = percentile + rankFraction * (nextValue - percentile) return percentile def nPercentile(requestContext, seriesList, n): """Returns n-percent of each series in the seriesList.""" assert n, 'The requested percent is required to be greater than 0' results = [] for s in seriesList: # Create a sorted copy of the TimeSeries excluding None values in the values list. s_copy = TimeSeries( s.name, s.start, s.end, s.step, sorted( [item for item in s if item is not None] ) ) if not s_copy: continue # Skip this series because it is empty. perc_val = _getPercentile(s_copy, n) if perc_val is not None: name = 'nPercentile(%s, %g)' % (s_copy.name, n) point_count = int((s.end - s.start)/s.step) perc_series = TimeSeries(name, s_copy.start, s_copy.end, s_copy.step, [perc_val] * point_count ) perc_series.pathExpression = name results.append(perc_series) return results def averageOutsidePercentile(requestContext, seriesList, n): """ Removes functions lying inside an average percentile interval """ averages = [] for s in seriesList: averages.append(safeDiv(safeSum(s), safeLen(s))) if n < 50: n = 100 - n; lowPercentile = _getPercentile(averages, 100 - n) highPercentile = _getPercentile(averages, n) return [s for s in seriesList if not lowPercentile < safeDiv(safeSum(s), safeLen(s)) < highPercentile] def removeBetweenPercentile(requestContext, seriesList, n): """ Removes lines who do not have an value lying in the x-percentile of all the values at a moment """ if n < 50: n = 100 - n transposed = zip(*seriesList) lowPercentiles = [_getPercentile(col, 100-n) for col in transposed] highPercentiles = [_getPercentile(col, n) for col in transposed] return [l for l in seriesList if sum([not lowPercentiles[val_i] < val < highPercentiles[val_i] for (val_i, val) in enumerate(l)]) > 0] def removeAbovePercentile(requestContext, seriesList, n): """ Removes data above the nth percentile from the series or list of series provided. Values above this percentile are assigned a value of None. 
""" for s in seriesList: s.name = 'removeAbovePercentile(%s, %d)' % (s.name, n) s.pathExpression = s.name percentile = nPercentile(requestContext, [s], n)[0][0] for (index, val) in enumerate(s): if val > percentile: s[index] = None return seriesList def removeAboveValue(requestContext, seriesList, n): """ Removes data above the given threshold from the series or list of series provided. Values above this threshole are assigned a value of None """ for s in seriesList: s.name = 'removeAboveValue(%s, %d)' % (s.name, n) s.pathExpression = s.name for (index, val) in enumerate(s): if val > n: s[index] = None return seriesList def removeBelowPercentile(requestContext, seriesList, n): """ Removes data below the nth percentile from the series or list of series provided. Values below this percentile are assigned a value of None. """ for s in seriesList: s.name = 'removeBelowPercentile(%s, %d)' % (s.name, n) s.pathExpression = s.name percentile = nPercentile(requestContext, [s], n)[0][0] for (index, val) in enumerate(s): if val < percentile: s[index] = None return seriesList def removeBelowValue(requestContext, seriesList, n): """ Removes data below the given threshold from the series or list of series provided. Values below this threshole are assigned a value of None """ for s in seriesList: s.name = 'removeBelowValue(%s, %d)' % (s.name, n) s.pathExpression = s.name for (index, val) in enumerate(s): if val < n: s[index] = None return seriesList def limit(requestContext, seriesList, n): """ Takes one metric or a wildcard seriesList followed by an integer N. Only draw the first N metrics. Useful when testing a wildcard in a metric. Example: .. code-block:: none &target=limit(server*.instance*.memory.free,5) Draws only the first 5 instance's memory free. """ return seriesList[0:n] def sortByName(requestContext, seriesList): """ Takes one metric or a wildcard seriesList. Sorts the list of metrics by the metric name. """ def compare(x,y): return cmp(x.name, y.name) seriesList.sort(compare) return seriesList def sortByTotal(requestContext, seriesList): """ Takes one metric or a wildcard seriesList. Sorts the list of metrics by the sum of values across the time period specified. """ def compare(x,y): return cmp(safeSum(y), safeSum(x)) seriesList.sort(compare) return seriesList def sortByMaxima(requestContext, seriesList): """ Takes one metric or a wildcard seriesList. Sorts the list of metrics by the maximum value across the time period specified. Useful with the &areaMode=all parameter, to keep the lowest value lines visible. Example: .. code-block:: none &target=sortByMaxima(server*.instance*.memory.free) """ def compare(x,y): return cmp(max(y), max(x)) seriesList.sort(compare) return seriesList def sortByMinima(requestContext, seriesList): """ Takes one metric or a wildcard seriesList. Sorts the list of metrics by the lowest value across the time period specified. Example: .. code-block:: none &target=sortByMinima(server*.instance*.memory.free) """ def compare(x,y): return cmp(min(x), min(y)) newSeries = [series for series in seriesList if max(series) > 0] newSeries.sort(compare) return newSeries def sortByName(requestContext, seriesList): """ Takes one metric or a wildcard seriesList. Sorts the list of metrics by the name. """ def compare(x,y): return 1 if x.name > y.name else -1 seriesList.sort(compare) return seriesList def useSeriesAbove(requestContext, seriesList, value, search, replace): """ Compares the maximum of each series against the given `value`. 
If the series maximum is greater than `value`, the regular expression search and replace is applied against the series name to plot a related metric e.g. given useSeriesAbove(ganglia.metric1.reqs,10,'reqs','time'), the response time metric will be plotted only when the maximum value of the corresponding request/s metric is > 10 .. code-block:: none &target=useSeriesAbove(ganglia.metric1.reqs,10,"reqs","time") """ newSeries = [] for series in seriesList: newname = re.sub(search, replace, series.name) if max(series) > value: n = evaluateTarget(requestContext, newname) if n is not None and len(n) > 0: newSeries.append(n[0]) return newSeries def mostDeviant(requestContext, seriesList, n): """ Takes one metric or a wildcard seriesList followed by an integer N. Draws the N most deviant metrics. To find the deviants, the standard deviation (sigma) of each series is taken and ranked. The top N standard deviations are returned. Example: .. code-block:: none &target=mostDeviant(5, server*.instance*.memory.free) Draws the 5 instances furthest from the average memory free. """ deviants = [] for series in seriesList: mean = safeDiv( safeSum(series), safeLen(series) ) if mean is None: continue square_sum = sum([ (value - mean) ** 2 for value in series if value is not None ]) sigma = safeDiv(square_sum, safeLen(series)) if sigma is None: continue deviants.append( (sigma, series) ) deviants.sort(key=lambda i: i[0], reverse=True) #sort by sigma return [ series for (sigma,series) in deviants ][:n] #return the n most deviant series def stdev(requestContext, seriesList, points, windowTolerance=0.1): """ Takes one metric or a wildcard seriesList followed by an integer N. Draw the Standard Deviation of all metrics passed for the past N datapoints. If the ratio of null points in the window is greater than windowTolerance, skip the calculation. The default for windowTolerance is 0.1 (up to 10% of points in the window can be missing). Note that if this is set to 0.0, it will cause large gaps in the output anywhere a single point is missing. Example: .. code-block:: none &target=stdev(server*.instance*.threads.busy,30) &target=stdev(server*.instance*.cpu.system,30,0.0) """ # For this we take the standard deviation in terms of the moving average # and the moving average of series squares. 
for (seriesIndex,series) in enumerate(seriesList): stddevSeries = TimeSeries("stddev(%s,%d)" % (series.name, int(points)), series.start, series.end, series.step, []) stddevSeries.pathExpression = "stddev(%s,%d)" % (series.name, int(points)) validPoints = 0 currentSum = 0 currentSumOfSquares = 0 for (index, newValue) in enumerate(series): # Mark whether we've reached our window size - dont drop points out otherwise if index < points: bootstrapping = True droppedValue = None else: bootstrapping = False droppedValue = series[index - points] # Track non-None points in window if not bootstrapping and droppedValue is not None: validPoints -= 1 if newValue is not None: validPoints += 1 # Remove the value that just dropped out of the window if not bootstrapping and droppedValue is not None: currentSum -= droppedValue currentSumOfSquares -= droppedValue**2 # Add in the value that just popped in the window if newValue is not None: currentSum += newValue currentSumOfSquares += newValue**2 if validPoints > 0 and \ float(validPoints)/points >= windowTolerance: try: deviation = math.sqrt(validPoints * currentSumOfSquares - currentSum**2)/validPoints except ValueError: deviation = None stddevSeries.append(deviation) else: stddevSeries.append(None) seriesList[seriesIndex] = stddevSeries return seriesList def secondYAxis(requestContext, seriesList): """ Graph the series on the secondary Y axis. """ for series in seriesList: series.options['secondYAxis'] = True series.name= 'secondYAxis(%s)' % series.name return seriesList def _fetchWithBootstrap(requestContext, seriesList, **delta_kwargs): 'Request the same data but with a bootstrap period at the beginning' bootstrapContext = requestContext.copy() bootstrapContext['startTime'] = requestContext['startTime'] - timedelta(**delta_kwargs) bootstrapContext['endTime'] = requestContext['startTime'] bootstrapList = [] for series in seriesList: if series.pathExpression in [ b.pathExpression for b in bootstrapList ]: # This pathExpression returns multiple series and we already fetched it continue bootstraps = evaluateTarget(bootstrapContext, series.pathExpression) bootstrapList.extend(bootstraps) newSeriesList = [] for bootstrap, original in zip(bootstrapList, seriesList): newValues = [] if bootstrap.step != original.step: ratio = bootstrap.step / original.step for value in bootstrap: #XXX For series with aggregationMethod = sum this should also # divide by the ratio to bring counts to the same time unit # ...but we have no way of knowing whether that's the case newValues.extend([ value ] * ratio) else: newValues.extend(bootstrap) newValues.extend(original) newSeries = TimeSeries(original.name, bootstrap.start, original.end, original.step, newValues) newSeries.pathExpression = series.pathExpression newSeriesList.append(newSeries) return newSeriesList def _trimBootstrap(bootstrap, original): 'Trim the bootstrap period off the front of this series so it matches the original' original_len = len(original) bootstrap_len = len(bootstrap) length_limit = (original_len * original.step) / bootstrap.step trim_start = bootstrap.end - (length_limit * bootstrap.step) trimmed = TimeSeries(bootstrap.name, trim_start, bootstrap.end, bootstrap.step, bootstrap[-length_limit:]) return trimmed def holtWintersIntercept(alpha,actual,last_season,last_intercept,last_slope): return alpha * (actual - last_season) \ + (1 - alpha) * (last_intercept + last_slope) def holtWintersSlope(beta,intercept,last_intercept,last_slope): return beta * (intercept - last_intercept) + (1 - beta) * last_slope 
def holtWintersSeasonal(gamma,actual,intercept,last_season): return gamma * (actual - intercept) + (1 - gamma) * last_season def holtWintersDeviation(gamma,actual,prediction,last_seasonal_dev): if prediction is None: prediction = 0 return gamma * math.fabs(actual - prediction) + (1 - gamma) * last_seasonal_dev def holtWintersAnalysis(series): alpha = gamma = 0.1 beta = 0.0035 # season is currently one day season_length = (24*60*60) / series.step intercept = 0 slope = 0 pred = 0 intercepts = list() slopes = list() seasonals = list() predictions = list() deviations = list() def getLastSeasonal(i): j = i - season_length if j >= 0: return seasonals[j] return 0 def getLastDeviation(i): j = i - season_length if j >= 0: return deviations[j] return 0 last_seasonal = 0 last_seasonal_dev = 0 next_last_seasonal = 0 next_pred = None for i,actual in enumerate(series): if actual is None: # missing input values break all the math # do the best we can and move on intercepts.append(None) slopes.append(0) seasonals.append(0) predictions.append(next_pred) deviations.append(0) next_pred = None continue if i == 0: last_intercept = actual last_slope = 0 # seed the first prediction as the first actual prediction = actual else: last_intercept = intercepts[-1] last_slope = slopes[-1] if last_intercept is None: last_intercept = actual prediction = next_pred last_seasonal = getLastSeasonal(i) next_last_seasonal = getLastSeasonal(i+1) last_seasonal_dev = getLastDeviation(i) intercept = holtWintersIntercept(alpha,actual,last_seasonal ,last_intercept,last_slope) slope = holtWintersSlope(beta,intercept,last_intercept,last_slope) seasonal = holtWintersSeasonal(gamma,actual,intercept,last_seasonal) next_pred = intercept + slope + next_last_seasonal deviation = holtWintersDeviation(gamma,actual,prediction,last_seasonal_dev) intercepts.append(intercept) slopes.append(slope) seasonals.append(seasonal) predictions.append(prediction) deviations.append(deviation) # make the new forecast series forecastName = "holtWintersForecast(%s)" % series.name forecastSeries = TimeSeries(forecastName, series.start, series.end , series.step, predictions) forecastSeries.pathExpression = forecastName # make the new deviation series deviationName = "holtWintersDeviation(%s)" % series.name deviationSeries = TimeSeries(deviationName, series.start, series.end , series.step, deviations) deviationSeries.pathExpression = deviationName results = { 'predictions': forecastSeries , 'deviations': deviationSeries , 'intercepts': intercepts , 'slopes': slopes , 'seasonals': seasonals } return results def holtWintersForecast(requestContext, seriesList): """ Performs a Holt-Winters forecast using the series as input data. Data from one week previous to the series is used to bootstrap the initial forecast. """ results = [] bootstrapList = _fetchWithBootstrap(requestContext, seriesList, days=7) for bootstrap, series in zip(bootstrapList, seriesList): analysis = holtWintersAnalysis(bootstrap) results.append(_trimBootstrap(analysis['predictions'], series)) return results def holtWintersConfidenceBands(requestContext, seriesList, delta=3): """ Performs a Holt-Winters forecast using the series as input data and plots upper and lower bands with the predicted forecast deviations. 
""" results = [] bootstrapList = _fetchWithBootstrap(requestContext, seriesList, days=7) for bootstrap,series in zip(bootstrapList, seriesList): analysis = holtWintersAnalysis(bootstrap) forecast = _trimBootstrap(analysis['predictions'], series) deviation = _trimBootstrap(analysis['deviations'], series) seriesLength = len(forecast) i = 0 upperBand = list() lowerBand = list() while i < seriesLength: forecast_item = forecast[i] deviation_item = deviation[i] i = i + 1 if forecast_item is None or deviation_item is None: upperBand.append(None) lowerBand.append(None) else: scaled_deviation = delta * deviation_item upperBand.append(forecast_item + scaled_deviation) lowerBand.append(forecast_item - scaled_deviation) upperName = "holtWintersConfidenceUpper(%s)" % series.name lowerName = "holtWintersConfidenceLower(%s)" % series.name upperSeries = TimeSeries(upperName, forecast.start, forecast.end , forecast.step, upperBand) lowerSeries = TimeSeries(lowerName, forecast.start, forecast.end , forecast.step, lowerBand) upperSeries.pathExpression = series.pathExpression lowerSeries.pathExpression = series.pathExpression results.append(lowerSeries) results.append(upperSeries) return results def holtWintersAberration(requestContext, seriesList, delta=3): """ Performs a Holt-Winters forecast using the series as input data and plots the positive or negative deviation of the series data from the forecast. """ results = [] for series in seriesList: confidenceBands = holtWintersConfidenceBands(requestContext, [series], delta) lowerBand = confidenceBands[0] upperBand = confidenceBands[1] aberration = list() for i, actual in enumerate(series): if series[i] is None: aberration.append(0) elif upperBand[i] is not None and series[i] > upperBand[i]: aberration.append(series[i] - upperBand[i]) elif lowerBand[i] is not None and series[i] < lowerBand[i]: aberration.append(series[i] - lowerBand[i]) else: aberration.append(0) newName = "holtWintersAberration(%s)" % series.name results.append(TimeSeries(newName, series.start, series.end , series.step, aberration)) return results def holtWintersConfidenceArea(requestContext, seriesList, delta=3): """ Performs a Holt-Winters forecast using the series as input data and plots the area between the upper and lower bands of the predicted forecast deviations. """ bands = holtWintersConfidenceBands(requestContext, seriesList, delta) results = areaBetween(requestContext, bands) for series in results: series.name = series.name.replace('areaBetween', 'holtWintersConfidenceArea') return results def drawAsInfinite(requestContext, seriesList): """ Takes one metric or a wildcard seriesList. If the value is zero, draw the line at 0. If the value is above zero, draw the line at infinity. If the value is null or less than zero, do not draw the line. Useful for displaying on/off metrics, such as exit codes. (0 = success, anything else = failure.) Example: .. code-block:: none drawAsInfinite(Testing.script.exitCode) """ for series in seriesList: series.options['drawAsInfinite'] = True series.name = 'drawAsInfinite(%s)' % series.name return seriesList def lineWidth(requestContext, seriesList, width): """ Takes one metric or a wildcard seriesList, followed by a float F. Draw the selected metrics with a line width of F, overriding the default value of 1, or the &lineWidth=X.X parameter. Useful for highlighting a single metric out of many, or having multiple line widths in one graph. Example: .. 
code-block:: none &target=lineWidth(server01.instance01.memory.free,5) """ for series in seriesList: series.options['lineWidth'] = width return seriesList def dashed(requestContext, *seriesList): """ Takes one metric or a wildcard seriesList, followed by a float F. Draw the selected metrics with a dotted line with segments of length F If omitted, the default length of the segments is 5.0 Example: .. code-block:: none &target=dashed(server01.instance01.memory.free,2.5) """ if len(seriesList) == 2: dashLength = seriesList[1] else: dashLength = 5 for series in seriesList[0]: series.name = 'dashed(%s, %d)' % (series.name, dashLength) series.options['dashed'] = dashLength return seriesList[0] def timeStack(requestContext, seriesList, timeShiftUnit, timeShiftStart, timeShiftEnd): """ Takes one metric or a wildcard seriesList, followed by a quoted string with the length of time (See ``from / until`` in the render\_api_ for examples of time formats). Also takes a start multiplier and end multiplier for the length of time create a seriesList which is composed the orginal metric series stacked with time shifts starting time shifts from the start multiplier through the end multiplier Useful for looking at history, or feeding into seriesAverage or seriesStdDev Example: .. code-block:: none &target=timeStack(Sales.widgets.largeBlue,"1d",0,7) # create a series for today and each of the previous 7 days """ # Default to negative. parseTimeOffset defaults to + if timeShiftUnit[0].isdigit(): timeShiftUnit = '-' + timeShiftUnit delta = parseTimeOffset(timeShiftUnit) series = seriesList[0] # if len(seriesList) > 1, they will all have the same pathExpression, which is all we care about. results = [] timeShiftStartint = int(timeShiftStart) timeShiftEndint = int(timeShiftEnd) for shft in range(timeShiftStartint,timeShiftEndint): myContext = requestContext.copy() innerDelta = delta * shft myContext['startTime'] = requestContext['startTime'] + innerDelta myContext['endTime'] = requestContext['endTime'] + innerDelta for shiftedSeries in evaluateTarget(myContext, series.pathExpression): shiftedSeries.name = 'timeShift(%s, %s, %s)' % (shiftedSeries.name, timeShiftUnit,shft) shiftedSeries.pathExpression = shiftedSeries.name shiftedSeries.start = series.start shiftedSeries.end = series.end results.append(shiftedSeries) return results def timeShift(requestContext, seriesList, timeShift, resetEnd=True): """ Takes one metric or a wildcard seriesList, followed by a quoted string with the length of time (See ``from / until`` in the render\_api_ for examples of time formats). Draws the selected metrics shifted in time. If no sign is given, a minus sign ( - ) is implied which will shift the metric back in time. If a plus sign ( + ) is given, the metric will be shifted forward in time. Will reset the end date range automatically to the end of the base stat unless resetEnd is False. Example case is when you timeshift to last week and have the graph date range set to include a time in the future, will limit this timeshift to pretend ending at the current time. If resetEnd is False, will instead draw full range including future time. Useful for comparing a metric against itself at a past periods or correcting data stored at an offset. Example: .. code-block:: none &target=timeShift(Sales.widgets.largeBlue,"7d") &target=timeShift(Sales.widgets.largeBlue,"-7d") &target=timeShift(Sales.widgets.largeBlue,"+1h") """ # Default to negative. 
parseTimeOffset defaults to + if timeShift[0].isdigit(): timeShift = '-' + timeShift delta = parseTimeOffset(timeShift) myContext = requestContext.copy() myContext['startTime'] = requestContext['startTime'] + delta myContext['endTime'] = requestContext['endTime'] + delta results = [] if len(seriesList) > 0: series = seriesList[0] # if len(seriesList) > 1, they will all have the same pathExpression, which is all we care about. for shiftedSeries in evaluateTarget(myContext, series.pathExpression): shiftedSeries.name = 'timeShift(%s, %s)' % (shiftedSeries.name, timeShift) if resetEnd: shiftedSeries.end = series.end else: shiftedSeries.end = shiftedSeries.end - shiftedSeries.start + series.start shiftedSeries.start = series.start results.append(shiftedSeries) return results def constantLine(requestContext, value): """ Takes a float F. Draws a horizontal line at value F across the graph. Example: .. code-block:: none &target=constantLine(123.456) """ start = timestamp( requestContext['startTime'] ) end = timestamp( requestContext['endTime'] ) step = (end - start) / 1.0 series = TimeSeries(str(value), start, end, step, [value, value]) return [series] def aggregateLine(requestContext, seriesList, func='avg'): """ Draws a horizontal line based the function applied to the series. Note: By default, the graphite renderer consolidates data points by averaging data points over time. If you are using the 'min' or 'max' function for aggregateLine, this can cause an unusual gap in the line drawn by this function and the data itself. To fix this, you should use the consolidateBy() function with the same function argument you are using for aggregateLine. This will ensure that the proper data points are retained and the graph should line up correctly. Example: .. code-block:: none &target=aggregateLineSeries(server.connections.total, 'avg') """ t_funcs = { 'avg': safeAvg, 'min': safeMin, 'max': safeMax } if func not in t_funcs: raise ValueError("Invalid function %s" % func) value = t_funcs[func]( seriesList[0] ) name = 'aggregateLine(%s,%d)' % (seriesList[0].pathExpression, value) series = constantLine(requestContext, value)[0] series.name = name return [series] def threshold(requestContext, value, label=None, color=None): """ Takes a float F, followed by a label (in double quotes) and a color. (See ``bgcolor`` in the render\_api_ for valid color names & formats.) Draws a horizontal line at value F across the graph. Example: .. code-block:: none &target=threshold(123.456, "omgwtfbbq", red) """ series = constantLine(requestContext, value)[0] if label: series.name = label if color: series.color = color return [series] def transformNull(requestContext, seriesList, default=0): """ Takes a metric or wild card seriesList and an optional value to transform Nulls to. Default is 0. This method compliments drawNullAsZero flag in graphical mode but also works in text only mode. Example: .. code-block:: none &target=transformNull(webapp.pages.*.views,-1) This would take any page that didn't have values and supply negative 1 as a default. Any other numeric value may be used as well. """ def transform(v): if v is None: return default else: return v for series in seriesList: series.name = "transformNull(%s,%g)" % (series.name, default) series.pathExpression = series.name values = [transform(v) for v in series] series.extend(values) del series[:len(values)] return seriesList def isNonNull(requestContext, seriesList): """ Takes a metric or wild card seriesList and counts up how many non-null values are specified. 
This is useful for understanding which metrics have data at a given point in time (ie, to count which servers are alive). Example: .. code-block:: none &target=isNonNull(webapp.pages.*.views) Returns a seriesList where 1 is specified for non-null values, and 0 is specified for null values. """ def transform(v): if v is None: return 0 else: return 1 for series in seriesList: series.name = "isNonNull(%s)" % (series.name) series.pathExpression = series.name values = [transform(v) for v in series] series.extend(values) del series[:len(values)] return seriesList def upperBound(requestContext, seriesList, boundary): """ Takes a metric or wild card seriesList and returns min(value, boundary) for non-null values. This is useful for when you only care about the value up to a certain point - for example if you are logging error codes and you only care if the value is >= 1 and not the value itself. Example: .. code-block:: none &target=upperBound(application.myapp.*.exitcode, 1.0) Returns a seriesList where the maximum value is the boundary or lower. """ def transform(v): if v is None: return None return min(v, boundary) for series in seriesList: series.name = "upperBound(%s, %d)" % (series.name, boundary) series.pathExpression = series.name values = [transform(v) for v in series] series.extend(values) del series[:len(values)] return seriesList def lowerBound(requestContext, seriesList, boundary): """ Takes a metric or wild card seriesList and returns max(value, boundary) for non-null values. This is useful for when you only care about the value up to a certain point - for example if you are logging error codes and you only care if the value is <= -1 and not the value itself. Example: .. code-block:: none &target=lowerBound(application.myapp.*.exitcode, -1.0) Returns a seriesList where the minimum value is the boundary or greater. """ def transform(v): if v is None: return None return max(v, boundary) for series in seriesList: series.name = "lowerBound(%s, %d)" % (series.name, boundary) series.pathExpression = series.name values = [transform(v) for v in series] series.extend(values) del series[:len(values)] return seriesList def identity(requestContext, name): """ Identity function: Returns datapoints where the value equals the timestamp of the datapoint. Useful when you have another series where the value is a timestamp, and you want to compare it to the time of the datapoint, to render an age Example: .. code-block:: none &target=identity("The.time.series") This would create a series named "The.time.series" that contains points where x(t) == t. """ step = 60 delta = timedelta(seconds=step) start = time.mktime(requestContext["startTime"].timetuple()) end = time.mktime(requestContext["endTime"].timetuple()) values = range(start, end, step) series = TimeSeries(name, start, end, step, values) series.pathExpression = 'identity("%s")' % name return [series] def countSeries(requestContext, *seriesLists): """ Draws a horizontal line representing the number of nodes found in the seriesList. .. code-block:: none &target=countSeries(carbon.agents.*.*) """ (seriesList,start,end,step) = normalize(seriesLists) name = "countSeries(%s)" % formatPathExpressions(seriesList) values = ( int(len(row)) for row in izip(*seriesList) ) series = TimeSeries(name,start,end,step,values) series.pathExpression = name return [series] def group(requestContext, *seriesLists): """ Takes an arbitrary number of seriesLists and adds them to a single seriesList. 
This is used to pass multiple seriesLists to a function which only takes one """ seriesGroup = [] for s in seriesLists: seriesGroup.extend(s) return seriesGroup def mapSeries(requestContext, seriesList, mapNode): """ Takes a seriesList and maps it to a list of sub-seriesList. Each sub-seriesList has the given mapNode in common. Example: .. code-block:: none map(servers.*.cpu.*,1) => [ servers.server1.cpu.*, servers.server2.cpu.*, ... servers.serverN.cpu.* ] """ metaSeries = {} keys = [] for series in seriesList: key = series.name.split(".")[mapNode] if key not in metaSeries: metaSeries[key] = [series] keys.append(key) else: metaSeries[key].append(series) return [ metaSeries[key] for key in keys ] def reduceSeries(requestContext, seriesLists, reduceFunction, reduceNode, *reduceMatchers): """ Takes a list of seriesLists and reduces it to a list of series by means of the reduceFunction. Reduction is performed by matching the reduceNode in each series against the list of reduceMatchers. The each series is then passed to the reduceFunction as arguments in the order given by reduceMatchers. The reduceFunction should yield a single series. Example: .. code-block:: none reduce(map(servers.*.disk.*,1),3,"asPercent","bytes_used","total_bytes") => asPercent(servers.server1.disk.bytes_used,servers.server1.disk.total_bytes), asPercent(servers.server2.disk.bytes_used,servers.server2.disk.total_bytes), ... asPercent(servers.serverN.disk.bytes_used,servers.serverN.disk.total_bytes) The resulting list of series are aliased so that they can easily be nested in other functions. In the above example, the resulting series names would become: .. code-block:: none servers.server1.disk.reduce.asPercent, servers.server2.disk.reduce.asPercent, ... servers.serverN.disk.reduce.asPercent """ metaSeries = {} keys = [] for seriesList in seriesLists: for series in seriesList: nodes = series.name.split('.') node = nodes[reduceNode] reduceSeriesName = '.'.join(nodes[0:reduceNode]) + '.reduce.' + reduceFunction if node in reduceMatchers: if reduceSeriesName not in metaSeries: metaSeries[reduceSeriesName] = [None] * len(reduceMatchers) keys.append(reduceSeriesName) i = reduceMatchers.index(node) metaSeries[reduceSeriesName][i] = series for key in keys: metaSeries[key] = SeriesFunctions[reduceFunction](requestContext,metaSeries[key])[0] metaSeries[key].name = key return [ metaSeries[key] for key in keys ] def groupByNode(requestContext, seriesList, nodeNum, callback): """ Takes a serieslist and maps a callback to subgroups within as defined by a common node .. code-block:: none &target=groupByNode(ganglia.by-function.*.*.cpu.load5,2,"sumSeries") Would return multiple series which are each the result of applying the "sumSeries" function to groups joined on the second node (0 indexed) resulting in a list of targets like sumSeries(ganglia.by-function.server1.*.cpu.load5),sumSeries(ganglia.by-function.server2.*.cpu.load5),... """ metaSeries = {} keys = [] for series in seriesList: key = series.name.split(".")[nodeNum] if key not in metaSeries.keys(): metaSeries[key] = [series] keys.append(key) else: metaSeries[key].append(series) for key in metaSeries.keys(): metaSeries[key] = SeriesFunctions[callback](requestContext, metaSeries[key])[0] metaSeries[key].name = key return [ metaSeries[key] for key in keys ] def exclude(requestContext, seriesList, pattern): """ Takes a metric or a wildcard seriesList, followed by a regular expression in double quotes. Excludes metrics that match the regular expression. Example: .. 
code-block:: none &target=exclude(servers*.instance*.threads.busy,"server02") """ regex = re.compile(pattern) return [s for s in seriesList if not regex.search(s.name)] def grep(requestContext, seriesList, pattern): """ Takes a metric or a wildcard seriesList, followed by a regular expression in double quotes. Excludes metrics that don't match the regular expression. Example: .. code-block:: none &target=grep(servers*.instance*.threads.busy,"server02") """ regex = re.compile(pattern) return [s for s in seriesList if regex.search(s.name)] def smartSummarize(requestContext, seriesList, intervalString, func='sum', alignToFrom=False): """ Smarter experimental version of summarize. The alignToFrom parameter has been deprecated, it no longer has any effect. Alignment happens automatically for days, hours, and minutes. """ if alignToFrom: log.info("Deprecated parameter 'alignToFrom' is being ignored.") results = [] delta = parseTimeOffset(intervalString) interval = delta.seconds + (delta.days * 86400) # Adjust the start time to fit an entire day for intervals >= 1 day requestContext = requestContext.copy() s = requestContext['startTime'] if interval >= DAY: requestContext['startTime'] = datetime(s.year, s.month, s.day) elif interval >= HOUR: requestContext['startTime'] = datetime(s.year, s.month, s.day, s.hour) elif interval >= MINUTE: requestContext['startTime'] = datetime(s.year, s.month, s.day, s.hour, s.minute) for i,series in enumerate(seriesList): # XXX: breaks with summarize(metric.{a,b}) # each series.pathExpression == metric.{a,b} newSeries = evaluateTarget(requestContext, series.pathExpression)[0] series[0:len(series)] = newSeries series.start = newSeries.start series.end = newSeries.end series.step = newSeries.step for series in seriesList: buckets = {} # { timestamp: [values] } timestamps = range( int(series.start), int(series.end), int(series.step) ) datapoints = zip(timestamps, series) # Populate buckets for (timestamp, value) in datapoints: bucketInterval = int((timestamp - series.start) / interval) if bucketInterval not in buckets: buckets[bucketInterval] = [] if value is not None: buckets[bucketInterval].append(value) newValues = [] for timestamp in range(series.start, series.end, interval): bucketInterval = int((timestamp - series.start) / interval) bucket = buckets.get(bucketInterval, []) if bucket: if func == 'avg': newValues.append( float(sum(bucket)) / float(len(bucket)) ) elif func == 'last': newValues.append( bucket[len(bucket)-1] ) elif func == 'max': newValues.append( max(bucket) ) elif func == 'min': newValues.append( min(bucket) ) else: newValues.append( sum(bucket) ) else: newValues.append( None ) newName = "smartSummarize(%s, \"%s\", \"%s\")" % (series.name, intervalString, func) alignedEnd = series.start + (bucketInterval * interval) + interval newSeries = TimeSeries(newName, series.start, alignedEnd, interval, newValues) newSeries.pathExpression = newName results.append(newSeries) return results def summarize(requestContext, seriesList, intervalString, func='sum', alignToFrom=False): """ Summarize the data into interval buckets of a certain size. By default, the contents of each interval bucket are summed together. This is useful for counters where each increment represents a discrete event and retrieving a "per X" value requires summing all the events in that interval. Specifying 'avg' instead will return the mean for each bucket, which can be more useful when the value is a gauge that represents a certain value in time. 
'max', 'min' or 'last' can also be specified. By default, buckets are caculated by rounding to the nearest interval. This works well for intervals smaller than a day. For example, 22:32 will end up in the bucket 22:00-23:00 when the interval=1hour. Passing alignToFrom=true will instead create buckets starting at the from time. In this case, the bucket for 22:32 depends on the from time. If from=6:30 then the 1hour bucket for 22:32 is 22:30-23:30. Example: .. code-block:: none &target=summarize(counter.errors, "1hour") # total errors per hour &target=summarize(nonNegativeDerivative(gauge.num_users), "1week") # new users per week &target=summarize(queue.size, "1hour", "avg") # average queue size per hour &target=summarize(queue.size, "1hour", "max") # maximum queue size during each hour &target=summarize(metric, "13week", "avg", true)&from=midnight+20100101 # 2010 Q1-4 """ results = [] delta = parseTimeOffset(intervalString) interval = delta.seconds + (delta.days * 86400) for series in seriesList: buckets = {} timestamps = range( int(series.start), int(series.end), int(series.step) ) datapoints = zip(timestamps, series) for (timestamp, value) in datapoints: if alignToFrom: bucketInterval = int((timestamp - series.start) / interval) else: bucketInterval = timestamp - (timestamp % interval) if bucketInterval not in buckets: buckets[bucketInterval] = [] if value is not None: buckets[bucketInterval].append(value) if alignToFrom: newStart = series.start newEnd = series.end else: newStart = series.start - (series.start % interval) newEnd = series.end - (series.end % interval) + interval newValues = [] for timestamp in range(newStart, newEnd, interval): if alignToFrom: newEnd = timestamp bucketInterval = int((timestamp - series.start) / interval) else: bucketInterval = timestamp - (timestamp % interval) bucket = buckets.get(bucketInterval, []) if bucket: if func == 'avg': newValues.append( float(sum(bucket)) / float(len(bucket)) ) elif func == 'last': newValues.append( bucket[len(bucket)-1] ) elif func == 'max': newValues.append( max(bucket) ) elif func == 'min': newValues.append( min(bucket) ) else: newValues.append( sum(bucket) ) else: newValues.append( None ) if alignToFrom: newEnd += interval newName = "summarize(%s, \"%s\", \"%s\"%s)" % (series.name, intervalString, func, alignToFrom and ", true" or "") newSeries = TimeSeries(newName, newStart, newEnd, interval, newValues) newSeries.pathExpression = newName results.append(newSeries) return results def hitcount(requestContext, seriesList, intervalString, alignToInterval = False): """ Estimate hit counts from a list of time series. This function assumes the values in each time series represent hits per second. It calculates hits per some larger interval such as per day or per hour. This function is like summarize(), except that it compensates automatically for different time scales (so that a similar graph results from using either fine-grained or coarse-grained records) and handles rarely-occurring events gracefully. 
""" results = [] delta = parseTimeOffset(intervalString) interval = int(delta.seconds + (delta.days * 86400)) if alignToInterval: requestContext = requestContext.copy() s = requestContext['startTime'] if interval >= DAY: requestContext['startTime'] = datetime(s.year, s.month, s.day) elif interval >= HOUR: requestContext['startTime'] = datetime(s.year, s.month, s.day, s.hour) elif interval >= MINUTE: requestContext['startTime'] = datetime(s.year, s.month, s.day, s.hour, s.minute) for i,series in enumerate(seriesList): newSeries = evaluateTarget(requestContext, series.pathExpression)[0] intervalCount = int((series.end - series.start) / interval) series[0:len(series)] = newSeries series.start = newSeries.start series.end = newSeries.start + (intervalCount * interval) + interval series.step = newSeries.step for series in seriesList: length = len(series) step = int(series.step) bucket_count = int(math.ceil(float(series.end - series.start) / interval)) buckets = [[] for _ in range(bucket_count)] newStart = int(series.end - bucket_count * interval) for i, value in enumerate(series): if value is None: continue start_time = int(series.start + i * step) start_bucket, start_mod = divmod(start_time - newStart, interval) end_time = start_time + step end_bucket, end_mod = divmod(end_time - newStart, interval) if end_bucket >= bucket_count: end_bucket = bucket_count - 1 end_mod = interval if start_bucket == end_bucket: # All of the hits go to a single bucket. if start_bucket >= 0: buckets[start_bucket].append(value * (end_mod - start_mod)) else: # Spread the hits among 2 or more buckets. if start_bucket >= 0: buckets[start_bucket].append(value * (interval - start_mod)) hits_per_bucket = value * interval for j in range(start_bucket + 1, end_bucket): buckets[j].append(hits_per_bucket) if end_mod > 0: buckets[end_bucket].append(value * end_mod) newValues = [] for bucket in buckets: if bucket: newValues.append( sum(bucket) ) else: newValues.append(None) newName = 'hitcount(%s, "%s"%s)' % (series.name, intervalString, alignToInterval and ", true" or "") newSeries = TimeSeries(newName, newStart, series.end, interval, newValues) newSeries.pathExpression = newName results.append(newSeries) return results def timeFunction(requestContext, name): """ Short Alias: time() Just returns the timestamp for each X value. T Example: .. code-block:: none &target=time("The.time.series") This would create a series named "The.time.series" that contains in Y the same value (in seconds) as X. """ step = 60 delta = timedelta(seconds=step) when = requestContext["startTime"] values = [] while when < requestContext["endTime"]: values.append(time.mktime(when.timetuple())) when += delta series = TimeSeries(name, int(time.mktime(requestContext["startTime"].timetuple())), int(time.mktime(requestContext["endTime"].timetuple())), step, values) series.pathExpression = name return [series] def sinFunction(requestContext, name, amplitude=1): """ Short Alias: sin() Just returns the sine of the current time. The optional amplitude parameter changes the amplitude of the wave. Example: .. code-block:: none &target=sin("The.time.series", 2) This would create a series named "The.time.series" that contains sin(x)*2. 
""" step = 60 delta = timedelta(seconds=step) when = requestContext["startTime"] values = [] while when < requestContext["endTime"]: values.append(math.sin(time.mktime(when.timetuple()))*amplitude) when += delta return [TimeSeries(name, int(time.mktime(requestContext["startTime"].timetuple())), int(time.mktime(requestContext["endTime"].timetuple())), step, values)] def randomWalkFunction(requestContext, name): """ Short Alias: randomWalk() Returns a random walk starting at 0. This is great for testing when there is no real data in whisper. Example: .. code-block:: none &target=randomWalk("The.time.series") This would create a series named "The.time.series" that contains points where x(t) == x(t-1)+random()-0.5, and x(0) == 0. """ step = 60 delta = timedelta(seconds=step) when = requestContext["startTime"] values = [] current = 0 while when < requestContext["endTime"]: values.append(current) current += random.random() - 0.5 when += delta return [TimeSeries(name, int(time.mktime(requestContext["startTime"].timetuple())), int(time.mktime(requestContext["endTime"].timetuple())), step, values)] def events(requestContext, *tags): """ Returns the number of events at this point in time. Usable with drawAsInfinite. Example: .. code-block:: none &target=events("tag-one", "tag-two") &target=events("*") Returns all events tagged as "tag-one" and "tag-two" and the second one returns all events. """ def to_epoch(datetime_object): return int(time.mktime(datetime_object.timetuple())) step = 1 name = "events(" + ", ".join(tags) + ")" if tags == ("*",): tags = None # Django returns database timestamps in timezone-ignorant datetime objects # so we use epoch seconds and do the conversion ourselves start_timestamp = to_epoch(requestContext["startTime"]) start_timestamp = start_timestamp - start_timestamp % step end_timestamp = to_epoch(requestContext["endTime"]) end_timestamp = end_timestamp - end_timestamp % step points = (end_timestamp - start_timestamp)/step events = models.Event.find_events(datetime.fromtimestamp(start_timestamp), datetime.fromtimestamp(end_timestamp), tags=tags) values = [None] * points for event in events: event_timestamp = to_epoch(event.when) value_offset = (event_timestamp - start_timestamp)/step if values[value_offset] is None: values[value_offset] = 1 else: values[value_offset] += 1 result_series = TimeSeries(name, start_timestamp, end_timestamp, step, values, 'sum') result_series.pathExpression = name return [result_series] def pieAverage(requestContext, series): return safeDiv(safeSum(series),safeLen(series)) def pieMaximum(requestContext, series): return max(series) def pieMinimum(requestContext, series): return min(series) PieFunctions = { 'average' : pieAverage, 'maximum' : pieMaximum, 'minimum' : pieMinimum, } SeriesFunctions = { # Combine functions 'sumSeries' : sumSeries, 'sum' : sumSeries, 'multiplySeries' : multiplySeries, 'averageSeries' : averageSeries, 'stddevSeries' : stddevSeries, 'avg' : averageSeries, 'sumSeriesWithWildcards': sumSeriesWithWildcards, 'averageSeriesWithWildcards': averageSeriesWithWildcards, 'minSeries' : minSeries, 'maxSeries' : maxSeries, 'rangeOfSeries': rangeOfSeries, 'percentileOfSeries': percentileOfSeries, 'countSeries': countSeries, 'weightedAverage': weightedAverage, # Transform functions 'scale' : scale, 'invert' : invert, 'scaleToSeconds' : scaleToSeconds, 'offset' : offset, 'offsetToZero' : offsetToZero, 'derivative' : derivative, 'perSecond' : perSecond, 'integral' : integral, 'percentileOfSeries': percentileOfSeries, 
'nonNegativeDerivative' : nonNegativeDerivative, 'log' : logarithm, 'timeStack': timeStack, 'timeShift': timeShift, 'summarize' : summarize, 'smartSummarize' : smartSummarize, 'hitcount' : hitcount, 'absolute' : absolute, # Calculate functions 'movingAverage' : movingAverage, 'movingMedian' : movingMedian, 'stdev' : stdev, 'holtWintersForecast': holtWintersForecast, 'holtWintersConfidenceBands': holtWintersConfidenceBands, 'holtWintersConfidenceArea': holtWintersConfidenceArea, 'holtWintersAberration': holtWintersAberration, 'asPercent' : asPercent, 'pct' : asPercent, 'diffSeries' : diffSeries, 'divideSeries' : divideSeries, # Series Filter functions 'mostDeviant' : mostDeviant, 'highestCurrent' : highestCurrent, 'lowestCurrent' : lowestCurrent, 'highestMax' : highestMax, 'currentAbove' : currentAbove, 'currentBelow' : currentBelow, 'highestAverage' : highestAverage, 'lowestAverage' : lowestAverage, 'averageAbove' : averageAbove, 'averageBelow' : averageBelow, 'maximumAbove' : maximumAbove, 'minimumAbove' : minimumAbove, 'maximumBelow' : maximumBelow, 'nPercentile' : nPercentile, 'limit' : limit, 'sortByTotal' : sortByTotal, 'sortByName' : sortByName, 'averageOutsidePercentile' : averageOutsidePercentile, 'removeBetweenPercentile' : removeBetweenPercentile, 'sortByMaxima' : sortByMaxima, 'sortByMinima' : sortByMinima, 'sortByName' : sortByName, 'useSeriesAbove': useSeriesAbove, 'exclude' : exclude, # Data Filter functions 'removeAbovePercentile' : removeAbovePercentile, 'removeAboveValue' : removeAboveValue, 'removeBelowPercentile' : removeBelowPercentile, 'removeBelowValue' : removeBelowValue, # Special functions 'legendValue' : legendValue, 'alias' : alias, 'aliasSub' : aliasSub, 'aliasByNode' : aliasByNode, 'aliasByMetric' : aliasByMetric, 'cactiStyle' : cactiStyle, 'color' : color, 'alpha' : alpha, 'cumulative' : cumulative, 'consolidateBy' : consolidateBy, 'keepLastValue' : keepLastValue, 'drawAsInfinite' : drawAsInfinite, 'secondYAxis': secondYAxis, 'lineWidth' : lineWidth, 'dashed' : dashed, 'substr' : substr, 'group' : group, 'map': mapSeries, 'reduce': reduceSeries, 'groupByNode' : groupByNode, 'constantLine' : constantLine, 'stacked' : stacked, 'areaBetween' : areaBetween, 'threshold' : threshold, 'transformNull' : transformNull, 'isNonNull' : isNonNull, 'identity': identity, 'aggregateLine' : aggregateLine, 'upperBound' : upperBound, 'lowerBound' : lowerBound, # test functions 'time': timeFunction, "sin": sinFunction, "randomWalk": randomWalkFunction, 'timeFunction': timeFunction, "sinFunction": sinFunction, "randomWalkFunction": randomWalkFunction, #events 'events': events, } #Avoid import circularity if not environ.get('READTHEDOCS'): from graphite.render.evaluator import evaluateTarget
dhtech/graphite-web
webapp/graphite/render/functions.py
Python
apache-2.0
101,346
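For reference, a minimal standalone sketch of the rank-based percentile method that the _getPercentile helper above describes (the NIST Engineering Statistics Handbook method cited in its docstring); the function and variable names here are illustrative and not part of graphite-web.

# Illustrative only: a self-contained restatement of the rank-based percentile
# calculation, mirroring _getPercentile above for a plain list of numbers.
import math


def nist_percentile(points, n, interpolate=False):
    """Return the n-th percentile of a list of numbers (None entries ignored)."""
    sorted_points = sorted(p for p in points if p is not None)
    if not sorted_points:
        return None
    # Fractional rank per the NIST method: (n/100) * (N + 1)
    fractional_rank = (n / 100.0) * (len(sorted_points) + 1)
    rank = int(fractional_rank)
    rank_fraction = fractional_rank - rank
    if not interpolate:
        # Round the rank up instead of interpolating between neighbours
        rank += int(math.ceil(rank_fraction))
    if rank == 0:
        percentile = sorted_points[0]
    elif rank - 1 == len(sorted_points):
        percentile = sorted_points[-1]
    else:
        percentile = sorted_points[rank - 1]  # adjust for 0-based indexing
    if interpolate and rank != len(sorted_points):
        next_value = sorted_points[rank]
        percentile += rank_fraction * (next_value - percentile)
    return percentile


# Quick sanity check: the 50th percentile of five points is the middle one.
assert nist_percentile([1, 2, 3, 4, 5], 50) == 3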
from pyparsing import (
    CaselessLiteral,
    Combine,
    Literal,
    ParseException,
    Regex,
    Suppress,
    Word,
    alphanums,
    alphas,
)

from great_expectations.exceptions import GreatExpectationsError

try:
    import pyspark.sql.functions as F
except ImportError:
    F = None

try:
    import sqlalchemy as sa
except ImportError:
    sa = None


def _set_notnull(s, l, t):
    t["notnull"] = True


column_name = Combine(
    Suppress(Literal('col("'))
    + Word(alphas, f"{alphanums}_.").setResultsName("column")
    + Suppress(Literal('")'))
)
gt = Literal(">")
lt = Literal("<")
ge = Literal(">=")
le = Literal("<=")
eq = Literal("==")
ops = (gt ^ lt ^ ge ^ le ^ eq).setResultsName("op")
fnumber = Regex(r"[+-]?\d+(?:\.\d*)?(?:[eE][+-]?\d+)?").setResultsName("fnumber")
condition_value = Suppress('"') + Word(f"{alphanums}.").setResultsName(
    "condition_value"
) + Suppress('"') ^ Suppress("'") + Word(f"{alphanums}.").setResultsName(
    "condition_value"
) + Suppress(
    "'"
)
not_null = CaselessLiteral(".notnull()").setResultsName("notnull")
condition = (column_name + not_null).setParseAction(_set_notnull) ^ (
    column_name + ops + (fnumber ^ condition_value)
)


class ConditionParserError(GreatExpectationsError):
    pass


def _parse_great_expectations_condition(row_condition: str):
    try:
        return condition.parseString(row_condition)
    except ParseException:
        raise ConditionParserError(f"unable to parse condition: {row_condition}")


# noinspection PyUnresolvedReferences
def parse_condition_to_spark(row_condition: str) -> "pyspark.sql.Column":
    parsed = _parse_great_expectations_condition(row_condition)
    column = parsed["column"]
    if "condition_value" in parsed:
        if parsed["op"] == "==":
            return F.col(column) == parsed["condition_value"]
        else:
            raise ConditionParserError(
                f"Invalid operator: {parsed['op']} for string literal spark condition."
            )
    elif "fnumber" in parsed:
        try:
            num = int(parsed["fnumber"])
        except ValueError:
            num = float(parsed["fnumber"])
        op = parsed["op"]
        if op == ">":
            return F.col(column) > num
        elif op == "<":
            return F.col(column) < num
        elif op == ">=":
            return F.col(column) >= num
        elif op == "<=":
            return F.col(column) <= num
        elif op == "==":
            return F.col(column) == num
    elif "notnull" in parsed and parsed["notnull"] is True:
        return F.col(column).isNotNull()
    else:
        raise ConditionParserError(f"unrecognized column condition: {row_condition}")


def parse_condition_to_sqlalchemy(
    row_condition: str,
) -> "sqlalchemy.sql.expression.ColumnElement":
    parsed = _parse_great_expectations_condition(row_condition)
    column = parsed["column"]
    if "condition_value" in parsed:
        if parsed["op"] == "==":
            return sa.column(column) == parsed["condition_value"]
        else:
            raise ConditionParserError(
                f"Invalid operator: {parsed['op']} for string literal spark condition."
            )
    elif "fnumber" in parsed:
        try:
            num = int(parsed["fnumber"])
        except ValueError:
            num = float(parsed["fnumber"])
        op = parsed["op"]
        if op == ">":
            return sa.column(column) > num
        elif op == "<":
            return sa.column(column) < num
        elif op == ">=":
            return sa.column(column) >= num
        elif op == "<=":
            return sa.column(column) <= num
        elif op == "==":
            return sa.column(column) == num
    elif "notnull" in parsed and parsed["notnull"] is True:
        return sa.not_(sa.column(column).is_(None))
    else:
        raise ConditionParserError(f"unrecognized column condition: {row_condition}")
great-expectations/great_expectations
great_expectations/expectations/row_conditions.py
Python
apache-2.0
3,909
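A short usage sketch for the row-condition parser above. The condition strings follow the col("...") grammar the module defines; the SQLAlchemy path is shown because it needs no Spark session (assumes sqlalchemy is installed; the column names are illustrative):

# Sketch: feed row-condition strings through the parser defined above.
from great_expectations.expectations.row_conditions import parse_condition_to_sqlalchemy

# A numeric comparison becomes a SQLAlchemy column expression (roughly: age > :age_1).
print(parse_condition_to_sqlalchemy('col("age")>18'))

# A .notnull() condition becomes NOT (name IS NULL).
print(parse_condition_to_sqlalchemy('col("name").notnull()'))

# Strings outside the grammar raise ConditionParserError.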
#!/usr/bin/python # -*- coding: utf-8 -*- """Tests for the zip path specification implementation.""" import unittest from dfvfs.path import zip_path_spec from tests.path import test_lib class ZipPathSpecTest(test_lib.PathSpecTestCase): """Tests for the zip path specification implementation.""" def testInitialize(self): """Tests the path specification initialization.""" path_spec = zip_path_spec.ZipPathSpec( location=u'/test', parent=self._path_spec) self.assertNotEqual(path_spec, None) with self.assertRaises(ValueError): _ = zip_path_spec.ZipPathSpec(location=u'/test', parent=None) with self.assertRaises(ValueError): _ = zip_path_spec.ZipPathSpec(location=None, parent=self._path_spec) with self.assertRaises(ValueError): _ = zip_path_spec.ZipPathSpec( location=u'/test', parent=self._path_spec, bogus=u'BOGUS') def testComparable(self): """Tests the path specification comparable property.""" path_spec = zip_path_spec.ZipPathSpec( location=u'/test', parent=self._path_spec) self.assertNotEqual(path_spec, None) expected_comparable = u'\n'.join([ u'type: TEST', u'type: ZIP, location: /test', u'']) self.assertEqual(path_spec.comparable, expected_comparable) if __name__ == '__main__': unittest.main()
manashmndl/dfvfs
tests/path/zip_path_spec.py
Python
apache-2.0
1,347
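For context on the comparable strings asserted in the test above, a small sketch of the same behaviour outside the test harness; OSPathSpec is used only to supply a concrete parent, and the location values are placeholders:

# Sketch: path specs built from the same parts produce identical .comparable strings.
from dfvfs.path import os_path_spec, zip_path_spec

parent = os_path_spec.OSPathSpec(location=u'/tmp/archive.zip')
spec_a = zip_path_spec.ZipPathSpec(location=u'/test', parent=parent)
spec_b = zip_path_spec.ZipPathSpec(location=u'/test', parent=parent)

assert spec_a.comparable == spec_b.comparable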
def get_injured_sharks(): """ >>> from ibeis.scripts.getshark import * # NOQA """ import requests url = 'http://www.whaleshark.org/getKeywordImages.jsp' resp = requests.get(url) assert resp.status_code == 200 keywords = resp.json()['keywords'] key_list = ut.take_column(keywords, 'indexName') key_to_nice = {k['indexName']: k['readableName'] for k in keywords} injury_patterns = [ 'injury', 'net', 'hook', 'trunc', 'damage', 'scar', 'nicks', 'bite', ] injury_keys = [key for key in key_list if any([pat in key for pat in injury_patterns])] noninjury_keys = ut.setdiff(key_list, injury_keys) injury_nice = ut.lmap(lambda k: key_to_nice[k], injury_keys) # NOQA noninjury_nice = ut.lmap(lambda k: key_to_nice[k], noninjury_keys) # NOQA key_list = injury_keys keyed_images = {} for key in ut.ProgIter(key_list, lbl='reading index', bs=True): key_url = url + '?indexName={indexName}'.format(indexName=key) key_resp = requests.get(key_url) assert key_resp.status_code == 200 key_imgs = key_resp.json()['images'] keyed_images[key] = key_imgs key_hist = {key: len(imgs) for key, imgs in keyed_images.items()} key_hist = ut.sort_dict(key_hist, ut.identity) print(ut.repr3(key_hist)) nice_key_hist = ut.map_dict_keys(lambda k: key_to_nice[k], key_hist) nice_key_hist = ut.sort_dict(nice_key_hist, ut.identity) print(ut.repr3(nice_key_hist)) key_to_urls = {key: ut.take_column(vals, 'url') for key, vals in keyed_images.items()} overlaps = {} import itertools overlap_img_list = [] for k1, k2 in itertools.combinations(key_to_urls.keys(), 2): overlap_imgs = ut.isect(key_to_urls[k1], key_to_urls[k2]) num_overlap = len(overlap_imgs) overlaps[(k1, k2)] = num_overlap overlaps[(k1, k1)] = len(key_to_urls[k1]) if num_overlap > 0: #print('[%s][%s], overlap=%r' % (k1, k2, num_overlap)) overlap_img_list.extend(overlap_imgs) all_img_urls = list(set(ut.flatten(key_to_urls.values()))) num_all = len(all_img_urls) # NOQA print('num_all = %r' % (num_all,)) # Determine super-categories categories = ['nicks', 'scar', 'trunc'] # Force these keys into these categories key_to_cat = {'scarbite': 'other_injury'} cat_to_keys = ut.ddict(list) for key in key_to_urls.keys(): flag = 1 if key in key_to_cat: cat = key_to_cat[key] cat_to_keys[cat].append(key) continue for cat in categories: if cat in key: cat_to_keys[cat].append(key) flag = 0 if flag: cat = 'other_injury' cat_to_keys[cat].append(key) cat_urls = ut.ddict(list) for cat, keys in cat_to_keys.items(): for key in keys: cat_urls[cat].extend(key_to_urls[key]) cat_hist = {} for cat in list(cat_urls.keys()): cat_urls[cat] = list(set(cat_urls[cat])) cat_hist[cat] = len(cat_urls[cat]) print(ut.repr3(cat_to_keys)) print(ut.repr3(cat_hist)) key_to_cat = dict([(val, key) for key, vals in cat_to_keys.items() for val in vals]) #ingestset = { # '__class__': 'ImageSet', # 'images': ut.ddict(dict) #} #for key, key_imgs in keyed_images.items(): # for imgdict in key_imgs: # url = imgdict['url'] # encid = imgdict['correspondingEncounterNumber'] # # Make structure # encdict = encounters[encid] # encdict['__class__'] = 'Encounter' # imgdict = ut.delete_keys(imgdict.copy(), ['correspondingEncounterNumber']) # imgdict['__class__'] = 'Image' # cat = key_to_cat[key] # annotdict = {'relative_bbox': [.01, .01, .98, .98], 'tags': [cat, key]} # annotdict['__class__'] = 'Annotation' # # Ensure structures exist # encdict['images'] = encdict.get('images', []) # imgdict['annots'] = imgdict.get('annots', []) # # Add an image to this encounter # encdict['images'].append(imgdict) # # Add an annotation to this image # 
imgdict['annots'].append(annotdict) ##http://springbreak.wildbook.org/rest/org.ecocean.Encounter/1111 #get_enc_url = 'http://www.whaleshark.org/rest/org.ecocean.Encounter/%s' % (encid,) #resp = requests.get(get_enc_url) #print(ut.repr3(encdict)) #print(ut.repr3(encounters)) # Download the files to the local disk #fpath_list = all_urls = ut.unique(ut.take_column( ut.flatten( ut.dict_subset(keyed_images, ut.flatten(cat_to_keys.values())).values() ), 'url')) dldir = ut.truepath('~/tmpsharks') from os.path import commonprefix, basename # NOQA prefix = commonprefix(all_urls) suffix_list = [url_[len(prefix):] for url_ in all_urls] fname_list = [suffix.replace('/', '--') for suffix in suffix_list] fpath_list = [] for url, fname in ut.ProgIter(zip(all_urls, fname_list), lbl='downloading imgs', freq=1): fpath = ut.grab_file_url(url, download_dir=dldir, fname=fname, verbose=False) fpath_list.append(fpath) # Make sure we keep orig info #url_to_keys = ut.ddict(list) url_to_info = ut.ddict(dict) for key, imgdict_list in keyed_images.items(): for imgdict in imgdict_list: url = imgdict['url'] info = url_to_info[url] for k, v in imgdict.items(): info[k] = info.get(k, []) info[k].append(v) info['keys'] = info.get('keys', []) info['keys'].append(key) #url_to_keys[url].append(key) info_list = ut.take(url_to_info, all_urls) for info in info_list: if len(set(info['correspondingEncounterNumber'])) > 1: assert False, 'url with two different encounter nums' # Combine duplicate tags hashid_list = [ut.get_file_uuid(fpath_, stride=8) for fpath_ in ut.ProgIter(fpath_list, bs=True)] groupxs = ut.group_indices(hashid_list)[1] # Group properties by duplicate images #groupxs = [g for g in groupxs if len(g) > 1] fpath_list_ = ut.take_column(ut.apply_grouping(fpath_list, groupxs), 0) url_list_ = ut.take_column(ut.apply_grouping(all_urls, groupxs), 0) info_list_ = [ut.map_dict_vals(ut.flatten, ut.dict_accum(*info_)) for info_ in ut.apply_grouping(info_list, groupxs)] encid_list_ = [ut.unique(info_['correspondingEncounterNumber'])[0] for info_ in info_list_] keys_list_ = [ut.unique(info_['keys']) for info_ in info_list_] cats_list_ = [ut.unique(ut.take(key_to_cat, keys)) for keys in keys_list_] clist = ut.ColumnLists({ 'gpath': fpath_list_, 'url': url_list_, 'encid': encid_list_, 'key': keys_list_, 'cat': cats_list_, }) #for info_ in ut.apply_grouping(info_list, groupxs): # info = ut.dict_accum(*info_) # info = ut.map_dict_vals(ut.flatten, info) # x = ut.unique(ut.flatten(ut.dict_accum(*info_)['correspondingEncounterNumber'])) # if len(x) > 1: # info = info.copy() # del info['keys'] # print(ut.repr3(info)) flags = ut.lmap(ut.fpath_has_imgext, clist['gpath']) clist = clist.compress(flags) import ibeis ibs = ibeis.opendb('WS_Injury', allow_newdir=True) gid_list = ibs.add_images(clist['gpath']) clist['gid'] = gid_list failed_flags = ut.flag_None_items(clist['gid']) print('# failed %s' % (sum(failed_flags)),) passed_flags = ut.not_list(failed_flags) clist = clist.compress(passed_flags) ut.assert_all_not_None(clist['gid']) #ibs.get_image_uris_original(clist['gid']) ibs.set_image_uris_original(clist['gid'], clist['url'], overwrite=True) #ut.zipflat(clist['cat'], clist['key']) if False: # Can run detection instead clist['tags'] = ut.zipflat(clist['cat']) aid_list = ibs.use_images_as_annotations(clist['gid'], adjust_percent=0.01, tags_list=clist['tags']) aid_list import plottool as pt from ibeis import core_annots pt.qt4ensure() #annots = ibs.annots() #aids = [1, 2] #ibs.depc_annot.get('hog', aids , 'hog') 
#ibs.depc_annot.get('chip', aids, 'img') for aid in ut.InteractiveIter(ibs.get_valid_aids()): hogs = ibs.depc_annot.d.get_hog_hog([aid]) chips = ibs.depc_annot.d.get_chips_img([aid]) chip = chips[0] hogimg = core_annots.make_hog_block_image(hogs[0]) pt.clf() pt.imshow(hogimg, pnum=(1, 2, 1)) pt.imshow(chip, pnum=(1, 2, 2)) fig = pt.gcf() fig.show() fig.canvas.draw() #print(len(groupxs)) #if False: #groupxs = ut.find_duplicate_items(ut.lmap(basename, suffix_list)).values() #print(ut.repr3(ut.apply_grouping(all_urls, groupxs))) # # FIX # for fpath, fname in zip(fpath_list, fname_list): # if ut.checkpath(fpath): # ut.move(fpath, join(dirname(fpath), fname)) # print('fpath = %r' % (fpath,)) #import ibeis #from ibeis.dbio import ingest_dataset #dbdir = ibeis.sysres.lookup_dbdir('WS_ALL') #self = ingest_dataset.Ingestable2(dbdir) if False: # Show overlap matrix import plottool as pt import pandas as pd import numpy as np dict_ = overlaps s = pd.Series(dict_, index=pd.MultiIndex.from_tuples(overlaps)) df = s.unstack() lhs, rhs = df.align(df.T) df = lhs.add(rhs, fill_value=0).fillna(0) label_texts = df.columns.values def label_ticks(label_texts): import plottool as pt truncated_labels = [repr(lbl[0:100]) for lbl in label_texts] ax = pt.gca() ax.set_xticks(list(range(len(label_texts)))) ax.set_xticklabels(truncated_labels) [lbl.set_rotation(-55) for lbl in ax.get_xticklabels()] [lbl.set_horizontalalignment('left') for lbl in ax.get_xticklabels()] #xgrid, ygrid = np.meshgrid(range(len(label_texts)), range(len(label_texts))) #pt.plot_surface3d(xgrid, ygrid, disjoint_mat) ax.set_yticks(list(range(len(label_texts)))) ax.set_yticklabels(truncated_labels) [lbl.set_horizontalalignment('right') for lbl in ax.get_yticklabels()] [lbl.set_verticalalignment('center') for lbl in ax.get_yticklabels()] #[lbl.set_rotation(20) for lbl in ax.get_yticklabels()] #df = df.sort(axis=0) #df = df.sort(axis=1) sortx = np.argsort(df.sum(axis=1).values)[::-1] df = df.take(sortx, axis=0) df = df.take(sortx, axis=1) fig = pt.figure(fnum=1) fig.clf() mat = df.values.astype(np.int32) mat[np.diag_indices(len(mat))] = 0 vmax = mat[(1 - np.eye(len(mat))).astype(np.bool)].max() import matplotlib.colors norm = matplotlib.colors.Normalize(vmin=0, vmax=vmax, clip=True) pt.plt.imshow(mat, cmap='hot', norm=norm, interpolation='none') pt.plt.colorbar() pt.plt.grid('off') label_ticks(label_texts) fig.tight_layout() #overlap_df = pd.DataFrame.from_dict(overlap_img_list) class TmpImage(ut.NiceRepr): pass from skimage.feature import hog from skimage import data, color, exposure import plottool as pt image2 = color.rgb2gray(data.astronaut()) # NOQA fpath = './GOPR1120.JPG' import vtool as vt for fpath in [fpath]: """ http://scikit-image.org/docs/dev/auto_examples/plot_hog.html """ image = vt.imread(fpath, grayscale=True) image = pt.color_funcs.to_base01(image) fig = pt.figure(fnum=2) fd, hog_image = hog(image, orientations=8, pixels_per_cell=(16, 16), cells_per_block=(1, 1), visualise=True) fig, (ax1, ax2) = pt.plt.subplots(1, 2, figsize=(8, 4), sharex=True, sharey=True) ax1.axis('off') ax1.imshow(image, cmap=pt.plt.cm.gray) ax1.set_title('Input image') ax1.set_adjustable('box-forced') # Rescale histogram for better display hog_image_rescaled = exposure.rescale_intensity(hog_image, in_range=(0, 0.02)) ax2.axis('off') ax2.imshow(hog_image_rescaled, cmap=pt.plt.cm.gray) ax2.set_title('Histogram of Oriented Gradients') ax1.set_adjustable('box-forced') pt.plt.show() #for def detect_sharks(ibs, gids): #import ibeis #ibs = ibeis.opendb('WS_ALL') 
config = { 'algo' : 'yolo', 'sensitivity' : 0.2, 'config_filepath' : ut.truepath('~/work/WS_ALL/localizer_backup/detect.yolo.2.cfg'), 'weight_filepath' : ut.truepath('~/work/WS_ALL/localizer_backup/detect.yolo.2.39000.weights'), 'class_filepath' : ut.truepath('~/work/WS_ALL/localizer_backup/detect.yolo.2.cfg.classes'), } depc = ibs.depc_image #imgsets = ibs.imagesets(text='Injured Sharks') #images = ibs.images(imgsets.gids[0]) images = ibs.images(gids) images = images.compress([ext not in ['.gif'] for ext in images.exts]) gid_list = images.gids # result is a tuple: # (score, bbox_list, theta_list, conf_list, class_list) results_list = depc.get_property('localizations', gid_list, None, config=config) results_list2 = [] multi_gids = [] failed_gids = [] #ibs.set_image_imagesettext(failed_gids, ['Fixme'] * len(failed_gids)) ibs.set_image_imagesettext(multi_gids, ['Fixme2'] * len(multi_gids)) failed_gids for gid, res in zip(gid_list, results_list): score, bbox_list, theta_list, conf_list, class_list = res if len(bbox_list) == 0: failed_gids.append(gid) elif len(bbox_list) == 1: results_list2.append((gid, bbox_list, theta_list)) elif len(bbox_list) > 1: multi_gids.append(gid) idx = conf_list.argmax() res2 = (gid, bbox_list[idx:idx + 1], theta_list[idx:idx + 1]) results_list2.append(res2) ut.dict_hist(([t[1].shape[0] for t in results_list])) localized_imgs = ibs.images(ut.take_column(results_list2, 0)) assert all([len(a) == 1 for a in localized_imgs.aids]) old_annots = ibs.annots(ut.flatten(localized_imgs.aids)) #old_tags = old_annots.case_tags # Override old bboxes import numpy as np bboxes = np.array(ut.take_column(results_list2, 1))[:, 0, :] ibs.set_annot_bboxes(old_annots.aids, bboxes) if False: import plottool as pt pt.qt4ensure() inter = pt.MultiImageInteraction( ibs.get_image_paths(ut.take_column(results_list2, 0)), bboxes_list=ut.take_column(results_list2, 1) ) inter.dump_to_disk('shark_loc', num=50, prefix='shark_loc') inter.start() inter = pt.MultiImageInteraction(ibs.get_image_paths(failed_gids)) inter.start() inter = pt.MultiImageInteraction(ibs.get_image_paths(multi_gids)) inter.start() def train_part_detector(): """ Problem: healthy sharks usually have a mostly whole body shot injured sharks usually have a close up shot. This distribution of images is likely what the injur-shark net is picking up on. The goal is to train a detector that looks for things that look like the distribution of injured sharks. 
We will run this on healthy sharks to find the parts of """ import ibeis ibs = ibeis.opendb('WS_ALL') imgset = ibs.imagesets(text='Injured Sharks') injured_annots = imgset.annots[0] # NOQA #config = { # 'dim_size': (224, 224), # 'resize_dim': 'wh' #} from pydarknet import Darknet_YOLO_Detector data_path = ibs.export_to_xml() output_path = join(ibs.get_cachedir(), 'training', 'localizer') ut.ensuredir(output_path) dark = Darknet_YOLO_Detector() results = dark.train(data_path, output_path) del dark localizer_weight_path, localizer_config_path, localizer_class_path = results classifier_model_path = ibs.classifier_train() labeler_model_path = ibs.labeler_train() output_path = join(ibs.get_cachedir(), 'training', 'detector') ut.ensuredir(output_path) ut.copy(localizer_weight_path, join(output_path, 'localizer.weights')) ut.copy(localizer_config_path, join(output_path, 'localizer.config')) ut.copy(localizer_class_path, join(output_path, 'localizer.classes')) ut.copy(classifier_model_path, join(output_path, 'classifier.npy')) ut.copy(labeler_model_path, join(output_path, 'labeler.npy')) # ibs.detector_train() def purge_ensure_one_annot_per_images(ibs): """ pip install Pipe """ # Purge all but one annotation images = ibs.images() #images.aids groups = images._annot_groups import numpy as np # Take all but the largest annotations per images large_masks = [ut.index_to_boolmask([np.argmax(x)], len(x)) for x in groups.bbox_area] small_masks = ut.lmap(ut.not_list, large_masks) # Remove all but the largets annotation small_aids = ut.zipcompress(groups.aid, small_masks) small_aids = ut.flatten(small_aids) # Fix any empty images images = ibs.images() empty_images = ut.where(np.array(images.num_annotations) == 0) print('empty_images = %r' % (empty_images,)) #list(map(basename, map(dirname, images.uris_original))) def VecPipe(func): import pipe @pipe.Pipe def wrapped(sequence): return map(func, sequence) #return (None if item is None else func(item) for item in sequence) return wrapped name_list = list(images.uris_original | VecPipe(dirname) | VecPipe(basename)) aids_list = images.aids ut.assert_all_eq(list(aids_list | VecPipe(len))) annots = ibs.annots(ut.flatten(aids_list)) annots.names = name_list def shark_misc(): import ibeis ibs = ibeis.opendb('WS_ALL') aid_list = ibs.get_valid_aids() flag_list = ibs.get_annot_been_adjusted(aid_list) adjusted_aids = ut.compress(aid_list, flag_list) return adjusted_aids #if False: # # TRY TO FIGURE OUT WHY URLS ARE MISSING IN STEP 1 # encounter_to_parsed1 = parsed1.group_items('encounter') # encounter_to_parsed2 = parsed2.group_items('encounter') # url_to_parsed1 = parsed1.group_items('img_url') # url_to_parsed2 = parsed2.group_items('img_url') # def set_overlap(set1, set2): # set1 = set(set1) # set2 = set(set2) # return ut.odict([ # ('s1', len(set1)), # ('s2', len(set2)), # ('isect', len(set1.intersection(set2))), # ('union', len(set1.union(set2))), # ('s1 - s2', len(set1.difference(set2))), # ('s2 - s1', len(set2.difference(set1))), # ]) # print('encounter overlap: ' + ut.repr3(set_overlap(encounter_to_parsed1, encounter_to_parsed2))) # print('url overlap: ' + ut.repr3(set_overlap(url_to_parsed1, url_to_parsed2))) # url1 = list(url_to_parsed1.keys()) # url2 = list(url_to_parsed2.keys()) # # remove common prefixes # from os.path import commonprefix, basename # NOQA # cp1 = commonprefix(url1) # cp2 = commonprefix(url2) # #suffix1 = sorted([u[len(cp1):].lower() for u in url1]) # #suffix2 = sorted([u[len(cp2):].lower() for u in url2]) # suffix1 = sorted([u[len(cp1):] for 
u in url1]) # suffix2 = sorted([u[len(cp2):] for u in url2]) # print('suffix overlap: ' + ut.repr3(set_overlap(suffix1, suffix2))) # set1 = set(suffix1) # set2 = set(suffix2) # only1 = list(set1 - set1.intersection(set2)) # only2 = list(set2 - set1.intersection(set2)) # import numpy as np # for suf in ut.ProgIter(only2, bs=True): # dist = np.array(ut.edit_distance(suf, only1)) # idx = ut.argsort(dist)[0:3] # if dist[idx][0] < 3: # close = ut.take(only1, idx) # print('---') # print('suf = %r' % (join(cp2, suf),)) # print('close = %s' % (ut.repr3([join(cp1, c) for c in close]),)) # print('---') # break # # Associate keywords with original images # #lower_urls = [x.lower() for x in parsed['img_url']] # url_to_idx = ut.make_index_lookup(parsed1['img_url']) # parsed1['keywords'] = [[] for _ in range(len(parsed1))] # for url, keys in url_to_keys.items(): # # hack because urls are note in the same format # url = url.replace('wildbook_data_dir', 'shepherd_data_dir') # url = url.lower() # if url in url_to_idx: # idx = url_to_idx[url] # parsed1['keywords'][idx].extend(keys) #healthy_annots = ibs.annots(ibs.imagesets(text='Non-Injured Sharks').aids[0]) #ibs.set_annot_prop('healthy', healthy_annots.aids, [True] * len(healthy_annots)) #['healthy' in t and len(t) > 0 for t in single_annots.case_tags] #healthy_tags = [] #ut.find_duplicate_items(cur_img_uuids) #ut.find_duplicate_items(new_img_uuids) #cur_uuids = set(cur_img_uuids) #new_uuids = set(new_img_uuids) #both_uuids = new_uuids.intersection(cur_uuids) #only_cur = cur_uuids - both_uuids #only_new = new_uuids - both_uuids #print('len(cur_uuids) = %r' % (len(cur_uuids))) #print('len(new_uuids) = %r' % (len(new_uuids))) #print('len(both_uuids) = %r' % (len(both_uuids))) #print('len(only_cur) = %r' % (len(only_cur))) #print('len(only_new) = %r' % (len(only_new))) # Ensure that data in both sets are syncronized #images_both = [] #if False: # print('Removing small images') # import numpy as np # import vtool as vt # imgsize_list = np.array([vt.open_image_size(gpath) for gpath in parsed['new_fpath']]) # sqrt_area_list = np.sqrt(np.prod(imgsize_list, axis=1)) # areq_flags_list = sqrt_area_list >= 750 # parsed = parsed.compress(areq_flags_list)
SU-ECE-17-7/ibeis
ibeis/scripts/getshark_old.py
Python
apache-2.0
22,458
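The keyword-to-super-category step buried in the script above (the loop over categories, the forced scarbite override, and the other_injury fallback) is easier to follow in isolation. A standalone sketch using plain stdlib containers instead of the ut helpers; the keyword list is illustrative:

# Standalone sketch of the category-bucketing loop from the script above.
from collections import defaultdict

categories = ['nicks', 'scar', 'trunc']        # super-categories from the script
key_to_cat = {'scarbite': 'other_injury'}      # forced override from the script
keywords = ['nicks1', 'scarface', 'truncation', 'scarbite', 'hookinjury']  # illustrative

cat_to_keys = defaultdict(list)
for key in keywords:
    if key in key_to_cat:                      # explicit override wins
        cat_to_keys[key_to_cat[key]].append(key)
        continue
    matched = False
    for cat in categories:
        if cat in key:                         # substring match; a key may hit several cats
            cat_to_keys[cat].append(key)
            matched = True
    if not matched:
        cat_to_keys['other_injury'].append(key)

print(dict(cat_to_keys))
# e.g. {'nicks': ['nicks1'], 'scar': ['scarface'], 'trunc': ['truncation'],
#       'other_injury': ['scarbite', 'hookinjury']}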
"""Test the TcEx Batch Module.""" # third-party import pytest # pylint: disable=no-self-use class TestIndicator3: """Test the TcEx Batch Module.""" def setup_class(self): """Configure setup before all tests.""" @pytest.mark.parametrize( 'indicator,description,label,tag', [ ('3.33.33.1', 'Example #1', 'PYTEST1', 'PyTest1'), ('3.33.33.2', 'Example #2', 'PYTEST2', 'PyTest2'), ('3.33.33.3', 'Example #3', 'PYTEST3', 'PyTest3'), ('3.33.33.4', 'Example #4', 'PYTEST4', 'PyTest4'), ], ) def test_address(self, indicator, description, label, tag, tcex): """Test address creation""" batch = tcex.batch(owner='TCI') xid = batch.generate_xid(['pytest', 'address', indicator]) ti = batch.add_indicator( { 'type': 'Address', 'rating': 5.00, 'confidence': 100, 'summary': indicator, 'xid': xid, 'attribute': [{'displayed': True, 'type': 'Description', 'value': description}], 'securityLabel': [ {'color': 'ffc0cb', 'name': label, 'description': 'Pytest Label Description'} ], 'tag': [{'name': tag}], } ) batch.save(ti) batch_status = batch.submit_all() assert batch_status[0].get('status') == 'Completed' assert batch_status[0].get('successCount') == 1 @pytest.mark.parametrize( 'indicator,description,label,tag', [ ('[email protected]', 'Example #1', 'PYTEST:1', 'PyTest1'), ('[email protected]', 'Example #2', 'PYTEST:2', 'PyTest2'), ('[email protected]', 'Example #3', 'PYTEST:3', 'PyTest3'), ('[email protected]', 'Example #4', 'PYTEST:4', 'PyTest4'), ], ) def test_email_address(self, indicator, description, label, tag, tcex): """Test email_address creation""" batch = tcex.batch(owner='TCI') xid = batch.generate_xid(['pytest', 'email_address', indicator]) ti = batch.add_indicator( { 'type': 'EmailAddress', 'rating': 5.00, 'confidence': 100, 'summary': indicator, 'xid': xid, 'attribute': [{'displayed': True, 'type': 'Description', 'value': description}], 'securityLabel': [ {'color': 'ffc0cb', 'name': label, 'description': 'Pytest Label Description'} ], 'tag': [{'name': tag}], } ) batch.save(ti) batch_status = batch.submit_all() assert batch_status[0].get('status') == 'Completed' assert batch_status[0].get('successCount') == 1 @pytest.mark.parametrize( 'md5,sha1,sha256,description,label,tag', [ ('a3', 'a3', 'a3', 'Example #1', 'PYTEST:1', 'PyTest1'), ('b3', 'b3', 'b3', 'Example #2', 'PYTEST:2', 'PyTest2'), ('c3', 'c3', 'c3', 'Example #3', 'PYTEST:3', 'PyTest3'), ('d3', 'd3', 'd3', 'Example #4', 'PYTEST:4', 'PyTest4'), ], ) def test_file(self, md5, sha1, sha256, description, label, tag, tcex): """Test file creation""" batch = tcex.batch(owner='TCI') xid = batch.generate_xid(['pytest', 'file', md5, sha1, sha256]) ti = batch.add_indicator( { 'type': 'File', 'rating': 5.00, 'confidence': 100, 'summary': f'{md5 * 16} : {sha1 * 20} : {sha256 * 32}', 'xid': xid, 'attribute': [{'displayed': True, 'type': 'Description', 'value': description}], 'securityLabel': [ {'color': 'ffc0cb', 'name': label, 'description': 'Pytest Label Description'} ], 'tag': [{'name': tag}], } ) batch.save(ti) batch_status = batch.submit_all() assert batch_status[0].get('status') == 'Completed' assert batch_status[0].get('successCount') == 1 @pytest.mark.parametrize( 'indicator,description,label,tag', [ ('pytest-host-i3-001.com', 'Example #1', 'PYTEST:1', 'PyTest1'), ('pytest-host-i3-002.com', 'Example #2', 'PYTEST:2', 'PyTest2'), ('pytest-host-i3-003.com', 'Example #3', 'PYTEST:3', 'PyTest3'), ('pytest-host-i3-004.com', 'Example #4', 'PYTEST:4', 'PyTest4'), ], ) def test_host(self, indicator, description, label, tag, tcex): """Test host creation""" batch = 
tcex.batch(owner='TCI') xid = batch.generate_xid(['pytest', 'host', indicator]) ti = batch.add_indicator( { 'type': 'Host', 'rating': 5.00, 'confidence': 100, 'summary': indicator, 'xid': xid, 'attribute': [{'displayed': True, 'type': 'Description', 'value': description}], 'securityLabel': [ {'color': 'ffc0cb', 'name': label, 'description': 'Pytest Label Description'} ], 'tag': [{'name': tag}], } ) batch.save(ti) batch_status = batch.submit_all() assert batch_status[0].get('status') == 'Completed' assert batch_status[0].get('successCount') == 1 @pytest.mark.parametrize( 'indicator,description,label,tag', [ ('https://pytest-url-i3-001.com', 'Example #1', 'PYTEST:1', 'PyTest1'), ('https://pytest-url-i3-002.com', 'Example #2', 'PYTEST:2', 'PyTest2'), ('https://pytest-url-i3-003.com', 'Example #3', 'PYTEST:3', 'PyTest3'), ('https://pytest-url-i3-004.com', 'Example #4', 'PYTEST:4', 'PyTest4'), ], ) def test_url(self, indicator, description, label, tag, tcex): """Test url creation""" batch = tcex.batch(owner='TCI') xid = batch.generate_xid(['pytest', 'url', indicator]) ti = batch.add_indicator( { 'type': 'Url', 'rating': 5.00, 'confidence': 100, 'summary': indicator, 'xid': xid, 'attribute': [{'displayed': True, 'type': 'Description', 'value': description}], 'securityLabel': [ {'color': 'ffc0cb', 'name': label, 'description': 'Pytest Label Description'} ], 'tag': [{'name': tag}], } ) batch.save(ti) batch_status = batch.submit_all() assert batch_status[0].get('status') == 'Completed' assert batch_status[0].get('successCount') == 1
kstilwell/tcex
tests/batch/test_indicator_interface_3.py
Python
apache-2.0
6,925
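Stripped of the pytest parametrization, the batch flow the tests above exercise is short. A hedged sketch; tcex is assumed to be an initialized TcEx instance with a working API connection, and the owner and indicator values are illustrative:

# Sketch of the create-and-submit flow used in the tests above.
batch = tcex.batch(owner='TCI')
xid = batch.generate_xid(['example', 'address', '3.33.33.1'])
ti = batch.add_indicator({
    'type': 'Address',
    'summary': '3.33.33.1',
    'rating': 5.0,
    'confidence': 100,
    'xid': xid,
})
batch.save(ti)
batch_status = batch.submit_all()
print(batch_status[0].get('status'), batch_status[0].get('successCount'))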
from perfrunner.helpers import local from perfrunner.helpers.cbmonitor import timeit, with_stats from perfrunner.helpers.profiler import with_profiles from perfrunner.helpers.worker import java_dcp_client_task from perfrunner.tests import PerfTest class DCPThroughputTest(PerfTest): def _report_kpi(self, time_elapsed: float, clients: int, stream: str): self.reporter.post( *self.metrics.dcp_throughput(time_elapsed, clients, stream) ) @with_stats @timeit @with_profiles def access(self, *args): username, password = self.cluster_spec.rest_credentials for target in self.target_iterator: local.run_dcptest( host=target.node, username=username, password=password, bucket=target.bucket, num_items=self.test_config.load_settings.items, num_connections=self.test_config.dcp_settings.num_connections ) def warmup(self): self.remote.stop_server() self.remote.drop_caches() return self._warmup() def _warmup(self): self.remote.start_server() for master in self.cluster_spec.masters: for bucket in self.test_config.buckets: self.monitor.monitor_warmup(self.memcached, master, bucket) def run(self): self.load() self.wait_for_persistence() self.check_num_items() self.compact_bucket() if self.test_config.dcp_settings.invoke_warm_up: self.warmup() time_elapsed = self.access() self.report_kpi(time_elapsed, int(self.test_config.java_dcp_settings.clients), self.test_config.java_dcp_settings.stream) class JavaDCPThroughputTest(DCPThroughputTest): def init_java_dcp_client(self): local.clone_git_repo(repo=self.test_config.java_dcp_settings.repo, branch=self.test_config.java_dcp_settings.branch) local.build_java_dcp_client() @with_stats @timeit @with_profiles def access(self, *args): for target in self.target_iterator: local.run_java_dcp_client( connection_string=target.connection_string, messages=self.test_config.load_settings.items, config_file=self.test_config.java_dcp_settings.config, ) def run(self): self.init_java_dcp_client() super().run() class JavaDCPCollectionThroughputTest(DCPThroughputTest): def init_java_dcp_clients(self): if self.worker_manager.is_remote: self.remote.init_java_dcp_client(repo=self.test_config.java_dcp_settings.repo, branch=self.test_config.java_dcp_settings.branch, worker_home=self.worker_manager.WORKER_HOME, commit=self.test_config.java_dcp_settings.commit) else: local.clone_git_repo(repo=self.test_config.java_dcp_settings.repo, branch=self.test_config.java_dcp_settings.branch, commit=self.test_config.java_dcp_settings.commit) local.build_java_dcp_client() @with_stats @timeit @with_profiles def access(self, *args, **kwargs): access_settings = self.test_config.access_settings access_settings.workload_instances = int(self.test_config.java_dcp_settings.clients) PerfTest.access(self, task=java_dcp_client_task, settings=access_settings) def run(self): self.init_java_dcp_clients() self.load() self.wait_for_persistence() self.check_num_items() self.compact_bucket() if self.test_config.access_settings.workers > 0: self.access_bg() time_elapsed = self.access() self.report_kpi(time_elapsed, int(self.test_config.java_dcp_settings.clients), self.test_config.java_dcp_settings.stream)
couchbase/perfrunner
perfrunner/tests/dcp.py
Python
apache-2.0
4,121
from herd.manager.server import HerdManager

manager = HerdManager(address=None, port=8339, ip='127.0.0.1', config=None,
                      stream_ip='127.0.0.1', stream_port=8338)
manager.start_listener()
hoangelos/Herd
demo/misc/manager_node1.py
Python
apache-2.0
209
from __future__ import division from __future__ import print_function import numpy as np import matplotlib.pyplot as plt import os import string plt.ioff() data = np.load("attn_weights.npz") lines = map(lambda x: x.split('\t'), open("sanitycheck.txt", 'r').readlines()) save_dir = "attn_plots3" sentences = [] current_sent = [] for line in lines: if len(line) < 10: sentences.append(map(list, zip(*current_sent))) current_sent = [] else: current_sent.append(map(string.strip, (line[1], line[6], line[7], line[8], line[9]))) sentences.append(map(list, zip(*current_sent))) max_layer = 3 remove_padding = True plot = False batch_sum = 0 fig, axes = plt.subplots(nrows=2, ncols=4) # For each batch+layer for arr_name in sorted(data.files): print("Processing %s" % arr_name) batch_size = data[arr_name].shape[0] batch = int(arr_name[1]) layer = int(arr_name.split(':')[1][-1]) idx_in_batch = 0 # For each element in the batch (one layer) # if layer == max_layer and batch > 0: for b_i, arrays in enumerate(data[arr_name]): sentence_idx = batch_sum + b_i width = arrays.shape[-1] name = "sentence%d_layer%d" % (sentence_idx, layer) print("Batch: %d, sentence: %d, layer: %d" % (batch, sentence_idx, layer)) sentence = sentences[sentence_idx] words = sentence[0] pred_deps = np.array(map(int, sentence[1])) pred_labels = sentence[2] gold_deps = np.array(map(int, sentence[3])) gold_labels = sentence[4] sent_len = len(words) text = words + [] if remove_padding else (['PAD'] * (width - sent_len)) gold_deps_xy = np.array(list(enumerate(gold_deps))) pred_deps_xy = np.array(list(enumerate(pred_deps))) labels_incorrect = map(lambda x: x[0] != x[1], zip(pred_labels, gold_labels)) incorrect_indices = np.where((pred_deps != gold_deps) | labels_incorrect) pred_deps_xy_incorrect = pred_deps_xy[incorrect_indices] pred_labels_incorrect = np.array(pred_labels)[incorrect_indices] if 'prep' in pred_labels_incorrect: print(' '.join(text)) print(' '.join(pred_labels)) print(' '.join(gold_labels)) if plot: correct_dir = "correct" if len(incorrect_indices[0]) == 0 else "incorrect" fig.suptitle(name, fontsize=16) # For each attention head for arr, ax in zip(arrays, axes.flat): res = ax.imshow(arr[:sent_len, :sent_len], cmap=plt.cm.viridis, interpolation=None) ax.set_xticks(range(sent_len)) ax.set_yticks(range(sent_len)) ax.set_xticklabels(text, rotation=75, fontsize=2) ax.set_yticklabels(text, fontsize=2) map(lambda x: ax.text(x[0][1], x[0][0], x[1], ha="center", va="center", fontsize=1), zip(gold_deps_xy, gold_labels)) map(lambda x: ax.text(x[0][1], x[0][0], x[1], ha="center", va="bottom", fontsize=1, color='red'), zip(pred_deps_xy_incorrect, pred_labels_incorrect)) fig.tight_layout() fig.savefig(os.path.join(save_dir, correct_dir, name + ".pdf")) map(lambda x: x.clear(), axes.flat) if layer == max_layer: batch_sum += batch_size
strubell/Parser
bin/plot_attn.py
Python
apache-2.0
3,315
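The core drawing step in the script above (imshow of one head's weights with the sentence tokens as tick labels) can be reproduced on dummy data. A self-contained sketch; the random matrix and word list stand in for attn_weights.npz and sanitycheck.txt:

# Sketch: plot one attention head as a heatmap with token tick labels (dummy data).
import numpy as np
import matplotlib.pyplot as plt

words = ['The', 'cat', 'sat', 'down']                              # stand-in sentence
attn = np.random.dirichlet(np.ones(len(words)), size=len(words))   # rows sum to 1

fig, ax = plt.subplots()
ax.imshow(attn, cmap=plt.cm.viridis, interpolation=None)
ax.set_xticks(range(len(words)))
ax.set_yticks(range(len(words)))
ax.set_xticklabels(words, rotation=75)
ax.set_yticklabels(words)
fig.tight_layout()
fig.savefig('attn_example.pdf')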
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from oslo_config import cfg from webob import exc from karbor.api.v1 import quotas from karbor import context from karbor.tests import base from karbor.tests.unit.api import fakes CONF = cfg.CONF class QuotaApiTest(base.TestCase): def setUp(self): super(QuotaApiTest, self).setUp() self.controller = quotas.QuotasController() self.ctxt = context.RequestContext('demo', 'fakeproject', True) @mock.patch( 'karbor.db.sqlalchemy.api.quota_update') def test_quota_update(self, mock_quota_update): quota = self._quota_in_request_body() body = {"quota": quota} req = fakes.HTTPRequest.blank( '/v1/quotas/73f74f90a1754bd7ad658afb3272323f', use_admin_context=True) self.controller.update( req, '73f74f90a1754bd7ad658afb3272323f', body=body) self.assertTrue(mock_quota_update.called) def test_quota_update_invalid_project_id(self): quota = self._quota_in_request_body() body = {"quota": quota} req = fakes.HTTPRequest.blank( '/v1/quotas/111', use_admin_context=True) self.assertRaises(exc.HTTPBadRequest, self.controller.update, req, '111', body=body) @mock.patch( 'karbor.quota.DbQuotaDriver.get_defaults') def test_quota_defaults(self, mock_quota_get): req = fakes.HTTPRequest.blank( 'v1/quotas/73f74f90a1754bd7ad658afb3272323f', use_admin_context=True) self.controller.defaults( req, '73f74f90a1754bd7ad658afb3272323f') self.assertTrue(mock_quota_get.called) @mock.patch( 'karbor.quota.DbQuotaDriver.get_project_quotas') def test_quota_detail(self, mock_quota_get): req = fakes.HTTPRequest.blank( '/v1/quotas/73f74f90a1754bd7ad658afb3272323f', use_admin_context=True) self.controller.detail( req, '73f74f90a1754bd7ad658afb3272323f') self.assertTrue(mock_quota_get.called) @mock.patch( 'karbor.quota.DbQuotaDriver.get_project_quotas') def test_quota_show(self, moak_quota_get): req = fakes.HTTPRequest.blank( '/v1/quotas/73f74f90a1754bd7ad658afb3272323f', use_admin_context=True) self.controller.show( req, '73f74f90a1754bd7ad658afb3272323f') self.assertTrue(moak_quota_get.called) def test_quota_show_invalid(self): req = fakes.HTTPRequest.blank('/v1/quotas/1', use_admin_context=True) self.assertRaises( exc.HTTPBadRequest, self.controller.show, req, "1") @mock.patch( 'karbor.quota.DbQuotaDriver.destroy_all_by_project') def test_quota_delete(self, moak_restore_get): req = fakes.HTTPRequest.blank( '/v1/quotas/73f74f90a1754bd7ad658afb3272323f', use_admin_context=True) self.controller.delete( req, '73f74f90a1754bd7ad658afb3272323f') self.assertTrue(moak_restore_get.called) def test_quota_delete_invalid(self): req = fakes.HTTPRequest.blank('/v1/quotas/1', use_admin_context=True) self.assertRaises( exc.HTTPBadRequest, self.controller.delete, req, "1") def _quota_in_request_body(self): quota_req = { "plans": 20, } return quota_req
openstack/smaug
karbor/tests/unit/api/v1/test_quotas.py
Python
apache-2.0
4,032
from turbo import register

import app
import api

register.register_group_urls('', [
    ('/', app.HomeHandler),
    ('/plus', app.IncHandler),
    ('/minus', app.MinusHandler),
])

register.register_group_urls('/v1', [
    ('', api.HomeHandler),
])
wecatch/app-turbo
demos/jinja2-support/apps/app/__init__.py
Python
apache-2.0
253
""" thainlp tag command line. """ import argparse from pythainlp import cli from pythainlp.tag import locations, named_entity, pos_tag class SubAppBase: def __init__(self, name, argv): parser = argparse.ArgumentParser(**cli.make_usage("tag " + name)) parser.add_argument( "text", type=str, help="input text", ) parser.add_argument( "-s", "--sep", dest="separator", type=str, help=f"Token separator for input text. default: {self.separator}", default=self.separator, ) args = parser.parse_args(argv) self.args = args tokens = args.text.split(args.separator) result = self.run(tokens) for word, tag in result: print(word, "/", tag) class POSTaggingApp(SubAppBase): def __init__(self, *args, **kwargs): self.separator = "|" self.run = pos_tag super().__init__(*args, **kwargs) class App: def __init__(self, argv): parser = argparse.ArgumentParser( prog="tag", description="Annotate a text with linguistic information", usage=( 'thainlp tag <tag_type> [--sep "<separator>"] "<text>"\n\n' "tag_type:\n\n" "pos part-of-speech\n\n" "<separator> and <text> should be inside double quotes.\n" "<text> should be a tokenized text, " "with tokens separated by <separator>.\n\n" "Example:\n\n" 'thainlp tag pos -s " " "แรงดึงดูด เก็บ หัว คุณ ลง"\n\n' "--" ), ) parser.add_argument("tag_type", type=str, help="[pos]") args = parser.parse_args(argv[2:3]) cli.exit_if_empty(args.tag_type, parser) tag_type = str.lower(args.tag_type) argv = argv[3:] if tag_type == "pos": POSTaggingApp("Part-of-Speech tagging", argv) else: print(f"Tag type not available: {tag_type}")
PyThaiNLP/pythainlp
pythainlp/cli/tag.py
Python
apache-2.0
2,123
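The CLI above is a thin wrapper: it splits the input on the separator and hands the token list to pos_tag. Calling the library directly looks like this (the sample sentence is the one from the module's own usage string):

# Sketch: direct-library equivalent of `thainlp tag pos -s " " "<text>"`.
from pythainlp.tag import pos_tag

tokens = "แรงดึงดูด เก็บ หัว คุณ ลง".split(" ")
for word, tag in pos_tag(tokens):
    print(word, "/", tag)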
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto  # type: ignore


__protobuf__ = proto.module(
    package="google.ads.googleads.v9.enums",
    marshal="google.ads.googleads.v9",
    manifest={"SimulationTypeEnum",},
)


class SimulationTypeEnum(proto.Message):
    r"""Container for enum describing the field a simulation
    modifies.
    """

    class SimulationType(proto.Enum):
        r"""Enum describing the field a simulation modifies."""

        UNSPECIFIED = 0
        UNKNOWN = 1
        CPC_BID = 2
        CPV_BID = 3
        TARGET_CPA = 4
        BID_MODIFIER = 5
        TARGET_ROAS = 6
        PERCENT_CPC_BID = 7
        TARGET_IMPRESSION_SHARE = 8
        BUDGET = 9


__all__ = tuple(sorted(__protobuf__.manifest))
googleads/google-ads-python
google/ads/googleads/v9/enums/types/simulation_type.py
Python
apache-2.0
1,302
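Because the class above is a proto-plus message wrapper, the nested enum behaves like a standard Python IntEnum. A quick sketch, assuming the google-ads package is installed:

# Sketch: the nested proto-plus enum is addressable by name and by value.
from google.ads.googleads.v9.enums.types.simulation_type import SimulationTypeEnum

sim_type = SimulationTypeEnum.SimulationType.TARGET_CPA
print(sim_type.name, int(sim_type))          # TARGET_CPA 4
print(SimulationTypeEnum.SimulationType(2))  # SimulationType.CPC_BID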
#Please read "usefull links" before going on, they are necessary for better understanding import StringIO import json #Imports the json library that decodes json tokens recieved from telegram api import logging #Imports the library that puts messages in the log info of the google app engine import random #Library that creates random numbers import urllib import urllib2 # for sending images from PIL import Image import multipart # standard app engine imports from google.appengine.api import urlfetch from google.appengine.ext import ndb import webapp2 TOKEN = 'YOUR_BOT_TOKEN_HERE' BASE_URL = 'https://api.telegram.org/bot' + TOKEN + '/' # ================================ class EnableStatus(ndb.Model): #NDB entity called EnabledStatus # key name: str(chat_id) enabled = ndb.BooleanProperty(indexed=False, default=False) #Entity has atribute enabled # ================================ def setEnabled(chat_id, yes): es = ndb.Key(EnableStatus, str(chat_id)).get() #Gets the entity if es: #If it exists es.enabled = yes #Sets its enabled atribute es.put() return es = EnableStatus(id = str(chat_id)) #If not creates a new entity es.put() def getEnabled(chat_id): es = ndb.Key(EnableStatus, str(chat_id)).get() if es: return es.enabled #Return the atual state es = EnableStatus(id = str(chat_id)) es.put() return False # ================================ This part makes the comunication google-telegram class MeHandler(webapp2.RequestHandler): def get(self): urlfetch.set_default_fetch_deadline(60) self.response.write(json.dumps(json.load(urllib2.urlopen(BASE_URL + 'getMe')))) class GetUpdatesHandler(webapp2.RequestHandler): def get(self): urlfetch.set_default_fetch_deadline(60) self.response.write(json.dumps(json.load(urllib2.urlopen(BASE_URL + 'getUpdates')))) class SetWebhookHandler(webapp2.RequestHandler): def get(self): urlfetch.set_default_fetch_deadline(60) url = self.request.get('url') if url: self.response.write(json.dumps(json.load(urllib2.urlopen(BASE_URL + 'setWebhook', urllib.urlencode({'url': url}))))) class WebhookHandler(webapp2.RequestHandler): def post(self): urlfetch.set_default_fetch_deadline(60) body = json.loads(self.request.body) logging.info('request body:') logging.info(body) self.response.write(json.dumps(body)) #From here you can take message information, now it only uses the chat_id and text, #you can take more things from it, search how to use json on google update_id = body['update_id'] message = body['message'] message_id = message.get('message_id') date = message.get('date') text = message.get('text') #Takes the 'text' string fr = message.get('from') chat = message['chat'] chat_id = chat['id'] #Chat id string if not text: logging.info('no text') return def reply(msg=None, img=None): #Function used to send messages, it recieves a string message or a binary image if msg: resp = urllib2.urlopen(BASE_URL + 'sendMessage', urllib.urlencode({ 'chat_id': str(chat_id), 'text': msg.encode('utf-8'), 'disable_web_page_preview': 'true', 'reply_to_message_id': str(message_id), })).read() elif img: resp = multipart.post_multipart(BASE_URL + 'sendPhoto', [ ('chat_id', str(chat_id)), ('reply_to_message_id', str(message_id)), ], [ ('photo', 'image.jpg', img), ]) else: logging.error('no msg or img specified') #If there is no image it puts in the google log the string resp = None logging.info('send response:') logging.info(resp) #From here you can make custom commands, just add an 'elif' if text.startswith('/'): if text == '/start': reply('Bot enabled') setEnabled(chat_id, True) #Sets the status to 
True (read above comments) elif text == '/stop': reply('Bot disabled') setEnabled(chat_id, False) #Changes it to false elif text == '/image': #Creates an aleatory image img = Image.new('RGB', (512, 512)) #Size of the image base = random.randint(0, 16777216) pixels = [base+i*j for i in range(512) for j in range(512)] # generate sample image img.putdata(pixels) output = StringIO.StringIO() img.save(output, 'JPEG') reply(img=output.getvalue()) """If you want to send a different image use this piece of code: img = Image.open("image.jpg") output = StringIO.StringIO() img.save(output, 'JPEG') reply(img=output.getvalue())""" else: reply('What command?') #If it is not a command (does not start with /) elif 'who are you' in text: reply('telebot starter kit, created by yukuku: https://github.com/yukuku/telebot') elif 'what time' in text: reply('look at the top-right corner of your screen!') else: if getEnabled(chat_id): #If the status of the bot is enabled the bot answers you try: resp1 = json.load(urllib2.urlopen('http://www.simsimi.com/requestChat?lc=en&ft=1.0&req=' + urllib.quote_plus(text.encode('utf-8')))) #Sends you mesage to simsimi IA back = resp1.get('res') except urllib2.HTTPError, err: logging.error(err) back = str(err) if not back: reply('okay...') elif 'I HAVE NO RESPONSE' in back: reply('you said something with no meaning') else: reply(back) else: logging.info('not enabled for chat_id {}'.format(chat_id)) #Telegram comunication (dont change) app = webapp2.WSGIApplication([ ('/me', MeHandler), ('/updates', GetUpdatesHandler), ('/set_webhook', SetWebhookHandler), ('/webhook', WebhookHandler), ], debug=True)
0Cristofer/telebot
main.py
Python
apache-2.0
6,516
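One deployment detail worth noting for the handlers above: Telegram only starts delivering updates to the /webhook route after the webhook has been registered once through /set_webhook, along the lines of the request below (the host is a placeholder for your App Engine app):

https://<your-app>.appspot.com/set_webhook?url=https://<your-app>.appspot.com/webhook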
from flask import Blueprint, render_template, Response, current_app, send_from_directory from pyox import ServiceError from pyox.apps.monitor.api import get_cluster_client from datetime import datetime cluster_ui = Blueprint('cluster_ui',__name__,template_folder='templates') @cluster_ui.route('/') def index(): client = get_cluster_client() try: info = client.info(); scheduler = client.scheduler(); metrics = client.metrics(); info['startedOn'] = datetime.fromtimestamp(info['startedOn'] / 1e3).isoformat() return render_template('cluster.html',info=info,scheduler=scheduler,metrics=metrics) except ServiceError as err: return Response(status=err.status_code,response=err.message if err.status_code!=401 else 'Authentication Required',mimetype="text/plain",headers={'WWW-Authenticate': 'Basic realm="Login Required"'}) assets = Blueprint('assets_ui',__name__) @assets.route('/assets/<path:path>') def send_asset(path): dir = current_app.config.get('ASSETS') if dir is None: dir = __file__[:__file__.rfind('/')] + '/assets/' return send_from_directory(dir, path)
alexmilowski/python-hadoop-rest-api
pyox/apps/monitor/views.py
Python
apache-2.0
1,129
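The module above only defines blueprints; mounting them on a Flask application happens elsewhere. A hedged sketch of that wiring (the app object and any cluster-connection config it would need are assumptions, not shown in this file):

# Sketch: registering the blueprints defined above on a Flask app.
from flask import Flask
from pyox.apps.monitor.views import assets, cluster_ui

app = Flask(__name__)
app.register_blueprint(cluster_ui)   # serves '/' via cluster_ui.index
app.register_blueprint(assets)       # serves '/assets/<path>'
# get_cluster_client() will additionally expect its connection settings to be
# available at request time (details are outside this module).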
#!/usr/bin/env python # -*- coding: utf-8 -*- ############################################################################### # Copyright 2013 Kitware Inc. # # Licensed under the Apache License, Version 2.0 ( the "License" ); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ############################################################################### import base64 import codecs import cherrypy import io import json import logging import os import shutil import signal import six import sys import unittest import uuid from six import BytesIO from six.moves import urllib from girder.utility import model_importer from girder.utility.server import setup as setupServer from girder.constants import AccessType, ROOT_DIR, SettingKey from girder.models import getDbConnection from . import mock_smtp from . import mock_s3 from . import mongo_replicaset local = cherrypy.lib.httputil.Host('127.0.0.1', 30000) remote = cherrypy.lib.httputil.Host('127.0.0.1', 30001) mockSmtp = mock_smtp.MockSmtpReceiver() mockS3Server = None enabledPlugins = [] def startServer(mock=True, mockS3=False): """ Test cases that communicate with the server should call this function in their setUpModule() function. """ server = setupServer(test=True, plugins=enabledPlugins) if mock: cherrypy.server.unsubscribe() cherrypy.engine.start() # Make server quiet (won't announce start/stop or requests) cherrypy.config.update({'environment': 'embedded'}) # Log all requests if we asked to do so if 'cherrypy' in os.environ.get('EXTRADEBUG', '').split(): cherrypy.config.update({'log.screen': True}) logHandler = logging.StreamHandler(sys.stdout) logHandler.setLevel(logging.DEBUG) cherrypy.log.error_log.addHandler(logHandler) mockSmtp.start() if mockS3: global mockS3Server mockS3Server = mock_s3.startMockS3Server() return server def stopServer(): """ Test cases that communicate with the server should call this function in their tearDownModule() function. """ cherrypy.engine.exit() mockSmtp.stop() def dropTestDatabase(dropModels=True): """ Call this to clear all contents from the test database. Also forces models to reload. """ db_connection = getDbConnection() dbName = cherrypy.config['database']['uri'].split('/')[-1] if 'girder_test_' not in dbName: raise Exception('Expected a testing database name, but got %s' % dbName) db_connection.drop_database(dbName) if dropModels: model_importer.reinitializeAll() def dropGridFSDatabase(dbName): """ Clear all contents from a gridFS database used as an assetstore. :param dbName: the name of the database to drop. """ db_connection = getDbConnection() db_connection.drop_database(dbName) def dropFsAssetstore(path): """ Delete all of the files in a filesystem assetstore. This unlinks the path, which is potentially dangerous. :param path: the path to remove. """ if os.path.isdir(path): shutil.rmtree(path) class TestCase(unittest.TestCase, model_importer.ModelImporter): """ Test case base class for the application. Adds helpful utilities for database and HTTP communication. 
""" def setUp(self, assetstoreType=None, dropModels=True): """ We want to start with a clean database each time, so we drop the test database before each test. We then add an assetstore so the file model can be used without 500 errors. :param assetstoreType: if 'gridfs' or 's3', use that assetstore. For any other value, use a filesystem assetstore. """ self.assetstoreType = assetstoreType dropTestDatabase(dropModels=dropModels) assetstoreName = os.environ.get('GIRDER_TEST_ASSETSTORE', 'test') assetstorePath = os.path.join( ROOT_DIR, 'tests', 'assetstore', assetstoreName) if assetstoreType == 'gridfs': # Name this as '_auto' to prevent conflict with assetstores created # within test methods gridfsDbName = 'girder_test_%s_assetstore_auto' % assetstoreName dropGridFSDatabase(gridfsDbName) self.assetstore = self.model('assetstore'). \ createGridFsAssetstore(name='Test', db=gridfsDbName) elif assetstoreType == 'gridfsrs': gridfsDbName = 'girder_test_%s_rs_assetstore_auto' % assetstoreName mongo_replicaset.startMongoReplicaSet() self.assetstore = self.model('assetstore'). \ createGridFsAssetstore( name='Test', db=gridfsDbName, mongohost='mongodb://127.0.0.1:27070,127.0.0.1:27071,' '127.0.0.1:27072', replicaset='replicaset') elif assetstoreType == 's3': self.assetstore = self.model('assetstore'). \ createS3Assetstore(name='Test', bucket='bucketname', accessKeyId='test', secret='test', service=mockS3Server.service) else: dropFsAssetstore(assetstorePath) self.assetstore = self.model('assetstore'). \ createFilesystemAssetstore(name='Test', root=assetstorePath) addr = ':'.join(map(str, mockSmtp.address)) self.model('setting').set(SettingKey.SMTP_HOST, addr) self.model('setting').set(SettingKey.UPLOAD_MINIMUM_CHUNK_SIZE, 0) self.model('setting').set(SettingKey.PLUGINS_ENABLED, enabledPlugins) def tearDown(self): """ Stop any services that we started just for this test. """ # If "self.setUp" is overridden, "self.assetstoreType" may not be set if getattr(self, 'assetstoreType', None) == 'gridfsrs': mongo_replicaset.stopMongoReplicaSet() def assertStatusOk(self, response): """ Call this to assert that the response yielded a 200 OK output_status. :param response: The response object. """ self.assertStatus(response, 200) def assertStatus(self, response, code): """ Call this to assert that a given HTTP status code was returned. :param response: The response object. :param code: The status code. :type code: int or str """ code = str(code) if not response.output_status.startswith(code.encode()): msg = 'Response status was %s, not %s.' % (response.output_status, code) if hasattr(response, 'json'): msg += ' Response body was:\n%s' % json.dumps( response.json, sort_keys=True, indent=4, separators=(',', ': ')) self.fail(msg) def assertHasKeys(self, obj, keys): """ Assert that the given object has the given list of keys. :param obj: The dictionary object. :param keys: The keys it must contain. :type keys: list or tuple """ for k in keys: self.assertTrue(k in obj, 'Object does not contain key "%s"' % k) def assertRedirect(self, resp, url=None): """ Assert that we were given an HTTP redirect response, and optionally assert that you were redirected to a specific URL. :param resp: The response object. :param url: If you know the URL you expect to be redirected to, you should pass it here. 
:type url: str """ self.assertStatus(resp, 303) self.assertTrue('Location' in resp.headers) if url: self.assertEqual(url, resp.headers['Location']) def assertNotHasKeys(self, obj, keys): """ Assert that the given object does not have any of the given list of keys. :param obj: The dictionary object. :param keys: The keys it must not contain. :type keys: list or tuple """ for k in keys: self.assertFalse(k in obj, 'Object contains key "%s"' % k) def assertValidationError(self, response, field=None): """ Assert that a ValidationException was thrown with the given field. :param response: The response object. :param field: The field that threw the validation exception. :type field: str """ self.assertStatus(response, 400) self.assertEqual(response.json['type'], 'validation') self.assertEqual(response.json.get('field', None), field) def assertAccessDenied(self, response, level, modelName, user=None): if level == AccessType.READ: ls = 'Read' elif level == AccessType.WRITE: ls = 'Write' else: ls = 'Admin' if user is None: self.assertStatus(response, 401) else: self.assertStatus(response, 403) self.assertEqual('%s access denied for %s.' % (ls, modelName), response.json['message']) def assertMissingParameter(self, response, param): """ Assert that the response was a "parameter missing" error response. :param response: The response object. :param param: The name of the missing parameter. :type param: str """ self.assertEqual("Parameter '%s' is required." % param, response.json.get('message', '')) self.assertStatus(response, 400) def getSseMessages(self, resp): messages = self.getBody(resp).strip().split('\n\n') if not messages or messages == ['']: return () return [json.loads(m.replace('data: ', '')) for m in messages] def uploadFile(self, name, contents, user, parent, parentType='folder', mimeType=None): """ Upload a file. This is meant for small testing files, not very large files that should be sent in multiple chunks. :param name: The name of the file. :type name: str :param contents: The file contents :type contents: str :param user: The user performing the upload. :type user: dict :param parent: The parent document. :type parent: dict :param parentType: The type of the parent ("folder" or "item") :type parentType: str :param mimeType: Explicit MIME type to set on the file. :type mimeType: str :returns: The file that was created. :rtype: dict """ mimeType = mimeType or 'application/octet-stream' resp = self.request( path='/file', method='POST', user=user, params={ 'parentType': parentType, 'parentId': str(parent['_id']), 'name': name, 'size': len(contents), 'mimeType': mimeType }) self.assertStatusOk(resp) fields = [('offset', 0), ('uploadId', resp.json['_id'])] files = [('chunk', name, contents)] resp = self.multipartRequest( path='/file/chunk', user=user, fields=fields, files=files) self.assertStatusOk(resp) file = resp.json self.assertHasKeys(file, ['itemId']) self.assertEqual(file['name'], name) self.assertEqual(file['size'], len(contents)) self.assertEqual(file['mimeType'], mimeType) return self.model('file').load(file['_id'], force=True) def ensureRequiredParams(self, path='/', method='GET', required=(), user=None): """ Ensure that a set of parameters is required by the endpoint. :param path: The endpoint path to test. :param method: The HTTP method of the endpoint. :param required: The required parameter set. 
:type required: sequence of str """ for exclude in required: params = dict.fromkeys([p for p in required if p != exclude], '') resp = self.request(path=path, method=method, params=params, user=user) self.assertMissingParameter(resp, exclude) def _genToken(self, user): """ Helper method for creating an authentication token for the user. """ token = self.model('token').createToken(user) return str(token['_id']) def _buildHeaders(self, headers, cookie, user, token, basicAuth, authHeader): if cookie is not None: headers.append(('Cookie', cookie)) if user is not None: headers.append(('Girder-Token', self._genToken(user))) elif token is not None: if isinstance(token, dict): headers.append(('Girder-Token', token['_id'])) else: headers.append(('Girder-Token', token)) if basicAuth is not None: auth = base64.b64encode(basicAuth.encode('utf8')) headers.append((authHeader, 'Basic %s' % auth.decode())) def request(self, path='/', method='GET', params=None, user=None, prefix='/api/v1', isJson=True, basicAuth=None, body=None, type=None, exception=False, cookie=None, token=None, additionalHeaders=None, useHttps=False, authHeader='Girder-Authorization'): """ Make an HTTP request. :param path: The path part of the URI. :type path: str :param method: The HTTP method. :type method: str :param params: The HTTP parameters. :type params: dict :param prefix: The prefix to use before the path. :param isJson: Whether the response is a JSON object. :param basicAuth: A string to pass with the Authorization: Basic header of the form 'login:password' :param exception: Set this to True if a 500 is expected from this call. :param cookie: A custom cookie value to set. :param token: If you want to use an existing token to login, pass the token ID. :type token: str :param additionalHeaders: A list of headers to add to the request. Each item is a tuple of the form (header-name, header-value). :param useHttps: If True, pretend to use HTTPS. :param authHeader: The HTTP request header to use for authentication. :type authHeader: str :returns: The cherrypy response object from the request. """ if not params: params = {} headers = [('Host', '127.0.0.1'), ('Accept', 'application/json')] qs = fd = None if additionalHeaders: headers.extend(additionalHeaders) if method in ['POST', 'PUT', 'PATCH'] or body: if isinstance(body, six.string_types): body = body.encode('utf8') qs = urllib.parse.urlencode(params).encode('utf8') if type is None: headers.append(('Content-Type', 'application/x-www-form-urlencoded')) else: headers.append(('Content-Type', type)) qs = body headers.append(('Content-Length', '%d' % len(qs))) fd = BytesIO(qs) qs = None elif params: qs = urllib.parse.urlencode(params) app = cherrypy.tree.apps[''] request, response = app.get_serving( local, remote, 'http' if not useHttps else 'https', 'HTTP/1.1') request.show_tracebacks = True self._buildHeaders(headers, cookie, user, token, basicAuth, authHeader) # Python2 will not match Unicode URLs url = str(prefix + path) try: response = request.run(method, url, qs, 'HTTP/1.1', headers, fd) finally: if fd: fd.close() if isJson: body = self.getBody(response) try: response.json = json.loads(body) except Exception: print(body) raise AssertionError('Did not receive JSON response') if not exception and response.output_status.startswith(b'500'): raise AssertionError("Internal server error: %s" % self.getBody(response)) return response def getBody(self, response, text=True): """ Returns the response body as a text type or binary string. 
:param response: The response object from the server. :param text: If true, treat the data as a text string, otherwise, treat as binary. """ data = '' if text else b'' for chunk in response.body: if text and isinstance(chunk, six.binary_type): chunk = chunk.decode('utf8') elif not text and not isinstance(chunk, six.binary_type): chunk = chunk.encode('utf8') data += chunk return data def multipartRequest(self, fields, files, path, method='POST', user=None, prefix='/api/v1', isJson=True): """ Make an HTTP request with multipart/form-data encoding. This can be used to send files with the request. :param fields: List of (name, value) tuples. :param files: List of (name, filename, content) tuples. :param path: The path part of the URI. :type path: str :param method: The HTTP method. :type method: str :param prefix: The prefix to use before the path. :param isJson: Whether the response is a JSON object. :returns: The cherrypy response object from the request. """ contentType, body, size = MultipartFormdataEncoder().encode( fields, files) headers = [('Host', '127.0.0.1'), ('Accept', 'application/json'), ('Content-Type', contentType), ('Content-Length', str(size))] app = cherrypy.tree.apps[''] request, response = app.get_serving(local, remote, 'http', 'HTTP/1.1') request.show_tracebacks = True if user is not None: headers.append(('Girder-Token', self._genToken(user))) fd = io.BytesIO(body) # Python2 will not match Unicode URLs url = str(prefix + path) try: response = request.run(method, url, None, 'HTTP/1.1', headers, fd) finally: fd.close() if isJson: body = self.getBody(response) try: response.json = json.loads(body) except Exception: print(body) raise AssertionError('Did not receive JSON response') if response.output_status.startswith(b'500'): raise AssertionError("Internal server error: %s" % self.getBody(response)) return response class MultipartFormdataEncoder(object): """ This class is adapted from http://stackoverflow.com/a/18888633/2550451 It is used as a helper for creating multipart/form-data requests to simulate file uploads. 
""" def __init__(self): self.boundary = uuid.uuid4().hex self.contentType = \ 'multipart/form-data; boundary=%s' % self.boundary @classmethod def u(cls, s): if sys.hexversion < 0x03000000 and isinstance(s, str): s = s.decode('utf-8') if sys.hexversion >= 0x03000000 and isinstance(s, bytes): s = s.decode('utf-8') return s def iter(self, fields, files): encoder = codecs.getencoder('utf-8') for (key, value) in fields: key = self.u(key) yield encoder('--%s\r\n' % self.boundary) yield encoder(self.u('Content-Disposition: form-data; ' 'name="%s"\r\n') % key) yield encoder('\r\n') if isinstance(value, int) or isinstance(value, float): value = str(value) yield encoder(self.u(value)) yield encoder('\r\n') for (key, filename, content) in files: key = self.u(key) filename = self.u(filename) yield encoder('--%s\r\n' % self.boundary) yield encoder(self.u('Content-Disposition: form-data; name="%s";' ' filename="%s"\r\n' % (key, filename))) yield encoder('Content-Type: application/octet-stream\r\n') yield encoder('\r\n') yield (content, len(content)) yield encoder('\r\n') yield encoder('--%s--\r\n' % self.boundary) def encode(self, fields, files): body = io.BytesIO() size = 0 for chunk, chunkLen in self.iter(fields, files): if not isinstance(chunk, six.binary_type): chunk = chunk.encode('utf8') body.write(chunk) size += chunkLen return self.contentType, body.getvalue(), size def _sigintHandler(*args): print('Received SIGINT, shutting down mock SMTP server...') mockSmtp.stop() sys.exit(1) signal.signal(signal.SIGINT, _sigintHandler)
salamb/girder
tests/base.py
Python
apache-2.0
21,800
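The request, multipartRequest, and uploadFile helpers above are easiest to see in a concrete test. A minimal sketch follows; it assumes the surrounding class is exposed as base.TestCase and that self.user was created by the usual fixtures, and the /folder route parameters are taken on faith rather than from this file.

# Hypothetical test module built on the helpers above; base.TestCase and
# self.user are assumed fixtures, not defined in this file.
from tests import base


class UploadExampleTest(base.TestCase):
    def testSmallUpload(self):
        # Create a folder to upload into (route and params assumed).
        resp = self.request('/folder', method='POST', user=self.user,
                            params={'parentType': 'user',
                                    'parentId': str(self.user['_id']),
                                    'name': 'scratch'})
        self.assertStatusOk(resp)
        folder = resp.json

        # uploadFile drives the /file and /file/chunk endpoints for us.
        file = self.uploadFile('hello.txt', 'hello world', self.user, folder)
        self.assertEqual(file['size'], len('hello world'))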
# -*- coding: utf-8 -*- ''' Module for listing programs that automatically run on startup (very alpha...not tested on anything but my Win 7x64) ''' # Import python libs import os # Import salt libs import salt.utils # Define a function alias in order not to shadow built-in's __func_alias__ = { 'list_': 'list' } # Define the module's virtual name __virtualname__ = 'autoruns' def __virtual__(): ''' Only works on Windows systems ''' if salt.utils.is_windows(): return __virtualname__ return False def list_(): ''' Get a list of automatically running programs CLI Example: .. code-block:: bash salt '*' autoruns.list ''' autoruns = {} # Find autoruns in registry keys = ['HKLM\\Software\\Microsoft\\Windows\\CurrentVersion\\Run', 'HKLM\\Software\\Microsoft\\Windows\\CurrentVersion\\Run /reg:64', 'HKCU\\Software\\Microsoft\\Windows\\CurrentVersion\\Run' ] winver = __grains__['osfullname'] for key in keys: autoruns[key] = [] cmd = 'reg query ' + key print cmd for line in __salt__['cmd.run'](cmd).splitlines(): if line and line[0:4] != "HKEY" and line[0:5] != "ERROR": # Remove junk lines autoruns[key].append(line) # Find autoruns in user's startup folder if '7' in winver: user_dir = 'C:\\Users\\' startup_dir = '\\AppData\\Roaming\\Microsoft\\Windows\\Start Menu\\Programs\\Startup' else: user_dir = 'C:\\Documents and Settings\\' startup_dir = '\\Start Menu\\Programs\\Startup' for user in os.listdir(user_dir): try: full_dir = user_dir + user + startup_dir files = os.listdir(full_dir) autoruns[full_dir] = [] for afile in files: autoruns[full_dir].append(afile) except Exception: pass return autoruns
victorywang80/Maintenance
saltstack/src/salt/modules/win_autoruns.py
Python
apache-2.0
1,932
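Because list_ is exposed to the CLI as autoruns.list, the module returns a dict keyed by registry key or startup directory, each value being a list of raw entries. A hedged sketch of consuming that result from a Salt master; the minion id 'winbox' is a placeholder.

# Sketch only: 'winbox' is a made-up Windows minion id.
import salt.client

local = salt.client.LocalClient()
result = local.cmd('winbox', 'autoruns.list')

# The per-minion return mirrors the autoruns dict built in list_().
for location, entries in result.get('winbox', {}).items():
    print('{0}: {1} entries'.format(location, len(entries)))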
# Copyright 2010 Jacob Kaplan-Moss # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Flavor interface. """ from oslo_utils import strutils from six.moves.urllib import parse from novaclient import base from novaclient import exceptions from novaclient.openstack.common.gettextutils import _ from novaclient import utils class Flavor(base.Resource): """ A flavor is an available hardware configuration for a server. """ HUMAN_ID = True def __repr__(self): return "<Flavor: %s>" % self.name @property def ephemeral(self): """ Provide a user-friendly accessor to OS-FLV-EXT-DATA:ephemeral """ return self._info.get("OS-FLV-EXT-DATA:ephemeral", 'N/A') @property def is_public(self): """ Provide a user-friendly accessor to os-flavor-access:is_public """ return self._info.get("os-flavor-access:is_public", 'N/A') def get_keys(self): """ Get extra specs from a flavor. :param flavor: The :class:`Flavor` to get extra specs from """ _resp, body = self.manager.api.client.get( "/flavors/%s/os-extra_specs" % base.getid(self)) return body["extra_specs"] def set_keys(self, metadata): """ Set extra specs on a flavor. :param flavor: The :class:`Flavor` to set extra spec on :param metadata: A dict of key/value pairs to be set """ utils.validate_flavor_metadata_keys(metadata.keys()) body = {'extra_specs': metadata} return self.manager._create( "/flavors/%s/os-extra_specs" % base.getid(self), body, "extra_specs", return_raw=True) def unset_keys(self, keys): """ Unset extra specs on a flavor. :param flavor: The :class:`Flavor` to unset extra spec on :param keys: A list of keys to be unset """ for k in keys: self.manager._delete( "/flavors/%s/os-extra_specs/%s" % (base.getid(self), k)) def delete(self): """ Delete this flavor. """ self.manager.delete(self) class FlavorManager(base.ManagerWithFind): """ Manage :class:`Flavor` resources. """ resource_class = Flavor is_alphanum_id_allowed = True def list(self, detailed=True, is_public=True): """ Get a list of all flavors. :rtype: list of :class:`Flavor`. """ qparams = {} # is_public is ternary - None means give all flavors. # By default Nova assumes True and gives admins public flavors # and flavors from their own projects only. if not is_public: qparams['is_public'] = is_public query_string = "?%s" % parse.urlencode(qparams) if qparams else "" detail = "" if detailed: detail = "/detail" return self._list("/flavors%s%s" % (detail, query_string), "flavors") def get(self, flavor): """ Get a specific flavor. :param flavor: The ID of the :class:`Flavor` to get. :rtype: :class:`Flavor` """ return self._get("/flavors/%s" % base.getid(flavor), "flavor") def delete(self, flavor): """ Delete a specific flavor. :param flavor: The ID of the :class:`Flavor` to get. 
""" self._delete("/flavors/%s" % base.getid(flavor)) def _build_body(self, name, ram, vcpus, disk, id, swap, ephemeral, rxtx_factor, is_public): return { "flavor": { "name": name, "ram": ram, "vcpus": vcpus, "disk": disk, "id": id, "swap": swap, "OS-FLV-EXT-DATA:ephemeral": ephemeral, "rxtx_factor": rxtx_factor, "os-flavor-access:is_public": is_public, } } def create(self, name, ram, vcpus, disk, flavorid="auto", ephemeral=0, swap=0, rxtx_factor=1.0, is_public=True): """ Create a flavor. :param name: Descriptive name of the flavor :param ram: Memory in MB for the flavor :param vcpus: Number of VCPUs for the flavor :param disk: Size of local disk in GB :param flavorid: ID for the flavor (optional). You can use the reserved value ``"auto"`` to have Nova generate a UUID for the flavor in cases where you cannot simply pass ``None``. :param swap: Swap space in MB :param rxtx_factor: RX/TX factor :rtype: :class:`Flavor` """ try: ram = int(ram) except (TypeError, ValueError): raise exceptions.CommandError(_("Ram must be an integer.")) try: vcpus = int(vcpus) except (TypeError, ValueError): raise exceptions.CommandError(_("VCPUs must be an integer.")) try: disk = int(disk) except (TypeError, ValueError): raise exceptions.CommandError(_("Disk must be an integer.")) if flavorid == "auto": flavorid = None try: swap = int(swap) except (TypeError, ValueError): raise exceptions.CommandError(_("Swap must be an integer.")) try: ephemeral = int(ephemeral) except (TypeError, ValueError): raise exceptions.CommandError(_("Ephemeral must be an integer.")) try: rxtx_factor = float(rxtx_factor) except (TypeError, ValueError): raise exceptions.CommandError(_("rxtx_factor must be a float.")) try: is_public = strutils.bool_from_string(is_public, True) except Exception: raise exceptions.CommandError(_("is_public must be a boolean.")) body = self._build_body(name, ram, vcpus, disk, flavorid, swap, ephemeral, rxtx_factor, is_public) return self._create("/flavors", body, "flavor")
akash1808/python-novaclient
novaclient/v1_1/flavors.py
Python
apache-2.0
6,740
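A hedged usage sketch of FlavorManager through the v1.1 client. The credentials, endpoint, and extra-spec key are placeholders, and the client constructor signature is assumed from this code's era rather than stated in this file.

from novaclient.v1_1 import client

# Placeholder credentials and endpoint.
nova = client.Client('admin', 'secret', 'demo', 'http://keystone:5000/v2.0')

# create() validates ram/vcpus/disk as integers and builds the flavor body.
flavor = nova.flavors.create(name='m1.custom', ram=2048, vcpus=2, disk=20,
                             flavorid='auto', is_public=True)

# Extra specs go through /flavors/<id>/os-extra_specs.
flavor.set_keys({'hw:cpu_policy': 'dedicated'})
print(flavor.get_keys())

# is_public=None would list all flavors; True is the default shown above.
for f in nova.flavors.list(detailed=True):
    print('{0} {1}'.format(f.name, f.ram))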
from flask import Flask app = Flask(__name__) @app.get("/") def index(): return "hello, world" if __name__ == "__main__": # Dev only: run "python main.py" and open http://localhost:8080 app.run(host="localhost", port=8080, debug=True)
GoogleCloudPlatform/buildpack-samples
sample-python/main.py
Python
apache-2.0
252
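For completeness, hitting the dev server from another terminal; requests is an extra dependency, not part of the sample itself.

import requests

resp = requests.get('http://localhost:8080/')
print(resp.status_code, resp.text)  # expected: 200 hello, world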
import json import random from datetime import datetime, timedelta import hashlib from django.http import HttpResponse, JsonResponse from django.shortcuts import render_to_response from django.template import loader from django.utils import encoding from core.grafana.GrafanaES import Grafana from core.grafana.QueryES import Query from core.grafana.data_tranformation import stacked_hist, pledges_merging from core.libs.cache import setCacheEntry, getCacheEntry from core.oauth.utils import login_customrequired from core.views import initRequest, DateTimeEncoder, DateEncoder colours_codes = { "0": "#AE3C51", "1": "#6298FF", "2": "#D97529", "3": "#009246", "AOD": "#006019", "Analysis": "#FF00FF", "CA": "#FF1F1F", "CAF processing": "#CAD141", "CERN": "#AE3C51", "Custodial": "#FF0000", "DE": "#000000", "DESD": "#4189FF", "DPD": "#FEF100", "Data Processing": "#FFFF00", "Data Processing (XP)": "#008800", "Default": "#808080", "ES": "#EDBF00", "ESD": "#001640", "Extra Production": "#FF0000", "FR": "#0055A5", "Group Analysis": "#808080", "Group Production": "#008800", "HITS": "#FF6666", "IT": "#009246", "MC Event Generation": "#356C20", "MC Production": "#0000FF", "MC Reconstruction": "#00006B", "MC Reconstruction (XP)": "#D97529", "MC Simulation": "#0000FF", "MC Simulation (XP)": "#AE3C51", "MC Simulation Fast": "#0099CC", "MC Simulation Fast (XP)": "#0099CC", "MC Simulation Full": "#00CCCC", "MC Simulation Full (XP)": "#00CCCC", "ND": "#6298FF", "NL": "#D97529", "Other": "#66008D", "Others": "#00FFFF", "Others (XP)": "#009246", "Primary": "#FFA500", "RAW": "#FF0000", "RU": "#66008D", "Rest": "#625D5D", "Secondary": "#00FFFF", "T0 processing": "#DB9900", "TW": "#89000F", "Testing": "#00FF00", "ToBeDeleted": "#FFFF00", "UK": "#356C20", "UNKNOWN": "#FFA500", "US": "#00006B", "User Analysis": "#FF00FF", "Validation": "#000000", "analysis": "#FF0000", "bstream": "#0055A5", "cancelled": "#FF9933", "closed": "#808080", "evgen": "#D97529", "evgentx": "#AE3C51", "failed": "#bf1b00", "filter": "#DB9900", "finished": "#248F24", "ganga": "#1433CC", "gangarobot": "#006666", "gangarobot-64": "#009999", "gangarobot-filestager": "#00CCCC", "gangarobot-new": "#00FFFF", "gangarobot-nightly": "#99FF00", "gangarobot-pft": "#99CC33", "gangarobot-pft-trial": "#999966", "gangarobot-rctest": "#996699", "gangarobot-root": "#CC0000", "gangarobot-squid": "#CC0066", "gangarobotnew": "#CC3399", "hammercloud": "#A5D3CA", "merge": "#FFA600", "merging": "#47D147", "non-panda_analysis": "#CCCCCC", "pandamover": "#FFE920", "pile": "#FF00FF", "prod_test": "#B4D1B6", "production": "#CAD141", "ptest": "#89C7FF", "rc_test": "#A5FF8A", "reco": "#00006B", "reprocessing": "#008800", "running": "#47D147", "simul": "#0000FF", "software": "#FFCFA4s", "t0_caf": "#CAD141", "t0_processing": "#FFA600", "test": "#00FF00", "transfering": "#47D147", "txtgen": "#29AFD6", "validation": "#000000" } @login_customrequired def index(request): """The main page containing drop-down menus to select group by options etc. 
Data delivers asynchroniously by request to grafana_api view""" valid, response = initRequest(request) # all possible group by options and plots to build group_by = {'dst_federation': 'Federation'} split_series = {'adcactivity': 'ADC Activity', 'jobstatus': 'Job status'} plots = {'cpuconsumption': 'CPU Consumption', 'wallclockhepspec06': 'WallClock HEPSPEC06'} data = { 'group_by': group_by, 'split_series': split_series, 'plots': plots, } response = render_to_response('grafana-api-plots.html', data, content_type='text/html') return response def chartjs(request): """The main page containing drop-down menus to select group by options etc. Data delivers asynchroniously by request to grafana_api view""" valid, response = initRequest(request) # all possible group by options and plots to build group_by = {'dst_federation': 'Federation'} split_series = {'adcactivity': 'ADC Activity', 'jobstatus': 'Job status'} plots = {'cpuconsumption': 'CPU Consumption', 'wallclockhepspec06': 'WallClock HEPSPEC06'} data = { 'group_by': group_by, 'split_series': split_series, 'plots': plots, } response = render_to_response('grafana-chartjs-plots.html', data, content_type='text/html') return response def grafana_api(request): valid, response = initRequest(request) group_by = None split_series = None if 'groupby' in request.session['requestParams']: groupby_params = request.session['requestParams']['groupby'].split(',') if 'time' in groupby_params: pass else: group_by = groupby_params[0] if len(groupby_params) > 1: split_series = groupby_params[1] result = [] q = Query() q = q.request_to_query(request) last_pledges = Query(agg_func='last', table='pledges_last', field='value', grouping='real_federation') # / api / datasources / proxy / 9267 / query?db = monit_production_rebus # sum_pledges = Query(agg_func='sum', table='pledges', field='atlas', grouping='time(1m),real_federation') try: if q.table == 'pledges_last' or q.table == 'pledges_sum' or q.table == 'pledges_hs06sec': result = Grafana(database='monit_production_rebus').get_data(q) else: result = Grafana().get_data(q) # last_pledges = Grafana().get_data(last_pledges) if 'type' in request.session['requestParams'] and request.session['requestParams']['type'] == 'd3js': data = stacked_hist(result['results'][0]['series'], group_by, split_series) return JsonResponse(data) if 'type' in request.session['requestParams'] and request.session['requestParams']['type'] == 'chartjs': last_pledges = Grafana(database='monit_production_rebus').get_data(last_pledges) data = {} data = stacked_hist(result['results'][0]['series'], group_by, split_series) last_pledges = stacked_hist(last_pledges['results'][0]['series'], 'real_federation') lables = list(data.keys()) pledges_keys = list(last_pledges.keys()) datasets = [] elements = {} for object in data: for element in data[object]: elements.setdefault(element, []).append(data[object][element]) if object in pledges_keys: elements.setdefault('pledges', []).append(last_pledges[object]['all'] * 7 * 24 * 60 * 60) else: elements.setdefault('pledges', []).append(0) background = '' for key in elements: if key in colours_codes: background = colours_codes[key] else: r = lambda: random.randint(0, 255) background = '#%02X%02X%02X' % (r(), r(), r()) if key != 'pledges': datasets.append( {'label': key, 'stack': 'Stack 0', 'data': elements[key], 'backgroundColor': background}) else: datasets.append( {'label': key, 'stack': 'Stack 1', 'data': elements[key], 'backgroundColor': '#FF0000'}) data = {'labels': lables, 'datasets': datasets} return 
HttpResponse(json.dumps(data, cls=DateTimeEncoder), content_type='application/json') if 'export' in request.session['requestParams']: if request.session['requestParams']['export'] == 'csv': data = stacked_hist(result['results'][0]['series'], group_by, split_series) import csv import copy response = HttpResponse(content_type='text/csv') column_titles = copy.deepcopy(groupby_params) column_titles.append('value') response['Content-Disposition'] = 'attachment; filename={0}.csv'.format('_'.join(groupby_params)) writer = csv.writer(response, delimiter=";") writer.writerow(column_titles) csvList = [] if len(groupby_params) > 1: csvList = grab_children(data) else: for key, value in data.items(): csvList.append([key, value['all']]) writer.writerows(csvList) return response except Exception as ex: result.append(ex) return JsonResponse(result) def grab_children(data, parent=None, child=None): if child is None: child = [] for key, value in data.items(): if isinstance(value, dict): grab_children(value, key, child) else: child.append([parent, key, value]) return child #@login_customrequired def pledges(request): valid, response = initRequest(request) if 'date_from' in request.session['requestParams'] and 'date_to' in request.session['requestParams']: starttime = request.session['requestParams']['date_from'] endtime = request.session['requestParams']['date_to'] date_to = datetime.strptime(endtime, "%d.%m.%Y %H:%M:%S") date_from = datetime.strptime(starttime, "%d.%m.%Y %H:%M:%S") total_seconds = (date_to - date_from).total_seconds() total_days = (date_to - date_from).days date_list = [] if (date_to - date_from).days > 30: n = 20 while True: start_date = date_from end_date = (start_date + timedelta(days=n)) end_date = end_date - timedelta(minutes=1) if end_date >= date_to: end_date = date_to - timedelta(minutes=1) date_list.append([start_date.strftime("%d.%m.%Y %H:%M:%S"), end_date.strftime("%d.%m.%Y %H:%M:%S")]) break else: date_list.append([start_date.strftime("%d.%m.%Y %H:%M:%S"), end_date.strftime("%d.%m.%Y %H:%M:%S")]) date_from = end_date + timedelta(minutes=1) else: newendtime = (date_to - timedelta(minutes=1)).strftime("%d.%m.%Y %H:%M:%S") date_list.append([starttime, newendtime]) else: timebefore = timedelta(days=7) endtime = (datetime.utcnow()).replace(minute=00, hour=00, second=00, microsecond=000) starttime = (endtime - timebefore).replace(minute=00, hour=00, second=00, microsecond=000) total_seconds = (starttime - endtime).total_seconds() total_days = (endtime - starttime).days endtime = endtime - timedelta(minutes=1) endtime = endtime.strftime("%d.%m.%Y %H:%M:%S") starttime = starttime.strftime("%d.%m.%Y %H:%M:%S") if 'type' in request.session['requestParams'] and request.session['requestParams'] \ ['type'] == 'federation': key = hashlib.md5(encoding.force_bytes("{0}_{1}_federation".format(starttime, endtime))) key = key.hexdigest() federations = getCacheEntry(request, key, isData=True) if federations is not None: federations = json.loads(federations) return HttpResponse(json.dumps(federations), content_type='text/json') pledges_dict = {} pledges_list = [] federations_info = {} if len(date_list) > 1: for date in date_list: hs06sec = Query(agg_func='sum', table='completed', field=['sum_hs06sec','sum_count', 'sum_cpuconsumptiontime','sum_walltime'], grouping='time,dst_federation,dst_tier,dst_experiment_site,computingsite', starttime=date[0], endtime=date[1]) hs06sec = Grafana().get_data(hs06sec) pledges_sum = Query(agg_func='mean', table='pledges_hs06sec', field='value', 
grouping='time,real_federation,tier', starttime=date[0], endtime=date[1]) pledges_sum = Grafana(database='monit_production_rebus').get_data(pledges_sum) pledges_dict, federations_info = pledges_merging(hs06sec, pledges_sum, total_seconds, pledges_dict, federations_info) else: hs06sec = Query(agg_func='sum', table='completed', field=['sum_hs06sec','sum_count', 'sum_cpuconsumptiontime','sum_walltime'], grouping='time,dst_federation,dst_tier,dst_experiment_site,computingsite', starttime=date_list[0][0], endtime=date_list[0][1]) hs06sec = Grafana().get_data(hs06sec) pledges_sum = Query(agg_func='mean', table='pledges_hs06sec', field='value', grouping='time,real_federation,tier', starttime=date_list[0][0], endtime=date_list[0][1]) pledges_sum = Grafana(database='monit_production_rebus').get_data(pledges_sum) pledges_dict, federations_info = pledges_merging(hs06sec, pledges_sum, total_seconds, pledges_dict, federations_info) for pledges in pledges_dict: if pledges == 'NULL': continue else: # pledges_list.append( # {type: pledges, "hs06sec": pledges_dict[pledges]['hs06sec'], # 'pledges': pledges_dict[pledges]['pledges']}) pledges_list.append({"dst_federation": pledges, "hs06sec": int(round(float(pledges_dict[pledges]['hs06sec']) / 86400, 2)), 'pledges': int(round(float(pledges_dict[pledges]['pledges']) / 86400, 2)), 'tier': pledges_dict[pledges]['tier'], 'federation_info': federations_info[pledges] if pledges in federations_info else None} ) setCacheEntry(request, key, json.dumps(pledges_list), 60 * 60 * 24 * 30, isData=True) return HttpResponse(json.dumps(pledges_list), content_type='text/json') elif 'type' in request.session['requestParams'] and request.session['requestParams'] \ ['type'] == 'country': key = hashlib.md5(encoding.force_bytes("{0}_{1}_country".format(starttime, endtime))) key = key.hexdigest() countries = getCacheEntry(request, key, isData=True) if countries is not None: countries = json.loads(countries) return HttpResponse(json.dumps(countries), content_type='text/json') federations_info = {} pledges_dict = {} pledges_list = [] if len(date_list) > 1: for date in date_list: hs06sec = Query(agg_func='sum', table='completed', field='sum_hs06sec', grouping='time,dst_federation,dst_country', starttime=date[0], endtime=date[1]) hs06sec = Grafana().get_data(hs06sec) pledges_sum = Query(agg_func='mean', table='pledges_hs06sec', field='value', grouping='time,real_federation,country', starttime=date[0], endtime=date[1]) pledges_sum = Grafana(database='monit_production_rebus').get_data(pledges_sum) pledges_dict = pledges_merging(hs06sec, pledges_sum, total_seconds, pledges_dict, federations_info, type='dst_country') else: hs06sec = Query(agg_func='sum', table='completed', field='sum_hs06sec', grouping='time,dst_federation,dst_country', starttime=date_list[0][0], endtime=date_list[0][1]) hs06sec = Grafana().get_data(hs06sec) pledges_sum = Query(agg_func='mean', table='pledges_hs06sec', field='value', grouping='time,real_federation,country', starttime=date_list[0][0], endtime=date_list[0][1]) pledges_sum = Grafana(database='monit_production_rebus').get_data(pledges_sum) pledges_dict = pledges_merging(hs06sec, pledges_sum, total_seconds, federations_info, pledges_dict, type='dst_country') for pledges in pledges_dict: if pledges == 'NULL': continue else: pledges_list.append( {"dst_country": pledges, "hs06sec": int(round(float(pledges_dict[pledges]['hs06sec']) / 86400, 2)), 'pledges': int(round(float(pledges_dict[pledges]['pledges']) / 86400, 2))}) setCacheEntry(request, key, 
json.dumps(pledges_list), 60 * 60 * 24 * 30, isData=True) return HttpResponse(json.dumps(pledges_list), content_type='text/json') else: data = getCacheEntry(request, "pledges") # data = None if data is not None: data = json.loads(data) t = loader.get_template('grafana-pledges.html') return HttpResponse(t.render(data, request), content_type='text/html') else: key_fed = hashlib.md5(encoding.force_bytes("{0}_{1}_federation".format(starttime, endtime))) key_country = hashlib.md5(encoding.force_bytes("{0}_{1}_country".format(starttime, endtime))) key_fed = key_fed.hexdigest() key_country = key_country.hexdigest() setCacheEntry(request, key_fed, None, 60, isData=True) setCacheEntry(request, key_country, None, 60, isData=True) t = loader.get_template('grafana-pledges.html') data = { 'request': request, 'date_from': starttime, 'date_to': endtime, 'days': total_days, 'info': "This page was cached: {0}".format(str(datetime.utcnow())) } setCacheEntry(request, "pledges", json.dumps(data, cls=DateEncoder), 60 * 60 * 24 * 30) return HttpResponse(t.render({"date_from": starttime, "date_to": endtime, "days": total_days}, request), content_type='text/html') def grafana_api_es(request): valid, response = initRequest(request) group_by = None split_series = None if 'groupby' in request.session['requestParams']: groupby_params = request.session['requestParams']['groupby'].split(',') if 'time' in groupby_params: pass else: group_by = groupby_params[0] if len(groupby_params) > 1: split_series = groupby_params[1] else: split_series = group_by result = [] q = Query() q = q.request_to_query(request) result = Grafana().get_data(q) return JsonResponse(result)
PanDAWMS/panda-bigmon-core
core/grafana/views.py
Python
apache-2.0
19,477
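The grafana_api view above is driven entirely by query-string parameters (groupby, type, export). A hedged client-side sketch follows; the host and URL prefix are guesses, only the parameter names and the chartjs response shape come from the view code.

import requests

# Hypothetical base URL; only the parameters are taken from grafana_api().
BASE = 'http://bigpanda.example.com/grafana/api/'

resp = requests.get(BASE, params={
    # first key becomes group_by, second becomes split_series
    'groupby': 'dst_federation,jobstatus',
    # 'chartjs' returns {'labels': [...], 'datasets': [...]}
    'type': 'chartjs',
})
data = resp.json()
print(data['labels'][:5])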
# coding=utf-8 # Copyright 2020 The TF-Agents Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for trajectory.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import from tf_agents.drivers import dynamic_episode_driver from tf_agents.drivers import test_utils as drivers_test_utils from tf_agents.environments import tf_py_environment from tf_agents.trajectories import time_step as ts from tf_agents.trajectories import trajectory from tf_agents.utils import test_utils class TrajectoryTest(test_utils.TestCase): def testFirstTensors(self): observation = () action = () policy_info = () reward = tf.constant([1.0, 1.0, 2.0]) discount = tf.constant([1.0, 1.0, 1.0]) traj = trajectory.first(observation, action, policy_info, reward, discount) self.assertTrue(tf.is_tensor(traj.step_type)) traj_val = self.evaluate(traj) self.assertAllEqual(traj_val.step_type, [ts.StepType.FIRST] * 3) self.assertAllEqual(traj_val.next_step_type, [ts.StepType.MID] * 3) def testFirstArrays(self): observation = () action = () policy_info = () reward = np.array([1.0, 1.0, 2.0]) discount = np.array([1.0, 1.0, 1.0]) traj = trajectory.first(observation, action, policy_info, reward, discount) self.assertFalse(tf.is_tensor(traj.step_type)) self.assertAllEqual(traj.step_type, [ts.StepType.FIRST] * 3) self.assertAllEqual(traj.next_step_type, [ts.StepType.MID] * 3) def testMidTensors(self): observation = () action = () policy_info = () reward = tf.constant([1.0, 1.0, 2.0]) discount = tf.constant([1.0, 1.0, 1.0]) traj = trajectory.mid(observation, action, policy_info, reward, discount) self.assertTrue(tf.is_tensor(traj.step_type)) traj_val = self.evaluate(traj) self.assertAllEqual(traj_val.step_type, [ts.StepType.MID] * 3) self.assertAllEqual(traj_val.next_step_type, [ts.StepType.MID] * 3) def testMidArrays(self): observation = () action = () policy_info = () reward = np.array([1.0, 1.0, 2.0]) discount = np.array([1.0, 1.0, 1.0]) traj = trajectory.mid(observation, action, policy_info, reward, discount) self.assertFalse(tf.is_tensor(traj.step_type)) self.assertAllEqual(traj.step_type, [ts.StepType.MID] * 3) self.assertAllEqual(traj.next_step_type, [ts.StepType.MID] * 3) def testLastTensors(self): observation = () action = () policy_info = () reward = tf.constant([1.0, 1.0, 2.0]) discount = tf.constant([1.0, 1.0, 1.0]) traj = trajectory.last(observation, action, policy_info, reward, discount) self.assertTrue(tf.is_tensor(traj.step_type)) traj_val = self.evaluate(traj) self.assertAllEqual(traj_val.step_type, [ts.StepType.MID] * 3) self.assertAllEqual(traj_val.next_step_type, [ts.StepType.LAST] * 3) def testLastArrays(self): observation = () action = () policy_info = () reward = np.array([1.0, 1.0, 2.0]) discount = np.array([1.0, 1.0, 1.0]) traj = trajectory.last(observation, action, policy_info, reward, discount) self.assertFalse(tf.is_tensor(traj.step_type)) 
self.assertAllEqual(traj.step_type, [ts.StepType.MID] * 3) self.assertAllEqual(traj.next_step_type, [ts.StepType.LAST] * 3) def testSingleStepTensors(self): observation = () action = () policy_info = () reward = tf.constant([1.0, 1.0, 2.0]) discount = tf.constant([1.0, 1.0, 1.0]) traj = trajectory.single_step(observation, action, policy_info, reward, discount) self.assertTrue(tf.is_tensor(traj.step_type)) traj_val = self.evaluate(traj) self.assertAllEqual(traj_val.step_type, [ts.StepType.FIRST] * 3) self.assertAllEqual(traj_val.next_step_type, [ts.StepType.LAST] * 3) def testSingleStepArrays(self): observation = () action = () policy_info = () reward = np.array([1.0, 1.0, 2.0]) discount = np.array([1.0, 1.0, 1.0]) traj = trajectory.single_step(observation, action, policy_info, reward, discount) self.assertFalse(tf.is_tensor(traj.step_type)) self.assertAllEqual(traj.step_type, [ts.StepType.FIRST] * 3) self.assertAllEqual(traj.next_step_type, [ts.StepType.LAST] * 3) def testFromEpisodeTensor(self): observation = tf.random.uniform((4, 5)) action = () policy_info = () reward = tf.random.uniform((4,)) traj = trajectory.from_episode( observation, action, policy_info, reward, discount=None) self.assertTrue(tf.is_tensor(traj.step_type)) traj_val, obs_val, reward_val = self.evaluate((traj, observation, reward)) first = ts.StepType.FIRST mid = ts.StepType.MID last = ts.StepType.LAST self.assertAllEqual( traj_val.step_type, [first, mid, mid, mid]) self.assertAllEqual( traj_val.next_step_type, [mid, mid, mid, last]) self.assertAllClose(traj_val.observation, obs_val) self.assertAllEqual(traj_val.reward, reward_val) self.assertAllEqual(traj_val.discount, [1.0, 1.0, 1.0, 1.0]) def testFromEpisodeWithCompositeTensorOfTensors(self): observation = tf.SparseTensor( indices=tf.random.uniform((7, 2), maxval=9, dtype=tf.int64), values=tf.random.uniform((7,)), dense_shape=[4, 10]) # The 4 is important, it must match reward length. action = () policy_info = () reward = tf.random.uniform((4,)) traj = trajectory.from_episode( observation, action, policy_info, reward, discount=None) self.assertTrue(tf.is_tensor(traj.step_type)) traj_val, obs_val, reward_val = self.evaluate((traj, observation, reward)) first = ts.StepType.FIRST mid = ts.StepType.MID last = ts.StepType.LAST self.assertAllEqual( traj_val.step_type, [first, mid, mid, mid]) self.assertAllEqual( traj_val.next_step_type, [mid, mid, mid, last]) self.assertAllClose(traj_val.observation, obs_val) self.assertAllEqual(traj_val.reward, reward_val) self.assertAllEqual(traj_val.discount, [1.0, 1.0, 1.0, 1.0]) def testFromEpisodeArray(self): observation = np.random.rand(4, 5) action = () policy_info = () reward = np.random.rand(4) traj = trajectory.from_episode( observation, action, policy_info, reward, discount=None) self.assertFalse(tf.is_tensor(traj.step_type)) first = ts.StepType.FIRST mid = ts.StepType.MID last = ts.StepType.LAST self.assertAllEqual( traj.step_type, [first, mid, mid, mid]) self.assertAllEqual( traj.next_step_type, [mid, mid, mid, last]) self.assertAllEqual(traj.observation, observation) self.assertAllEqual(traj.reward, reward) self.assertAllEqual(traj.discount, [1.0, 1.0, 1.0, 1.0]) def testToTransition(self): first = ts.StepType.FIRST mid = ts.StepType.MID last = ts.StepType.LAST # Define a batch size 1, 3-step trajectory. 
traj = trajectory.Trajectory( step_type=np.array([[first, mid, last]]), next_step_type=np.array([[mid, last, first]]), observation=np.array([[10.0, 20.0, 30.0]]), action=np.array([[11.0, 22.0, 33.0]]), # reward at step 2 is an invalid dummy reward. reward=np.array([[0.0, 1.0, 2.0]]), discount=np.array([[1.0, 1.0, 0.0]]), policy_info=np.array([[1.0, 2.0, 3.0]])) transition = trajectory.to_transition(traj) self.assertIsInstance(transition, trajectory.Transition) time_steps, policy_steps, next_time_steps = transition self.assertAllEqual(time_steps.step_type, np.array([[first, mid]])) self.assertAllEqual(time_steps.observation, np.array([[10.0, 20.0]])) # reward and discount are filled with zero (dummy) values self.assertAllEqual(time_steps.reward, np.array([[0.0, 0.0]])) self.assertAllEqual(time_steps.discount, np.array([[0.0, 0.0]])) self.assertAllEqual(next_time_steps.step_type, np.array([[mid, last]])) self.assertAllEqual(next_time_steps.observation, np.array([[20.0, 30.0]])) self.assertAllEqual(next_time_steps.reward, np.array([[0.0, 1.0]])) self.assertAllEqual(next_time_steps.discount, np.array([[1.0, 1.0]])) self.assertAllEqual(policy_steps.action, np.array([[11.0, 22.0]])) self.assertAllEqual(policy_steps.info, np.array([[1.0, 2.0]])) def testToNStepTransitionForNEquals1(self): first = ts.StepType.FIRST last = ts.StepType.LAST # Define a batch size 1, 2-step trajectory. traj = trajectory.Trajectory( step_type=np.array([[first, last]]), next_step_type=np.array([[last, first]]), observation=np.array([[10.0, 20.0]]), action=np.array([[11.0, 22.0]]), # reward & discount values at step 1 is an invalid dummy reward. reward=np.array([[-1.0, 0.0]]), discount=np.array([[0.9, 0.0]]), policy_info=np.array([[10.0, 20.0]])) transition = trajectory.to_n_step_transition(traj, gamma=0.5) self.assertIsInstance(transition, trajectory.Transition) time_steps, policy_steps, next_time_steps = transition self.assertAllEqual(time_steps.step_type, np.array([first])) self.assertAllEqual(time_steps.observation, np.array([10.0])) self.assertAllEqual(time_steps.reward, np.array([np.nan])) self.assertAllEqual(time_steps.discount, np.array([np.nan])) self.assertAllEqual(next_time_steps.step_type, np.array([last])) self.assertAllEqual(next_time_steps.observation, np.array([20.0])) # r0 self.assertAllEqual(next_time_steps.reward, np.array([-1.0])) # d0 self.assertAllEqual(next_time_steps.discount, np.array([0.9])) self.assertAllEqual(policy_steps.action, np.array([11.0])) self.assertAllEqual(policy_steps.info, np.array([10.0])) def testToNStepTransition(self): first = ts.StepType.FIRST mid = ts.StepType.MID last = ts.StepType.LAST gamma = 0.5 # Define a batch size 1, 4-step trajectory. traj = trajectory.Trajectory( step_type=np.array([[first, mid, mid, last]]), next_step_type=np.array([[mid, mid, last, first]]), observation=np.array([[10.0, 20.0, 30.0, 40.0]]), action=np.array([[11.0, 22.0, 33.0, 44.0]]), # reward & discount values at step 3 is an invalid dummy reward. 
reward=np.array([[-1.0, 1.0, 2.0, 0.0]]), discount=np.array([[0.9, 0.95, 1.0, 0.0]]), policy_info=np.array([[10.0, 20.0, 30.0, 40.0]])) transition = trajectory.to_n_step_transition(traj, gamma=gamma) self.assertIsInstance(transition, trajectory.Transition) time_steps, policy_steps, next_time_steps = transition self.assertAllEqual(time_steps.step_type, np.array([first])) self.assertAllEqual(time_steps.observation, np.array([10.0])) self.assertAllEqual(time_steps.reward, np.array([np.nan])) self.assertAllEqual(time_steps.discount, np.array([np.nan])) self.assertAllEqual(next_time_steps.step_type, np.array([last])) self.assertAllEqual(next_time_steps.observation, np.array([40.0])) # r0 + r1 * g * d0 + r2 * g * d0 * d1 # == -1.0 + 1.0*0.5*(0.9) + 2.0*(0.5**2)*(0.9*0.95) self.assertAllEqual( next_time_steps.reward, np.array([-1.0 + 1.0 * gamma * 0.9 + 2.0 * gamma**2 * 0.9 * 0.95])) # gamma**2 * (d0 * d1 * d2) self.assertAllEqual( next_time_steps.discount, np.array([gamma**2 * (0.9 * 0.95 * 1.0)])) self.assertAllEqual(policy_steps.action, np.array([11.0])) self.assertAllEqual(policy_steps.info, np.array([10.0])) def testToTransitionHandlesTrajectoryFromDriverCorrectly(self): env = tf_py_environment.TFPyEnvironment( drivers_test_utils.PyEnvironmentMock()) policy = drivers_test_utils.TFPolicyMock( env.time_step_spec(), env.action_spec()) replay_buffer = drivers_test_utils.make_replay_buffer(policy) driver = dynamic_episode_driver.DynamicEpisodeDriver( env, policy, num_episodes=3, observers=[replay_buffer.add_batch]) run_driver = driver.run() rb_gather_all = replay_buffer.gather_all() self.evaluate(tf.compat.v1.global_variables_initializer()) self.evaluate(run_driver) trajectories = self.evaluate(rb_gather_all) transitions = trajectory.to_transition(trajectories) self.assertIsInstance(transitions, trajectory.Transition) time_steps, policy_step, next_time_steps = transitions self.assertAllEqual(time_steps.observation, trajectories.observation[:, :-1]) self.assertAllEqual(time_steps.step_type, trajectories.step_type[:, :-1]) self.assertAllEqual(next_time_steps.observation, trajectories.observation[:, 1:]) self.assertAllEqual(next_time_steps.step_type, trajectories.step_type[:, 1:]) self.assertAllEqual(next_time_steps.reward, trajectories.reward[:, :-1]) self.assertAllEqual(next_time_steps.discount, trajectories.discount[:, :-1]) self.assertAllEqual(policy_step.action, trajectories.action[:, :-1]) self.assertAllEqual(policy_step.info, trajectories.policy_info[:, :-1]) def testToTransitionSpec(self): env = tf_py_environment.TFPyEnvironment( drivers_test_utils.PyEnvironmentMock()) policy = drivers_test_utils.TFPolicyMock( env.time_step_spec(), env.action_spec()) trajectory_spec = policy.trajectory_spec transition_spec = trajectory.to_transition_spec(trajectory_spec) self.assertIsInstance(transition_spec, trajectory.Transition) ts_spec, ps_spec, nts_spec = transition_spec self.assertAllEqual(ts_spec, env.time_step_spec()) self.assertAllEqual(ps_spec.action, env.action_spec()) self.assertAllEqual(nts_spec, env.time_step_spec()) if __name__ == '__main__': tf.test.main()
tensorflow/agents
tf_agents/trajectories/trajectory_test.py
Python
apache-2.0
14,383
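The tests above double as documentation for the trajectory helpers; a condensed sketch of the two most common calls, using the same shapes as testFirstArrays and testToTransition.

import numpy as np
from tf_agents.trajectories import time_step as ts
from tf_agents.trajectories import trajectory

# Boundary helper: marks step_type FIRST and next_step_type MID.
traj = trajectory.first(observation=(), action=(), policy_info=(),
                        reward=np.array([1.0, 1.0, 2.0]),
                        discount=np.array([1.0, 1.0, 1.0]))
assert (traj.step_type == ts.StepType.FIRST).all()

# Batched 3-step trajectory -> (time_steps, policy_steps, next_time_steps).
batched = trajectory.Trajectory(
    step_type=np.array([[ts.StepType.FIRST, ts.StepType.MID, ts.StepType.LAST]]),
    next_step_type=np.array([[ts.StepType.MID, ts.StepType.LAST, ts.StepType.FIRST]]),
    observation=np.array([[10.0, 20.0, 30.0]]),
    action=np.array([[11.0, 22.0, 33.0]]),
    policy_info=np.array([[1.0, 2.0, 3.0]]),
    reward=np.array([[0.0, 1.0, 2.0]]),
    discount=np.array([[1.0, 1.0, 0.0]]))
time_steps, policy_steps, next_time_steps = trajectory.to_transition(batched)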
class yamMessageHandlerBase(object):
    """
    Base class for message handlers for a :class:`ZMQProcess`.

    Inheriting classes only need to implement a handler function for each
    message type. They must assign the protobuf Message class to self.cls
    and create a mapping of message types to handler functions in
    self.funcMap.
    """
    def __init__(self, rep_stream, stop):
        self._rep_stream = rep_stream
        self._stop = stop
        self.cls = None
        self.funcMap = {}
        self.subMessageHandler = False

    def __call__(self, msg):
        """
        Called when a message is received by the stream this handler is
        registered at. *msg* is a list as returned by
        :meth:`zmq.core.socket.Socket.recv_multipart`.
        """
        if self.subMessageHandler:
            yamMessage = msg
        else:
            yamMessage = self.cls()
            fullMsg = "".join(msg)
            yamMessage.ParseFromString(fullMsg)
        handlerFunc = self.funcMap[yamMessage.type]
        responseMessage = handlerFunc(yamMessage)
        return responseMessage
dpquigl/YAM
src/pyyam/yam/handlers/yamMessageHandlerBase.py
Python
apache-2.0
1,148
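As the docstring says, a concrete handler only needs to set self.cls and self.funcMap. A sketch follows; ExampleMessage with its PING/PONG types is a made-up protobuf definition, and the import path of the base class is assumed from this file's location.

# ExampleMessage and example_pb2 are hypothetical protobuf artifacts.
from example_pb2 import ExampleMessage
from yam.handlers.yamMessageHandlerBase import yamMessageHandlerBase


class ExampleMessageHandler(yamMessageHandlerBase):
    def __init__(self, rep_stream, stop):
        super(ExampleMessageHandler, self).__init__(rep_stream, stop)
        # The base class parses incoming bytes into self.cls and dispatches
        # on the message's type field via self.funcMap.
        self.cls = ExampleMessage
        self.funcMap = {
            ExampleMessage.PING: self.handlePing,
        }

    def handlePing(self, message):
        reply = ExampleMessage()
        reply.type = ExampleMessage.PONG
        return reply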
"""Base configuration implementation.""" # Copyright (C) 2015 Brad Cowie, Christopher Lorier and Joe Stringer. # Copyright (C) 2015 Research and Education Advanced Network New Zealand Ltd. # Copyright (C) 2015--2018 The Contributors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. class InvalidConfigError(Exception): """This error is thrown when the config file is not valid.""" pass def test_config_condition(cond, msg): if cond: raise InvalidConfigError(msg) class Conf(object): """Base class for FAUCET configuration.""" defaults = {} # type: dict defaults_types = {} # type: dict dyn_finalized = False dyn_hash = None def __init__(self, _id, dp_id, conf=None): self._id = _id self.dp_id = dp_id if conf is None: conf = {} # TODO: handle conf as a sequence. # pylint: disable=fixme if isinstance(conf, dict): self.update(conf) self.set_defaults() self.check_config() def set_defaults(self): """Set default values and run any basic sanity checks.""" for key, value in list(self.defaults.items()): self._set_default(key, value) def _check_unknown_conf(self, conf): """Check that supplied conf dict doesn't specify keys not defined.""" sub_conf_names = set(conf.keys()) unknown_conf_names = sub_conf_names - set(self.defaults.keys()) test_config_condition(unknown_conf_names, '%s fields unknown in %s' % ( unknown_conf_names, self._id)) def _check_conf_types(self, conf, conf_types): """Check that conf value is of the correct type.""" for conf_key, conf_value in list(conf.items()): test_config_condition(conf_key not in conf_types, '%s field unknown in %s (known types %s)' % ( conf_key, self._id, conf_types)) if conf_value is not None: conf_type = conf_types[conf_key] test_config_condition(not isinstance(conf_value, conf_type), '%s value %s must be %s not %s' % ( conf_key, conf_value, conf_type, type(conf_value))) # pytype: disable=invalid-typevar @staticmethod def _set_unknown_conf(conf, conf_types): for conf_key, conf_type in list(conf_types.items()): if conf_key not in conf: if conf_type == list: conf[conf_key] = [] else: conf[conf_key] = None return conf def update(self, conf): """Parse supplied YAML config and sanity check.""" self.__dict__.update(conf) self._check_unknown_conf(conf) self._check_conf_types(conf, self.defaults_types) def check_config(self): """Check config at instantiation time for errors, typically via assert.""" return @staticmethod def _conf_keys(conf, dyn=False, subconf=True, ignore_keys=None): """Return a list of key/values of attributes with dyn/Conf attributes/filtered.""" conf_keys = [] for key, value in list(conf.__dict__.items()): if not dyn and key.startswith('dyn'): continue if not subconf and isinstance(value, Conf): continue if ignore_keys and key in ignore_keys: continue conf_keys.append((key, value)) return conf_keys def merge_dyn(self, other_conf): """Merge dynamic state from other conf object.""" for key, value in self._conf_keys(other_conf, dyn=True): self.__dict__[key] = value def _set_default(self, key, value): if key not in self.__dict__ or self.__dict__[key] is None: 
self.__dict__[key] = value def to_conf(self): """Return configuration as a dict.""" result = {} for key in self.defaults: if key != 'name': result[key] = self.__dict__[str(key)] return result def conf_hash(self, dyn=False, subconf=True, ignore_keys=None): """Return hash of keys configurably filtering attributes.""" return hash(frozenset(list(map( str, self._conf_keys(self, dyn=dyn, subconf=subconf, ignore_keys=ignore_keys))))) def __hash__(self): if self.dyn_hash is not None: return self.dyn_hash dyn_hash = self.conf_hash(dyn=False, subconf=True) if self.dyn_finalized: self.dyn_hash = dyn_hash return dyn_hash def finalize(self): """Configuration parsing marked complete.""" self.dyn_finalized = True def ignore_subconf(self, other, ignore_keys=None): """Return True if this config same as other, ignoring sub config.""" return (self.conf_hash(dyn=False, subconf=False, ignore_keys=ignore_keys) == other.conf_hash(dyn=False, subconf=False, ignore_keys=ignore_keys)) def __eq__(self, other): return self.__hash__() == other.__hash__() def __ne__(self, other): return not self.__eq__(other)
wackerly/faucet
faucet/conf.py
Python
apache-2.0
5,573
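A minimal sketch of how a concrete config class builds on Conf: declare defaults and defaults_types, then let update/set_defaults/check_config do the work. ExampleConf and its keys are invented for illustration, and the snippet assumes it lives in (or imports from) the same module as Conf and test_config_condition.

class ExampleConf(Conf):
    """Invented subclass to illustrate the Conf contract."""

    defaults = {
        'description': None,
        'priority': 100,
        'tags': [],
    }
    defaults_types = {
        'description': str,
        'priority': int,
        'tags': list,
    }

    def check_config(self):
        test_config_condition(self.priority < 0, 'priority must be >= 0')


conf = ExampleConf('example1', dp_id=1, conf={'description': 'demo'})
assert conf.priority == 100          # filled in by set_defaults()
# ExampleConf('bad', 1, {'unknown': 1}) would raise InvalidConfigError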
#!/usr/bin/env python # import libs import unittest import sys import os import random import uuid # import classes import analytics.utils.misc as misc import analytics.exceptions.exceptions as ex import analytics.service as service from analytics.datamanager.datamanager import DataManager class IntegrationTestSequence(unittest.TestCase): def setUp(self): filepath = os.path.dirname(os.path.realpath(__file__)) self.integrationpath = os.path.join(filepath, "datasets") self.datamanager = DataManager() self.datamanager.loadDatasets(self.integrationpath) self.datasets = self.datamanager._datasets def test_service_default(self): query = "" datasetId = random.choice(self.datasets.keys()) result = service.requestData(datasetId, query, self.datamanager) self.assertEqual(result["status"], "success") self.assertEqual(result["code"], 200) def test_service_wrongquery(self): query = uuid.uuid4().hex datasetId = random.choice(self.datasets.keys()) result = service.requestData(datasetId, query, self.datamanager) self.assertEqual(result["status"], "error") self.assertEqual(result["code"], 400) def test_service_simpleQuery(self): query = """select from ${pulses} where @1b4cf15c86ec31cd8838feab0f9856b1 |is| static and @1b4cf15c86ec31cd8838feab0f9856b1 = 2 and @b6db26b3972932b2862dac41cbb1493d = [up]""" datasetId = random.choice(self.datasets.keys()) result = service.requestData(datasetId, query, self.datamanager) self.assertEqual(result["status"], "success") self.assertEqual(result["code"], 200) def test_service_selectCluster(self): query = """select from ${clusters} where @id = [bc27b4dbbc0f34f9ae8e4b72f2d51b60]""" datasetId = random.choice(self.datasets.keys()) result = service.requestData(datasetId, query, self.datamanager) self.assertEqual(result["status"], "success") self.assertEqual(result["code"], 200) def service_warnings(self, warn=True): query = """select from ${pulses} where @f4b9ea9d3bf239f5a1c80578b0556a5e |is| dynamic""" datasetId = random.choice(self.datasets.keys()) result = service.requestData( datasetId, query, self.datamanager, iswarnings=warn ) # result should not fail and should generate warnings return result def test_service_warnings_on(self): # warnings are on by default result = self.service_warnings() self.assertEqual(result["status"], "success") self.assertEqual(result["code"], 200) self.assertEqual(len(result["messages"]), 1) def test_service_warnings_off(self): # warning is expected, but we turn it off result = self.service_warnings(False) self.assertEqual(result["status"], "success") self.assertEqual(result["code"], 200) self.assertEqual(len(result["messages"]), 0) # Load test suites def _suites(): return [ IntegrationTestSequence ] # Load tests def loadSuites(): # global test suite for this module gsuite = unittest.TestSuite() for suite in _suites(): gsuite.addTest(unittest.TestLoader().loadTestsFromTestCase(suite)) return gsuite if __name__ == '__main__': suite = loadSuites() print "" print "### Running tests ###" print "-" * 70 unittest.TextTestRunner(verbosity=2).run(suite)
sadikovi/pulsar
analytics/tests/integrationtest_service.py
Python
apache-2.0
3,628
import json  # needed for the create()/update() payloads below

import requests


class Client(object):
    def __init__(self, tornado_server):
        self.tornado_server = tornado_server

    @property
    def base_url(self):
        return "http://localhost:{}/api/v1".format(self.tornado_server.port)

    def request(self, method, url, **kwargs):
        headers = {}

        if method.lower() in ("put", "post"):
            headers["Content-type"] = "application/json"

        return requests.request(
            method, self.base_url + url,
            headers=headers, **kwargs
        )

    def get(self, url, **kwargs):
        return self.request("GET", url, **kwargs)

    def post(self, url, **kwargs):
        return self.request("POST", url, **kwargs)

    def put(self, url, **kwargs):
        return self.request("PUT", url, **kwargs)

    def delete(self, url, **kwargs):
        return self.request("DELETE", url, **kwargs)

    def create(self, url, **kwargs):
        return self.post(url, data=json.dumps(kwargs))

    def update(self, url, **kwargs):
        return self.put(url, data=json.dumps(kwargs))
dropbox/notouch
tests/api_tests/util.py
Python
apache-2.0
1,077
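A sketch of the Client wrapper in an API test; the /sites resource, the tornado_server fixture, and the import path are placeholders inferred from the file's location, not guaranteed by this file.

from tests.api_tests.util import Client


def test_sites_roundtrip(tornado_server):
    client = Client(tornado_server)

    # create() JSON-encodes its keyword arguments into the request body.
    resp = client.create('/sites', name='example', description='demo')
    assert resp.status_code in (200, 201)

    # Plain GETs go straight through requests with no extra headers.
    resp = client.get('/sites')
    assert resp.status_code == 200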
from django.conf.urls import patterns, url urlpatterns = patterns('accounts.views', url(r'^$', 'home_view', name='home'), url(r'^login/$', 'login_view', name='login'), url(r'^logout/$', 'logout_view', name='logout'), url(r'^register/$', 'register_view', name='register'), url(r'^password/$', 'password_view', name='password'), url(r'^profile/$', 'profile_view', name='profile'), url(r'^hello/$', 'hello_view', name='hello'), )
goncha/django-accounts
urls.py
Python
apache-2.0
449
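Because every route is named, templates and views can resolve them with reverse() instead of hard-coded paths. A small sketch in the same Django 1.x style as the patterns() syntax above; the after_login view is invented for illustration.

from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect


def after_login(request):
    # Send authenticated users to the named 'profile' route.
    return HttpResponseRedirect(reverse('profile'))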
# pylint: skip-file # flake8: noqa # pylint: disable=too-many-lines # noqa: E301,E302,E303,T001 class OpenShiftCLIError(Exception): '''Exception class for openshiftcli''' pass ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')] def locate_oc_binary(): ''' Find and return oc binary file ''' # https://github.com/openshift/openshift-ansible/issues/3410 # oc can be in /usr/local/bin in some cases, but that may not # be in $PATH due to ansible/sudo paths = os.environ.get("PATH", os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS oc_binary = 'oc' # Use shutil.which if it is available, otherwise fallback to a naive path search try: which_result = shutil.which(oc_binary, path=os.pathsep.join(paths)) if which_result is not None: oc_binary = which_result except AttributeError: for path in paths: if os.path.exists(os.path.join(path, oc_binary)): oc_binary = os.path.join(path, oc_binary) break return oc_binary # pylint: disable=too-few-public-methods class OpenShiftCLI(object): ''' Class to wrap the command line tools ''' def __init__(self, namespace, kubeconfig='/etc/origin/master/admin.kubeconfig', verbose=False, all_namespaces=False): ''' Constructor for OpenshiftCLI ''' self.namespace = namespace self.verbose = verbose self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig) self.all_namespaces = all_namespaces self.oc_binary = locate_oc_binary() # Pylint allows only 5 arguments to be passed. # pylint: disable=too-many-arguments def _replace_content(self, resource, rname, content, force=False, sep='.'): ''' replace the current object with the content ''' res = self._get(resource, rname) if not res['results']: return res fname = Utils.create_tmpfile(rname + '-') yed = Yedit(fname, res['results'][0], separator=sep) changes = [] for key, value in content.items(): changes.append(yed.put(key, value)) if any([change[0] for change in changes]): yed.write() atexit.register(Utils.cleanup, [fname]) return self._replace(fname, force) return {'returncode': 0, 'updated': False} def _replace(self, fname, force=False): '''replace the current object with oc replace''' # We are removing the 'resourceVersion' to handle # a race condition when modifying oc objects yed = Yedit(fname) results = yed.delete('metadata.resourceVersion') if results[0]: yed.write() cmd = ['replace', '-f', fname] if force: cmd.append('--force') return self.openshift_cmd(cmd) def _create_from_content(self, rname, content): '''create a temporary file and then call oc create on it''' fname = Utils.create_tmpfile(rname + '-') yed = Yedit(fname, content=content) yed.write() atexit.register(Utils.cleanup, [fname]) return self._create(fname) def _create(self, fname): '''call oc create on a filename''' return self.openshift_cmd(['create', '-f', fname]) def _delete(self, resource, name=None, selector=None): '''call oc delete on a resource''' cmd = ['delete', resource] if selector is not None: cmd.append('--selector={}'.format(selector)) elif name is not None: cmd.append(name) else: raise OpenShiftCLIError('Either name or selector is required when calling delete.') return self.openshift_cmd(cmd) def _process(self, template_name, create=False, params=None, template_data=None): # noqa: E501 '''process a template template_name: the name of the template to process create: whether to send to oc create after processing params: the parameters for the template template_data: the incoming template's data; instead of a file ''' cmd = ['process'] if template_data: cmd.extend(['-f', '-']) else: cmd.append(template_name) if params: 
param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()] cmd.append('-v') cmd.extend(param_str) results = self.openshift_cmd(cmd, output=True, input_data=template_data) if results['returncode'] != 0 or not create: return results fname = Utils.create_tmpfile(template_name + '-') yed = Yedit(fname, results['results']) yed.write() atexit.register(Utils.cleanup, [fname]) return self.openshift_cmd(['create', '-f', fname]) def _get(self, resource, name=None, selector=None): '''return a resource by name ''' cmd = ['get', resource] if selector is not None: cmd.append('--selector={}'.format(selector)) elif name is not None: cmd.append(name) cmd.extend(['-o', 'json']) rval = self.openshift_cmd(cmd, output=True) # Ensure results are retuned in an array if 'items' in rval: rval['results'] = rval['items'] elif not isinstance(rval['results'], list): rval['results'] = [rval['results']] return rval def _schedulable(self, node=None, selector=None, schedulable=True): ''' perform oadm manage-node scheduable ''' cmd = ['manage-node'] if node: cmd.extend(node) else: cmd.append('--selector={}'.format(selector)) cmd.append('--schedulable={}'.format(schedulable)) return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501 def _list_pods(self, node=None, selector=None, pod_selector=None): ''' perform oadm list pods node: the node in which to list pods selector: the label selector filter if provided pod_selector: the pod selector filter if provided ''' cmd = ['manage-node'] if node: cmd.extend(node) else: cmd.append('--selector={}'.format(selector)) if pod_selector: cmd.append('--pod-selector={}'.format(pod_selector)) cmd.extend(['--list-pods', '-o', 'json']) return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # pylint: disable=too-many-arguments def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False): ''' perform oadm manage-node evacuate ''' cmd = ['manage-node'] if node: cmd.extend(node) else: cmd.append('--selector={}'.format(selector)) if dry_run: cmd.append('--dry-run') if pod_selector: cmd.append('--pod-selector={}'.format(pod_selector)) if grace_period: cmd.append('--grace-period={}'.format(int(grace_period))) if force: cmd.append('--force') cmd.append('--evacuate') return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') def _version(self): ''' return the openshift version''' return self.openshift_cmd(['version'], output=True, output_type='raw') def _import_image(self, url=None, name=None, tag=None): ''' perform image import ''' cmd = ['import-image'] image = '{0}'.format(name) if tag: image += ':{0}'.format(tag) cmd.append(image) if url: cmd.append('--from={0}/{1}'.format(url, image)) cmd.append('-n{0}'.format(self.namespace)) cmd.append('--confirm') return self.openshift_cmd(cmd) def _run(self, cmds, input_data): ''' Actually executes the command. This makes mocking easier. 
''' curr_env = os.environ.copy() curr_env.update({'KUBECONFIG': self.kubeconfig}) proc = subprocess.Popen(cmds, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=curr_env) stdout, stderr = proc.communicate(input_data) return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8') # pylint: disable=too-many-arguments,too-many-branches def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None): '''Base command for oc ''' cmds = [self.oc_binary] if oadm: cmds.append('adm') cmds.extend(cmd) if self.all_namespaces: cmds.extend(['--all-namespaces']) elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501 cmds.extend(['-n', self.namespace]) if self.verbose: print(' '.join(cmds)) try: returncode, stdout, stderr = self._run(cmds, input_data) except OSError as ex: returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex) rval = {"returncode": returncode, "cmd": ' '.join(cmds)} if output_type == 'json': rval['results'] = {} if output and stdout: try: rval['results'] = json.loads(stdout) except ValueError as verr: if "No JSON object could be decoded" in verr.args: rval['err'] = verr.args elif output_type == 'raw': rval['results'] = stdout if output else '' if self.verbose: print("STDOUT: {0}".format(stdout)) print("STDERR: {0}".format(stderr)) if 'err' in rval or returncode != 0: rval.update({"stderr": stderr, "stdout": stdout}) return rval class Utils(object): ''' utilities for openshiftcli modules ''' @staticmethod def _write(filename, contents): ''' Actually write the file contents to disk. This helps with mocking. ''' with open(filename, 'w') as sfd: sfd.write(str(contents)) @staticmethod def create_tmp_file_from_contents(rname, data, ftype='yaml'): ''' create a file in tmp with name and contents''' tmp = Utils.create_tmpfile(prefix=rname) if ftype == 'yaml': # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage # pylint: disable=no-member if hasattr(yaml, 'RoundTripDumper'): Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper)) else: Utils._write(tmp, yaml.safe_dump(data, default_flow_style=False)) elif ftype == 'json': Utils._write(tmp, json.dumps(data)) else: Utils._write(tmp, data) # Register cleanup when module is done atexit.register(Utils.cleanup, [tmp]) return tmp @staticmethod def create_tmpfile_copy(inc_file): '''create a temporary copy of a file''' tmpfile = Utils.create_tmpfile('lib_openshift-') Utils._write(tmpfile, open(inc_file).read()) # Cleanup the tmpfile atexit.register(Utils.cleanup, [tmpfile]) return tmpfile @staticmethod def create_tmpfile(prefix='tmp'): ''' Generates and returns a temporary file name ''' with tempfile.NamedTemporaryFile(prefix=prefix, delete=False) as tmp: return tmp.name @staticmethod def create_tmp_files_from_contents(content, content_type=None): '''Turn an array of dict: filename, content into a files array''' if not isinstance(content, list): content = [content] files = [] for item in content: path = Utils.create_tmp_file_from_contents(item['path'] + '-', item['data'], ftype=content_type) files.append({'name': os.path.basename(item['path']), 'path': path}) return files @staticmethod def cleanup(files): '''Clean up on exit ''' for sfile in files: if os.path.exists(sfile): if os.path.isdir(sfile): shutil.rmtree(sfile) elif os.path.isfile(sfile): os.remove(sfile) @staticmethod def exists(results, _name): ''' Check to see if the results include the name ''' if not results: return False if 
Utils.find_result(results, _name): return True return False @staticmethod def find_result(results, _name): ''' Find the specified result by name''' rval = None for result in results: if 'metadata' in result and result['metadata']['name'] == _name: rval = result break return rval @staticmethod def get_resource_file(sfile, sfile_type='yaml'): ''' return the service file ''' contents = None with open(sfile) as sfd: contents = sfd.read() if sfile_type == 'yaml': # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage # pylint: disable=no-member if hasattr(yaml, 'RoundTripLoader'): contents = yaml.load(contents, yaml.RoundTripLoader) else: contents = yaml.safe_load(contents) elif sfile_type == 'json': contents = json.loads(contents) return contents @staticmethod def filter_versions(stdout): ''' filter the oc version output ''' version_dict = {} version_search = ['oc', 'openshift', 'kubernetes'] for line in stdout.strip().split('\n'): for term in version_search: if not line: continue if line.startswith(term): version_dict[term] = line.split()[-1] # horrible hack to get openshift version in Openshift 3.2 # By default "oc version in 3.2 does not return an "openshift" version if "openshift" not in version_dict: version_dict["openshift"] = version_dict["oc"] return version_dict @staticmethod def add_custom_versions(versions): ''' create custom versions strings ''' versions_dict = {} for tech, version in versions.items(): # clean up "-" from version if "-" in version: version = version.split("-")[0] if version.startswith('v'): versions_dict[tech + '_numeric'] = version[1:].split('+')[0] # "v3.3.0.33" is what we have, we want "3.3" versions_dict[tech + '_short'] = version[1:4] return versions_dict @staticmethod def openshift_installed(): ''' check if openshift is installed ''' import rpm transaction_set = rpm.TransactionSet() rpmquery = transaction_set.dbMatch("name", "atomic-openshift") return rpmquery.count() > 0 # Disabling too-many-branches. This is a yaml dictionary comparison function # pylint: disable=too-many-branches,too-many-return-statements,too-many-statements @staticmethod def check_def_equal(user_def, result_def, skip_keys=None, debug=False): ''' Given a user defined definition, compare it with the results given back by our query. 
''' # Currently these values are autogenerated and we do not need to check them skip = ['metadata', 'status'] if skip_keys: skip.extend(skip_keys) for key, value in result_def.items(): if key in skip: continue # Both are lists if isinstance(value, list): if key not in user_def: if debug: print('User data does not have key [%s]' % key) print('User data: %s' % user_def) return False if not isinstance(user_def[key], list): if debug: print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key])) return False if len(user_def[key]) != len(value): if debug: print("List lengths are not equal.") print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value))) print("user_def: %s" % user_def[key]) print("value: %s" % value) return False for values in zip(user_def[key], value): if isinstance(values[0], dict) and isinstance(values[1], dict): if debug: print('sending list - list') print(type(values[0])) print(type(values[1])) result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug) if not result: print('list compare returned false') return False elif value != user_def[key]: if debug: print('value should be identical') print(user_def[key]) print(value) return False # recurse on a dictionary elif isinstance(value, dict): if key not in user_def: if debug: print("user_def does not have key [%s]" % key) return False if not isinstance(user_def[key], dict): if debug: print("dict returned false: not instance of dict") return False # before passing ensure keys match api_values = set(value.keys()) - set(skip) user_values = set(user_def[key].keys()) - set(skip) if api_values != user_values: if debug: print("keys are not equal in dict") print(user_values) print(api_values) return False result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug) if not result: if debug: print("dict returned false") print(result) return False # Verify each key, value pair is the same else: if key not in user_def or value != user_def[key]: if debug: print("value not equal; user_def does not have key") print(key) print(value) if key in user_def: print(user_def[key]) return False if debug: print('returning true') return True class OpenShiftCLIConfig(object): '''Generic Config''' def __init__(self, rname, namespace, kubeconfig, options): self.kubeconfig = kubeconfig self.name = rname self.namespace = namespace self._options = options @property def config_options(self): ''' return config options ''' return self._options def to_option_list(self, ascommalist=''): '''return all options as a string if ascommalist is set to the name of a key, and the value of that key is a dict, format the dict as a list of comma delimited key=value pairs''' return self.stringify(ascommalist) def stringify(self, ascommalist=''): ''' return the options hash as cli params in a string if ascommalist is set to the name of a key, and the value of that key is a dict, format the dict as a list of comma delimited key=value pairs ''' rval = [] for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ and (data['value'] is not None or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: val = data['value'] rval.append('--{}={}'.format(key.replace('_', '-'), val)) return rval
akubicharm/openshift-ansible
roles/lib_openshift/src/lib/base.py
Python
apache-2.0
21,165
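For orientation, the OpenShiftCLIConfig.stringify helper in the file above renders an options hash into --key=value CLI flags. Below is a small, self-contained sketch of that behaviour; the sample options dict and the standalone function are invented for illustration and are not part of the repository.

# Illustrative sketch (not from the repo above): mirror of the stringify logic
# for a plain dict of options, showing how comma-list values are rendered.
options = {
    'replicas': {'value': 3, 'include': True},
    'labels': {'value': {'app': 'web', 'tier': 'front'}, 'include': True},
    'dry_run': {'value': None, 'include': True},   # skipped: value is None
}

def stringify(config_options, ascommalist=''):
    """Render include-able options as --key=value CLI parameters."""
    rval = []
    for key in sorted(config_options.keys()):
        data = config_options[key]
        if data['include'] and (data['value'] is not None or isinstance(data['value'], int)):
            if key == ascommalist:
                # dict values become comma-delimited key=value pairs
                val = ','.join('{}={}'.format(k, v) for k, v in sorted(data['value'].items()))
            else:
                val = data['value']
            rval.append('--{}={}'.format(key.replace('_', '-'), val))
    return rval

print(stringify(options, ascommalist='labels'))
# ['--labels=app=web,tier=front', '--replicas=3']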
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import eventlet

eventlet.monkey_patch()

import uuid

import mock
from oslo.config import cfg

from mistral.tests import base
from mistral.openstack.common import log as logging
from mistral.openstack.common import importutils
from mistral.engine import states
from mistral.db import api as db_api
from mistral.actions import std_actions
from mistral import engine
from mistral.engine import executor


# We need to make sure that all configuration properties are registered.
importutils.import_module("mistral.config")

LOG = logging.getLogger(__name__)

# Use the set_default method to set value otherwise in certain test cases
# the change in value is not permanent.
cfg.CONF.set_default('auth_enable', False, group='pecan')

WORKBOOK_NAME = 'my_workbook'
TASK_NAME = 'create-vms'

SAMPLE_WORKBOOK = {
    'id': str(uuid.uuid4()),
    'name': WORKBOOK_NAME,
    'description': 'my description',
    'definition': base.get_resource("test_rest.yaml"),
    'tags': [],
    'scope': 'public',
    'updated_at': None,
    'project_id': '123',
    'trust_id': '1234'
}

SAMPLE_EXECUTION = {
    'id': str(uuid.uuid4()),
    'workbook_name': WORKBOOK_NAME,
    'task': TASK_NAME,
    'state': states.RUNNING,
    'updated_at': None,
    'context': None
}

SAMPLE_TASK = {
    'name': TASK_NAME,
    'workbook_name': WORKBOOK_NAME,
    'action_spec': {
        'name': 'my-action',
        'class': 'std.http',
        'base-parameters': {
            'url': 'http://localhost:8989/v1/workbooks',
            'method': 'GET'
        },
        'namespace': 'MyRest'
    },
    'task_spec': {
        'action': 'MyRest.my-action',
        'name': TASK_NAME
    },
    'requires': {},
    'state': states.IDLE
}

SAMPLE_CONTEXT = {
    'user': 'admin',
    'tenant': 'mistral'
}


class TestExecutor(base.DbTestCase):
    def __init__(self, *args, **kwargs):
        super(TestExecutor, self).__init__(*args, **kwargs)
        self.transport = base.get_fake_transport()

    @mock.patch.object(
        executor.ExecutorClient, 'handle_task',
        mock.MagicMock(side_effect=base.EngineTestCase.mock_handle_task))
    @mock.patch.object(
        std_actions.HTTPAction, 'run', mock.MagicMock(return_value={}))
    @mock.patch.object(
        engine.EngineClient, 'convey_task_result',
        mock.MagicMock(side_effect=base.EngineTestCase.mock_task_result))
    def test_handle_task(self):
        # Create a new workbook.
        workbook = db_api.workbook_create(SAMPLE_WORKBOOK)
        self.assertIsInstance(workbook, dict)

        # Create a new execution.
        execution = db_api.execution_create(SAMPLE_EXECUTION['workbook_name'],
                                            SAMPLE_EXECUTION)
        self.assertIsInstance(execution, dict)

        # Create a new task.
        SAMPLE_TASK['execution_id'] = execution['id']
        task = db_api.task_create(SAMPLE_TASK['workbook_name'],
                                  SAMPLE_TASK['execution_id'],
                                  SAMPLE_TASK)
        self.assertIsInstance(task, dict)
        self.assertIn('id', task)

        # Send the task request to the Executor.
        ex_client = executor.ExecutorClient(self.transport)
        ex_client.handle_task(SAMPLE_CONTEXT, task=task)

        # Check task execution state.
        db_task = db_api.task_get(task['workbook_name'],
                                  task['execution_id'],
                                  task['id'])
        self.assertEqual(db_task['state'], states.SUCCESS)
dmitryilyin/mistral
mistral/tests/unit/engine/default/test_executor.py
Python
apache-2.0
4,084
import codecs
from pandas import read_csv
import argparse
import numpy as np
import codecs
import os

FIELD_NAMES = ["context_id", "target", "target_pos", "target_position", "gold_sense_ids",
               "predict_sense_ids", "golden_related", "predict_related", "context"]

FIELD_TYPES = {"context_id": np.dtype(str), "target": np.dtype(str), "target_pos": np.dtype(str),
               "target_position": np.dtype(str), "gold_sense_ids": np.dtype(str),
               "predict_sense_ids": np.dtype(str), "golden_related": np.dtype(str),
               "predict_related": np.dtype(str), "context": np.dtype(str)}


def cut_9_first(dataset_fpath, dataset_9_fpath):
    """ Cuts first 9 columns of the dataset file to make it openable with read_csv. """

    with codecs.open(dataset_fpath, "r", "utf-8") as in_dataset, codecs.open(dataset_9_fpath, "w", "utf-8") as out_dataset:
        for line in in_dataset:
            print >> out_dataset, "\t".join(line.split("\t")[:9])


def convert_dataset2semevalkey(dataset_fpath, output_fpath, no_header=False):
    with codecs.open(output_fpath, "w", encoding="utf-8") as output:
        if no_header:
            df = read_csv(dataset_fpath, sep='\t', encoding='utf8', header=None, names=FIELD_NAMES,
                          dtype=FIELD_TYPES, doublequote=False, quotechar='\0')
            df.target = df.target.astype(str)
        else:
            df = read_csv(dataset_fpath, encoding='utf-8', delimiter="\t", error_bad_lines=False,
                          doublequote=False, quotechar='\0')

        for i, row in df.iterrows():
            predicted_senses = " ".join(unicode(row.predict_sense_ids).split(","))
            print >> output, "%s %s %s" % (row.target + "." + row.target_pos, row.context_id, predicted_senses)

    print "Key file:", output_fpath


def main():
    parser = argparse.ArgumentParser(description='Convert lexical sample dataset to SemEval 2013 key format.')
    parser.add_argument('input', help='Path to a file with input file.')
    parser.add_argument('output', help='Output file.')
    parser.add_argument('--no_header', action='store_true', help='No headers. Default -- false.')
    args = parser.parse_args()

    print "Input: ", args.input
    print "Output: ", args.output
    print "No header:", args.no_header

    tmp_fpath = args.input + "-9-columns.csv"
    cut_9_first(args.input, tmp_fpath)
    convert_dataset2semevalkey(tmp_fpath, args.output, args.no_header)
    #os.remove(tmp_fpath)


if __name__ == '__main__':
    main()
mpelevina/context-eval
semeval_2013_13/dataset2key.py
Python
apache-2.0
2,427
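The converter above writes SemEval-2013 key lines of the form "<target>.<pos> <context_id> <sense ids>". A small sketch with invented row values shows the transformation for a single record; the row contents here are made up for illustration.

# Illustrative sketch (invented row values): what convert_dataset2semevalkey
# emits for one dataset row.
row = {
    "target": "bank",
    "target_pos": "n",
    "context_id": "bank.n.17",
    "predict_sense_ids": "2,5",
}

# comma-separated sense ids become space-separated in the key file
predicted_senses = " ".join(str(row["predict_sense_ids"]).split(","))
key_line = "%s %s %s" % (row["target"] + "." + row["target_pos"],
                         row["context_id"], predicted_senses)
print(key_line)
# bank.n bank.n.17 2 5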
from django.test import TestCase from django.utils.timezone import utc from datetime import datetime import json import logging import mock from dbkeeper.models import Organization, Team, Setting from piservice.models import PiStation, PiEvent import scoreboard.views as target def _mocked_utcNow(): return datetime(2001, 1, 1, 0, 0, 0).replace(tzinfo=utc) class ScoreboardStatusDockTestCase(TestCase): def _setUpStations(self): self.launchStation = PiStation.objects.create( station_type = PiStation.LAUNCH_STATION_TYPE, serial_num = self._serialNum ) self._serialNum += 1 self.dockStation = PiStation.objects.create( station_type = PiStation.DOCK_STATION_TYPE, serial_num = self._serialNum ) self._serialNum += 1 self.secureStation = PiStation.objects.create( station_type = PiStation.SECURE_STATION_TYPE, serial_num = self._serialNum ) self._serialNum += 1 self.returnStation = PiStation.objects.create( station_type = PiStation.RETURN_STATION_TYPE, serial_num = self._serialNum ) self._serialNum += 1 self.station = self.dockStation def _setUpTeams(self): org = Organization.objects.create( name = "School 1", type = Organization.SCHOOL_TYPE ) self.team1Name = "Team 1" self.team1 = Team.objects.create( name = self.team1Name, organization = org ) def _setUpEvents(self): # Some tests don't need these events. If not needed for a particular # test, use PiEvent.objects.all().delete() e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 0, 0).replace(tzinfo=utc), type = PiEvent.EVENT_STARTED_MSG_TYPE ) def _verify(self, expectedScore, expectedDuration_s): actual = target._recomputeTeamScore(self.team1Name) actualScore = actual['dock_score'] actualDuration_s = actual['dock_duration_s'] self.assertEqual(expectedScore, actualScore) self.assertEqual(expectedDuration_s, actualDuration_s) def setUp(self): PiEvent._meta.get_field("time").auto_now_add = False self._serialNum = 1 self._setUpStations() self._setUpTeams() self._setUpEvents() self._watchingTime_s = 45.0 Setting.objects.create(name = 'DNF_TIME_PENALTY_FACTOR', value = str(2.0)) Setting.objects.create(name = 'DOCK_SIM_PLAYBACK_TIME_S', value = str(self._watchingTime_s)) def test_recomputeDockScore_noEvents(self): PiEvent.objects.all().delete() expectedScore = 0 expectedDuration_s = 0 self._verify(expectedScore, expectedDuration_s) @mock.patch('scoreboard.views._utcNow', side_effect=_mocked_utcNow) def test_recomputeDockScore_noEventStartedEvent(self, side_effect=_mocked_utcNow): PiEvent.objects.all().delete() e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 50).replace(tzinfo=utc), type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station ) e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 56).replace(tzinfo=utc), type = PiEvent.SUBMIT_MSG_TYPE, team = self.team1, pi = self.station, status = PiEvent.SUCCESS_STATUS, data = json.dumps({"candidate_answer": 0, "fail_message": "OUTCOME_SUCCESS"}, separators=(',',':')) ) expectedScore = 0 expectedDuration_s = 0 self._verify(expectedScore, expectedDuration_s) @mock.patch('scoreboard.views._utcNow', side_effect=_mocked_utcNow) def test_recomputeDockScore_eventsBeforeEventStartedEvent(self, side_effect=_mocked_utcNow): PiEvent.objects.all().delete() e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 50).replace(tzinfo=utc), type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station ) e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 56).replace(tzinfo=utc), type = PiEvent.SUBMIT_MSG_TYPE, pi = 
self.station, team = self.team1, status = PiEvent.SUCCESS_STATUS, data = json.dumps({"candidate_answer": 0, "fail_message": "OUTCOME_SUCCESS"}, separators=(',',':')) ) e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 59).replace(tzinfo=utc), type = PiEvent.EVENT_STARTED_MSG_TYPE ) expectedScore = 0 expectedDuration_s = 0 self._verify(expectedScore, expectedDuration_s) @mock.patch('scoreboard.views._utcNow', side_effect=_mocked_utcNow) def test_recomputeDockScore_noStartChallengeEvents(self, side_effect=_mocked_utcNow): e = PiEvent.objects.create( time = datetime(2001, 1, 1, 0, 0, 0).replace(tzinfo=utc), type = PiEvent.REGISTER_MSG_TYPE, team = self.team1, pi = self.station, status = PiEvent.SUCCESS_STATUS ) expectedScore = 0 expectedDuration_s = 0 self._verify(expectedScore, expectedDuration_s) @mock.patch('scoreboard.views._utcNow', side_effect=_mocked_utcNow) def test_recomputeDockScore_onlyOneStartChallengeEventSameTimestampNoSuccessFail(self, mock_utcNow): e = PiEvent.objects.create( time = datetime(2001, 1, 1, 0, 0, 0).replace(tzinfo=utc), type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station ) expectedScore = 1 expectedDuration_s = 0 self._verify(expectedScore, expectedDuration_s) @mock.patch('scoreboard.views._utcNow', side_effect=_mocked_utcNow) def test_recomputeDockScore_onlyOneStartChallengeEventEarlierTimestampNoSuccessFail(self, mock_utcNow): e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 50).replace(tzinfo=utc), type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station ) expectedScore = 1 expectedDuration_s = 10 self._verify(expectedScore, expectedDuration_s) @mock.patch('scoreboard.views._utcNow', side_effect=_mocked_utcNow) def test_recomputeDockScore_onlyOneStartChallengeEventEarlierTimestampSuccessNoConclude(self, mock_utcNow): e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 50).replace(tzinfo=utc), type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station ) actualTime_s = 100 e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 56).replace(tzinfo=utc), type = PiEvent.SUBMIT_MSG_TYPE, team = self.team1, pi = self.station, status = PiEvent.SUCCESS_STATUS, data = json.dumps({"candidate_answer": actualTime_s, "fail_message": "OUTCOME_SUCCESS"}, separators=(',',':')) ) expectedScore = 9 expectedDuration_s = 6 - self._watchingTime_s + actualTime_s self._verify(expectedScore, expectedDuration_s) @mock.patch('scoreboard.views._utcNow', side_effect=_mocked_utcNow) def test_recomputeDockScore_onlyOneStartChallengeEventEarlierTimestampSuccessWithConclude(self, mock_utcNow): e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 50).replace(tzinfo=utc), type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station ) actualTime_s = 68 e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 56).replace(tzinfo=utc), type = PiEvent.SUBMIT_MSG_TYPE, team = self.team1, pi = self.station, status = PiEvent.SUCCESS_STATUS, data = json.dumps({"candidate_answer": actualTime_s, "fail_message": "OUTCOME_SUCCESS"}, separators=(',',':')) ) e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 58).replace(tzinfo=utc), type = PiEvent.EVENT_CONCLUDED_MSG_TYPE, team = self.team1, pi = self.station ) expectedScore = 9 expectedDuration_s = 6 - self._watchingTime_s + actualTime_s self._verify(expectedScore, expectedDuration_s) @mock.patch('scoreboard.views._utcNow', side_effect=_mocked_utcNow) def 
test_recomputeDockScore_onlyOneStartChallengeEventEarlierTimestampFailOutcomeDnf2xPenaltyNoConclude(self, mock_utcNow): dnfPenalty = 2.0 Setting.objects.all().delete() Setting.objects.create(name = 'DNF_TIME_PENALTY_FACTOR', value = str(dnfPenalty)) e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 50).replace(tzinfo=utc), type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station ) actualTime_s = 213 e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 56).replace(tzinfo=utc), type = PiEvent.SUBMIT_MSG_TYPE, team = self.team1, pi = self.station, status = PiEvent.FAIL_STATUS, data = json.dumps({"candidate_answer": actualTime_s, "fail_message": "OUTCOME_DNF"}, separators=(',',':')) ) expectedScore = 1 expectedDuration_s = 10 - self._watchingTime_s + (actualTime_s * dnfPenalty) self._verify(expectedScore, expectedDuration_s) @mock.patch('scoreboard.views._utcNow', side_effect=_mocked_utcNow) def test_recomputeDockScore_onlyOneStartChallengeEventEarlierTimestampFailOutcomeDnf3xPenaltyNoConclude(self, mock_utcNow): dnfPenalty = 3.0 Setting.objects.all().delete() Setting.objects.create(name = 'DNF_TIME_PENALTY_FACTOR', value = str(dnfPenalty)) e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 50).replace(tzinfo=utc), type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station ) actualTime_s = 47 e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 56).replace(tzinfo=utc), type = PiEvent.SUBMIT_MSG_TYPE, team = self.team1, pi = self.station, status = PiEvent.FAIL_STATUS, data = json.dumps({"candidate_answer": actualTime_s, "fail_message": "OUTCOME_DNF"}, separators=(',',':')) ) expectedScore = 1 expectedDuration_s = 10 - self._watchingTime_s + (actualTime_s * dnfPenalty) self._verify(expectedScore, expectedDuration_s) @mock.patch('scoreboard.views._utcNow', side_effect=_mocked_utcNow) def test_recomputeDockScore_onlyOneStartChallengeEventEarlierTimestampFailOutcomeDnf8xPenaltyNoConclude(self, mock_utcNow): dnfPenalty = 8.0 Setting.objects.all().delete() Setting.objects.create(name = 'DNF_TIME_PENALTY_FACTOR', value = str(dnfPenalty)) e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 50).replace(tzinfo=utc), type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station ) actualTime_s = 33 e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 56).replace(tzinfo=utc), type = PiEvent.SUBMIT_MSG_TYPE, team = self.team1, pi = self.station, status = PiEvent.FAIL_STATUS, data = json.dumps({"candidate_answer": actualTime_s, "fail_message": "OUTCOME_DNF"}, separators=(',',':')) ) expectedScore = 1 expectedDuration_s = 10 - self._watchingTime_s + (actualTime_s * dnfPenalty) self._verify(expectedScore, expectedDuration_s) @mock.patch('scoreboard.views._utcNow', side_effect=_mocked_utcNow) def test_recomputeDockScore_onlyOneStartChallengeEventEarlierTimestampFailNoConclude(self, mock_utcNow): e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 50).replace(tzinfo=utc), type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station ) actualTime_s = 1684 e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 56).replace(tzinfo=utc), type = PiEvent.SUBMIT_MSG_TYPE, team = self.team1, pi = self.station, status = PiEvent.FAIL_STATUS, data = json.dumps({"candidate_answer": actualTime_s, "fail_message": "OUTCOME_TOO_SLOW"}, separators=(',',':')) ) expectedScore = 1 expectedDuration_s = 10 - self._watchingTime_s + actualTime_s 
self._verify(expectedScore, expectedDuration_s) @mock.patch('scoreboard.views._utcNow', side_effect=_mocked_utcNow) def test_recomputeDockScore_onlyOneStartChallengeEventEarlierTimestampFailWithConclude(self, mock_utcNow): e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 50).replace(tzinfo=utc), type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station ) actualTime_s = 2000 e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 56).replace(tzinfo=utc), type = PiEvent.SUBMIT_MSG_TYPE, team = self.team1, pi = self.station, status = PiEvent.FAIL_STATUS, data = json.dumps({"candidate_answer": actualTime_s, "fail_message": "OUTCOME_TOO_SLOW"}, separators=(',',':')) ) e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 58).replace(tzinfo=utc), type = PiEvent.EVENT_CONCLUDED_MSG_TYPE, team = self.team1, pi = self.station ) expectedScore = 1 expectedDuration_s = 8 - self._watchingTime_s + actualTime_s self._verify(expectedScore, expectedDuration_s) @mock.patch('scoreboard.views._utcNow', side_effect=_mocked_utcNow) def test_recomputeDockScore_twoStartChallengeEventsEarlierTimestampSuccessNoSuccessFail(self, mock_utcNow): e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 50).replace(tzinfo=utc), type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station ) actualTime_s = 3000 e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 56).replace(tzinfo=utc), type = PiEvent.SUBMIT_MSG_TYPE, team = self.team1, pi = self.station, status = PiEvent.SUCCESS_STATUS, data = json.dumps({"candidate_answer": actualTime_s, "fail_message": "OUTCOME_SUCCESS"}, separators=(',',':')) ) e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 57).replace(tzinfo=utc), type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station ) expectedScore = 9 expectedDuration_s = 6 - self._watchingTime_s + actualTime_s self._verify(expectedScore, expectedDuration_s) @mock.patch('scoreboard.views._utcNow', side_effect=_mocked_utcNow) def test_recomputeDockScore_twoStartChallengeEventsEarlierTimestampSuccessFail(self, mock_utcNow): e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 50).replace(tzinfo=utc), type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station ) actualTime1_s = 319 e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 56).replace(tzinfo=utc), type = PiEvent.SUBMIT_MSG_TYPE, team = self.team1, pi = self.station, status = PiEvent.SUCCESS_STATUS, data = json.dumps({"candidate_answer": actualTime1_s, "fail_message": "OUTCOME_SUCCESS"}, separators=(',',':')) ) e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 57).replace(tzinfo=utc), type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station ) actualTime2_s = 4897 e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 58).replace(tzinfo=utc), type = PiEvent.SUBMIT_MSG_TYPE, team = self.team1, pi = self.station, status = PiEvent.FAIL_STATUS, data = json.dumps({"candidate_answer": actualTime2_s, "fail_message": "OUTCOME_TOO_SLOW"}, separators=(',',':')) ) expectedScore = 9 expectedDuration_s = 6 - self._watchingTime_s + actualTime1_s # ignore actualTime2_s self._verify(expectedScore, expectedDuration_s) @mock.patch('scoreboard.views._utcNow', side_effect=_mocked_utcNow) def test_recomputeDockScore_twoStartChallengeEventsEarlierTimestampSuccessSuccess(self, mock_utcNow): e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 
50).replace(tzinfo=utc), type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station ) actualTime1_s = 3213 e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 56).replace(tzinfo=utc), type = PiEvent.SUBMIT_MSG_TYPE, team = self.team1, pi = self.station, status = PiEvent.SUCCESS_STATUS, data = json.dumps({"candidate_answer": actualTime1_s, "fail_message": "OUTCOME_SUCCESS"}, separators=(',',':')) ) e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 57).replace(tzinfo=utc), type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station ) actualTime2_s = 228 e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 58).replace(tzinfo=utc), type = PiEvent.SUBMIT_MSG_TYPE, team = self.team1, pi = self.station, status = PiEvent.SUCCESS_STATUS, data = json.dumps({"candidate_answer": actualTime2_s, "fail_message": "OUTCOME_SUCCESS"}, separators=(',',':')) ) expectedScore = 9 expectedDuration_s = 6 - self._watchingTime_s + actualTime1_s # ignore acutalTime2_s self._verify(expectedScore, expectedDuration_s) @mock.patch('scoreboard.views._utcNow', side_effect=_mocked_utcNow) def test_recomputeDockScore_twoStartChallengeEventsEarlierTimestampFailNoSuccessFail(self, mock_utcNow): e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 46).replace(tzinfo=utc), type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station ) actualTime_s = 283 e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 48).replace(tzinfo=utc), type = PiEvent.SUBMIT_MSG_TYPE, team = self.team1, pi = self.station, status = PiEvent.FAIL_STATUS, data = json.dumps({"candidate_answer": actualTime_s, "fail_message": "OUTCOME_TOO_SLOW"}, separators=(',',':')) ) e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 50).replace(tzinfo=utc), type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station ) expectedScore = 1 expectedDuration_s = 14 - self._watchingTime_s + actualTime_s self._verify(expectedScore, expectedDuration_s) @mock.patch('scoreboard.views._utcNow', side_effect=_mocked_utcNow) def test_recomputeDockScore_twoStartChallengeEventsEarlierTimestampFailSuccessNoConclude(self, mock_utcNow): e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 46).replace(tzinfo=utc), type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station ) actualTime1_s = 9385 e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 48).replace(tzinfo=utc), type = PiEvent.SUBMIT_MSG_TYPE, team = self.team1, pi = self.station, status = PiEvent.FAIL_STATUS, data = json.dumps({"candidate_answer": actualTime1_s, "fail_message": "OUTCOME_TOO_SLOW"}, separators=(',',':')) ) e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 50).replace(tzinfo=utc), type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station ) actualTime2_s = 332 e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 52).replace(tzinfo=utc), type = PiEvent.SUBMIT_MSG_TYPE, team = self.team1, pi = self.station, status = PiEvent.SUCCESS_STATUS, data = json.dumps({"candidate_answer": actualTime2_s, "fail_message": "OUTCOME_SUCCESS"}, separators=(',',':')) ) expectedScore = 9 expectedDuration_s = 6 - self._watchingTime_s + actualTime1_s - self._watchingTime_s + actualTime2_s self._verify(expectedScore, expectedDuration_s) @mock.patch('scoreboard.views._utcNow', side_effect=_mocked_utcNow) def 
test_recomputeDockScore_twoStartChallengeEventsEarlierTimestampFailSuccessWithConclude(self, mock_utcNow): e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 46).replace(tzinfo=utc), type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station ) actualTime1_s = 123 e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 48).replace(tzinfo=utc), type = PiEvent.SUBMIT_MSG_TYPE, team = self.team1, pi = self.station, status = PiEvent.FAIL_STATUS, data = json.dumps({"candidate_answer": actualTime1_s, "fail_message": "OUTCOME_TOO_SLOW"}, separators=(',',':')) ) e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 50).replace(tzinfo=utc), type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station ) actualTime2_s = 456 e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 52).replace(tzinfo=utc), type = PiEvent.SUBMIT_MSG_TYPE, team = self.team1, pi = self.station, status = PiEvent.SUCCESS_STATUS, data = json.dumps({"candidate_answer": actualTime2_s, "fail_message": "OUTCOME_SUCCESS"}, separators=(',',':')) ) e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 58).replace(tzinfo=utc), type = PiEvent.EVENT_CONCLUDED_MSG_TYPE, team = self.team1, pi = self.station ) expectedScore = 9 expectedDuration_s = 6 - self._watchingTime_s + actualTime1_s - self._watchingTime_s + actualTime2_s self._verify(expectedScore, expectedDuration_s) @mock.patch('scoreboard.views._utcNow', side_effect=_mocked_utcNow) def test_recomputeDockScore_twoStartChallengeEventsEarlierTimestampFailFailNoConclude(self, mock_utcNow): e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 46).replace(tzinfo=utc), type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station ) actualTime1_s = 345 e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 48).replace(tzinfo=utc), type = PiEvent.SUBMIT_MSG_TYPE, team = self.team1, pi = self.station, status = PiEvent.FAIL_STATUS, data = json.dumps({"candidate_answer": actualTime1_s, "fail_message": "OUTCOME_TOO_SLOW"}, separators=(',',':')) ) e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 50).replace(tzinfo=utc), type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station ) actualTime2_s = 678 e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 52).replace(tzinfo=utc), type = PiEvent.SUBMIT_MSG_TYPE, team = self.team1, pi = self.station, status = PiEvent.FAIL_STATUS, data = json.dumps({"candidate_answer": actualTime2_s, "fail_message": "OUTCOME_TOO_SLOW"}, separators=(',',':')) ) e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 54).replace(tzinfo=utc), type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station ) expectedScore = 1 expectedDuration_s = 14 - self._watchingTime_s + actualTime1_s - self._watchingTime_s + actualTime2_s self._verify(expectedScore, expectedDuration_s) @mock.patch('scoreboard.views._utcNow', side_effect=_mocked_utcNow) def test_recomputeDockScore_twoStartChallengeEventsEarlierTimestampFailFailWithConclude(self, mock_utcNow): e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 46).replace(tzinfo=utc), type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station ) actualTime1_s = 4567 e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 48).replace(tzinfo=utc), type = PiEvent.SUBMIT_MSG_TYPE, team = self.team1, pi = self.station, status = PiEvent.FAIL_STATUS, data = json.dumps({"candidate_answer": 
actualTime1_s, "fail_message": "OUTCOME_TOO_SLOW"}, separators=(',',':')) ) e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 50).replace(tzinfo=utc), type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station ) actualTime2_s = 678 e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 52).replace(tzinfo=utc), type = PiEvent.SUBMIT_MSG_TYPE, team = self.team1, pi = self.station, status = PiEvent.FAIL_STATUS, data = json.dumps({"candidate_answer": actualTime2_s, "fail_message": "OUTCOME_TOO_SLOW"}, separators=(',',':')) ) e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 54).replace(tzinfo=utc), type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station ) e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 58).replace(tzinfo=utc), type = PiEvent.EVENT_CONCLUDED_MSG_TYPE, team = self.team1, pi = self.station ) expectedScore = 1 expectedDuration_s = 12 - self._watchingTime_s + actualTime1_s - self._watchingTime_s + actualTime2_s self._verify(expectedScore, expectedDuration_s) @mock.patch('scoreboard.views._utcNow', side_effect=_mocked_utcNow) def test_recomputeDockScore_threeStartChallengeEventsEarlierTimestampFailFailNoSuccessFail(self, mock_utcNow): e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 46).replace(tzinfo=utc), type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station ) actualTime1_s = 567 e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 48).replace(tzinfo=utc), type = PiEvent.SUBMIT_MSG_TYPE, team = self.team1, pi = self.station, status = PiEvent.FAIL_STATUS, data = json.dumps({"candidate_answer": actualTime1_s, "fail_message": "OUTCOME_TOO_SLOW"}, separators=(',',':')) ) e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 50).replace(tzinfo=utc), type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station ) actualTime2_s = 890 e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 52).replace(tzinfo=utc), type = PiEvent.SUBMIT_MSG_TYPE, team = self.team1, pi = self.station, status = PiEvent.FAIL_STATUS, data = json.dumps({"candidate_answer": actualTime2_s, "fail_message": "OUTCOME_TOO_SLOW"}, separators=(',',':')) ) e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 54).replace(tzinfo=utc), type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station ) expectedScore = 1 expectedDuration_s = 14 - self._watchingTime_s + actualTime1_s - self._watchingTime_s + actualTime2_s self._verify(expectedScore, expectedDuration_s) @mock.patch('scoreboard.views._utcNow', side_effect=_mocked_utcNow) def test_recomputeDockScore_threeStartChallengeEventsEarlierTimestampFailFailSuccessNoConclude(self, mock_utcNow): e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 46).replace(tzinfo=utc), type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station ) actualTime1_s = 678 e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 48).replace(tzinfo=utc), type = PiEvent.SUBMIT_MSG_TYPE, team = self.team1, pi = self.station, status = PiEvent.FAIL_STATUS, data = json.dumps({"candidate_answer": actualTime1_s, "fail_message": "OUTCOME_TOO_SLOW"}, separators=(',',':')) ) e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 50).replace(tzinfo=utc), type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station ) actualTime2_s = 789 e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 
52).replace(tzinfo=utc), type = PiEvent.SUBMIT_MSG_TYPE, team = self.team1, pi = self.station, status = PiEvent.FAIL_STATUS, data = json.dumps({"candidate_answer": actualTime2_s, "fail_message": "OUTCOME_TOO_SLOW"}, separators=(',',':')) ) e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 54).replace(tzinfo=utc), type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station ) actualTime3_s = 7654 e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 56).replace(tzinfo=utc), type = PiEvent.SUBMIT_MSG_TYPE, team = self.team1, pi = self.station, status = PiEvent.SUCCESS_STATUS, data = json.dumps({"candidate_answer": actualTime3_s, "fail_message": "OUTCOME_SUCCESS"}, separators=(',',':')) ) expectedScore = 9 expectedDuration_s = 10 - self._watchingTime_s + actualTime1_s - self._watchingTime_s + actualTime2_s - self._watchingTime_s + actualTime3_s self._verify(expectedScore, expectedDuration_s) @mock.patch('scoreboard.views._utcNow', side_effect=_mocked_utcNow) def test_recomputeDockScore_threeStartChallengeEventsEarlierTimestampFailFailSuccessWithConclude(self, mock_utcNow): e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 46).replace(tzinfo=utc), type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station ) actualTime1_s = 321 e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 48).replace(tzinfo=utc), type = PiEvent.SUBMIT_MSG_TYPE, team = self.team1, pi = self.station, status = PiEvent.FAIL_STATUS, data = json.dumps({"candidate_answer": actualTime1_s, "fail_message": "OUTCOME_TOO_SLOW"}, separators=(',',':')) ) e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 50).replace(tzinfo=utc), type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station ) actualTime2_s = 654 e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 52).replace(tzinfo=utc), type = PiEvent.SUBMIT_MSG_TYPE, team = self.team1, pi = self.station, status = PiEvent.FAIL_STATUS, data = json.dumps({"candidate_answer": actualTime2_s, "fail_message": "OUTCOME_TOO_SLOW"}, separators=(',',':')) ) e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 54).replace(tzinfo=utc), type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station ) actualTime3_s = 987 e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 56).replace(tzinfo=utc), type = PiEvent.SUBMIT_MSG_TYPE, team = self.team1, pi = self.station, status = PiEvent.SUCCESS_STATUS, data = json.dumps({"candidate_answer": actualTime3_s, "fail_message": "OUTCOME_SUCCESS"}, separators=(',',':')) ) e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 58).replace(tzinfo=utc), type = PiEvent.EVENT_CONCLUDED_MSG_TYPE, team = self.team1, pi = self.station ) expectedScore = 9 expectedDuration_s = 10 - self._watchingTime_s + actualTime1_s - self._watchingTime_s + actualTime2_s - self._watchingTime_s + actualTime3_s self._verify(expectedScore, expectedDuration_s) @mock.patch('scoreboard.views._utcNow', side_effect=_mocked_utcNow) def test_recomputeDockScore_threeStartChallengeEventsEarlierTimestampFailFailFailNoConclude(self, mock_utcNow): e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 46).replace(tzinfo=utc), type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station ) actualTime1_s = 37 # this is less than 45 sec, so watchingTime will be used instead e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 48).replace(tzinfo=utc), type = 
PiEvent.SUBMIT_MSG_TYPE, team = self.team1, pi = self.station, status = PiEvent.FAIL_STATUS, data = json.dumps({"candidate_answer": actualTime1_s, "fail_message": "OUTCOME_TOO_SLOW"}, separators=(',',':')) ) e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 50).replace(tzinfo=utc), type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station ) actualTime2_s = 54 e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 52).replace(tzinfo=utc), type = PiEvent.SUBMIT_MSG_TYPE, team = self.team1, pi = self.station, status = PiEvent.FAIL_STATUS, data = json.dumps({"candidate_answer": actualTime2_s, "fail_message": "OUTCOME_TOO_SLOW"}, separators=(',',':')) ) e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 54).replace(tzinfo=utc), type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station ) actualTime3_s = 76 e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 56).replace(tzinfo=utc), type = PiEvent.SUBMIT_MSG_TYPE, team = self.team1, pi = self.station, status = PiEvent.FAIL_STATUS, data = json.dumps({"candidate_answer": actualTime3_s, "fail_message": "OUTCOME_TOO_SLOW"}, separators=(',',':')) ) expectedScore = 5 expectedDuration_s = 10 - self._watchingTime_s + self._watchingTime_s - self._watchingTime_s + actualTime2_s - self._watchingTime_s + actualTime3_s self._verify(expectedScore, expectedDuration_s) @mock.patch('scoreboard.views._utcNow', side_effect=_mocked_utcNow) def test_recomputeDockScore_threeStartChallengeEventsEarlierTimestampFailFailFailWithConclude(self, mock_utcNow): e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 46).replace(tzinfo=utc), type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station ) actualTime1_s = 23 # use watchTime_s instead since this is less than 45 sec e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 48).replace(tzinfo=utc), type = PiEvent.SUBMIT_MSG_TYPE, team = self.team1, pi = self.station, status = PiEvent.FAIL_STATUS, data = json.dumps({"candidate_answer": actualTime1_s, "fail_message": "OUTCOME_TOO_SLOW"}, separators=(',',':')) ) e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 50).replace(tzinfo=utc), type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station ) actualTime2_s = 45 e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 52).replace(tzinfo=utc), type = PiEvent.SUBMIT_MSG_TYPE, team = self.team1, pi = self.station, status = PiEvent.FAIL_STATUS, data = json.dumps({"candidate_answer": actualTime2_s, "fail_message": "OUTCOME_TOO_SLOW"}, separators=(',',':')) ) e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 54).replace(tzinfo=utc), type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station ) actualTime3_s = 67 e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 56).replace(tzinfo=utc), type = PiEvent.SUBMIT_MSG_TYPE, team = self.team1, pi = self.station, status = PiEvent.FAIL_STATUS, data = json.dumps({"candidate_answer": actualTime3_s, "fail_message": "OUTCOME_TOO_SLOW"}, separators=(',',':')) ) e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 58).replace(tzinfo=utc), type = PiEvent.EVENT_CONCLUDED_MSG_TYPE, team = self.team1, pi = self.station ) expectedScore = 5 expectedDuration_s = 10 - self._watchingTime_s + self._watchingTime_s - self._watchingTime_s + actualTime2_s - self._watchingTime_s + actualTime3_s self._verify(expectedScore, expectedDuration_s) 
@mock.patch('scoreboard.views._utcNow', side_effect=_mocked_utcNow) def test_recomputeDockScore_fourStartChallengeEventsEarlierTimestampFailFailFailNoSuccessFail(self, mock_utcNow): e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 46).replace(tzinfo=utc), type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station ) actualTime1_s = 123 e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 48).replace(tzinfo=utc), type = PiEvent.SUBMIT_MSG_TYPE, team = self.team1, pi = self.station, status = PiEvent.FAIL_STATUS, data = json.dumps({"candidate_answer": actualTime1_s, "fail_message": "OUTCOME_TOO_SLOW"}, separators=(',',':')) ) e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 50).replace(tzinfo=utc), type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station ) actualTime2_s = 45 e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 52).replace(tzinfo=utc), type = PiEvent.SUBMIT_MSG_TYPE, team = self.team1, pi = self.station, status = PiEvent.FAIL_STATUS, data = json.dumps({"candidate_answer": actualTime2_s, "fail_message": "OUTCOME_TOO_SLOW"}, separators=(',',':')) ) e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 54).replace(tzinfo=utc), type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station ) actualTime3_s = 6789 e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 56).replace(tzinfo=utc), type = PiEvent.SUBMIT_MSG_TYPE, team = self.team1, pi = self.station, status = PiEvent.FAIL_STATUS, data = json.dumps({"candidate_answer": actualTime3_s, "fail_message": "OUTCOME_TOO_SLOW"}, separators=(',',':')) ) e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 57).replace(tzinfo=utc), type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station ) expectedScore = 5 expectedDuration_s = 10 - self._watchingTime_s + actualTime1_s - self._watchingTime_s + actualTime2_s - self._watchingTime_s + actualTime3_s self._verify(expectedScore, expectedDuration_s) @mock.patch('scoreboard.views._utcNow', side_effect=_mocked_utcNow) def test_recomputeDockScore_fourStartChallengeEventsEarlierTimestampFailFailFailFailNoConclude(self, mock_utcNow): e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 46).replace(tzinfo=utc), type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station ) actualTime1_s = 122 e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 48).replace(tzinfo=utc), type = PiEvent.SUBMIT_MSG_TYPE, team = self.team1, pi = self.station, status = PiEvent.FAIL_STATUS, data = json.dumps({"candidate_answer": actualTime1_s, "fail_message": "OUTCOME_TOO_SLOW"}, separators=(',',':')) ) e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 50).replace(tzinfo=utc), type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station ) actualTime2_s = 233 e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 52).replace(tzinfo=utc), type = PiEvent.SUBMIT_MSG_TYPE, team = self.team1, pi = self.station, status = PiEvent.FAIL_STATUS, data = json.dumps({"candidate_answer": actualTime2_s, "fail_message": "OUTCOME_TOO_SLOW"}, separators=(',',':')) ) e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 54).replace(tzinfo=utc), type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station ) actualTime3_s = 344 e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 56).replace(tzinfo=utc), type = PiEvent.SUBMIT_MSG_TYPE, team = 
self.team1, pi = self.station, status = PiEvent.FAIL_STATUS, data = json.dumps({"candidate_answer": actualTime3_s, "fail_message": "OUTCOME_TOO_SLOW"}, separators=(',',':')) ) e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 57).replace(tzinfo=utc), type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station ) actualTime4_s = 455 e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 58).replace(tzinfo=utc), type = PiEvent.SUBMIT_MSG_TYPE, team = self.team1, pi = self.station, status = PiEvent.FAIL_STATUS, data = json.dumps({"candidate_answer": actualTime4_s, "fail_message": "OUTCOME_TOO_SLOW"}, separators=(',',':')) ) expectedScore = 5 expectedDuration_s = 10 - self._watchingTime_s + actualTime1_s - self._watchingTime_s + actualTime2_s - self._watchingTime_s + actualTime3_s # ignore actualTime4_s self._verify(expectedScore, expectedDuration_s) @mock.patch('scoreboard.views._utcNow', side_effect=_mocked_utcNow) def test_recomputeDockScore_fourStartChallengeEventsEarlierTimestampFailFailFailSuccessNoConclude(self, mock_utcNow): e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 46).replace(tzinfo=utc), type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station ) actualTime1_s = 1223 e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 48).replace(tzinfo=utc), type = PiEvent.SUBMIT_MSG_TYPE, team = self.team1, pi = self.station, status = PiEvent.FAIL_STATUS, data = json.dumps({"candidate_answer": actualTime1_s, "fail_message": "OUTCOME_TOO_SLOW"}, separators=(',',':')) ) e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 50).replace(tzinfo=utc), type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station ) actualTime2_s = 2334 e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 52).replace(tzinfo=utc), type = PiEvent.SUBMIT_MSG_TYPE, team = self.team1, pi = self.station, status = PiEvent.FAIL_STATUS, data = json.dumps({"candidate_answer": actualTime2_s, "fail_message": "OUTCOME_TOO_SLOW"}, separators=(',',':')) ) e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 54).replace(tzinfo=utc), type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station ) actualTime3_s = 3445 e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 56).replace(tzinfo=utc), type = PiEvent.SUBMIT_MSG_TYPE, team = self.team1, pi = self.station, status = PiEvent.FAIL_STATUS, data = json.dumps({"candidate_answer": actualTime3_s, "fail_message": "OUTCOME_TOO_SLOW"}, separators=(',',':')) ) e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 57).replace(tzinfo=utc), type = PiEvent.START_CHALLENGE_MSG_TYPE, team = self.team1, pi = self.station ) actualTime4_s = 4556 e = PiEvent.objects.create( time = datetime(2000, 12, 31, 23, 59, 58).replace(tzinfo=utc), type = PiEvent.SUBMIT_MSG_TYPE, team = self.team1, pi = self.station, status = PiEvent.SUCCESS_STATUS, data = json.dumps({"candidate_answer": actualTime4_s, "fail_message": "OUTCOME_SUCCESS"}, separators=(',',':')) ) expectedScore = 5 expectedDuration_s = 10 - self._watchingTime_s + actualTime1_s - self._watchingTime_s + actualTime2_s - self._watchingTime_s + actualTime3_s # ignore actualTime4_s self._verify(expectedScore, expectedDuration_s) @mock.patch('scoreboard.views._utcNow', side_effect=_mocked_utcNow) def test_recomputeDockScore_onlyOneStartChallengeEventLaterTimestamp(self, mock_utcNow): pass # Don't worry about later timestamps #TODO - Remaining items... 
# # Scoreboard # [x] 1. absent/present Registered indicator # [x] 2. make title larger and change to "Leaderboard" # [x] 3. fill width 100% # [x] 4. make 30 teams fit on the same page with roughly 20-30 chars # [x] 5. header row multiple lines--all text doesn't show up # [x] 6. don't need to show page footer; find another place for the attribution # [ ] 7. put "Harris Design Challenge 2016" along the left-hand side # [x] 8. ranking # [x] 11. remove team logo if not implementing this time # [ ] 12. Page has two jquery <script> tags--one looks WRONG_ARGUMENTS # # # Enhancements # [ ] 9. Change color (darker) for the ones that are zero (not started) # [ ] 10. Set color brighter to stand out for the ones that are done
brata-hsdc/brata.masterserver
workspace/ms/scoreboard/tests/test_dock.py
Python
apache-2.0
49,951
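The dock tests above all build their expected durations from the same arithmetic. The sketch below is a rough, hedged reading of that rule; the helper name and signature are invented here, and the real logic lives in scoreboard.views._recomputeTeamScore.

# Rough reading of the per-attempt duration charged by the dock scoring,
# inferred from the expectedDuration_s expressions in the tests above.
def attempt_duration_s(elapsed_s, watching_time_s, candidate_answer_s,
                       fail_message, dnf_penalty=2.0):
    """Wall-clock time minus the simulated playback, plus the reported run time."""
    if fail_message == "OUTCOME_DNF":
        candidate_answer_s *= dnf_penalty          # DNF runs pay the penalty factor
    # Answers shorter than the playback window appear to be clamped to it.
    candidate_answer_s = max(candidate_answer_s, watching_time_s)
    return elapsed_s - watching_time_s + candidate_answer_s

# e.g. a 6 s wall-clock attempt, 45 s playback, 100 s reported run:
# 6 - 45 + 100 = 61 s, matching the single-success expectation above.
print(attempt_duration_s(6, 45.0, 100, "OUTCOME_SUCCESS"))  # 61.0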
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for generating BigQuery data querying scripts."""

from google.cloud import aiplatform as vertex_ai


def _get_source_query(bq_dataset_name, bq_table_name, ml_use, limit=None):
    query = f"""
    SELECT
        IF(trip_month IS NULL, -1, trip_month) trip_month,
        IF(trip_day IS NULL, -1, trip_day) trip_day,
        IF(trip_day_of_week IS NULL, -1, trip_day_of_week) trip_day_of_week,
        IF(trip_hour IS NULL, -1, trip_hour) trip_hour,
        IF(trip_seconds IS NULL, -1, trip_seconds) trip_seconds,
        IF(trip_miles IS NULL, -1, trip_miles) trip_miles,
        IF(payment_type IS NULL, 'NA', payment_type) payment_type,
        IF(pickup_grid IS NULL, 'NA', pickup_grid) pickup_grid,
        IF(dropoff_grid IS NULL, 'NA', dropoff_grid) dropoff_grid,
        IF(euclidean IS NULL, -1, euclidean) euclidean,
        IF(loc_cross IS NULL, 'NA', loc_cross) loc_cross"""
    if ml_use:
        query += f""",
        tip_bin
    FROM {bq_dataset_name}.{bq_table_name}
    WHERE ML_use = '{ml_use}'
    """
    else:
        query += f"""
    FROM {bq_dataset_name}.{bq_table_name}
    """
    if limit:
        query += f"LIMIT {limit}"

    return query


def get_training_source_query(
    project, region, dataset_display_name, ml_use, limit=None
):
    vertex_ai.init(project=project, location=region)

    dataset = vertex_ai.TabularDataset.list(
        filter=f"display_name={dataset_display_name}", order_by="update_time"
    )[-1]
    bq_source_uri = dataset.gca_resource.metadata["inputConfig"]["bigquerySource"][
        "uri"
    ]
    _, bq_dataset_name, bq_table_name = bq_source_uri.replace("bq://", "").split(".")

    return _get_source_query(bq_dataset_name, bq_table_name, ml_use, limit)


def get_serving_source_query(bq_dataset_name, bq_table_name, limit=None):
    return _get_source_query(bq_dataset_name, bq_table_name, ml_use=None, limit=limit)
GoogleCloudPlatform/mlops-with-vertex-ai
src/common/datasource_utils.py
Python
apache-2.0
2,483
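A hypothetical call to the helpers above, with made-up dataset and table names, and the rough shape of the SQL they return:

# Hypothetical usage of the helpers above (dataset/table names are invented;
# import path follows the repo layout shown in the metadata).
from src.common.datasource_utils import get_serving_source_query

sql = get_serving_source_query("chicago_taxi", "trips_prepared", limit=100)
print(sql)
# SELECT
#     IF(trip_month IS NULL, -1, trip_month) trip_month,
#     ...
#     IF(loc_cross IS NULL, 'NA', loc_cross) loc_cross
# FROM chicago_taxi.trips_prepared
# LIMIT 100
#
# get_training_source_query() additionally selects the tip_bin label column and
# filters on ML_use = '<split>' after resolving the Vertex AI dataset's BigQuery URI.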
"""Insteon base entity.""" import functools import logging from pyinsteon import devices from homeassistant.core import callback from homeassistant.helpers.dispatcher import ( async_dispatcher_connect, async_dispatcher_send, ) from homeassistant.helpers.entity import DeviceInfo, Entity from .const import ( DOMAIN, SIGNAL_ADD_DEFAULT_LINKS, SIGNAL_LOAD_ALDB, SIGNAL_PRINT_ALDB, SIGNAL_REMOVE_ENTITY, SIGNAL_SAVE_DEVICES, STATE_NAME_LABEL_MAP, ) from .utils import print_aldb_to_log _LOGGER = logging.getLogger(__name__) class InsteonEntity(Entity): """INSTEON abstract base entity.""" def __init__(self, device, group): """Initialize the INSTEON binary sensor.""" self._insteon_device_group = device.groups[group] self._insteon_device = device def __hash__(self): """Return the hash of the Insteon Entity.""" return hash(self._insteon_device) @property def should_poll(self): """No polling needed.""" return False @property def address(self): """Return the address of the node.""" return str(self._insteon_device.address) @property def group(self): """Return the INSTEON group that the entity responds to.""" return self._insteon_device_group.group @property def unique_id(self) -> str: """Return a unique ID.""" if self._insteon_device_group.group == 0x01: uid = self._insteon_device.id else: uid = f"{self._insteon_device.id}_{self._insteon_device_group.group}" return uid @property def name(self): """Return the name of the node (used for Entity_ID).""" # Set a base description if (description := self._insteon_device.description) is None: description = "Unknown Device" # Get an extension label if there is one extension = self._get_label() if extension: extension = f" {extension}" return f"{description} {self._insteon_device.address}{extension}" @property def extra_state_attributes(self): """Provide attributes for display on device card.""" return {"insteon_address": self.address, "insteon_group": self.group} @property def device_info(self) -> DeviceInfo: """Return device information.""" return DeviceInfo( identifiers={(DOMAIN, str(self._insteon_device.address))}, manufacturer="Smart Home", model=f"{self._insteon_device.model} ({self._insteon_device.cat!r}, 0x{self._insteon_device.subcat:02x})", name=f"{self._insteon_device.description} {self._insteon_device.address}", sw_version=f"{self._insteon_device.firmware:02x} Engine Version: {self._insteon_device.engine_version}", via_device=(DOMAIN, str(devices.modem.address)), ) @callback def async_entity_update(self, name, address, value, group): """Receive notification from transport that new data exists.""" _LOGGER.debug( "Received update for device %s group %d value %s", address, group, value, ) self.async_write_ha_state() async def async_added_to_hass(self): """Register INSTEON update events.""" _LOGGER.debug( "Tracking updates for device %s group %d name %s", self.address, self.group, self._insteon_device_group.name, ) self._insteon_device_group.subscribe(self.async_entity_update) load_signal = f"{self.entity_id}_{SIGNAL_LOAD_ALDB}" self.async_on_remove( async_dispatcher_connect(self.hass, load_signal, self._async_read_aldb) ) print_signal = f"{self.entity_id}_{SIGNAL_PRINT_ALDB}" async_dispatcher_connect(self.hass, print_signal, self._print_aldb) default_links_signal = f"{self.entity_id}_{SIGNAL_ADD_DEFAULT_LINKS}" async_dispatcher_connect( self.hass, default_links_signal, self._async_add_default_links ) remove_signal = f"{self._insteon_device.address.id}_{SIGNAL_REMOVE_ENTITY}" self.async_on_remove( async_dispatcher_connect( self.hass, remove_signal, 
functools.partial(self.async_remove, force_remove=True), ) ) async def async_will_remove_from_hass(self): """Unsubscribe to INSTEON update events.""" _LOGGER.debug( "Remove tracking updates for device %s group %d name %s", self.address, self.group, self._insteon_device_group.name, ) self._insteon_device_group.unsubscribe(self.async_entity_update) async def _async_read_aldb(self, reload): """Call device load process and print to log.""" await self._insteon_device.aldb.async_load(refresh=reload) self._print_aldb() async_dispatcher_send(self.hass, SIGNAL_SAVE_DEVICES) def _print_aldb(self): """Print the device ALDB to the log file.""" print_aldb_to_log(self._insteon_device.aldb) def _get_label(self): """Get the device label for grouped devices.""" label = "" if len(self._insteon_device.groups) > 1: if self._insteon_device_group.name in STATE_NAME_LABEL_MAP: label = STATE_NAME_LABEL_MAP[self._insteon_device_group.name] else: label = f"Group {self.group:d}" return label async def _async_add_default_links(self): """Add default links between the device and the modem.""" await self._insteon_device.async_add_default_links()
aronsky/home-assistant
homeassistant/components/insteon/insteon_entity.py
Python
apache-2.0
5,749
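The insteon_entity.py module above only defines the shared base class; a concrete platform entity mixes it with a Home Assistant entity class. The following is an illustrative sketch only, not part of the corpus entry: the subclass name is invented, and it assumes the wrapped pyinsteon group exposes its last known state through a "value" attribute.

from homeassistant.components.binary_sensor import BinarySensorEntity
from homeassistant.components.insteon.insteon_entity import InsteonEntity


class InsteonBinarySensorSketch(InsteonEntity, BinarySensorEntity):
    """Hypothetical binary sensor layered on the base entity above."""

    @property
    def is_on(self):
        # Read the current state from the pyinsteon group wrapped by
        # InsteonEntity (assumption: the group exposes a `value` attribute).
        return bool(self._insteon_device_group.value)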
# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: sample-weight-meta.proto import sys _b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() DESCRIPTOR = _descriptor.FileDescriptor( name='sample-weight-meta.proto', package='com.webank.ai.fate.common.mlmodel.buffer', syntax='proto3', serialized_options=_b('B\025SampleWeightMetaProto'), serialized_pb=_b('\n\x18sample-weight-meta.proto\x12(com.webank.ai.fate.common.mlmodel.buffer\"S\n\x10SampleWeightMeta\x12\x10\n\x08need_run\x18\x01 \x01(\x08\x12\x1a\n\x12sample_weight_name\x18\x02 \x01(\t\x12\x11\n\tnormalize\x18\x03 \x01(\x08\x42\x17\x42\x15SampleWeightMetaProtob\x06proto3') ) _SAMPLEWEIGHTMETA = _descriptor.Descriptor( name='SampleWeightMeta', full_name='com.webank.ai.fate.common.mlmodel.buffer.SampleWeightMeta', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='need_run', full_name='com.webank.ai.fate.common.mlmodel.buffer.SampleWeightMeta.need_run', index=0, number=1, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='sample_weight_name', full_name='com.webank.ai.fate.common.mlmodel.buffer.SampleWeightMeta.sample_weight_name', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='normalize', full_name='com.webank.ai.fate.common.mlmodel.buffer.SampleWeightMeta.normalize', index=2, number=3, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=70, serialized_end=153, ) DESCRIPTOR.message_types_by_name['SampleWeightMeta'] = _SAMPLEWEIGHTMETA _sym_db.RegisterFileDescriptor(DESCRIPTOR) SampleWeightMeta = _reflection.GeneratedProtocolMessageType('SampleWeightMeta', (_message.Message,), { 'DESCRIPTOR' : _SAMPLEWEIGHTMETA, '__module__' : 'sample_weight_meta_pb2' # @@protoc_insertion_point(class_scope:com.webank.ai.fate.common.mlmodel.buffer.SampleWeightMeta) }) _sym_db.RegisterMessage(SampleWeightMeta) DESCRIPTOR._options = None # @@protoc_insertion_point(module_scope)
FederatedAI/FATE
python/federatedml/protobuf/generated/sample_weight_meta_pb2.py
Python
apache-2.0
3,206
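The file above is a generated descriptor module and is not meant to be edited. A minimal usage sketch for the SampleWeightMeta message follows; it is illustrative only, assumes the standard protobuf runtime API, and assumes the module is importable from the package path shown in the entry.

from federatedml.protobuf.generated import sample_weight_meta_pb2

# Populate the three fields declared in the descriptor above.
meta = sample_weight_meta_pb2.SampleWeightMeta()
meta.need_run = True
meta.sample_weight_name = "balanced"
meta.normalize = False

# Round-trip through the wire format, as with any protobuf message.
payload = meta.SerializeToString()
parsed = sample_weight_meta_pb2.SampleWeightMeta()
parsed.ParseFromString(payload)
assert parsed.sample_weight_name == "balanced"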
from __future__ import absolute_import from django.utils.translation import ugettext as _ from django.conf import settings from django.views.decorators.csrf import csrf_exempt from django.contrib.auth import authenticate from zerver.decorator import authenticated_json_post_view, has_request_variables, REQ from zerver.lib.actions import do_change_password, \ do_change_full_name, do_change_enable_desktop_notifications, \ do_change_enter_sends, do_change_enable_sounds, \ do_change_enable_offline_email_notifications, do_change_enable_digest_emails, \ do_change_enable_offline_push_notifications, do_change_autoscroll_forever, \ do_change_default_desktop_notifications, \ do_change_enable_stream_desktop_notifications, do_change_enable_stream_sounds, \ do_regenerate_api_key, do_change_avatar_source, do_change_twenty_four_hour_time, do_change_left_side_userlist from zerver.lib.avatar import avatar_url from zerver.lib.response import json_success, json_error from zerver.lib.upload import upload_avatar_image from zerver.lib.validator import check_bool from zerver.models import UserProfile from zerver.lib.rest import rest_dispatch as _rest_dispatch rest_dispatch = csrf_exempt((lambda request, *args, **kwargs: _rest_dispatch(request, globals(), *args, **kwargs))) def name_changes_disabled(realm): return settings.NAME_CHANGES_DISABLED or realm.name_changes_disabled @authenticated_json_post_view @has_request_variables def json_change_ui_settings(request, user_profile, autoscroll_forever=REQ(validator=check_bool, default=None), default_desktop_notifications=REQ(validator=check_bool, default=None)): result = {} if autoscroll_forever is not None and \ user_profile.autoscroll_forever != autoscroll_forever: do_change_autoscroll_forever(user_profile, autoscroll_forever) result['autoscroll_forever'] = autoscroll_forever if default_desktop_notifications is not None and \ user_profile.default_desktop_notifications != default_desktop_notifications: do_change_default_desktop_notifications(user_profile, default_desktop_notifications) result['default_desktop_notifications'] = default_desktop_notifications return json_success(result) @authenticated_json_post_view @has_request_variables def json_change_settings(request, user_profile, full_name=REQ(), old_password=REQ(default=""), new_password=REQ(default=""), confirm_password=REQ(default="")): if new_password != "" or confirm_password != "": if new_password != confirm_password: return json_error(_("New password must match confirmation password!")) if not authenticate(username=user_profile.email, password=old_password): return json_error(_("Wrong password!")) do_change_password(user_profile, new_password) result = {} if user_profile.full_name != full_name and full_name.strip() != "": if name_changes_disabled(user_profile.realm): # Failingly silently is fine -- they can't do it through the UI, so # they'd have to be trying to break the rules. 
pass else: new_full_name = full_name.strip() if len(new_full_name) > UserProfile.MAX_NAME_LENGTH: return json_error(_("Name too long!")) do_change_full_name(user_profile, new_full_name) result['full_name'] = new_full_name return json_success(result) @authenticated_json_post_view @has_request_variables def json_time_setting(request, user_profile, twenty_four_hour_time=REQ(validator=check_bool, default=None)): result = {} if twenty_four_hour_time is not None and \ user_profile.twenty_four_hour_time != twenty_four_hour_time: do_change_twenty_four_hour_time(user_profile, twenty_four_hour_time) result['twenty_four_hour_time'] = twenty_four_hour_time return json_success(result) @authenticated_json_post_view @has_request_variables def json_left_side_userlist(request, user_profile, left_side_userlist=REQ(validator=check_bool, default=None)): result = {} if left_side_userlist is not None and \ user_profile.left_side_userlist != left_side_userlist: do_change_left_side_userlist(user_profile, left_side_userlist) result['left_side_userlist'] = left_side_userlist return json_success(result) @authenticated_json_post_view @has_request_variables def json_change_notify_settings(request, user_profile, enable_stream_desktop_notifications=REQ(validator=check_bool, default=None), enable_stream_sounds=REQ(validator=check_bool, default=None), enable_desktop_notifications=REQ(validator=check_bool, default=None), enable_sounds=REQ(validator=check_bool, default=None), enable_offline_email_notifications=REQ(validator=check_bool, default=None), enable_offline_push_notifications=REQ(validator=check_bool, default=None), enable_digest_emails=REQ(validator=check_bool, default=None)): result = {} # Stream notification settings. if enable_stream_desktop_notifications is not None and \ user_profile.enable_stream_desktop_notifications != enable_stream_desktop_notifications: do_change_enable_stream_desktop_notifications( user_profile, enable_stream_desktop_notifications) result['enable_stream_desktop_notifications'] = enable_stream_desktop_notifications if enable_stream_sounds is not None and \ user_profile.enable_stream_sounds != enable_stream_sounds: do_change_enable_stream_sounds(user_profile, enable_stream_sounds) result['enable_stream_sounds'] = enable_stream_sounds # PM and @-mention settings. 
if enable_desktop_notifications is not None and \ user_profile.enable_desktop_notifications != enable_desktop_notifications: do_change_enable_desktop_notifications(user_profile, enable_desktop_notifications) result['enable_desktop_notifications'] = enable_desktop_notifications if enable_sounds is not None and \ user_profile.enable_sounds != enable_sounds: do_change_enable_sounds(user_profile, enable_sounds) result['enable_sounds'] = enable_sounds if enable_offline_email_notifications is not None and \ user_profile.enable_offline_email_notifications != enable_offline_email_notifications: do_change_enable_offline_email_notifications(user_profile, enable_offline_email_notifications) result['enable_offline_email_notifications'] = enable_offline_email_notifications if enable_offline_push_notifications is not None and \ user_profile.enable_offline_push_notifications != enable_offline_push_notifications: do_change_enable_offline_push_notifications(user_profile, enable_offline_push_notifications) result['enable_offline_push_notifications'] = enable_offline_push_notifications if enable_digest_emails is not None and \ user_profile.enable_digest_emails != enable_digest_emails: do_change_enable_digest_emails(user_profile, enable_digest_emails) result['enable_digest_emails'] = enable_digest_emails return json_success(result) @authenticated_json_post_view def json_set_avatar(request, user_profile): if len(request.FILES) != 1: return json_error(_("You must upload exactly one avatar.")) user_file = list(request.FILES.values())[0] upload_avatar_image(user_file, user_profile, user_profile.email) do_change_avatar_source(user_profile, UserProfile.AVATAR_FROM_USER) user_avatar_url = avatar_url(user_profile) json_result = dict( avatar_url = user_avatar_url ) return json_success(json_result) @has_request_variables def regenerate_api_key(request, user_profile): do_regenerate_api_key(user_profile) json_result = dict( api_key = user_profile.api_key ) return json_success(json_result) @has_request_variables def change_enter_sends(request, user_profile, enter_sends=REQ('enter_sends', validator=check_bool)): do_change_enter_sends(user_profile, enter_sends) return json_success()
peiwei/zulip
zerver/views/user_settings.py
Python
apache-2.0
9,076
# coding=utf-8 from ..base import BitbucketBase class BitbucketCloudBase(BitbucketBase): def __init__(self, url, *args, **kwargs): """ Init the rest api wrapper :param url: string: The base url used for the rest api. :param *args: list: The fixed arguments for the AtlassianRestApi. :param **kwargs: dict: The keyword arguments for the AtlassianRestApi. :return: nothing """ expected_type = kwargs.pop("expected_type", None) super(BitbucketCloudBase, self).__init__(url, *args, **kwargs) if expected_type is not None and not expected_type == self.get_data("type"): raise ValueError("Expected type of data is [{}], got [{}].".format(expected_type, self.get_data("type"))) def get_link(self, link): """ Get a link from the data. :param link: string: The link identifier :return: The requested link or None if it isn't present """ links = self.get_data("links") if links is None or link not in links: return None return links[link]["href"] def _get_paged(self, url, params=None, data=None, flags=None, trailing=None, absolute=False): """ Used to get the paged data :param url: string: The url to retrieve :param params: dict (default is None): The parameters :param data: dict (default is None): The data :param flags: string[] (default is None): The flags :param trailing: bool (default is None): If True, a trailing slash is added to the url :param absolute: bool (default is False): If True, the url is used absolute and not relative to the root :return: A generator object for the data elements """ if params is None: params = {} while True: response = super(BitbucketCloudBase, self).get( url, trailing=trailing, params=params, data=data, flags=flags, absolute=absolute, ) if "values" not in response: return for value in response.get("values", []): yield value url = response.get("next") if url is None: break # From now on we have absolute URLs absolute = True return
MattAgile/atlassian-python-api
atlassian/bitbucket/cloud/base.py
Python
apache-2.0
2,467
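A short, illustrative sketch (not part of the library file above) of how a subclass typically consumes the helpers defined in BitbucketCloudBase, in particular _get_paged for cursor-based pagination. The endpoint wrapper and the username/app-password keyword arguments are assumptions for illustration, not documented API.

from atlassian.bitbucket.cloud.base import BitbucketCloudBase


class RepositoriesSketch(BitbucketCloudBase):
    """Hypothetical wrapper around the repositories endpoint."""

    def each(self, workspace):
        # _get_paged() yields one dict per repository and follows the
        # "next" link of each paged response transparently.
        for repo in self._get_paged("repositories/{}".format(workspace)):
            yield repo


if __name__ == "__main__":
    # Assumption: the parent classes accept plain username/password credentials.
    client = RepositoriesSketch(
        url="https://api.bitbucket.org/2.0",
        username="alice",
        password="app-password",
    )
    for repo in client.each("my-workspace"):
        print(repo["full_name"])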
from django.core.files import locks from django.core.urlresolvers import reverse from django.db.models import Count, F, Q, Min from django.template import RequestContext, TemplateDoesNotExist from django.template.loader import get_template, select_template from django.utils import timezone from django.views.decorators.clickjacking import xframe_options_exempt from django.views.decorators.csrf import csrf_exempt from django.views.decorators.http import require_GET, require_POST from django.http import HttpResponse, Http404, HttpResponseBadRequest from datetime import datetime from base64 import b64encode import pytz import json import os import logging import random import uuid import numpy as np from basecrowd.interface import CrowdRegistry from basecrowd.models import TaskGroupRetainerStatus from basecrowd.models import RetainerPoolStatus from basecrowd.tasks import gather_answer logger = logging.getLogger('crowd_server') @require_POST @csrf_exempt def create_task_group(request, crowd_name): """ See README.md for API. """ # get the interface implementation from the crowd name. interface, model_spec = CrowdRegistry.get_registry_entry(crowd_name) # Response dictionaries correct_response = {'status': 'ok'} wrong_response = {'status': 'wrong'} # Parse information contained in the URL json_dict = request.POST.get('data') # Validate the format. if not interface.validate_create_request(json_dict): wrong_response['reason'] = 'Invalid request data.' return HttpResponse(json.dumps(wrong_response)) # Pull out important data fields json_dict = json.loads(json_dict) configuration = json_dict['configuration'] group_id = json_dict['group_id'] group_context = json.dumps(json_dict['group_context']) content = json_dict['content'] point_identifiers = content.keys() # Create a new group for the tasks. if model_spec.group_model.objects.filter(group_id=group_id).exists(): wrong_response['reason'] = 'Group id %s is already in use.' % group_id return HttpResponse(json.dumps(wrong_response)) current_group = model_spec.group_model( group_id=group_id, tasks_finished=0, callback_url=configuration['callback_url'], group_context=group_context, crowd_config=json.dumps(configuration.get(crowd_name, {})), global_config=json.dumps(configuration)) # Call the group hook function, then save the new group to the database. interface.group_pre_save(current_group) current_group.save() # Build crowd tasks from the group if 'retainer_pool' in configuration: # Retainer pool tasks # The specified crowd must support retainer pools retainer_pool_model = model_spec.retainer_pool_model if not retainer_pool_model: wrong_response['reason'] = 'Crowd does not support retainer pools.' return HttpResponse(json.dumps(wrong_response)) # Create or find the retainer pool. 
retainer_config = configuration['retainer_pool'] create_pool = retainer_config['create_pool'] pool_id = retainer_config.get('pool_id', '') if create_pool: (retainer_pool, created) = retainer_pool_model.objects.get_or_create( external_id=pool_id, defaults={ 'capacity': retainer_config['pool_size'], 'status': RetainerPoolStatus.RECRUITING, }) if created == False: # pool id already taken wrong_response['reason'] = 'Pool id %s already in use' % pool_id return HttpResponse(json.dumps(wrong_response)) else: try: retainer_pool = retainer_pool_model.objects.get( external_id=pool_id) # TODO: Make sure this pool is compatible with the new task group except retainer_pool_model.DoesNotExist: # clean up current_group.delete() wrong_response['reason'] = 'Pool %s does not exist' % pool_id return HttpResponse(json.dumps(wrong_response)) current_group.retainer_pool = retainer_pool # Don't call interface.create_task, the `post_retainer_tasks` celery # task will do so. # Batch and create the tasks. batch_size = configuration['task_batch_size'] for i in range(0, len(point_identifiers), batch_size): batch_point_ids = point_identifiers[i:i+batch_size] batch_content = { j: content[j] for j in batch_point_ids } task_id = str(uuid.uuid4()) # generate a random id for this task task = model_spec.task_model( task_type=configuration['task_type'], data=json.dumps(batch_content), create_time=timezone.now(), task_id=task_id, group=current_group, num_assignments=configuration['num_assignments'], is_retainer=True, ) interface.task_pre_save(task) task.save() #for point_id, point_content in content.iteritems(): # task_id = str(uuid.uuid4()) # generate a random id for this task # task = model_spec.task_model( # task_type=configuration['task_type'], # data=json.dumps({point_id: point_content}), # create_time=pytz.utc.localize(datetime.now()), # task_id=task_id, # group=current_group, # num_assignments=configuration['num_assignments'], # is_retainer=True, # ) # interface.task_pre_save(task) # task.save() # start the work right away if the pool is ready if retainer_pool.status in [RetainerPoolStatus.IDLE, RetainerPoolStatus.ACTIVE]: current_group.retainer_pool_status = TaskGroupRetainerStatus.RUNNING retainer_pool.status = RetainerPoolStatus.ACTIVE retainer_pool.save() else: current_group.retainer_pool_status = TaskGroupRetainerStatus.WAITING current_group.save() else: # Not retainer, create a task for each batch of points. for i in range(0, len(point_identifiers), configuration['task_batch_size']): # build the batch current_content = {} for j in range(i, i + configuration['task_batch_size']): if j >= len(point_identifiers): break current_content[point_identifiers[j]] = content[ point_identifiers[j]] current_content = json.dumps(current_content) # Call the create task hook current_task_id = interface.create_task(configuration, current_content) # Build the task object current_task = model_spec.task_model( task_type=configuration['task_type'], data=current_content, create_time=pytz.utc.localize(datetime.now()), task_id=current_task_id, group=current_group, num_assignments=configuration['num_assignments']) # Call the pre-save hook, then save the task to the database. interface.task_pre_save(current_task) current_task.save() return HttpResponse(json.dumps(correct_response)) # Delete all tasks from the system. def purge_tasks(request, crowd_name): interface, model_spec = CrowdRegistry.get_registry_entry(crowd_name) tasks = model_spec.task_model.objects.all() # Call the delete hook, then delete the tasks from our database. 
# TODO: clean up retainer pool tasks correctly. interface.delete_tasks(tasks) tasks.delete() return HttpResponse('ok') # we need this view to load in AMT's iframe, so disable Django's built-in # clickjacking protection. @xframe_options_exempt @require_GET def get_assignment(request, crowd_name): # get the interface implementation from the crowd name. interface, model_spec = CrowdRegistry.get_registry_entry(crowd_name) logger.info('Non-retainer worker requested task assignment.') # get assignment context context = interface.get_assignment_context(request) try: interface.require_context( context, ['task_id', 'is_accepted'], ValueError('Task id unavailable in assignment request context.')) except ValueError: # This task is no longer available (due to a race condition). # Return the 'No available tasks' template. template = get_scoped_template(crowd_name, 'unavailable.html') return HttpResponse(template.render(RequestContext(request, {}))) return _get_assignment(request, crowd_name, interface, model_spec, context) def _get_assignment(request, crowd_name, interface, model_spec, context, **custom_template_context): # Retrieve the task based on task_id from the database try: current_task = (model_spec.task_model.objects .select_related('group') .get(task_id=context['task_id'])) task_group = current_task.group except model_spec.task_model.DoesNotExist: response_str = ''' <html><head></head><body> <h1>Error: Expired Task!</h1> <p>Task %s has expired, and isn't currently available for work. <b>Please return this task</b> and pick up a new one.</p> </body></html> ''' % context['task_id'] return HttpResponse(response_str) # Save the information of this worker worker_id = context.get('worker_id') if worker_id: try: current_worker = model_spec.worker_model.objects.get( worker_id=worker_id) except model_spec.worker_model.DoesNotExist: current_worker = model_spec.worker_model( worker_id=worker_id) # Call the pre-save hook, the save to the database interface.worker_pre_save(current_worker) current_worker.save() else: current_worker = None is_accepted = context.get('is_accepted', False) # If this is a retainer task, add the worker to the pool (if the worker # isn't already in the pool, i.e., they're trying to accept multiple HITs # for the same pool). if current_task.task_type == 'retainer': # TODO: consider making this all pools (i.e., a worker can't be in # more than one pool at a time). pool = task_group.retainer_pool if ((pool.active_workers.filter(worker_id=worker_id).exists() or pool.reserve_workers.filter(worker_id=worker_id).exists()) and (current_worker.assignments.filter( task__group__retainer_pool=pool, task__task_type='retainer') .exclude(task=current_task).exists())): response_str = ''' <html><head></head><body> <h1>Error: Multiple pool memberships detected</h1> <p>You can't accept more than one retainer task at a time, and we've detected that you are already active in another retainer task.</p> <p>Please return this task, or leave the pool in your other active task.</p> <p><b>Note:</b> You may see this error if you have recently finished another retainer task. In that case, simply wait 5-10 seconds and refresh this page, and the error should be gone. 
</p> </body></html> ''' return HttpResponse(response_str) global_config = json.loads(task_group.global_config) retainer_config = global_config['retainer_pool'] exp_config = global_config.get('experimental') churn_thresh = exp_config.get('churn_threshold') if exp_config else None context.update({ 'waiting_rate': retainer_config['waiting_rate'], 'per_task_rate': retainer_config['task_rate'], 'min_required_tasks': retainer_config['min_tasks_per_worker'], 'pool_status': pool.get_status_display(), }) # Relate workers and tasks (after a worker accepts the task). if is_accepted: if not current_worker: raise ValueError("Accepted tasks must have an associated worker.") assignment_id = context['assignment_id'] try: assignment = current_worker.assignments.get(assignment_id=assignment_id) except model_spec.assignment_model.DoesNotExist: assignment = model_spec.assignment_model.objects.create( assignment_id=assignment_id, worker=current_worker, task=current_task) # Add the new worker to the session task's retainer pool. if current_task.task_type == 'retainer': # Put the worker on reserve if the pool is full and we're churning if pool.active_workers.count() >= pool.capacity and churn_thresh is not None: assignment.on_reserve = True else: assignment.on_reserve = False current_worker.pools.add(pool) assignment.save() context.update({ 'wait_time': assignment.time_waited, 'tasks_completed': current_worker.completed_assignments_for_pool_session( current_task).count(), 'understands_retainer': current_worker.understands_retainer, }) else: if not current_task.group.work_start_time: current_task.group.work_start_time = timezone.now() current_task.group.save() # Add task data to the context. content = json.loads(current_task.data) group_context = json.loads(task_group.group_context) crowd_config = json.loads(task_group.crowd_config) context.update(group_context=group_context, content=content, backend_submit_url=interface.get_backend_submit_url(), frontend_submit_url=interface.get_frontend_submit_url(crowd_config), crowd_name=crowd_name) context.update(**custom_template_context) # Load the template and render it. template = get_scoped_template(crowd_name, current_task.task_type + '.html', context=context) return HttpResponse(template.render(RequestContext(request, context))) def get_scoped_template(crowd_name, template_name, context=None): base_template_name = os.path.join(crowd_name, 'base.html') if context is not None: try: t = get_template(base_template_name) except TemplateDoesNotExist: base_template_name = 'basecrowd/base.html' context['base_template_name'] = base_template_name return select_template([ os.path.join(crowd_name, template_name), os.path.join('basecrowd', template_name)]) # When workers submit assignments, we should send data to this view via AJAX # before submitting to AMT. @require_POST @csrf_exempt def post_response(request, crowd_name): # get the interface implementation from the crowd name. interface, model_spec = CrowdRegistry.get_registry_entry(crowd_name) # get context from the request context = interface.get_response_context(request) # validate context interface.require_context( context, ['assignment_id', 'task_id', 'worker_id', 'answers'], ValueError("Response context missing required keys.")) # Check if this is a duplicate response assignment_id = context['assignment_id'] if model_spec.assignment_model.objects.filter( assignment_id=assignment_id, finished_at__isnull=False).exists(): return HttpResponse('Duplicate!') # Retrieve the task and worker from the database based on ids. 
current_task = model_spec.task_model.objects.get(task_id=context['task_id']) assignment = model_spec.assignment_model.objects.get(assignment_id=assignment_id) # Store this response into the database assignment.content = context['answers'] assignment.finished_at = timezone.now() interface.response_pre_save(assignment) assignment.save() # Check if this task has been finished # If we've gotten too many responses, ignore. if (not current_task.is_complete and (current_task.assignments.filter(finished_at__isnull=False).count() >= current_task.num_assignments)): current_task.is_complete = True current_task.pre_celery = timezone.now() current_task.save() gather_answer.delay(current_task.task_id, model_spec) # terminate in progress retainer tasks (model_spec.assignment_model.objects .exclude(task__task_type='retainer') .filter(task=current_task, finished_at__isnull=True) .update(finished_at=timezone.now(), terminated=True)) return HttpResponse('ok') # AJAX call succeded. # Views related to Retainer Pool tasks ####################################### @require_POST @csrf_exempt def ping(request, crowd_name): try: interface, model_spec = CrowdRegistry.get_registry_entry(crowd_name) now = timezone.now() # get and validate context context = interface.get_response_context(request) interface.require_context( context, ['task_id', 'worker_id', 'assignment_id'], ValueError("ping context missing required keys.")) task = model_spec.task_model.objects.get(task_id=context['task_id']) worker = model_spec.worker_model.objects.get(worker_id=context['worker_id']) assignment = model_spec.assignment_model.objects.get( assignment_id=context['assignment_id']) pool_status = task.group.retainer_pool.get_status_display() terminate_work = False terminate_worker = assignment.worker_released_at is not None # update waiting time ping_type = request.POST['ping_type'] # Task started waiting, create a new session if ping_type == 'starting': assignment.finish_waiting_session() # Task is waiting, increment wait time. elif ping_type == 'waiting' and pool_status != 'finished': last_ping = assignment.last_ping time_since_last_ping = (now - last_ping).total_seconds() assignment.time_waited_session += time_since_last_ping # Task is working, verify that the assignment hasn't been terminated. elif ping_type == 'working': active_task_id = request.POST.get('active_task', None) if not active_task_id: logger.warning('Ping from %s, but no active task id.' % assignment) terminate_worker = False # Don't kill them if we don't know what they're working on else: try: active_assignment = model_spec.assignment_model.objects.filter( worker=worker, task_id=active_task_id)[0] if active_assignment.terminated: terminate_work = True except IndexError: # No active assignment terminate_worker = False # Don't kill the worker if we don't know what they're working on. 
# if terminate_worker: # make sure their current task can be recycled # active_assignment.finished_at = now # active_assignment.terminated = True # active_assignment.save() assignment.last_ping = now assignment.save() worker.last_ping = now worker.save() logger.info('ping from worker %s, task %s' % (worker, task)) retainer_config = json.loads(task.group.global_config)['retainer_pool'] data = { 'ping_type': ping_type, 'wait_time': assignment.time_waited, 'tasks_completed': worker.completed_assignments_for_pool_session( task).count(), 'pool_status': pool_status, 'waiting_rate': retainer_config['waiting_rate'], 'per_task_rate': retainer_config['task_rate'], 'min_required_tasks': retainer_config['min_tasks_per_worker'], 'terminate_work': terminate_work, 'terminate_worker': terminate_worker, } return HttpResponse(json.dumps(data), content_type='application/json') except Exception as e: logger.exception(e) raise e @require_GET def assign_retainer_task(request, crowd_name): try: # get the interface implementation from the crowd name. interface, model_spec = CrowdRegistry.get_registry_entry(crowd_name) context = interface.get_response_context(request) interface.require_context( context, ['task_id', 'worker_id'], ValueError("retainer assignment context missing required keys.")) try: task = (model_spec.task_model.objects .select_related('group__retainer_pool') .get(task_id=context['task_id'])) group = task.group pool = group.retainer_pool worker = model_spec.worker_model.objects.get(worker_id=context['worker_id']) logger.info('Retainer task %s requested work.' % task) except Exception: # Issue loading models from IDs, finish this assignment return HttpResponse(json.dumps({'start': False, 'pool_status': 'finished'}), content_type='application/json') exp_config = json.loads(group.global_config).get('experimental') if exp_config: straggler_mitigation = exp_config.get('mitigate_stragglers', False) straggler_routing_policy = exp_config.get('straggler_routing_policy', 'random') churn_threshold = exp_config.get('churn_threshold') else: straggler_mitigation = False churn_threshold = None # Acquire an exclusive lock to avoid duplicate assignments lockf = open('/tmp/ASSIGNMENT_LOCK', 'wb') logger.debug("Locking assignment lock...") locks.lock(lockf, locks.LOCK_EX) # Don't assign a task if the worker is on reserve or the pool is inactive. 
on_reserve = (task.assignments.filter(worker=worker, on_reserve=True).exists() if churn_threshold is not None else False) pool_inactive = pool.status not in (RetainerPoolStatus.ACTIVE, RetainerPoolStatus.REFILLING, RetainerPoolStatus.IDLE) no_work_response = HttpResponse(json.dumps({'start': False, 'pool_status': pool.get_status_display()}), content_type='application/json') if on_reserve: logger.info("Worker on reserve: not assigning work.") return no_work_response if pool_inactive: logger.info("Pool still recruiting or otherwise inactive: not assigning work.") return no_work_response # Look for a task the worker is already assigned to assignment_task = None existing_assignments = (worker.assignments .filter(finished_at__isnull=True) .filter(task__group__retainer_pool=pool) .exclude(task__task_type='retainer')) logger.info('Looking for assignments for retainer worker...') if existing_assignments.exists(): assignment_task = existing_assignments[0].task logger.info('Found an existing assignment for this worker') else: # Look for open tasks incomplete_tasks = ( # incomplete tasks model_spec.task_model.objects.filter(is_complete=False) # in this pool's tasks .filter(group__retainer_pool=pool) # that aren't dummy retainer tasks .exclude(task_type='retainer') # that the worker hasn't worked on already .exclude(assignments__worker=worker)) # First check if the open tasks haven't been assigned to enough workers. # TODO: avoid gross SQL non_terminated_assignments = """ SELECT COUNT(*) FROM %(crowdname)s_%(assignment_model)s WHERE %(crowdname)s_%(assignment_model)s.terminated = False AND %(crowdname)s_%(assignment_model)s.task_id = %(crowdname)s_%(task_model)s.task_id """ % { 'crowdname': crowd_name, 'assignment_model': model_spec.assignment_model.__name__.lower(), 'task_model': model_spec.task_model.__name__.lower(), } open_tasks = incomplete_tasks.extra( where=["num_assignments > (%s)" % non_terminated_assignments]) if open_tasks.exists(): logger.info('Found an unassigned but open task') assignment_task = open_tasks.order_by('?')[0] # Then, check if there in-progress tasks with enough assignments. elif incomplete_tasks.exists(): if not straggler_mitigation: # only assign tasks that have been abandoned # Bad performance characteristics! consider rewriting. active_workers = set(pool.active_workers.all()) abandoned_tasks = [ t for t in incomplete_tasks if len([a for a in t.assignments.select_related('worker').all() if a.worker in active_workers]) < t.num_assignments] if abandoned_tasks: logger.info('Found an assigned but abandoned task.') assignment_task = random.choice(abandoned_tasks) else: logger.info('All tasks are assigned.') # Straggler mitigation else: logger.info('Assigning to an active task for straggler mitigation with policy %s.' 
% straggler_routing_policy) if straggler_routing_policy == 'random': assignment_task = incomplete_tasks.order_by('?')[0] elif straggler_routing_policy == 'oldest': now = timezone.now() annotated = incomplete_tasks.annotate(start=Min('assignments__assigned_at')) weights = [(now - t.start).total_seconds() for t in annotated] weights = np.array(weights) / sum(weights) assignment_task = np.random.choice(list(annotated), size=1, p=weights)[0] elif straggler_routing_policy == 'young-workers': now = timezone.now() weights = [ 1 / (now - min([a.worker.assignments .filter(task__task_type='retainer', task__group__retainer_pool=pool) .order_by('assigned_at')[0].assigned_at for a in task.assignments.all()])).total_seconds() for task in incomplete_tasks] weights = np.array(weights) / sum(weights) assignment_task = np.random.choice(list(incomplete_tasks), size=1, p=weights)[0] elif straggler_routing_policy == 'fair': # assign to the task with the fewest assignments assignment_task = (incomplete_tasks .extra(select={'n_assignments': non_terminated_assignments}, order_by=['n_assignments']))[0] else: logger.info('Unkown straggler routing policy: %s. Using random instead...' % straggler_routing_policy) assignment_task = incomplete_tasks.order_by('?')[0] # return a url to the assignment if assignment_task: # create the assignment if necessary try: logger.info('Looking up assignment...') assignment = worker.assignments.get( task=assignment_task, worker=worker) if not assignment.retainer_session_task: assignment.retainer_session_task = task assignment.save() except model_spec.assignment_model.DoesNotExist: logger.info('No assignment found: creating new one.') assignment_id = str(uuid.uuid4()) assignment = model_spec.assignment_model.objects.create( assignment_id=assignment_id, worker=worker, task=assignment_task, retainer_session_task=task) if not assignment_task.group.work_start_time: assignment_task.group.work_start_time = timezone.now() assignment_task.group.save() url_args = { 'crowd_name': crowd_name, 'worker_id': worker.worker_id, 'task_id': assignment_task.task_id, } response_data = json.dumps({ 'start': True, 'task_url': reverse('basecrowd:get_retainer_assignment', kwargs=url_args), 'task_id': assignment_task.task_id, 'pool_status': pool.get_status_display() }) logger.info('Linking task to assignment.') return HttpResponse(response_data, content_type='application/json') else: logger.info('No tasks found!') return no_work_response except Exception as e: logger.exception(e) raise e finally: # Release the assignment lock--either an assignment has been created in the DB, or an error occurred. logger.debug("Unlocking assignment lock...") locks.unlock(lockf) lockf.close() # we need this view to load in AMT's iframe, so disable Django's built-in # clickjacking protection. @xframe_options_exempt @require_GET def get_retainer_assignment(request, crowd_name, worker_id, task_id): # get the interface implementation from the crowd name. interface, model_spec = CrowdRegistry.get_registry_entry(crowd_name) logger.info('Retainer worker fetched task assignment.') # fetch assignment if it already exists (e.g. the user refreshed the browser). 
try: assignment_id = model_spec.assignment_model.objects.get( task_id=task_id, worker_id=worker_id).assignment_id except model_spec.assignment_model.DoesNotExist: assignment_id = str(uuid.uuid4()) context = { 'task_id': task_id, 'worker_id': worker_id, 'is_accepted': True, 'assignment_id': assignment_id } return _get_assignment(request, crowd_name, interface, model_spec, context) @require_POST @csrf_exempt def finish_pool(request, crowd_name): pool_id = request.POST.get('pool_id') interface, model_spec = CrowdRegistry.get_registry_entry(crowd_name) try: pool = model_spec.retainer_pool_model.objects.get(external_id=pool_id) except model_spec.retainer_pool_model.DoesNotExist: return HttpResponse(json.dumps({'error': 'Invalid pool id'})) _finish_pool(pool, model_spec) logger.info("Retainer pool %s finished" % pool) return HttpResponse(json.dumps({'status': 'ok'})) def _finish_pool(pool, model_spec): # Mark open sessions as interrupted so we don't penalize them unfairly. (model_spec.assignment_model.objects .filter(task__group__retainer_pool=pool, task__task_type='retainer') .exclude(Q(finished_at__isnull=False) & Q(terminated=False)) .update(pool_ended_mid_assignment=True)) pool.status = RetainerPoolStatus.FINISHED pool.finished_at = timezone.now() pool.save() @require_POST @csrf_exempt def understands_retainer(request, crowd_name, worker_id): interface, model_spec = CrowdRegistry.get_registry_entry(crowd_name) try: worker = model_spec.worker_model.objects.get(worker_id=worker_id) except model_spec.worker_model.DoesNotExist: return HttpResponse(json.dumps({'error': 'Invalid worker id'})) worker.understands_retainer = True worker.save() logger.info('%s understands the retainer model.' % worker) return HttpResponse(json.dumps({'status': 'ok'}))
amplab/ampcrowd
ampcrowd/basecrowd/views.py
Python
apache-2.0
33,310
import os
from setuptools import setup, find_packages

here = os.path.abspath(os.path.dirname(__file__))
README = open(os.path.join(here, 'README.rst')).read()
CHANGES = open(os.path.join(here, 'CHANGES.rst')).read()

requires = [
    'cdo',
    'bokeh',
    'ocgis',
    'pandas',
    'nose',
    ]

classifiers=[
    'Development Status :: 3 - Alpha',
    'Intended Audience :: Science/Research',
    'Operating System :: MacOS :: MacOS X',
    'Operating System :: Microsoft :: Windows',
    'Operating System :: POSIX',
    'Programming Language :: Python',
    'Topic :: Scientific/Engineering :: Atmospheric Science',
    ]

setup(name='flyingpigeon',
      version='0.2.0',
      description='Processes for climate data, indices and extreme events',
      long_description=README + '\n\n' + CHANGES,
      classifiers=classifiers,
      author='Nils Hempelmann',
      author_email='[email protected]',
      url='http://www.lsce.ipsl.fr/',
      license = "http://www.apache.org/licenses/LICENSE-2.0",
      keywords='wps flyingpigeon pywps malleefowl ipsl birdhouse conda anaconda',
      packages=find_packages(),
      include_package_data=True,
      zip_safe=False,
      test_suite='nose.collector',
      install_requires=requires,
      entry_points = {
          'console_scripts': [
              ]},
      )
sradanov/flyingpigeon
setup.py
Python
apache-2.0
1,385
# Copyright (c) OpenMMLab. All rights reserved. import itertools import numpy as np import torch from .general_data import GeneralData class InstanceData(GeneralData): """Data structure for instance-level annnotations or predictions. Subclass of :class:`GeneralData`. All value in `data_fields` should have the same length. This design refer to https://github.com/facebookresearch/detectron2/blob/master/detectron2/structures/instances.py # noqa E501 Examples: >>> from mmdet.core import InstanceData >>> import numpy as np >>> img_meta = dict(img_shape=(800, 1196, 3), pad_shape=(800, 1216, 3)) >>> results = InstanceData(img_meta) >>> img_shape in results True >>> results.det_labels = torch.LongTensor([0, 1, 2, 3]) >>> results["det_scores"] = torch.Tensor([0.01, 0.7, 0.6, 0.3]) >>> results["det_masks"] = np.ndarray(4, 2, 2) >>> len(results) 4 >>> print(resutls) <InstanceData( META INFORMATION pad_shape: (800, 1216, 3) img_shape: (800, 1196, 3) PREDICTIONS shape of det_labels: torch.Size([4]) shape of det_masks: (4, 2, 2) shape of det_scores: torch.Size([4]) ) at 0x7fe26b5ca990> >>> sorted_results = results[results.det_scores.sort().indices] >>> sorted_results.det_scores tensor([0.0100, 0.3000, 0.6000, 0.7000]) >>> sorted_results.det_labels tensor([0, 3, 2, 1]) >>> print(results[results.scores > 0.5]) <InstanceData( META INFORMATION pad_shape: (800, 1216, 3) img_shape: (800, 1196, 3) PREDICTIONS shape of det_labels: torch.Size([2]) shape of det_masks: (2, 2, 2) shape of det_scores: torch.Size([2]) ) at 0x7fe26b6d7790> >>> results[results.det_scores > 0.5].det_labels tensor([1, 2]) >>> results[results.det_scores > 0.5].det_scores tensor([0.7000, 0.6000]) """ def __setattr__(self, name, value): if name in ('_meta_info_fields', '_data_fields'): if not hasattr(self, name): super().__setattr__(name, value) else: raise AttributeError( f'{name} has been used as a ' f'private attribute, which is immutable. ') else: assert isinstance(value, (torch.Tensor, np.ndarray, list)), \ f'Can set {type(value)}, only support' \ f' {(torch.Tensor, np.ndarray, list)}' if self._data_fields: assert len(value) == len(self), f'the length of ' \ f'values {len(value)} is ' \ f'not consistent with' \ f' the length ' \ f'of this :obj:`InstanceData` ' \ f'{len(self)} ' super().__setattr__(name, value) def __getitem__(self, item): """ Args: item (str, obj:`slice`, obj`torch.LongTensor`, obj:`torch.BoolTensor`): get the corresponding values according to item. Returns: obj:`InstanceData`: Corresponding values. """ assert len(self), ' This is a empty instance' assert isinstance( item, (str, slice, int, torch.LongTensor, torch.BoolTensor)) if isinstance(item, str): return getattr(self, item) if type(item) == int: if item >= len(self) or item < -len(self): raise IndexError(f'Index {item} out of range!') else: # keep the dimension item = slice(item, None, len(self)) new_data = self.new() if isinstance(item, (torch.Tensor)): assert item.dim() == 1, 'Only support to get the' \ ' values along the first dimension.' if isinstance(item, torch.BoolTensor): assert len(item) == len(self), f'The shape of the' \ f' input(BoolTensor)) ' \ f'{len(item)} ' \ f' does not match the shape ' \ f'of the indexed tensor ' \ f'in results_filed ' \ f'{len(self)} at ' \ f'first dimension. 
' for k, v in self.items(): if isinstance(v, torch.Tensor): new_data[k] = v[item] elif isinstance(v, np.ndarray): new_data[k] = v[item.cpu().numpy()] elif isinstance(v, list): r_list = [] # convert to indexes from boolTensor if isinstance(item, torch.BoolTensor): indexes = torch.nonzero(item).view(-1) else: indexes = item for index in indexes: r_list.append(v[index]) new_data[k] = r_list else: # item is a slice for k, v in self.items(): new_data[k] = v[item] return new_data @staticmethod def cat(instances_list): """Concat the predictions of all :obj:`InstanceData` in the list. Args: instances_list (list[:obj:`InstanceData`]): A list of :obj:`InstanceData`. Returns: obj:`InstanceData` """ assert all( isinstance(results, InstanceData) for results in instances_list) assert len(instances_list) > 0 if len(instances_list) == 1: return instances_list[0] new_data = instances_list[0].new() for k in instances_list[0]._data_fields: values = [results[k] for results in instances_list] v0 = values[0] if isinstance(v0, torch.Tensor): values = torch.cat(values, dim=0) elif isinstance(v0, np.ndarray): values = np.concatenate(values, axis=0) elif isinstance(v0, list): values = list(itertools.chain(*values)) else: raise ValueError( f'Can not concat the {k} which is a {type(v0)}') new_data[k] = values return new_data def __len__(self): if len(self._data_fields): for v in self.values(): return len(v) else: raise AssertionError('This is an empty `InstanceData`.')
open-mmlab/mmdetection
mmdet/core/data_structures/instance_data.py
Python
apache-2.0
6,926
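A brief, illustrative addition (not part of the file above): the class docstring demonstrates indexing but not the cat() helper, so the sketch below concatenates two InstanceData objects and then filters the result. The field names are arbitrary examples.

import torch

from mmdet.core import InstanceData

img_meta = dict(img_shape=(800, 1196, 3), pad_shape=(800, 1216, 3))

a = InstanceData(img_meta)
a.det_labels = torch.LongTensor([0, 1])
a.det_scores = torch.Tensor([0.9, 0.4])

b = InstanceData(img_meta)
b.det_labels = torch.LongTensor([2])
b.det_scores = torch.Tensor([0.7])

# cat() concatenates every data field along the first dimension.
merged = InstanceData.cat([a, b])
assert len(merged) == 3

# Boolean indexing keeps the rows whose score clears the threshold.
keep = merged[merged.det_scores > 0.5]
assert keep.det_labels.tolist() == [0, 2]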
from ajenti.api import * from ajenti.plugins import * info = PluginInfo( title='Resource Manager', icon='link', dependencies=[ ], ) def init(): import server
lupyuen/RaspberryPiImage
usr/share/pyshared/ajenti/plugins/resources/__init__.py
Python
apache-2.0
182
#!/usr/bin/python -Werror # # Copyright (c) 2015 Midokura SARL, All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import sys import os import os.path import yaml from fabric.api import * from netaddr import IPNetwork as CIDR from fabric.colors import yellow, blue, green from fabric.utils import puts import cuisine class Check(object): def __init__(self, metadata): self._metadata = metadata def check_broken_cuisine(self): run("rm -f /tmp/check_broken_cuisine.txt") cuisine.file_write("/tmp/check_broken_cuisine.txt", "WORKING") run("grep WORKING /tmp/check_broken_cuisine.txt") class Configure(object): def __init__(self, metadata): self._metadata = metadata def configure(self): self.localegen() self.name_resolution() self.os_release() self.datastax() self.midonet() def localegen(self): if env.host_string in self._metadata.roles["all_containers"]: run("locale-gen de_DE.UTF-8") def name_resolution(self): if env.host_string not in self._metadata.roles["all_containers"]: run("hostname %s" % env.host_string.split(".")[0]) run("ip address add %s/32 dev lo || echo" % self._metadata.servers[env.host_string]["ip"]) cuisine.file_write("/etc/hostname", env.host_string.split(".")[0]) cuisine.file_write("/etc/resolv.conf", """ nameserver %s options single-request """ % self._metadata.config["nameserver"]) local_ip = self._metadata.servers[env.host_string]["ip"] cuisine.file_write("/etc/hosts", """ 127.0.0.1 localhost.localdomain localhost ::1 ip6-localhost ip6-loopback fe00::0 ip6-localnet ff00::0 ip6-mcastprefix ff02::1 ip6-allnodes ff02::2 ip6-allrouters ff02::3 ip6-allhosts %s %s.%s # %s %s """ % ( local_ip, env.host_string, self._metadata.config["domain"], env.host_string.split(".")[0], open("%s/etc/hosts" % os.environ["TMPDIR"]).read() )) @classmethod def repokey(cls, url): run(""" URL="%s" wget -SO- "${URL}" | apt-key add - """ % url) def datastax(self): if env.host_string in self._metadata.containers: run(""" apt-key add - <<EOF -----BEGIN PGP PUBLIC KEY BLOCK----- Version: GnuPG v1 mQENBExkbXsBCACgUAbMWASAz/fmnMoWE4yJ/YHeuFHTK8zloJ/mApwizlQXTIVp U4UV8nbLJrbkFY92VTcC2/IBtvnHpZl8eVm/JSI7nojXc5Kmm4Ek/cY7uW2KKPr4 cuka/5cNsOg2vsgTIMOZT6vWAbag2BGHtEJbriMLhT3v1tlu9caJfybu3QFWpahC wRYtG3B4tkypt21ssWwNnmp2bjFRGpLssc5HCCxUCBFLYoIkAGAFRZ6ymglsLDBn SCEzCkn9zQfmyqs0lZk4odBx6rzE350xgEnzFktT2uekFYqRqPQY8f7AhVfj2DJF gVM4wXbSoVrTnDiFsaJt/Ea4OJ263jRUHeIRABEBAAG0LVJpcHRhbm8gUGFja2Fn ZSBSZXBvc2l0b3J5IDxwYXVsQHJpcHRhbm8uY29tPokBPgQTAQIAKAIbAwYLCQgH AwIGFQgCCQoLBBYCAwECHgECF4AFAlW/zKMFCRLBYKQACgkQNQIA8rmZo3LebAgA gAwWkvBrPaD5Kf8H4uw9rXtHnHYxX5G6cOVJ3vuWCs1ov7m3JWq918q00hWfLtOs zb15kFcjcEJ7kiRFJmAXZhcX2I0DHTmTZSl9orKzoUlXQqAANJGdek8pzdTDUQfz V26k63d6eLqjXotrb0hFzg7B8VSolxRE44S5k1xhzUCedOqYYsWVv3xnRIP6UBPt WLvzrLa0o9x/hT4w81dOP4rzZMuq2RApnenoz9AZwJrmZ14QW2ncy4RbqK6pKdRJ y57vBv8F0LkGlLwBd/JYWwQ85lUTkNG5wCWdj0IEYTO3+fGyO1LHU6bVZCrNtkUE ahSZUiRdidiktIkbtNXImYkCHAQQAQgABgUCTGRt2QAKCRATbpzxe100LaUfD/9D q84HarIQMEoUiRBklg+afgTMaNNdvhU3V59KoMja2vMeE4JjE3SvNoKCHjPZj6Ti 
720KL6V5O/Uo1VjtSXzAPRJywcE9aS5HRjM2Dr1mp5GnmpvbiKBdl91G9aPc3D2Z LpG7vZr8E/vYLc5h1DMz2XDqi6gAqW2yxb2vnmHL4FiAdoXfpZimC9KZpUdTsGPO VbXEDEn3y/AiIC35Bq66Sp3W4gVNakV7Y5RUPPDDBIsTZEOhzd9nl5FXOnPtONp5 dtp5NoWl6q3BjYe2P52TloCp+BJ62donfFTRSGfqyvtaRgmnHHEIWgypMghW6wSb O/BxFpdggHTItMfBg2a8tWDFjYmBoFd3iP9SfcmBb/7zB5YXC5b1/s3RNCtR76hf +iXjm/zy22tb6qy5XJsnCoORjEoFaWNH6ckgACK7HQyJZ2Lo2MuCYYaQLs6gTd6a zMEQHT08cPF+I5It9mOzAtUOkCcVK8dIXRFETXFVdQqFMTmZmuK1Iv1CFBeUIHnM iyoYv1bzNsUg/hJpW8ximVmBg5Apza2K0p3XKHkw9MPBqnQ4PbBM1nqb/+o56p+o 8mVZmjn4bdraB8c0Br15Mi19Zne7b65OZ5k+SVripUk5/XeJD9M9U6+DG+/uxemD Fzp9XjnnAe8T/u8JpqHYQ2mRONFM7ZMOAFeEe4yIEIkBPgQTAQIAKAUCTGRtewIb AwUJA8JnAAYLCQgHAwIGFQgCCQoLBBYCAwECHgECF4AACgkQNQIA8rmZo3K3HAf/ V+6OSdt/Zwdsk+WsUwi75ndOIz60TN8Wg16WOMq5KOBuYIneG2+CEFJHTppNLc2j r/ugTjTPeS/DAo5MtnK+zzHxT7JmMKypb23t6MaahSlER4THbYvWUwsw5mm2LsTe PTlb5mkvQnXkt6pN2UzZVyIdNFXRv1YZLdTcf4aJ0pZySvCdYoE9RaoP4/JI9GfS NXH7oOxI8YaxRGK5i6w/LZyhxkfbkPX+pbbe1Ept+SZCcwWVc/S6veGZWQ1pNHR2 RW6F3WE0Mle6xWtvW1NlMs4ATEqS13GS4RVlgE07KTe/oBRkd+4NwXAQoEzUvoRr j5Ad7LVKeygeUUyaWP+qN7kBDQRMZG17AQgAypZBEfm9pM8Tr4ktsHp1xThYHvzT OScLPZcCaF1Gjg8em0cQI4z4yN+yffsmUD4/dGcRxZgVms/jTexKQ8Z/Ps3e4vRG b4RCFaY0KhW4t+TTJJ9I5wvFzXZj7zNFxiQWpueiq/cDiBY+Liv3zMSOBaXzxR6L 7igNPKi/0ELLyCIU/okUwqc0O/4r5PgFANkMyvvVNqzxjC5s8MXbGivJXiML67/Y 0M/siNqDSia/TGItpXjvi7v1zulbiIV0iSBkO3vsxNE0xXGBXY/UztAShN3FTbx9 CZDupi35wgqK7McJ3WSjEDzwkElmwkmh7JdLziyH09kS1wRqiLcB+wSTywARAQAB iQElBBgBAgAPAhsMBQJVv8zOBQkSwWDOAAoJEDUCAPK5maNyLl4H/3n/+xZsuKia fHtBUMh44YRabEX1Bd10LAfxGlOZtKV/Dr1RaKetci6RRa5sJj0wKra6FhIryuqS jFTalPF3o8WjVEA5AjJ3ddSgAwX5gGJ3u+C0XMI0E6h/vAXh6meFxHtGinYr1Gcp P1/S3/Jy+0cmTt3FvqBtXtU3VIyb/4vUNZ+dY+jcw/gs/yS+s+jtR8hWUDbSrbU9 pja+p1icNwU5pMbEfx1HYB7JCKuE0iJNbAFagRtPCOKq4vUTPDUQUB5MjWV+89+f cizh+doQR9z8e+/02drCCMWiUf4iiFs2dNHwaIPDOJ8Xn9xcxiUaKk32sjT3sict XO5tB2KhE3A= =YO7C -----END PGP PUBLIC KEY BLOCK----- EOF """) cuisine.file_write("/etc/apt/sources.list.d/datastax.list", """ deb [arch=amd64] http://debian.datastax.com/community 2.0 main """) #self.repokey("https://debian.datastax.com/debian/repo_key") def midonet(self): # Install(self._metadata).apt_get_update() if "OS_MIDOKURA_REPOSITORY_USER" in os.environ: username = os.environ["OS_MIDOKURA_REPOSITORY_USER"] else: username = "" if "OS_MIDOKURA_REPOSITORY_PASS" in os.environ: password = os.environ["OS_MIDOKURA_REPOSITORY_PASS"] else: password = "" if "midonet_repo" in self._metadata.config: repo_flavor = self._metadata.config["midonet_repo"] else: repo_flavor = "OSS" if "midonet_manager" in self._metadata.roles: if env.host_string in self._metadata.roles["container_midonet_manager"]: if username <> "": if password <> "": repo_flavor = "MEM" if "OS_MIDOKURA_URL_OVERRIDE" in os.environ: url_override = os.environ["OS_MIDOKURA_URL_OVERRIDE"] else: url_override = "" if "OS_MIDOKURA_PLUGIN_URL_OVERRIDE" in os.environ: plugin_url_override = os.environ["OS_MIDOKURA_PLUGIN_URL_OVERRIDE"] else: plugin_url_override = "" puts(blue("setting up Midokura repos")) run(""" if [[ "%s" == "True" ]] ; then set -x; fi # # initialize the password cache # %s USERNAME="%s" PASSWORD="%s" MIDONET_VERSION="%s" OPENSTACK_PLUGIN_VERSION="%s" REPO_FLAVOR="%s" URL_OVERRIDE="%s" PLUGIN_URL_OVERRIDE="%s" rm -fv -- /etc/apt/sources.list.d/midonet* rm -fv -- /etc/apt/sources.list.d/midokura* if [[ "${REPO_FLAVOR}" == "MEM" ]]; then FILENAME="/etc/apt/sources.list.d/midokura.list" wget -SO- "http://${USERNAME}:${PASSWORD}@apt.midokura.com/packages.midokura.key" | apt-key add - if [[ "${URL_OVERRIDE}" == "" && "${PLUGIN_URL_OVERRIDE}" 
== "" ]]; then cat>"${FILENAME}"<<EOF # # MEM midolman # deb [arch=amd64] http://${USERNAME}:${PASSWORD}@apt.midokura.com/midonet/v${MIDONET_VERSION}/stable trusty main non-free # # MEM midonet neutron plugin # deb [arch=amd64] http://${USERNAME}:${PASSWORD}@apt.midokura.com/openstack/${OPENSTACK_PLUGIN_VERSION}/stable trusty main EOF else cat>"${FILENAME}"<<EOF # # MEM midolman (url override) # ${URL_OVERRIDE} # # MEM midonet neutron plugin (plugin url override) # ${PLUGIN_URL_OVERRIDE} EOF fi fi if [[ "${REPO_FLAVOR}" == "OSS" ]]; then FILENAME="/etc/apt/sources.list.d/midonet.list" wget -SO- http://repo.midonet.org/packages.midokura.key | apt-key add - cat>"${FILENAME}"<<EOF # OSS MidoNet deb http://repo.midonet.org/midonet/v${MIDONET_VERSION} stable main # OSS MidoNet OpenStack Integration deb http://repo.midonet.org/openstack-${OPENSTACK_PLUGIN_VERSION} stable main # OSS MidoNet 3rd Party Tools and Libraries deb http://repo.midonet.org/misc stable main EOF fi """ % ( self._metadata.config["debug"], open(os.environ["PASSWORDCACHE"]).read(), username, password, self._metadata.config["midonet_%s_version" % repo_flavor.lower()], self._metadata.config["midonet_%s_openstack_plugin_version" % repo_flavor.lower()], repo_flavor.upper(), url_override, plugin_url_override )) def os_release(self): if env.host_string in self._metadata.containers: self.__lib_orizuru_operations_ubuntu_repo(self._metadata.config["container_os_release_codename"]) else: self.__lib_orizuru_operations_ubuntu_repo(self._metadata.config["os_release_codename"]) def __lib_orizuru_operations_ubuntu_repo(self, codename): archive_country = self._metadata.config["archive_country"] apt_cacher = self._metadata.config["apt-cacher"] run(""" if [[ "%s" == "True" ]] ; then set -x; fi XC="%s" # ubuntu release XD="%s" # country code XX="%s" # apt-cacher cat>/etc/apt/sources.list<<EOF # # autogenerated file - do not modify - modify %s instead # EOF for TYPE in 'deb' 'deb-src'; do for realm in "main restricted" "universe" "multiverse"; do echo "${TYPE} ${XX}/${XD}.archive.ubuntu.com/ubuntu/ ${XC} ${realm}" echo "${TYPE} ${XX}/${XD}.archive.ubuntu.com/ubuntu/ ${XC}-updates ${realm}" echo "${TYPE} ${XX}/security.archive.ubuntu.com/ubuntu/ ${XC}-security ${realm}" done echo "${TYPE} ${XX}/${XD}.archive.ubuntu.com/ubuntu/ ${XC}-backports main restricted universe multiverse" done | tee -a /etc/apt/sources.list """ % (self._metadata.config["debug"], codename, archive_country, apt_cacher, sys._getframe().f_code.co_name)) class Install(object): def __init__(self, metadata): self._metadata = metadata def install(self): self.rsyslog() self.screen() self.login_stuff() self.apt_get_update() self.common_packages() self.rp_filter() self.cloud_repository() self.apt_get_update() self.ntp() self.dist_upgrade() self.constrictor() self.kmod("openvswitch") self.kmod("nbd") self.kmod("kvm") self.kmod("vhost_net") self.lldpd() def lldpd(self): cuisine.package_ensure("lldpd") def kmod(self, module_name): if env.host_string not in self._metadata.roles["all_containers"]: run("modprobe %s || true" % module_name) def constrictor(self): constrictor_bin = self._metadata.config["constrictor"] run("mkdir -pv $(dirname %s)" % constrictor_bin) cuisine.file_write(constrictor_bin, """#!/usr/bin/python -Werror import sys import ConfigParser def add_section(configuration, section): if not(section == 'DEFAULT' or configuration.has_section(section)): configuration.add_section(section) def set_option(configfile, configuration, section, option, value): configuration.set(section, 
option, value) cfgfile = open(configfile, "w") configuration.write(cfgfile) cfgfile.close() def get_option(configuration, section, option): print configuration.get(section, option) def handle_command(args): command = args[1] configfile = args[2] section = args[3] option = args[4] configuration = ConfigParser.RawConfigParser() configuration.read(configfile) if command == 'set': value = args[5] add_section(configuration, section) set_option(configfile, configuration, section, option, value) if command == 'get': get_option(configuration, section, option) return 0 if __name__ == "__main__": sys.exit(handle_command(sys.argv)) """) run("chmod 0755 %s" % constrictor_bin) def screen(self): screenrc_string = "%s.%s" % (env.host_string, self._metadata.config["domain"]) cuisine.package_ensure("screen") run(""" mkdir -pv /var/run/screen chmod 0755 /usr/bin/screen chmod 0777 /var/run/screen """) cuisine.file_write("/root/.screenrc", """ hardstatus alwayslastline hardstatus string '%%{= kG} %s [%%= %%{= kw}%%?%%-Lw%%?%%{r}[%%{W}%%n*%%f %%t%%?{%%u}%%?%%{r}]%%{w}%%?%%+Lw%%?%%?%%= %%{g}] %%{W}%%{g}%%{.w} screen %%{.c} [%%H]' """ % screenrc_string) @classmethod def login_stuff(cls): run(""" chmod 0755 /usr/bin/sudo chmod u+s /usr/bin/sudo """) @classmethod def apt_get_update(cls): puts(yellow("updating repositories, this may take a long time.")) run(""" # # Round 1: try to apt-get update without purging the cache # apt-get update 1>/dev/null # # Round 2: clean cache and update again # if [[ ! "${?}" == "0" ]]; then rm -rf /var/lib/apt/lists/* rm -f /etc/apt/apt.conf sync apt-get update 2>&1 fi """) def common_packages(self): cuisine.package_ensure(self._metadata.config["common_packages"]) def rsyslog(self): cuisine.package_ensure("rsyslog") controller_name = self._metadata.roles["openstack_controller"][0] controller_ip_suffix = self._metadata.config["idx"][controller_name] controller_ip = "%s.%s" % (self._metadata.config["vpn_base"], controller_ip_suffix) if env.host_string <> controller_name: cuisine.file_write("/etc/rsyslog.conf", """ $KLogPermitNonKernelFacility on $ActionFileDefaultTemplate RSYSLOG_TraditionalFileFormat $RepeatedMsgReduction on $FileOwner syslog $FileGroup adm $FileCreateMode 0640 $DirCreateMode 0755 $Umask 0022 $PrivDropToUser syslog $PrivDropToGroup syslog $WorkDirectory /var/spool/rsyslog $IncludeConfig /etc/rsyslog.d/*.conf $ModLoad imuxsock $ModLoad imklog *.* @%s:514 *.* @@%s:514 """ % (controller_ip, controller_ip)) else: cuisine.file_write("/etc/rsyslog.conf", """ $ModLoad imuxsock # provides support for local system logging $ModLoad imklog # provides kernel logging support $KLogPermitNonKernelFacility on $ActionFileDefaultTemplate RSYSLOG_TraditionalFileFormat $RepeatedMsgReduction on $FileOwner syslog $FileGroup adm $FileCreateMode 0640 $DirCreateMode 0755 $Umask 0022 $PrivDropToUser syslog $PrivDropToGroup syslog $WorkDirectory /var/spool/rsyslog $IncludeConfig /etc/rsyslog.d/*.conf $ModLoad imudp $UDPServerRun 514 $template FILENAME,"/var/log/%fromhost-ip%/syslog.log" *.* ?FILENAME """) run("service rsyslog restart") run("logger ping") def rp_filter(self): # # async routing traffic floating from neutron metadata/dhcp midonet agent to hypervisors and gateways # if 'physical_midonet_gateway' in self._metadata.roles or 'physical_openstack_compute' in self._metadata.roles: if env.host_string not in self._metadata.containers: run(""" for RP in /proc/sys/net/ipv4/conf/*/rp_filter; do echo 0 > "${RP}" done """) def cloud_repository(self): run("rm -rf 
/etc/apt/sources.list.d/cloudarchive-*") cuisine.package_ensure(["python-software-properties", "software-properties-common", "ubuntu-cloud-keyring"]) self.dist_upgrade() if self._metadata.config["container_os_release_codename"] == "precise": if self._metadata.config["openstack_release"] in ["icehouse", "juno"]: run("add-apt-repository --yes cloud-archive:%s" % self._metadata.config["openstack_release"]) if self._metadata.config["container_os_release_codename"] == "trusty": if self._metadata.config["openstack_release"] in ["juno", "kilo"]: run("add-apt-repository --yes cloud-archive:%s" % self._metadata.config["openstack_release"]) run(""" OPENSTACK_RELEASE="%s" APT_CACHER="%s" SOURCES_LIST="/etc/apt/sources.list.d/cloudarchive-${OPENSTACK_RELEASE}.list" test -f "${SOURCES_LIST}" && \ sed -i 's,http://ubuntu-cloud.archive.canonical.com,'"${APT_CACHER}"'/ubuntu-cloud.archive.canonical.com,g;' "${SOURCES_LIST}" exit 0 """ % ( self._metadata.config["openstack_release"], self._metadata.config["apt-cacher"] )) self.dist_upgrade() @classmethod def dist_upgrade(cls): run(""" export DEBIAN_FRONTEND=noninteractive debconf-set-selections <<EOF grub grub/update_grub_changeprompt_threeway select install_new grub-legacy-ec2 grub/update_grub_changeprompt_threeway select install_new EOF yes | dpkg --configure -a apt-get -y -u --force-yes install apt-get -y -u --force-yes dist-upgrade 1>/dev/null """) run("apt-get clean") run(""" export DEBIAN_FRONTEND=noninteractive apt-get -y autoremove """) def ntp(self): if env.host_string not in self._metadata.containers: cuisine.package_ensure("ntpdate") cuisine.package_ensure("ntp") run(""" /etc/init.d/ntp stop || true ln -sfv "/usr/share/zoneinfo/%s" /etc/localtime ntpdate zeit.fu-berlin.de || true /etc/init.d/ntp start || true """ % self._metadata.config["timezone"])
midonet/orizuru
lib/orizuru/operations.py
Python
apache-2.0
17,776
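The embedded "constrictor" helper in operations.py above is a tiny get/set CLI around Python 2's ConfigParser. A rough Python 3 equivalent of that pattern is sketched below; it is illustrative only and not part of the midonet/orizuru repository.

# Illustrative Python 3 sketch of the constrictor get/set pattern (assumption:
# not part of the original repository, which generates a Python 2 script).
import configparser


def set_option(path, section, option, value):
    cfg = configparser.RawConfigParser()
    cfg.read(path)
    if section != 'DEFAULT' and not cfg.has_section(section):
        cfg.add_section(section)
    cfg.set(section, option, value)
    with open(path, 'w') as fh:
        cfg.write(fh)


def get_option(path, section, option):
    cfg = configparser.RawConfigParser()
    cfg.read(path)
    return cfg.get(section, option)


set_option('/tmp/constrictor_demo.ini', 'database', 'host', 'localhost')
print(get_option('/tmp/constrictor_demo.ini', 'database', 'host'))  # -> localhost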
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# AUTO-GENERATED FROM `$REPO_ROOT/templates/src/python/grpcio_health_checking/grpc_version.py.template`!!!

VERSION = '1.33.0.dev0'
donnadionne/grpc
src/python/grpcio_health_checking/grpc_version.py
Python
apache-2.0
710
import time import django from django import forms try: from django.forms.utils import ErrorDict except ImportError: from django.forms.util import ErrorDict from django.conf import settings from django.contrib.contenttypes.models import ContentType from django.utils.crypto import salted_hmac, constant_time_compare from django.utils.encoding import force_text from django.utils.text import get_text_list from django.utils import timezone from django.utils.translation import ungettext, ugettext, ugettext_lazy as _ from comments.models import Comment, ThreadedComment COMMENT_MAX_LENGTH = getattr(settings, 'COMMENT_MAX_LENGTH', 3000) DEFAULT_COMMENTS_TIMEOUT = getattr(settings, 'COMMENTS_TIMEOUT', (2 * 60 * 60)) # 2h class CommentSecurityForm(forms.Form): """ Handles the security aspects (anti-spoofing) for comment forms. """ content_type = forms.CharField(widget=forms.HiddenInput) object_pk = forms.CharField(widget=forms.HiddenInput) timestamp = forms.IntegerField(widget=forms.HiddenInput) security_hash = forms.CharField(min_length=40, max_length=40, widget=forms.HiddenInput) def __init__(self, target_object, data=None, initial=None, **kwargs): self.target_object = target_object if initial is None: initial = {} initial.update(self.generate_security_data()) super(CommentSecurityForm, self).__init__(data=data, initial=initial, **kwargs) def security_errors(self): """Return just those errors associated with security""" errors = ErrorDict() for f in ["honeypot", "timestamp", "security_hash"]: if f in self.errors: errors[f] = self.errors[f] return errors def clean_security_hash(self): """Check the security hash.""" security_hash_dict = { 'content_type': self.data.get("content_type", ""), 'object_pk': self.data.get("object_pk", ""), 'timestamp': self.data.get("timestamp", ""), } expected_hash = self.generate_security_hash(**security_hash_dict) actual_hash = self.cleaned_data["security_hash"] if not constant_time_compare(expected_hash, actual_hash): raise forms.ValidationError("Security hash check failed.") return actual_hash def clean_timestamp(self): """Make sure the timestamp isn't too far (default is > 2 hours) in the past.""" ts = self.cleaned_data["timestamp"] if time.time() - ts > DEFAULT_COMMENTS_TIMEOUT: raise forms.ValidationError("Timestamp check failed") return ts def generate_security_data(self): """Generate a dict of security data for "initial" data.""" timestamp = int(time.time()) security_dict = { 'content_type': str(self.target_object._meta), 'object_pk': str(self.target_object._get_pk_val()), 'timestamp': str(timestamp), 'security_hash': self.initial_security_hash(timestamp), } return security_dict def initial_security_hash(self, timestamp): """ Generate the initial security hash from self.content_object and a (unix) timestamp. """ initial_security_dict = { 'content_type': str(self.target_object._meta), 'object_pk': str(self.target_object._get_pk_val()), 'timestamp': str(timestamp), } return self.generate_security_hash(**initial_security_dict) def generate_security_hash(self, content_type, object_pk, timestamp): """ Generate a HMAC security hash from the provided info. """ info = (content_type, object_pk, timestamp) key_salt = "django.contrib.forms.CommentSecurityForm" value = "-".join(info) return salted_hmac(key_salt, value).hexdigest() class CommentDetailsForm(CommentSecurityForm): """ Handles the specific details of the comment (name, comment, etc.). 
""" name = forms.CharField(label=_("Name"), max_length=50, widget=forms.HiddenInput) email = forms.EmailField(label=_("Email address"), widget=forms.HiddenInput) url = forms.URLField(label=_("URL"), required=False, widget=forms.HiddenInput) comment = forms.CharField(label=_('Comment'), widget=forms.Textarea, max_length=COMMENT_MAX_LENGTH) def get_comment_object(self): """ Return a new (unsaved) comment object based on the information in this form. Assumes that the form is already validated and will throw a ValueError if not. Does not set any of the fields that would come from a Request object (i.e. ``user`` or ``ip_address``). """ if not self.is_valid(): raise ValueError("get_comment_object may only be called on valid forms") CommentModel = self.get_comment_model() new = CommentModel(**self.get_comment_create_data()) new = self.check_for_duplicate_comment(new) return new def get_comment_model(self): """ Get the comment model to create with this form. Subclasses in custom comment apps should override this, get_comment_create_data, and perhaps check_for_duplicate_comment to provide custom comment models. """ return Comment def get_comment_create_data(self): """ Returns the dict of data to be used to create a comment. Subclasses in custom comment apps that override get_comment_model can override this method to add extra fields onto a custom comment model. """ return dict( content_type=ContentType.objects.get_for_model(self.target_object), object_pk=force_text(self.target_object._get_pk_val()), user_name=self.cleaned_data["name"], user_email=self.cleaned_data["email"], user_url=self.cleaned_data["url"], comment=self.cleaned_data["comment"], submit_date=timezone.now(), site_id=settings.SITE_ID, is_public=True, is_removed=False, ) def check_for_duplicate_comment(self, new): """ Check that a submitted comment isn't a duplicate. This might be caused by someone posting a comment twice. If it is a dup, silently return the *previous* comment. """ possible_duplicates = self.get_comment_model()._default_manager.using( self.target_object._state.db ).filter( content_type=new.content_type, object_pk=new.object_pk, user_name=new.user_name, user_email=new.user_email, user_url=new.user_url, ) for old in possible_duplicates: if old.submit_date.date() == new.submit_date.date() and old.comment == new.comment: return old return new def clean_comment(self): """ If COMMENTS_ALLOW_PROFANITIES is False, check that the comment doesn't contain anything in PROFANITIES_LIST. """ comment = self.cleaned_data["comment"] if (not getattr(settings, 'COMMENTS_ALLOW_PROFANITIES', False) and getattr(settings, 'PROFANITIES_LIST', False)): bad_words = [w for w in settings.PROFANITIES_LIST if w in comment.lower()] if bad_words: raise forms.ValidationError(ungettext( "Watch your mouth! The word %s is not allowed here.", "Watch your mouth! 
The words %s are not allowed here.", len(bad_words)) % get_text_list( ['"%s%s%s"' % (i[0], '-' * (len(i) - 2), i[-1]) for i in bad_words], ugettext('and'))) return comment class CommentForm(CommentDetailsForm): honeypot = forms.CharField(required=False, label=_('If you enter anything in this field ' 'your comment will be treated as spam')) def clean_honeypot(self): """Check that nothing's been entered into the honeypot.""" value = self.cleaned_data["honeypot"] if value: raise forms.ValidationError(self.fields["honeypot"].label) return value class ThreadedCommentForm(CommentForm): title = forms.CharField(label=_('Title'), required=False, max_length=getattr(settings, 'COMMENTS_TITLE_MAX_LENGTH', 255), widget=forms.HiddenInput) parent = forms.IntegerField(required=False, widget=forms.HiddenInput) def __init__(self, target_object, parent=None, data=None, initial=None): if django.VERSION >= (1,7): # Using collections.OrderedDict from Python 2.7+ # This class does not have an insert method, have to replace it. from collections import OrderedDict keys = list(self.base_fields.keys()) keys.remove('title') keys.insert(keys.index('comment'), 'title') self.base_fields = OrderedDict((k, self.base_fields[k]) for k in keys) else: self.base_fields.insert( self.base_fields.keyOrder.index('comment'), 'title', self.base_fields.pop('title') ) self.parent = parent if initial is None: initial = {} initial.update({'parent': self.parent}) super(ThreadedCommentForm, self).__init__(target_object, data=data, initial=initial) def get_comment_model(self): return ThreadedComment def get_comment_create_data(self): d = super(ThreadedCommentForm, self).get_comment_create_data() d['parent_id'] = self.cleaned_data['parent'] d['title'] = self.cleaned_data['title'] return d
sheshkovsky/jaryan
comments/forms.py
Python
apache-2.0
9,802
from setuptools import setup, find_packages from codecs import open from os import path here = path.abspath(path.dirname(__file__)) # Get the version from version.py with open('humanname/version.py') as f: exec(f.read(), globals(), locals()) setup( name='humanname', version=__version__, description='Python bindings for the Rust crate `human_name`, a library for parsing and comparing human names', url='https://github.com/djudd/human-name-py', author='David Judd', author_email='[email protected]', license='Apache 2.0', # See https://pypi.python.org/pypi?%3Aaction=list_classifiers classifiers=[ # How mature is this project? Common values are # 3 - Alpha # 4 - Beta # 5 - Production/Stable 'Development Status :: 3 - Alpha', # Indicate who your project is intended for 'Intended Audience :: Developers', 'Topic :: Text Processing :: Linguistic', # Pick your license as you wish (should match "license" above) 'License :: OSI Approved :: Apache Software License', # Specify the Python versions you support here. In particular, ensure # that you indicate whether you support Python 2, Python 3 or both. 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.6', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', ], # What does your project relate to? keywords='human names, people', # You can just specify the packages manually here if your project is # simple. Or you can use find_packages(). packages=find_packages(exclude=['contrib', 'docs', 'tests']), # Alternatively, if you want to distribute just a my_module.py, uncomment # this: # py_modules=["my_module"], # List run-time dependencies here. These will be installed by pip when # your project is installed. For an analysis of "install_requires" vs pip's # requirements files see: # https://packaging.python.org/en/latest/requirements.html install_requires=[], # List additional groups of dependencies here (e.g. development # dependencies). You can install these using the following syntax, # for example: # $ pip install -e .[dev,test] extras_require={ 'dev': ['check-manifest'], 'test': ['coverage'], }, # If there are data files included in your packages that need to be # installed, specify them here. If using Python 2.6 or less, then these # have to be included in MANIFEST.in as well. package_data={ 'humanname': ['libhuman_name.dylib', 'libhuman_name.so'], }, )
djudd/human-name-py
setup.py
Python
apache-2.0
2,800
""" Configuration file for database connections """ class MySQLConfig: """configuration for MySQL""" username = 'root' password = 'root' host = 'localhost' database = 'ecosystem_mapping'
BIDS-projects/aggregator
bidsaggregator/utils/config.py
Python
apache-2.0
209
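A short usage sketch for the MySQLConfig class above (assumptions: the module is importable as bidsaggregator.utils.config, and a "mysql://user:password@host/database" style URL is the desired output; the helper below is illustrative and not part of the repository):

from bidsaggregator.utils.config import MySQLConfig  # assumes the package is on sys.path


def mysql_url(cfg=MySQLConfig):
    # Build a DSN-style connection URL from the class attributes.
    return "mysql://{0}:{1}@{2}/{3}".format(cfg.username, cfg.password, cfg.host, cfg.database)


print(mysql_url())  # -> mysql://root:root@localhost/ecosystem_mapping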
"""Chessboard: Describe an application once, deploy and manage it anywhere. Chessboard includes utilities for modeling application topologies and deploying/managing applications in a provider-agnostic way. """ import setuptools VERSION = '0.1.0' setuptools.setup( name='chessboard', version=VERSION, maintainer='Rackspace Hosting, Inc.', url='https://github.com/checkmate/chessboard', description='Describe an application once, deploy and manage it anywhere', platforms=['any'], packages=setuptools.find_packages( exclude=['chessboard.tests', 'chessboard.tests.*'] ), provides=['chessboard (%s)' % VERSION], license='Apache License 2.0', keywords=( 'application model topology deployment manage orchestration ' 'configuration automation checkmate' ), include_package_data=True, data_files=[('chessboard', ['chessboard/schema_docs.yaml'])], classifiers=( 'Development Status :: 1 - Planning', 'Intended Audience :: Developers', 'Intended Audience :: System Administrators', 'License :: OSI Approved :: Apache Software License', 'Operating System :: OS Independent', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3.4', 'Topic :: Software Development', 'Topic :: System :: Systems Administration', ), )
larsbutler/chessboard
setup.py
Python
apache-2.0
1,395
# Copyright 2011 Tsutomu Uchino # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. def create_popupmenu(ctx, items): """ create popup menu from items [ [id, pos, type, 'label', "command", acc_key], [] ] """ try: menu = ctx.getServiceManager().createInstanceWithContext( "com.sun.star.awt.PopupMenu", ctx) for i in items: if i[0] is None or i[0] == -1: menu.insertSeparator(i[1]) else: menu.insertItem(i[0], i[3], i[2], i[1]) menu.setCommand(i[0], i[4]) if i[5] is not None: try: menu.setAcceleratorKeyEvent(i[0], i[5]) except: pass except Exception as e: print(e) return menu def get_current_sentence(target, mini): """ (\n| )... min ... """ lfstart = target.rfind("\n", 0, mini) lfend = target.find("\n", mini) if lfend < 0: lfend = len(target) spstart = target.rfind(" ", 0, mini) spend = target.find(" ", mini) if spend < 0: spend = len(target) if lfstart >= spstart: start = lfstart +1 if start < 0: start = 0 else: start = spstart +2 if spend < lfend: end = spend else: end = lfend return (start, end) def get_current_line(target, mini): """ # xxx\n...min....\nxxx """ start = target.rfind("\n", 0, mini) +1 if start < 0: start = 0 end = target.find("\n", mini) if end < 0: end = len(target) return (start, end) def get_first_word(line): pos = line.lstrip().find(" ") if pos < 0: pos = len(line) return line[0:pos]
hanya/MRI
pythonpath/mytools_Mri/ui/tools.py
Python
apache-2.0
2,328
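A short usage sketch for the cursor helpers in tools.py above (assumption: the extension's pythonpath directory is on sys.path so mytools_Mri.ui.tools is importable outside the office runtime; the sample text is made up):

from mytools_Mri.ui.tools import get_current_line, get_first_word

text = "first line\nsecond line of text\nthird line"
pos = text.index("line of")              # a cursor position inside the second line

start, end = get_current_line(text, pos)
print(text[start:end])                   # -> "second line of text"
print(get_first_word(text[start:end]))   # -> "second"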
# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # ''' Example main program for ESGF-RCMES integration. ''' # constant parameters DATA_DIRECTORY = "/tmp" from ocw.esgf.logon import logon from ocw.esgf.search import SearchClient from ocw.esgf.download import download def main(): '''Example driver program''' username = raw_input('Enter your ESGF Username:\n') password = raw_input('Enter your ESGF Password:\n') # step 1: obtain short-term certificate print 'Retrieving ESGF certificate...' # logon using client-side MyProxy libraries if logon(username, password): print "...done." # step 2: execute faceted search for files urls = main_obs4mips() #urls = main_cmip5() # step 3: download file(s) for i, url in enumerate(urls): if i>=1: break download(url, toDirectory=DATA_DIRECTORY) def main_cmip5(): ''' Example workflow to search for CMIP5 files ''' searchClient = SearchClient(searchServiceUrl="http://pcmdi9.llnl.gov/esg-search/search", distrib=False) print '\nAvailable projects=%s' % searchClient.getFacets('project') searchClient.setConstraint(project='CMIP5') print "Number of Datasets=%d" % searchClient.getNumberOfDatasets() print '\nAvailable models=%s' % searchClient.getFacets('model') searchClient.setConstraint(model='INM-CM4') print "Number of Datasets=%d" % searchClient.getNumberOfDatasets() print '\nAvailable experiments=%s' % searchClient.getFacets('experiment') searchClient.setConstraint(experiment='historical') print "Number of Datasets=%d" % searchClient.getNumberOfDatasets() print '\nAvailable time frequencies=%s' % searchClient.getFacets('time_frequency') searchClient.setConstraint(time_frequency='mon') print "Number of Datasets=%d" % searchClient.getNumberOfDatasets() print '\nAvailable CF standard names=%s' % searchClient.getFacets('cf_standard_name') searchClient.setConstraint(cf_standard_name='air_temperature') print "Number of Datasets=%d" % searchClient.getNumberOfDatasets() urls = searchClient.getFiles() return urls def main_obs4mips(): ''' Example workflow to search for obs4MIPs files. 
''' searchClient = SearchClient(distrib=False) # obs4MIPs print '\nAvailable projects=%s' % searchClient.getFacets('project') searchClient.setConstraint(project='obs4MIPs') print "Number of Datasets=%d" % searchClient.getNumberOfDatasets() print '\nAvailable variables=%s' % searchClient.getFacets('variable') searchClient.setConstraint(variable='hus') print "Number of Datasets=%d" % searchClient.getNumberOfDatasets() print '\nAvailable time frequencies=%s' % searchClient.getFacets('time_frequency') searchClient.setConstraint(time_frequency='mon') print "Number of Datasets=%d" % searchClient.getNumberOfDatasets() print '\nAvailable models=%s' % searchClient.getFacets('model') searchClient.setConstraint(model='Obs-MLS') print "Number of Datasets=%d" % searchClient.getNumberOfDatasets() urls = searchClient.getFiles() return urls if __name__ == '__main__': main()
MBoustani/climate
ocw/esgf/main.py
Python
apache-2.0
3,986
from datetime import datetime

from bson import ObjectId


class SettingsService(object):
    def __init__(self, db):
        self.db = db
        self.settings = db.settings_collection
        self._id = ObjectId("5bbbaee7bacf833c1203d7b3")

    def save(self, item):
        item['_id'] = self._id
        item['created_at'] = datetime.now()
        item['status'] = True
        return self.settings.save(item)

    def get(self):
        return self.settings.find_one({'_id': self._id})
cackharot/fbeazt
src/foodbeazt/service/SettingsService.py
Python
apache-2.0
491
# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from collections import Counter import airflow from airflow.ti_deps.deps.base_ti_dep import BaseTIDep from airflow.utils.session import provide_session from airflow.utils.state import State class TriggerRuleDep(BaseTIDep): """ Determines if a task's upstream tasks are in a state that allows a given task instance to run. """ NAME = "Trigger Rule" IGNOREABLE = True IS_TASK_DEP = True @staticmethod def _get_states_count_upstream_ti(ti, finished_tasks): """ This function returns the states of the upstream tis for a specific ti in order to determine whether this ti can run in this iteration :param ti: the ti that we want to calculate deps for :type ti: airflow.models.TaskInstance :param finished_tasks: all the finished tasks of the dag_run :type finished_tasks: list[airflow.models.TaskInstance] """ counter = Counter(task.state for task in finished_tasks if task.task_id in ti.task.upstream_task_ids) return counter.get(State.SUCCESS, 0), counter.get(State.SKIPPED, 0), counter.get(State.FAILED, 0), \ counter.get(State.UPSTREAM_FAILED, 0), sum(counter.values()) @provide_session def _get_dep_statuses(self, ti, session, dep_context): TR = airflow.utils.trigger_rule.TriggerRule # Checking that all upstream dependencies have succeeded if not ti.task.upstream_list: yield self._passing_status( reason="The task instance did not have any upstream tasks.") return if ti.task.trigger_rule == TR.DUMMY: yield self._passing_status(reason="The task had a dummy trigger rule set.") return # see if the task name is in the task upstream for our task successes, skipped, failed, upstream_failed, done = self._get_states_count_upstream_ti( ti=ti, finished_tasks=dep_context.ensure_finished_tasks(ti.task.dag, ti.execution_date, session)) yield from self._evaluate_trigger_rule( ti=ti, successes=successes, skipped=skipped, failed=failed, upstream_failed=upstream_failed, done=done, flag_upstream_failed=dep_context.flag_upstream_failed, session=session) @provide_session def _evaluate_trigger_rule( # pylint: disable=too-many-branches self, ti, successes, skipped, failed, upstream_failed, done, flag_upstream_failed, session): """ Yields a dependency status that indicate whether the given task instance's trigger rule was met. 
:param ti: the task instance to evaluate the trigger rule of :type ti: airflow.models.TaskInstance :param successes: Number of successful upstream tasks :type successes: int :param skipped: Number of skipped upstream tasks :type skipped: int :param failed: Number of failed upstream tasks :type failed: int :param upstream_failed: Number of upstream_failed upstream tasks :type upstream_failed: int :param done: Number of completed upstream tasks :type done: int :param flag_upstream_failed: This is a hack to generate the upstream_failed state creation while checking to see whether the task instance is runnable. It was the shortest path to add the feature :type flag_upstream_failed: bool :param session: database session :type session: sqlalchemy.orm.session.Session """ TR = airflow.utils.trigger_rule.TriggerRule task = ti.task upstream = len(task.upstream_task_ids) trigger_rule = task.trigger_rule upstream_done = done >= upstream upstream_tasks_state = { "total": upstream, "successes": successes, "skipped": skipped, "failed": failed, "upstream_failed": upstream_failed, "done": done } # TODO(aoen): Ideally each individual trigger rules would be its own class, but # this isn't very feasible at the moment since the database queries need to be # bundled together for efficiency. # handling instant state assignment based on trigger rules if flag_upstream_failed: if trigger_rule == TR.ALL_SUCCESS: if upstream_failed or failed: ti.set_state(State.UPSTREAM_FAILED, session) elif skipped: ti.set_state(State.SKIPPED, session) elif trigger_rule == TR.ALL_FAILED: if successes or skipped: ti.set_state(State.SKIPPED, session) elif trigger_rule == TR.ONE_SUCCESS: if upstream_done and not successes: ti.set_state(State.SKIPPED, session) elif trigger_rule == TR.ONE_FAILED: if upstream_done and not (failed or upstream_failed): ti.set_state(State.SKIPPED, session) elif trigger_rule == TR.NONE_FAILED: if upstream_failed or failed: ti.set_state(State.UPSTREAM_FAILED, session) elif trigger_rule == TR.NONE_FAILED_OR_SKIPPED: if upstream_failed or failed: ti.set_state(State.UPSTREAM_FAILED, session) elif skipped == upstream: ti.set_state(State.SKIPPED, session) elif trigger_rule == TR.NONE_SKIPPED: if skipped: ti.set_state(State.SKIPPED, session) if trigger_rule == TR.ONE_SUCCESS: if successes <= 0: yield self._failing_status( reason="Task's trigger rule '{0}' requires one upstream " "task success, but none were found. " "upstream_tasks_state={1}, upstream_task_ids={2}" .format(trigger_rule, upstream_tasks_state, task.upstream_task_ids)) elif trigger_rule == TR.ONE_FAILED: if not failed and not upstream_failed: yield self._failing_status( reason="Task's trigger rule '{0}' requires one upstream " "task failure, but none were found. " "upstream_tasks_state={1}, upstream_task_ids={2}" .format(trigger_rule, upstream_tasks_state, task.upstream_task_ids)) elif trigger_rule == TR.ALL_SUCCESS: num_failures = upstream - successes if num_failures > 0: yield self._failing_status( reason="Task's trigger rule '{0}' requires all upstream " "tasks to have succeeded, but found {1} non-success(es). " "upstream_tasks_state={2}, upstream_task_ids={3}" .format(trigger_rule, num_failures, upstream_tasks_state, task.upstream_task_ids)) elif trigger_rule == TR.ALL_FAILED: num_successes = upstream - failed - upstream_failed if num_successes > 0: yield self._failing_status( reason="Task's trigger rule '{0}' requires all upstream " "tasks to have failed, but found {1} non-failure(s). 
" "upstream_tasks_state={2}, upstream_task_ids={3}" .format(trigger_rule, num_successes, upstream_tasks_state, task.upstream_task_ids)) elif trigger_rule == TR.ALL_DONE: if not upstream_done: yield self._failing_status( reason="Task's trigger rule '{0}' requires all upstream " "tasks to have completed, but found {1} task(s) that " "weren't done. upstream_tasks_state={2}, " "upstream_task_ids={3}" .format(trigger_rule, upstream_done, upstream_tasks_state, task.upstream_task_ids)) elif trigger_rule == TR.NONE_FAILED: num_failures = upstream - successes - skipped if num_failures > 0: yield self._failing_status( reason="Task's trigger rule '{0}' requires all upstream " "tasks to have succeeded or been skipped, but found {1} non-success(es). " "upstream_tasks_state={2}, upstream_task_ids={3}" .format(trigger_rule, num_failures, upstream_tasks_state, task.upstream_task_ids)) elif trigger_rule == TR.NONE_FAILED_OR_SKIPPED: num_failures = upstream - successes - skipped if num_failures > 0: yield self._failing_status( reason="Task's trigger rule '{0}' requires all upstream " "tasks to have succeeded or been skipped, but found {1} non-success(es). " "upstream_tasks_state={2}, upstream_task_ids={3}" .format(trigger_rule, num_failures, upstream_tasks_state, task.upstream_task_ids)) elif trigger_rule == TR.NONE_SKIPPED: if not upstream_done or (skipped > 0): yield self._failing_status( reason="Task's trigger rule '{0}' requires all upstream " "tasks to not have been skipped, but found {1} task(s) skipped. " "upstream_tasks_state={2}, upstream_task_ids={3}" .format(trigger_rule, skipped, upstream_tasks_state, task.upstream_task_ids)) else: yield self._failing_status( reason="No strategy to evaluate trigger rule '{0}'.".format(trigger_rule))
wooga/airflow
airflow/ti_deps/deps/trigger_rule_dep.py
Python
apache-2.0
10,691
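TriggerRuleDep above evaluates the trigger_rule attribute that DAG authors set on their tasks. A minimal sketch of how such a rule is declared (assumption: a typical Airflow 1.10/2.x environment where DummyOperator is still available under this import path):

from datetime import datetime

from airflow import DAG
from airflow.operators.dummy_operator import DummyOperator
from airflow.utils.trigger_rule import TriggerRule

with DAG('trigger_rule_demo', start_date=datetime(2020, 1, 1), schedule_interval=None) as dag:
    branch_a = DummyOperator(task_id='branch_a')
    branch_b = DummyOperator(task_id='branch_b')
    # Run "join" as long as no upstream task failed, even if some were skipped.
    join = DummyOperator(task_id='join', trigger_rule=TriggerRule.NONE_FAILED)
    [branch_a, branch_b] >> join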
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import _log from openstack.baremetal.v1 import node as _node from openstack.baremetal_introspection.v1 import introspection as _introspect from openstack import exceptions from openstack import proxy _logger = _log.setup_logging('openstack') class Proxy(proxy.Proxy): def introspections(self, **query): """Retrieve a generator of introspection records. :param dict query: Optional query parameters to be sent to restrict the records to be returned. Available parameters include: * ``fields``: A list containing one or more fields to be returned in the response. This may lead to some performance gain because other fields of the resource are not refreshed. * ``limit``: Requests at most the specified number of items be returned from the query. * ``marker``: Specifies the ID of the last-seen introspection. Use the ``limit`` parameter to make an initial limited request and use the ID of the last-seen introspection from the response as the ``marker`` value in a subsequent limited request. * ``sort_dir``: Sorts the response by the requested sort direction. A valid value is ``asc`` (ascending) or ``desc`` (descending). Default is ``asc``. You can specify multiple pairs of sort key and sort direction query parameters. If you omit the sort direction in a pair, the API uses the natural sorting direction of the server attribute that is provided as the ``sort_key``. * ``sort_key``: Sorts the response by the this attribute value. Default is ``id``. You can specify multiple pairs of sort key and sort direction query parameters. If you omit the sort direction in a pair, the API uses the natural sorting direction of the server attribute that is provided as the ``sort_key``. :returns: A generator of :class:`~.introspection.Introspection` objects """ return _introspect.Introspection.list(self, **query) def start_introspection(self, node, manage_boot=None): """Create a new introspection from attributes. :param node: The value can be either the name or ID of a node or a :class:`~openstack.baremetal.v1.node.Node` instance. :param bool manage_boot: Whether to manage boot parameters for the node. Defaults to the server default (which is `True`). :returns: :class:`~.introspection.Introspection` instance. """ node = self._get_resource(_node.Node, node) res = _introspect.Introspection.new(connection=self._get_connection(), id=node.id) kwargs = {} if manage_boot is not None: kwargs['manage_boot'] = manage_boot return res.create(self, **kwargs) def get_introspection(self, introspection): """Get a specific introspection. :param introspection: The value can be the name or ID of an introspection (matching bare metal node name or ID) or an :class:`~.introspection.Introspection` instance. :returns: :class:`~.introspection.Introspection` instance. :raises: :class:`~openstack.exceptions.ResourceNotFound` when no introspection matching the name or ID could be found. 
""" return self._get(_introspect.Introspection, introspection) def get_introspection_data(self, introspection, processed=True): """Get introspection data. :param introspection: The value can be the name or ID of an introspection (matching bare metal node name or ID) or an :class:`~.introspection.Introspection` instance. :param processed: Whether to fetch the final processed data (the default) or the raw unprocessed data as received from the ramdisk. :returns: introspection data from the most recent successful run. :rtype: dict """ res = self._get_resource(_introspect.Introspection, introspection) return res.get_data(self, processed=processed) def abort_introspection(self, introspection, ignore_missing=True): """Abort an introspection. Note that the introspection is not aborted immediately, you may use `wait_for_introspection` with `ignore_error=True`. :param introspection: The value can be the name or ID of an introspection (matching bare metal node name or ID) or an :class:`~.introspection.Introspection` instance. :param bool ignore_missing: When set to ``False``, an exception :class:`~openstack.exceptions.ResourceNotFound` will be raised when the introspection could not be found. When set to ``True``, no exception will be raised when attempting to abort a non-existent introspection. :returns: nothing """ res = self._get_resource(_introspect.Introspection, introspection) try: res.abort(self) except exceptions.ResourceNotFound: if not ignore_missing: raise def wait_for_introspection(self, introspection, timeout=None, ignore_error=False): """Wait for the introspection to finish. :param introspection: The value can be the name or ID of an introspection (matching bare metal node name or ID) or an :class:`~.introspection.Introspection` instance. :param timeout: How much (in seconds) to wait for the introspection. The value of ``None`` (the default) means no client-side timeout. :param ignore_error: If ``True``, this call will raise an exception if the introspection reaches the ``error`` state. Otherwise the error state is considered successful and the call returns. :returns: :class:`~.introspection.Introspection` instance. :raises: :class:`~openstack.exceptions.ResourceFailure` if introspection fails and ``ignore_error`` is ``False``. :raises: :class:`~openstack.exceptions.ResourceTimeout` on timeout. """ res = self._get_resource(_introspect.Introspection, introspection) return res.wait(self, timeout=timeout, ignore_error=ignore_error)
stackforge/python-openstacksdk
openstack/baremetal_introspection/v1/_proxy.py
Python
apache-2.0
7,038
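A minimal usage sketch for the introspection proxy above (assumptions: openstacksdk is installed, a cloud named "mycloud" is defined in clouds.yaml, and "node-1" is a registered bare metal node; these names are placeholders):

import openstack

conn = openstack.connect(cloud='mycloud')

# Start introspection on a node and wait for it to finish.
introspection = conn.baremetal_introspection.start_introspection('node-1')
conn.baremetal_introspection.wait_for_introspection(introspection, timeout=1800)

# Fetch the processed introspection data as a dict.
data = conn.baremetal_introspection.get_introspection_data(introspection)
print(sorted(data.keys()))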
## # Copyright (c) 2009-2015 Apple Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ## from twistedcaldav.database import AbstractADBAPIDatabase, ADBAPISqliteMixin import twistedcaldav.test.util from twisted.internet.defer import inlineCallbacks import os import time class Database (twistedcaldav.test.util.TestCase): """ Test abstract SQL DB class """ class TestDB(ADBAPISqliteMixin, AbstractADBAPIDatabase): def __init__(self, path, persistent=False, version="1"): self.version = version self.dbpath = path super(Database.TestDB, self).__init__("sqlite", "sqlite3", (path,), persistent, cp_min=3, cp_max=3) def _db_version(self): """ @return: the schema version assigned to this index. """ return self.version def _db_type(self): """ @return: the collection type assigned to this index. """ return "TESTTYPE" def _db_init_data_tables(self): """ Initialise the underlying database tables. @param q: a database cursor to use. """ # # TESTTYPE table # return self._db_execute( """ create table TESTTYPE ( KEY text unique, VALUE text ) """ ) def _db_remove_data_tables(self): return self._db_execute("drop table TESTTYPE") class TestDBRecreateUpgrade(TestDB): class RecreateDBException(Exception): pass class UpgradeDBException(Exception): pass def __init__(self, path, persistent=False): super(Database.TestDBRecreateUpgrade, self).__init__(path, persistent, version="2") def _db_recreate(self): raise self.RecreateDBException() class TestDBCreateIndexOnUpgrade(TestDB): def __init__(self, path, persistent=False): super(Database.TestDBCreateIndexOnUpgrade, self).__init__(path, persistent, version="2") def _db_upgrade_data_tables(self, old_version): return self._db_execute( """ create index TESTING on TESTTYPE (VALUE) """ ) class TestDBPauseInInit(TestDB): def _db_init(self): time.sleep(1) super(Database.TestDBPauseInInit, self)._db_init() @inlineCallbacks def inlineCallbackRaises(self, exc, f, *args, **kwargs): try: yield f(*args, **kwargs) except exc: pass except Exception, e: self.fail("Wrong exception raised: %s" % (e,)) else: self.fail("%s not raised" % (exc,)) @inlineCallbacks def test_connect(self): """ Connect to database and create table """ db = Database.TestDB(self.mktemp()) self.assertFalse(db.initialized) yield db.open() self.assertTrue(db.initialized) @inlineCallbacks def test_connectFailure(self): """ Failure to connect cleans up the pool """ db = Database.TestDB(self.mktemp()) # Make _db_init fail db._db_init = lambda : 1 / 0 self.assertFalse(db.initialized) try: yield db.open() except: pass self.assertFalse(db.initialized) self.assertEquals(db.pool, None) @inlineCallbacks def test_readwrite(self): """ Add a record, search for it """ db = Database.TestDB(self.mktemp()) yield db.execute("INSERT into TESTTYPE (KEY, VALUE) values (:1, :2)", ("FOO", "BAR",)) items = (yield db.query("SELECT * from TESTTYPE")) self.assertEqual(items, (("FOO", "BAR"),)) items = (yield db.queryList("SELECT * from TESTTYPE")) self.assertEqual(items, ("FOO",)) @inlineCallbacks def test_close(self): """ Close database 
""" db = Database.TestDB(self.mktemp()) self.assertFalse(db.initialized) yield db.open() db.close() self.assertFalse(db.initialized) db.close() @inlineCallbacks def test_version_upgrade_nonpersistent(self): """ Connect to database and create table """ db_file = self.mktemp() db = Database.TestDB(db_file) yield db.open() yield db.execute("INSERT into TESTTYPE (KEY, VALUE) values (:1, :2)", ("FOO", "BAR",)) items = (yield db.query("SELECT * from TESTTYPE")) self.assertEqual(items, (("FOO", "BAR"),)) db.close() db = None db = Database.TestDBRecreateUpgrade(db_file) yield self.inlineCallbackRaises(Database.TestDBRecreateUpgrade.RecreateDBException, db.open) items = (yield db.query("SELECT * from TESTTYPE")) self.assertEqual(items, ()) @inlineCallbacks def test_version_upgrade_persistent(self): """ Connect to database and create table """ db_file = self.mktemp() db = Database.TestDB(db_file, persistent=True) yield db.open() yield db.execute("INSERT into TESTTYPE (KEY, VALUE) values (:1, :2)", ("FOO", "BAR",)) items = (yield db.query("SELECT * from TESTTYPE")) self.assertEqual(items, (("FOO", "BAR"),)) db.close() db = None db = Database.TestDBRecreateUpgrade(db_file, persistent=True) yield self.inlineCallbackRaises(NotImplementedError, db.open) self.assertTrue(os.path.exists(db_file)) db.close() db = None db = Database.TestDB(db_file, persistent=True) yield db.open() items = (yield db.query("SELECT * from TESTTYPE")) self.assertEqual(items, (("FOO", "BAR"),)) @inlineCallbacks def test_version_upgrade_persistent_add_index(self): """ Connect to database and create table """ db_file = self.mktemp() db = Database.TestDB(db_file, persistent=True) yield db.open() yield db.execute("INSERT into TESTTYPE (KEY, VALUE) values (:1, :2)", ("FOO", "BAR",)) items = (yield db.query("SELECT * from TESTTYPE")) self.assertEqual(items, (("FOO", "BAR"),)) db.close() db = None db = Database.TestDBCreateIndexOnUpgrade(db_file, persistent=True) yield db.open() items = (yield db.query("SELECT * from TESTTYPE")) self.assertEqual(items, (("FOO", "BAR"),))
red-hood/calendarserver
twistedcaldav/test/test_database.py
Python
apache-2.0
7,176
''' This module provides some handle-related functions that are needed across various modules of the pyhandle library. ''' from __future__ import absolute_import import base64 from future.standard_library import install_aliases install_aliases() from urllib.parse import quote from . import handleexceptions from . import util def remove_index_from_handle(handle_with_index): ''' Returns index and handle separately, in a tuple. :handle_with_index: The handle string with an index (e.g. 500:prefix/suffix) :return: index and handle as a tuple, where index is integer. ''' split = handle_with_index.split(':') if len(split) == 2: split[0] = int(split[0]) return split elif len(split) == 1: return (None, handle_with_index) elif len(split) > 2: raise handleexceptions.HandleSyntaxError( msg='Too many colons', handle=handle_with_index, expected_syntax='index:prefix/suffix') def check_handle_syntax(string): ''' Checks the syntax of a handle without an index (are prefix and suffix there, are there too many slashes?). :string: The handle without index, as string prefix/suffix. :raise: :exc:`~pyhandle.handleexceptions.handleexceptions.HandleSyntaxError` :return: True. If it's not ok, exceptions are raised. ''' expected = 'prefix/suffix' try: arr = string.split('/') except AttributeError: raise handleexceptions.HandleSyntaxError(msg='The provided handle is None', expected_syntax=expected) if len(arr) < 2: msg = 'No slash' raise handleexceptions.HandleSyntaxError(msg=msg, handle=string, expected_syntax=expected) if len(arr[0]) == 0: msg = 'Empty prefix' raise handleexceptions.HandleSyntaxError(msg=msg, handle=string, expected_syntax=expected) if len(arr[1]) == 0: msg = 'Empty suffix' raise handleexceptions.HandleSyntaxError(msg=msg, handle=string, expected_syntax=expected) if ':' in string: check_handle_syntax_with_index(string, base_already_checked=True) return True def check_handle_syntax_with_index(string, base_already_checked=False): ''' Checks the syntax of a handle with an index (is index there, is it an integer?), and of the handle itself. :string: The handle with index, as string index:prefix/suffix. :raise: :exc:`~pyhandle.handleexceptions.handleexceptions.HandleSyntaxError` :return: True. If it's not ok, exceptions are raised. ''' expected = 'index:prefix/suffix' try: arr = string.split(':') except AttributeError: raise handleexceptions.HandleSyntaxError(msg='The provided handle is None.', expected_syntax=expected) if len(arr) > 2: msg = 'Too many colons' raise handleexceptions.HandleSyntaxError(msg=msg, handle=string, expected_syntax=expected) elif len(arr) < 2: msg = 'No colon' raise handleexceptions.HandleSyntaxError(msg=msg, handle=string, expected_syntax=expected) try: int(arr[0]) except ValueError: msg = 'Index is not an integer' raise handleexceptions.HandleSyntaxError(msg=msg, handle=string, expected_syntax=expected) if not base_already_checked: check_handle_syntax(string) return True def create_authentication_string(username, password): ''' Creates an authentication string from the username and password. :username: Username. :password: Password. :return: The encoded string. 
''' username_utf8 = username.encode('utf-8') userpw_utf8 = password.encode('utf-8').decode('utf-8') username_perc = quote(username_utf8) authinfostring = username_perc + ':' + userpw_utf8 authinfostring_base64 = base64.b64encode(authinfostring.encode('utf-8')).decode('utf-8') return authinfostring_base64 def make_request_log_message(**args): ''' Creates a string containing all relevant information about a request made to the Handle System, for logging purposes. :handle: The handle that the request is about. :url: The url the request is sent to. :headers: The headers sent along with the request. :verify: Boolean parameter passed to the requests module (https verification). :resp: The request's response. :op: The library operation during which the request was sent. :payload: Optional. The payload sent with the request. :return: A formatted string. ''' mandatory_args = ['op', 'handle', 'url', 'headers', 'verify', 'resp'] optional_args = ['payload'] util.check_presence_of_mandatory_args(args, mandatory_args) util.add_missing_optional_args_with_value_none(args, optional_args) space = '\n ' message = '' message += '\n'+args['op']+' '+args['handle'] message += space+'URL: '+args['url'] message += space+'HEADERS: '+str(args['headers']) message += space+'VERIFY: '+str(args['verify']) if 'payload' in args.keys(): message += space+'PAYLOAD:'+space+str(args['payload']) message += space+'RESPONSECODE: '+str(args['resp'].status_code) message += space+'RESPONSE:'+space+str(args['resp'].content) return message
EUDAT-B2SAFE/PYHANDLE
pyhandle/utilhandle.py
Python
apache-2.0
5,268
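A short usage sketch for the helpers in utilhandle.py above (assumptions: the pyhandle package is installed; the handle and credential values are made up for illustration):

from pyhandle.utilhandle import (check_handle_syntax,
                                 check_handle_syntax_with_index,
                                 create_authentication_string,
                                 remove_index_from_handle)

check_handle_syntax('21.T12995/testhandle')                  # True, or raises HandleSyntaxError
check_handle_syntax_with_index('300:21.T12995/testhandle')   # also validates the index part

index, handle = remove_index_from_handle('300:21.T12995/testhandle')
print(index, handle)                                         # -> 300 21.T12995/testhandle

print(create_authentication_string('300:21.T12995/admin', 'mypassword'))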
import sys


def test(suspect):
    """Trial-division primality test."""
    if suspect < 2:
        # 0 and 1 are not prime
        return False
    for i in range(2, suspect):
        if suspect % i == 0:  # use ==, not "is", for numeric comparison
            return False
    return True


if len(sys.argv) == 1:
    print('please call me like "python prime.py range"')
    sys.exit(0)

primes = []
for i in range(int(sys.argv[1])):
    if test(i):
        primes.append(i)

print(len(primes))
tomecho/PrimeSpeedTest
prime.py
Python
apache-2.0
343
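An optional refinement of the trial-division test in prime.py above, stopping at the integer square root (illustrative only, not part of the tomecho/PrimeSpeedTest repository):

def is_prime(n):
    # Only divisors up to sqrt(n) need to be checked.
    if n < 2:
        return False
    for i in range(2, int(n ** 0.5) + 1):
        if n % i == 0:
            return False
    return True


print(sum(1 for i in range(100) if is_prime(i)))  # -> 25 primes below 100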
from __future__ import absolute_import, division, print_function, unicode_literals

from amaascore.market_data.eod_price import EODPrice
from amaascore.market_data.fx_rate import FXRate
from amaascore.market_data.curve import Curve
from amaascore.market_data.corporate_action import CorporateAction


def json_to_eod_price(json_eod_price):
    eod_price = EODPrice(**json_eod_price)
    return eod_price


def json_to_fx_rate(json_fx_rate):
    fx_rate = FXRate(**json_fx_rate)
    return fx_rate


def json_to_curve(json_curve):
    curve = Curve(**json_curve)
    return curve


def json_to_corporate_action(json_corporate_action):
    corporate_action = CorporateAction(**json_corporate_action)
    return corporate_action
amaas-fintech/amaas-core-sdk-python
amaascore/market_data/utils.py
Python
apache-2.0
723
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime from nova.compute import instance_list from nova import context from nova import db from nova import exception from nova import objects from nova import test from nova.tests import uuidsentinel as uuids class InstanceListTestCase(test.TestCase): NUMBER_OF_CELLS = 3 def setUp(self): super(InstanceListTestCase, self).setUp() self.context = context.RequestContext('fake', 'fake') self.num_instances = 3 self.instances = [] start = datetime.datetime(1985, 10, 25, 1, 21, 0) dt = start spread = datetime.timedelta(minutes=10) self.cells = objects.CellMappingList.get_all(self.context) # Create three instances in each of the real cells. Leave the # first cell empty to make sure we don't break with an empty # one. for cell in self.cells[1:]: for i in range(0, self.num_instances): with context.target_cell(self.context, cell) as cctx: inst = objects.Instance( context=cctx, project_id=self.context.project_id, user_id=self.context.user_id, created_at=start, launched_at=dt, instance_type_id=i, hostname='%s-inst%i' % (cell.name, i)) inst.create() if i % 2 == 0: # Make some faults for this instance for n in range(0, i + 1): msg = 'fault%i-%s' % (n, inst.hostname) f = objects.InstanceFault(context=cctx, instance_uuid=inst.uuid, code=i, message=msg, details='fake', host='fakehost') f.create() self.instances.append(inst) im = objects.InstanceMapping(context=self.context, project_id=inst.project_id, user_id=inst.user_id, instance_uuid=inst.uuid, cell_mapping=cell) im.create() dt += spread def test_get_sorted(self): filters = {} limit = None marker = None columns = [] sort_keys = ['uuid'] sort_dirs = ['asc'] insts = instance_list.get_instances_sorted(self.context, filters, limit, marker, columns, sort_keys, sort_dirs) uuids = [inst['uuid'] for inst in insts] self.assertEqual(sorted(uuids), uuids) self.assertEqual(len(self.instances), len(uuids)) def test_get_sorted_descending(self): filters = {} limit = None marker = None columns = [] sort_keys = ['uuid'] sort_dirs = ['desc'] insts = instance_list.get_instances_sorted(self.context, filters, limit, marker, columns, sort_keys, sort_dirs) uuids = [inst['uuid'] for inst in insts] self.assertEqual(list(reversed(sorted(uuids))), uuids) self.assertEqual(len(self.instances), len(uuids)) def test_get_sorted_with_filter(self): filters = {'instance_type_id': 1} limit = None marker = None columns = [] sort_keys = ['uuid'] sort_dirs = ['asc'] insts = instance_list.get_instances_sorted(self.context, filters, limit, marker, columns, sort_keys, sort_dirs) uuids = [inst['uuid'] for inst in insts] expected = [inst['uuid'] for inst in self.instances if inst['instance_type_id'] == 1] self.assertEqual(list(sorted(expected)), uuids) def test_get_sorted_by_defaults(self): filters = {} limit = None marker = None columns = [] sort_keys = None sort_dirs = None insts = instance_list.get_instances_sorted(self.context, filters, limit, marker, columns, sort_keys, sort_dirs) uuids = set([inst['uuid'] for inst in insts]) expected = set([inst['uuid'] 
for inst in self.instances]) self.assertEqual(expected, uuids) def test_get_sorted_with_limit(self): insts = instance_list.get_instances_sorted(self.context, {}, 5, None, [], ['uuid'], ['asc']) uuids = [inst['uuid'] for inst in insts] had_uuids = [inst.uuid for inst in self.instances] self.assertEqual(sorted(had_uuids)[:5], uuids) self.assertEqual(5, len(uuids)) def test_get_sorted_with_large_limit(self): insts = instance_list.get_instances_sorted(self.context, {}, 5000, None, [], ['uuid'], ['asc']) uuids = [inst['uuid'] for inst in insts] self.assertEqual(sorted(uuids), uuids) self.assertEqual(len(self.instances), len(uuids)) def _test_get_sorted_with_limit_marker(self, sort_by, pages=2, pagesize=2, sort_dir='asc'): """Get multiple pages by a sort key and validate the results. This requests $pages of $pagesize, followed by a final page with no limit, and a final-final page which should be empty. It validates that we got a consistent set of results no patter where the page boundary is, that we got all the results after the unlimited query, and that the final page comes back empty when we use the last instance as a marker. """ insts = [] page = 0 while True: if page >= pages: # We've requested the specified number of limited (by pagesize) # pages, so request a penultimate page with no limit which # should always finish out the result. limit = None else: # Request a limited-size page for the first $pages pages. limit = pagesize if insts: # If we're not on the first page, use the last instance we # received as the marker marker = insts[-1]['uuid'] else: # No marker for the first page marker = None batch = list( instance_list.get_instances_sorted(self.context, {}, limit, marker, [], [sort_by], [sort_dir])) if not batch: # This should only happen when we've pulled the last empty # page because we used the marker of the last instance. If # we end up with a non-deterministic ordering, we'd loop # forever. break insts.extend(batch) page += 1 if page > len(self.instances) * 2: # Do this sanity check in case we introduce (or find) another # repeating page bug like #1721791. Without this we loop # until timeout, which is less obvious. raise Exception('Infinite paging loop') # We should have requested exactly (or one more unlimited) pages self.assertIn(page, (pages, pages + 1)) # Make sure the full set matches what we know to be true found = [x[sort_by] for x in insts] had = [x[sort_by] for x in self.instances] if sort_by in ('launched_at', 'created_at'): # We're comparing objects and database entries, so we need to # squash the tzinfo of the object ones so we can compare had = [x.replace(tzinfo=None) for x in had] self.assertEqual(len(had), len(found)) if sort_dir == 'asc': self.assertEqual(sorted(had), found) else: self.assertEqual(list(reversed(sorted(had))), found) def test_get_sorted_with_limit_marker_stable(self): """Test sorted by hostname. This will be a stable sort that won't change on each run. """ self._test_get_sorted_with_limit_marker(sort_by='hostname') def test_get_sorted_with_limit_marker_stable_reverse(self): """Test sorted by hostname. This will be a stable sort that won't change on each run. """ self._test_get_sorted_with_limit_marker(sort_by='hostname', sort_dir='desc') def test_get_sorted_with_limit_marker_stable_different_pages(self): """Test sorted by hostname with different page sizes. Just do the above with page seams in different places. 
""" self._test_get_sorted_with_limit_marker(sort_by='hostname', pages=3, pagesize=1) def test_get_sorted_with_limit_marker_stable_different_pages_reverse(self): """Test sorted by hostname with different page sizes. Just do the above with page seams in different places. """ self._test_get_sorted_with_limit_marker(sort_by='hostname', pages=3, pagesize=1, sort_dir='desc') def test_get_sorted_with_limit_marker_random(self): """Test sorted by uuid. This will not be stable and the actual ordering will depend on uuid generation and thus be different on each run. Do this in addition to the stable sort above to keep us honest. """ self._test_get_sorted_with_limit_marker(sort_by='uuid') def test_get_sorted_with_limit_marker_random_different_pages(self): """Test sorted by uuid with different page sizes. Just do the above with page seams in different places. """ self._test_get_sorted_with_limit_marker(sort_by='uuid', pages=3, pagesize=2) def test_get_sorted_with_limit_marker_datetime(self): """Test sorted by launched_at. This tests that we can do all of this, but with datetime fields. """ self._test_get_sorted_with_limit_marker(sort_by='launched_at') def test_get_sorted_with_limit_marker_datetime_same(self): """Test sorted by created_at. This tests that we can do all of this, but with datetime fields that are identical. """ self._test_get_sorted_with_limit_marker(sort_by='created_at') def test_get_sorted_with_deleted_marker(self): marker = self.instances[1]['uuid'] before = list( instance_list.get_instances_sorted(self.context, {}, None, marker, [], None, None)) db.instance_destroy(self.context, marker) after = list( instance_list.get_instances_sorted(self.context, {}, None, marker, [], None, None)) self.assertEqual(before, after) def test_get_sorted_with_invalid_marker(self): self.assertRaises(exception.MarkerNotFound, list, instance_list.get_instances_sorted( self.context, {}, None, 'not-a-marker', [], None, None)) def test_get_sorted_with_purged_instance(self): """Test that we handle a mapped but purged instance.""" im = objects.InstanceMapping(self.context, instance_uuid=uuids.missing, project_id=self.context.project_id, user_id=self.context.user_id, cell=self.cells[0]) im.create() self.assertRaises(exception.MarkerNotFound, list, instance_list.get_instances_sorted( self.context, {}, None, uuids.missing, [], None, None)) def _test_get_paginated_with_filter(self, filters): found_uuids = [] marker = None while True: # Query for those instances, sorted by a different key in # pages of one until we've consumed them all batch = list( instance_list.get_instances_sorted(self.context, filters, 1, marker, [], ['hostname'], ['asc'])) if not batch: break found_uuids.extend([x['uuid'] for x in batch]) marker = found_uuids[-1] return found_uuids def test_get_paginated_with_uuid_filter(self): """Test getting pages with uuid filters. This runs through the results of a uuid-filtered query in pages of length one to ensure that we land on markers that are filtered out of the query and are not accidentally returned. """ # Pick a set of the instances by uuid, when sorted by uuid all_uuids = [x['uuid'] for x in self.instances] filters = {'uuid': sorted(all_uuids)[:7]} found_uuids = self._test_get_paginated_with_filter(filters) # Make sure we found all (and only) the instances we asked for self.assertEqual(set(found_uuids), set(filters['uuid'])) self.assertEqual(7, len(found_uuids)) def test_get_paginated_with_other_filter(self): """Test getting pages with another filter. 
This runs through the results of a filtered query in pages of length one to ensure we land on markers that are filtered out of the query and are not accidentally returned. """ expected = [inst['uuid'] for inst in self.instances if inst['instance_type_id'] == 1] filters = {'instance_type_id': 1} found_uuids = self._test_get_paginated_with_filter(filters) self.assertEqual(set(expected), set(found_uuids)) def test_get_paginated_with_uuid_and_other_filter(self): """Test getting pages with a uuid and other type of filter. We do this to make sure that we still find (but exclude) the marker even if one of the other filters would have included it. """ # Pick a set of the instances by uuid, when sorted by uuid all_uuids = [x['uuid'] for x in self.instances] filters = {'uuid': sorted(all_uuids)[:7], 'user_id': 'fake'} found_uuids = self._test_get_paginated_with_filter(filters) # Make sure we found all (and only) the instances we asked for self.assertEqual(set(found_uuids), set(filters['uuid'])) self.assertEqual(7, len(found_uuids)) def test_get_sorted_with_faults(self): """Make sure we get faults when we ask for them.""" insts = list( instance_list.get_instances_sorted(self.context, {}, None, None, ['fault'], ['hostname'], ['asc'])) # Two of the instances in each cell have faults (0th and 2nd) expected_faults = self.NUMBER_OF_CELLS * 2 expected_no_fault = len(self.instances) - expected_faults faults = [inst['fault'] for inst in insts] self.assertEqual(expected_no_fault, faults.count(None)) def test_get_sorted_paginated_with_faults(self): """Get pages of one with faults. Do this specifically so we make sure we land on faulted marker instances to ensure we don't omit theirs. """ insts = [] while True: if insts: marker = insts[-1]['uuid'] else: marker = None batch = list( instance_list.get_instances_sorted(self.context, {}, 1, marker, ['fault'], ['hostname'], ['asc'])) if not batch: break insts.extend(batch) self.assertEqual(len(self.instances), len(insts)) # Two of the instances in each cell have faults (0th and 2nd) expected_faults = self.NUMBER_OF_CELLS * 2 expected_no_fault = len(self.instances) - expected_faults faults = [inst['fault'] for inst in insts] self.assertEqual(expected_no_fault, faults.count(None)) def test_instance_list_minimal_cells(self): """Get a list of instances with a subset of cell mappings.""" last_cell = self.cells[-1] with context.target_cell(self.context, last_cell) as cctxt: last_cell_instances = db.instance_get_all(cctxt) last_cell_uuids = [inst['uuid'] for inst in last_cell_instances] instances = list( instance_list.get_instances_sorted(self.context, {}, None, None, [], ['uuid'], ['asc'], cell_mappings=self.cells[:-1])) found_uuids = [inst['hostname'] for inst in instances] had_uuids = [inst['hostname'] for inst in self.instances if inst['uuid'] not in last_cell_uuids] self.assertEqual(sorted(had_uuids), sorted(found_uuids)) class TestInstanceListObjects(test.TestCase): def setUp(self): super(TestInstanceListObjects, self).setUp() self.context = context.RequestContext('fake', 'fake') self.num_instances = 3 self.instances = [] start = datetime.datetime(1985, 10, 25, 1, 21, 0) dt = start spread = datetime.timedelta(minutes=10) cells = objects.CellMappingList.get_all(self.context) # Create three instances in each of the real cells. 
Leave the # first cell empty to make sure we don't break with an empty # one for cell in cells[1:]: for i in range(0, self.num_instances): with context.target_cell(self.context, cell) as cctx: inst = objects.Instance( context=cctx, project_id=self.context.project_id, user_id=self.context.user_id, created_at=start, launched_at=dt, instance_type_id=i, hostname='%s-inst%i' % (cell.name, i)) inst.create() if i % 2 == 0: # Make some faults for this instance for n in range(0, i + 1): msg = 'fault%i-%s' % (n, inst.hostname) f = objects.InstanceFault(context=cctx, instance_uuid=inst.uuid, code=i, message=msg, details='fake', host='fakehost') f.create() self.instances.append(inst) im = objects.InstanceMapping(context=self.context, project_id=inst.project_id, user_id=inst.user_id, instance_uuid=inst.uuid, cell_mapping=cell) im.create() dt += spread def test_get_instance_objects_sorted(self): filters = {} limit = None marker = None expected_attrs = [] sort_keys = ['uuid'] sort_dirs = ['asc'] insts = instance_list.get_instance_objects_sorted( self.context, filters, limit, marker, expected_attrs, sort_keys, sort_dirs) found_uuids = [x.uuid for x in insts] had_uuids = sorted([x['uuid'] for x in self.instances]) self.assertEqual(had_uuids, found_uuids) # Make sure none of the instances have fault set self.assertEqual(0, len([inst for inst in insts if 'fault' in inst])) def test_get_instance_objects_sorted_with_fault(self): filters = {} limit = None marker = None expected_attrs = ['fault'] sort_keys = ['uuid'] sort_dirs = ['asc'] insts = instance_list.get_instance_objects_sorted( self.context, filters, limit, marker, expected_attrs, sort_keys, sort_dirs) found_uuids = [x.uuid for x in insts] had_uuids = sorted([x['uuid'] for x in self.instances]) self.assertEqual(had_uuids, found_uuids) # They should all have fault set, but only some have # actual faults self.assertEqual(2, len([inst for inst in insts if inst.fault])) def test_get_instance_objects_sorted_paged(self): """Query a full first page and ensure an empty second one. This uses created_at which is enforced to be the same across each instance by setUp(). This will help make sure we still have a stable ordering, even when we only claim to care about created_at. """ instp1 = instance_list.get_instance_objects_sorted( self.context, {}, None, None, [], ['created_at'], ['asc']) self.assertEqual(len(self.instances), len(instp1)) instp2 = instance_list.get_instance_objects_sorted( self.context, {}, None, instp1[-1]['uuid'], [], ['created_at'], ['asc']) self.assertEqual(0, len(instp2))
phenoxim/nova
nova/tests/functional/compute/test_instance_list.py
Python
apache-2.0
23,474
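A minimal standalone sketch of the marker-based paging loop that _test_get_sorted_with_limit_marker above exercises, reduced to plain Python over an in-memory list. fetch_page and paginate are hypothetical helpers invented for this illustration, not nova APIs; the real tests drive nova.compute.instance_list.get_instances_sorted instead.

def fetch_page(records, sort_by, limit, marker):
    """Return up to `limit` records sorted by `sort_by`, starting after `marker`."""
    # Sort with uuid as a tie-breaker so the ordering stays stable even when
    # the primary sort key repeats (the same reason the tests need a
    # deterministic secondary ordering).
    ordered = sorted(records, key=lambda r: (r[sort_by], r['uuid']))
    if marker is not None:
        # Skip everything up to and including the marker record.
        idx = next(i for i, r in enumerate(ordered) if r['uuid'] == marker)
        ordered = ordered[idx + 1:]
    return ordered if limit is None else ordered[:limit]


def paginate(records, sort_by, pagesize):
    """Collect every record in pages of `pagesize`, using the last uuid seen as marker."""
    results, marker = [], None
    while True:
        batch = fetch_page(records, sort_by, pagesize, marker)
        if not batch:
            break
        results.extend(batch)
        marker = batch[-1]['uuid']
    return results


if __name__ == '__main__':
    data = [{'uuid': str(i), 'hostname': 'host-%d' % (i % 3)} for i in range(7)]
    # Paging two at a time yields the same ordering as one unlimited query.
    assert paginate(data, 'hostname', 2) == fetch_page(data, 'hostname', None, None)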
# Copyright 2011 OpenStack Foundation # Copyright (c) 2011 X.commerce, a business unit of eBay Inc. # Copyright 2011 Grid Dynamics # Copyright 2011 Eldar Nugaev, Kirill Shileev, Ilya Alekseyev # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import webob from nova.api.openstack import common from nova.api.openstack import extensions from nova.api.openstack import wsgi from nova.api.openstack import xmlutil from nova import compute from nova.compute import utils as compute_utils from nova import exception from nova.i18n import _ from nova.i18n import _LW from nova import network from nova.openstack.common import log as logging from nova.openstack.common import uuidutils LOG = logging.getLogger(__name__) authorize = extensions.extension_authorizer('compute', 'floating_ips') def make_float_ip(elem): elem.set('id') elem.set('ip') elem.set('pool') elem.set('fixed_ip') elem.set('instance_id') class FloatingIPTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('floating_ip', selector='floating_ip') make_float_ip(root) return xmlutil.MasterTemplate(root, 1) class FloatingIPsTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('floating_ips') elem = xmlutil.SubTemplateElement(root, 'floating_ip', selector='floating_ips') make_float_ip(elem) return xmlutil.MasterTemplate(root, 1) def _translate_floating_ip_view(floating_ip): result = { 'id': floating_ip['id'], 'ip': floating_ip['address'], 'pool': floating_ip['pool'], } try: result['fixed_ip'] = floating_ip['fixed_ip']['address'] except (TypeError, KeyError, AttributeError): result['fixed_ip'] = None try: result['instance_id'] = floating_ip['fixed_ip']['instance_uuid'] except (TypeError, KeyError, AttributeError): result['instance_id'] = None return {'floating_ip': result} def _translate_floating_ips_view(floating_ips): return {'floating_ips': [_translate_floating_ip_view(ip)['floating_ip'] for ip in floating_ips]} def get_instance_by_floating_ip_addr(self, context, address): snagiibfa = self.network_api.get_instance_id_by_floating_address instance_id = snagiibfa(context, address) if instance_id: return self.compute_api.get(context, instance_id) def disassociate_floating_ip(self, context, instance, address): try: self.network_api.disassociate_floating_ip(context, instance, address) except exception.Forbidden: raise webob.exc.HTTPForbidden() except exception.CannotDisassociateAutoAssignedFloatingIP: msg = _('Cannot disassociate auto assigned floating ip') raise webob.exc.HTTPForbidden(explanation=msg) class FloatingIPController(object): """The Floating IPs API controller for the OpenStack API.""" def __init__(self): self.compute_api = compute.API() self.network_api = network.API() super(FloatingIPController, self).__init__() @wsgi.serializers(xml=FloatingIPTemplate) def show(self, req, id): """Return data about the given floating ip.""" context = req.environ['nova.context'] authorize(context) try: floating_ip = self.network_api.get_floating_ip(context, id) except (exception.NotFound, 
exception.InvalidID): msg = _("Floating ip not found for id %s") % id raise webob.exc.HTTPNotFound(explanation=msg) return _translate_floating_ip_view(floating_ip) @wsgi.serializers(xml=FloatingIPsTemplate) def index(self, req): """Return a list of floating ips allocated to a project.""" context = req.environ['nova.context'] authorize(context) floating_ips = self.network_api.get_floating_ips_by_project(context) return _translate_floating_ips_view(floating_ips) @wsgi.serializers(xml=FloatingIPTemplate) def create(self, req, body=None): context = req.environ['nova.context'] authorize(context) pool = None if body and 'pool' in body: pool = body['pool'] try: address = self.network_api.allocate_floating_ip(context, pool) ip = self.network_api.get_floating_ip_by_address(context, address) except exception.NoMoreFloatingIps: if pool: msg = _("No more floating ips in pool %s.") % pool else: msg = _("No more floating ips available.") raise webob.exc.HTTPNotFound(explanation=msg) except exception.FloatingIpLimitExceeded: if pool: msg = _("IP allocation over quota in pool %s.") % pool else: msg = _("IP allocation over quota.") raise webob.exc.HTTPForbidden(explanation=msg) except exception.FloatingIpPoolNotFound as e: raise webob.exc.HTTPNotFound(explanation=e.format_message()) return _translate_floating_ip_view(ip) def delete(self, req, id): context = req.environ['nova.context'] authorize(context) # get the floating ip object try: floating_ip = self.network_api.get_floating_ip(context, id) except (exception.NotFound, exception.InvalidID): msg = _("Floating ip not found for id %s") % id raise webob.exc.HTTPNotFound(explanation=msg) address = floating_ip['address'] # get the associated instance object (if any) instance = get_instance_by_floating_ip_addr(self, context, address) try: self.network_api.disassociate_and_release_floating_ip( context, instance, floating_ip) except exception.Forbidden: raise webob.exc.HTTPForbidden() except exception.CannotDisassociateAutoAssignedFloatingIP: msg = _('Cannot disassociate auto assigned floating ip') raise webob.exc.HTTPForbidden(explanation=msg) return webob.Response(status_int=202) class FloatingIPActionController(wsgi.Controller): def __init__(self, ext_mgr=None, *args, **kwargs): super(FloatingIPActionController, self).__init__(*args, **kwargs) self.compute_api = compute.API() self.network_api = network.API() self.ext_mgr = ext_mgr @wsgi.action('addFloatingIp') def _add_floating_ip(self, req, id, body): """Associate floating_ip to an instance.""" context = req.environ['nova.context'] authorize(context) try: address = body['addFloatingIp']['address'] except TypeError: msg = _("Missing parameter dict") raise webob.exc.HTTPBadRequest(explanation=msg) except KeyError: msg = _("Address not specified") raise webob.exc.HTTPBadRequest(explanation=msg) instance = common.get_instance(self.compute_api, context, id) cached_nwinfo = compute_utils.get_nw_info_for_instance(instance) if not cached_nwinfo: msg = _('No nw_info cache associated with instance') raise webob.exc.HTTPBadRequest(explanation=msg) fixed_ips = cached_nwinfo.fixed_ips() if not fixed_ips: msg = _('No fixed ips associated to instance') raise webob.exc.HTTPBadRequest(explanation=msg) fixed_address = None if self.ext_mgr.is_loaded('os-extended-floating-ips'): if 'fixed_address' in body['addFloatingIp']: fixed_address = body['addFloatingIp']['fixed_address'] for fixed in fixed_ips: if fixed['address'] == fixed_address: break else: msg = _('Specified fixed address not assigned to instance') raise 
webob.exc.HTTPBadRequest(explanation=msg) if not fixed_address: fixed_address = fixed_ips[0]['address'] if len(fixed_ips) > 1: LOG.warn(_LW('multiple fixed_ips exist, using the first: ' '%s'), fixed_address) try: self.network_api.associate_floating_ip(context, instance, floating_address=address, fixed_address=fixed_address) except exception.FloatingIpAssociated: msg = _('floating ip is already associated') raise webob.exc.HTTPBadRequest(explanation=msg) except exception.NoFloatingIpInterface: msg = _('l3driver call to add floating ip failed') raise webob.exc.HTTPBadRequest(explanation=msg) except exception.FloatingIpNotFoundForAddress: msg = _('floating ip not found') raise webob.exc.HTTPNotFound(explanation=msg) except exception.Forbidden as e: raise webob.exc.HTTPForbidden(explanation=e.format_message()) except Exception: msg = _('Error. Unable to associate floating ip') LOG.exception(msg) raise webob.exc.HTTPBadRequest(explanation=msg) return webob.Response(status_int=202) @wsgi.action('removeFloatingIp') def _remove_floating_ip(self, req, id, body): """Dissociate floating_ip from an instance.""" context = req.environ['nova.context'] authorize(context) try: address = body['removeFloatingIp']['address'] except TypeError: msg = _("Missing parameter dict") raise webob.exc.HTTPBadRequest(explanation=msg) except KeyError: msg = _("Address not specified") raise webob.exc.HTTPBadRequest(explanation=msg) # get the floating ip object try: floating_ip = self.network_api.get_floating_ip_by_address(context, address) except exception.FloatingIpNotFoundForAddress: msg = _("floating ip not found") raise webob.exc.HTTPNotFound(explanation=msg) # get the associated instance object (if any) instance = get_instance_by_floating_ip_addr(self, context, address) # disassociate if associated if (instance and floating_ip.get('fixed_ip_id') and (uuidutils.is_uuid_like(id) and [instance['uuid'] == id] or [instance['id'] == id])[0]): try: disassociate_floating_ip(self, context, instance, address) except exception.FloatingIpNotAssociated: msg = _('Floating ip is not associated') raise webob.exc.HTTPBadRequest(explanation=msg) return webob.Response(status_int=202) else: msg = _("Floating ip %(address)s is not associated with instance " "%(id)s.") % {'address': address, 'id': id} raise webob.exc.HTTPUnprocessableEntity(explanation=msg) class Floating_ips(extensions.ExtensionDescriptor): """Floating IPs support.""" name = "FloatingIps" alias = "os-floating-ips" namespace = "http://docs.openstack.org/compute/ext/floating_ips/api/v1.1" updated = "2011-06-16T00:00:00Z" def get_resources(self): resources = [] res = extensions.ResourceExtension('os-floating-ips', FloatingIPController(), member_actions={}) resources.append(res) return resources def get_controller_extensions(self): controller = FloatingIPActionController(self.ext_mgr) extension = extensions.ControllerExtension(self, 'servers', controller) return [extension]
jumpstarter-io/nova
nova/api/openstack/compute/contrib/floating_ips.py
Python
apache-2.0
12,423
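An illustrative sketch of the fixed-address selection performed inside _add_floating_ip above, pulled out as a standalone helper. This is a simplified reading of the controller logic, not a nova API: pick_fixed_address is a hypothetical function written for this example, fixed_ips is assumed to be a list of dicts with an 'address' key, and requested stands in for the optional fixed_address from the request body.

def pick_fixed_address(fixed_ips, requested=None):
    """Return the fixed IP to associate the floating IP with.

    Raises ValueError where the controller would raise HTTPBadRequest:
    when the instance has no fixed IPs, or when a requested address is
    not actually assigned to the instance.
    """
    if not fixed_ips:
        raise ValueError('No fixed ips associated to instance')
    if requested is not None:
        if not any(fixed['address'] == requested for fixed in fixed_ips):
            raise ValueError('Specified fixed address not assigned to instance')
        return requested
    # Default: use the first fixed IP. The controller logs a warning when
    # several exist, since the choice is then ambiguous.
    return fixed_ips[0]['address']


# Example:
#   pick_fixed_address([{'address': '10.0.0.3'}, {'address': '10.0.0.4'}])
#   -> '10.0.0.3'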
#!/usr/bin/python
"""
Author: David Benson ([email protected])
Date: 02/09/2013

Description: This Python module is responsible for storing information about
all solar systems within the EVE universe.
"""
import system
import datetime
import evelink.api


class EveUniverse:
    """
    This class acts as a storage class for all the space systems in Eve Online.
    A very simple version control system is implemented using timestamps.
    """

    def __init__(self):
        # Constructor initialises the empty systems dict, the last-updated
        # timestamp and the connection to the EVE API.
        self.systems = {}
        self.api = evelink.api.API()
        self.current_timestamp = datetime.datetime.now()

    def retrieve_systems(self):
        # Retrieves all systems from the EVE API and stores them keyed by
        # solarSystemID, then records when the data was last updated.
        response = self.api.get('map/Sovereignty')
        for item in response.iter('row'):
            temp = system.System(item.attrib['solarSystemName'],
                                 item.attrib['allianceID'],
                                 item.attrib['factionID'],
                                 item.attrib['corporationID'])
            self.systems[item.attrib['solarSystemID']] = temp
        self.current_timestamp = datetime.datetime.now()

    def __repr__(self):
        # Presents a string representation of the object.
        result = "Last Updated: %s \n" % self.current_timestamp
        result += str(self.systems)
        return result


sys = EveUniverse()
sys.retrieve_systems()
print(repr(sys))
Funi1234/InternetSpaceships
python/main/eve_universe.py
Python
apache-2.0
1,437
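An illustrative sketch, not part of the module above: one way the current_timestamp kept by EveUniverse could serve as the "very simple version control" its docstring mentions is to refresh the cached systems only when the data is older than a chosen maximum age. needs_refresh is a hypothetical helper written for this example.

import datetime


def needs_refresh(last_updated, max_age=datetime.timedelta(hours=1), now=None):
    """Return True if the cached data is older than `max_age`."""
    now = now or datetime.datetime.now()
    return (now - last_updated) > max_age


# Example:
stale_stamp = datetime.datetime.now() - datetime.timedelta(hours=2)
assert needs_refresh(stale_stamp)                      # older than an hour -> refresh
assert not needs_refresh(datetime.datetime.now())      # just updated -> keep cache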
"""Check that available RPM packages match the required versions.""" from openshift_checks import OpenShiftCheck from openshift_checks.mixins import NotContainerizedMixin class PackageVersion(NotContainerizedMixin, OpenShiftCheck): """Check that available RPM packages match the required versions.""" name = "package_version" tags = ["preflight"] # NOTE: versions outside those specified are mapped to least/greatest openshift_to_ovs_version = { (3, 4): "2.4", (3, 5): ["2.6", "2.7"], (3, 6): ["2.6", "2.7", "2.8", "2.9"], (3, 7): ["2.6", "2.7", "2.8", "2.9"], (3, 8): ["2.6", "2.7", "2.8", "2.9"], (3, 9): ["2.6", "2.7", "2.8", "2.9"], (3, 10): ["2.6", "2.7", "2.8", "2.9"], } openshift_to_docker_version = { (3, 1): "1.8", (3, 2): "1.10", (3, 3): "1.10", (3, 4): "1.12", (3, 5): "1.12", (3, 6): "1.12", (3, 7): "1.12", (3, 8): "1.12", (3, 9): ["1.12", "1.13"], } def is_active(self): """Skip hosts that do not have package requirements.""" group_names = self.get_var("group_names", default=[]) master_or_node = 'oo_masters_to_config' in group_names or 'oo_nodes_to_config' in group_names return super(PackageVersion, self).is_active() and master_or_node def run(self): rpm_prefix = self.get_var("openshift_service_type") if self._templar is not None: rpm_prefix = self._templar.template(rpm_prefix) openshift_release = self.get_var("openshift_release", default='') deployment_type = self.get_var("openshift_deployment_type") check_multi_minor_release = deployment_type in ['openshift-enterprise'] args = { "package_mgr": self.get_var("ansible_pkg_mgr"), "package_list": [ { "name": "openvswitch", "version": self.get_required_ovs_version(), "check_multi": False, }, { "name": "docker", "version": self.get_required_docker_version(), "check_multi": False, }, { "name": "{}".format(rpm_prefix), "version": openshift_release, "check_multi": check_multi_minor_release, }, { "name": "{}-master".format(rpm_prefix), "version": openshift_release, "check_multi": check_multi_minor_release, }, { "name": "{}-node".format(rpm_prefix), "version": openshift_release, "check_multi": check_multi_minor_release, }, ], } return self.execute_module_with_retries("aos_version", args) def get_required_ovs_version(self): """Return the correct Open vSwitch version(s) for the current OpenShift version.""" return self.get_required_version("Open vSwitch", self.openshift_to_ovs_version) def get_required_docker_version(self): """Return the correct Docker version(s) for the current OpenShift version.""" return self.get_required_version("Docker", self.openshift_to_docker_version)
wbrefvem/openshift-ansible
roles/openshift_health_checker/openshift_checks/package_version.py
Python
apache-2.0
3,386
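An illustrative sketch of the clamping behaviour noted in the check above ("versions outside those specified are mapped to least/greatest"): given a table keyed by (major, minor) OpenShift versions, an out-of-range release is clamped to the nearest known key. clamped_lookup is a hypothetical helper written for this example and only handles the clamping case; the real check delegates to self.get_required_version(), presumably supplied by the OpenShiftCheck base class, which is not shown in this file.

def clamped_lookup(version_map, openshift_version):
    """Return the required package version(s) for an OpenShift (major, minor) tuple."""
    keys = sorted(version_map)
    if openshift_version < keys[0]:
        openshift_version = keys[0]       # older than anything known -> least
    elif openshift_version > keys[-1]:
        openshift_version = keys[-1]      # newer than anything known -> greatest
    # In-range versions are looked up directly; missing in-range keys are not
    # handled by this sketch.
    return version_map[openshift_version]


# Example, using a trimmed copy of the openshift_to_docker_version table:
docker_map = {(3, 1): "1.8", (3, 2): "1.10", (3, 9): ["1.12", "1.13"]}
assert clamped_lookup(docker_map, (3, 0)) == "1.8"               # clamped to least
assert clamped_lookup(docker_map, (3, 12)) == ["1.12", "1.13"]   # clamped to greatest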
from .layout_helpers import *
armstrong/armstrong.core.arm_layout
tests/templatetags/__init__.py
Python
apache-2.0
30
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import time from keystoneauth1 import exceptions as ks_exc import mock from six.moves.urllib import parse import nova.conf from nova import context from nova import exception from nova import objects from nova import rc_fields as fields from nova.scheduler.client import report from nova.scheduler import utils as scheduler_utils from nova import test from nova.tests.unit import fake_requests from nova.tests import uuidsentinel as uuids CONF = nova.conf.CONF class SafeConnectedTestCase(test.NoDBTestCase): """Test the safe_connect decorator for the scheduler client.""" def setUp(self): super(SafeConnectedTestCase, self).setUp() self.context = context.get_admin_context() with mock.patch('keystoneauth1.loading.load_auth_from_conf_options'): self.client = report.SchedulerReportClient() @mock.patch('keystoneauth1.session.Session.request') def test_missing_endpoint(self, req): """Test EndpointNotFound behavior. A missing endpoint entry should not explode. """ req.side_effect = ks_exc.EndpointNotFound() self.client._get_resource_provider(self.context, "fake") # reset the call count to demonstrate that future calls still # work req.reset_mock() self.client._get_resource_provider(self.context, "fake") self.assertTrue(req.called) @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' '_create_client') @mock.patch('keystoneauth1.session.Session.request') def test_missing_endpoint_create_client(self, req, create_client): """Test EndpointNotFound retry behavior. A missing endpoint should cause _create_client to be called. """ req.side_effect = ks_exc.EndpointNotFound() self.client._get_resource_provider(self.context, "fake") # This is the second time _create_client is called, but the first since # the mock was created. self.assertTrue(create_client.called) @mock.patch('keystoneauth1.session.Session.request') def test_missing_auth(self, req): """Test Missing Auth handled correctly. A missing auth configuration should not explode. """ req.side_effect = ks_exc.MissingAuthPlugin() self.client._get_resource_provider(self.context, "fake") # reset the call count to demonstrate that future calls still # work req.reset_mock() self.client._get_resource_provider(self.context, "fake") self.assertTrue(req.called) @mock.patch('keystoneauth1.session.Session.request') def test_unauthorized(self, req): """Test Unauthorized handled correctly. An unauthorized configuration should not explode. """ req.side_effect = ks_exc.Unauthorized() self.client._get_resource_provider(self.context, "fake") # reset the call count to demonstrate that future calls still # work req.reset_mock() self.client._get_resource_provider(self.context, "fake") self.assertTrue(req.called) @mock.patch('keystoneauth1.session.Session.request') def test_connect_fail(self, req): """Test Connect Failure handled correctly. If we get a connect failure, this is transient, and we expect that this will end up working correctly later. 
""" req.side_effect = ks_exc.ConnectFailure() self.client._get_resource_provider(self.context, "fake") # reset the call count to demonstrate that future calls do # work req.reset_mock() self.client._get_resource_provider(self.context, "fake") self.assertTrue(req.called) @mock.patch.object(report, 'LOG') def test_warning_limit(self, mock_log): # Assert that __init__ initializes _warn_count as we expect self.assertEqual(0, self.client._warn_count) mock_self = mock.MagicMock() mock_self._warn_count = 0 for i in range(0, report.WARN_EVERY + 3): report.warn_limit(mock_self, 'warning') mock_log.warning.assert_has_calls([mock.call('warning'), mock.call('warning')]) @mock.patch('keystoneauth1.session.Session.request') def test_failed_discovery(self, req): """Test DiscoveryFailure behavior. Failed discovery should not blow up. """ req.side_effect = ks_exc.DiscoveryFailure() self.client._get_resource_provider(self.context, "fake") # reset the call count to demonstrate that future calls still # work req.reset_mock() self.client._get_resource_provider(self.context, "fake") self.assertTrue(req.called) class TestConstructor(test.NoDBTestCase): @mock.patch('keystoneauth1.loading.load_session_from_conf_options') @mock.patch('keystoneauth1.loading.load_auth_from_conf_options') def test_constructor(self, load_auth_mock, load_sess_mock): client = report.SchedulerReportClient() load_auth_mock.assert_called_once_with(CONF, 'placement') load_sess_mock.assert_called_once_with(CONF, 'placement', auth=load_auth_mock.return_value) self.assertEqual(['internal', 'public'], client._client.interface) self.assertEqual({'accept': 'application/json'}, client._client.additional_headers) @mock.patch('keystoneauth1.loading.load_session_from_conf_options') @mock.patch('keystoneauth1.loading.load_auth_from_conf_options') def test_constructor_admin_interface(self, load_auth_mock, load_sess_mock): self.flags(valid_interfaces='admin', group='placement') client = report.SchedulerReportClient() load_auth_mock.assert_called_once_with(CONF, 'placement') load_sess_mock.assert_called_once_with(CONF, 'placement', auth=load_auth_mock.return_value) self.assertEqual(['admin'], client._client.interface) self.assertEqual({'accept': 'application/json'}, client._client.additional_headers) class SchedulerReportClientTestCase(test.NoDBTestCase): def setUp(self): super(SchedulerReportClientTestCase, self).setUp() self.context = context.get_admin_context() self.ks_adap_mock = mock.Mock() self.compute_node = objects.ComputeNode( uuid=uuids.compute_node, hypervisor_hostname='foo', vcpus=8, cpu_allocation_ratio=16.0, memory_mb=1024, ram_allocation_ratio=1.5, local_gb=10, disk_allocation_ratio=1.0, ) with test.nested( mock.patch('keystoneauth1.adapter.Adapter', return_value=self.ks_adap_mock), mock.patch('keystoneauth1.loading.load_auth_from_conf_options') ): self.client = report.SchedulerReportClient() def _init_provider_tree(self, generation_override=None, resources_override=None): cn = self.compute_node resources = resources_override if resources_override is None: resources = { 'VCPU': { 'total': cn.vcpus, 'reserved': 0, 'min_unit': 1, 'max_unit': cn.vcpus, 'step_size': 1, 'allocation_ratio': cn.cpu_allocation_ratio, }, 'MEMORY_MB': { 'total': cn.memory_mb, 'reserved': 512, 'min_unit': 1, 'max_unit': cn.memory_mb, 'step_size': 1, 'allocation_ratio': cn.ram_allocation_ratio, }, 'DISK_GB': { 'total': cn.local_gb, 'reserved': 0, 'min_unit': 1, 'max_unit': cn.local_gb, 'step_size': 1, 'allocation_ratio': cn.disk_allocation_ratio, }, } generation = 
generation_override or 1 rp_uuid = self.client._provider_tree.new_root( cn.hypervisor_hostname, cn.uuid, generation=generation, ) self.client._provider_tree.update_inventory(rp_uuid, resources) def _validate_provider(self, name_or_uuid, **kwargs): """Validates existence and values of a provider in this client's _provider_tree. :param name_or_uuid: The name or UUID of the provider to validate. :param kwargs: Optional keyword arguments of ProviderData attributes whose values are to be validated. """ found = self.client._provider_tree.data(name_or_uuid) # If kwargs provided, their names indicate ProviderData attributes for attr, expected in kwargs.items(): try: self.assertEqual(getattr(found, attr), expected) except AttributeError: self.fail("Provider with name or UUID %s doesn't have " "attribute %s (expected value: %s)" % (name_or_uuid, attr, expected)) class TestPutAllocations(SchedulerReportClientTestCase): @mock.patch('nova.scheduler.client.report.SchedulerReportClient.put') def test_put_allocations(self, mock_put): mock_put.return_value.status_code = 204 mock_put.return_value.text = "cool" rp_uuid = mock.sentinel.rp consumer_uuid = mock.sentinel.consumer data = {"MEMORY_MB": 1024} expected_url = "/allocations/%s" % consumer_uuid resp = self.client.put_allocations(self.context, rp_uuid, consumer_uuid, data, mock.sentinel.project_id, mock.sentinel.user_id) self.assertTrue(resp) mock_put.assert_called_once_with( expected_url, mock.ANY, version='1.8', global_request_id=self.context.global_id) @mock.patch.object(report.LOG, 'warning') @mock.patch('nova.scheduler.client.report.SchedulerReportClient.put') def test_put_allocations_fail(self, mock_put, mock_warn): mock_put.return_value.status_code = 400 mock_put.return_value.text = "not cool" rp_uuid = mock.sentinel.rp consumer_uuid = mock.sentinel.consumer data = {"MEMORY_MB": 1024} expected_url = "/allocations/%s" % consumer_uuid resp = self.client.put_allocations(self.context, rp_uuid, consumer_uuid, data, mock.sentinel.project_id, mock.sentinel.user_id) self.assertFalse(resp) mock_put.assert_called_once_with( expected_url, mock.ANY, version='1.8', global_request_id=self.context.global_id) log_msg = mock_warn.call_args[0][0] self.assertIn("Unable to submit allocation for instance", log_msg) @mock.patch('nova.scheduler.client.report.SchedulerReportClient.put') def test_put_allocations_retries_conflict(self, mock_put): failed = mock.MagicMock() failed.status_code = 409 failed.text = "concurrently updated" succeeded = mock.MagicMock() succeeded.status_code = 204 mock_put.side_effect = (failed, succeeded) rp_uuid = mock.sentinel.rp consumer_uuid = mock.sentinel.consumer data = {"MEMORY_MB": 1024} expected_url = "/allocations/%s" % consumer_uuid resp = self.client.put_allocations(self.context, rp_uuid, consumer_uuid, data, mock.sentinel.project_id, mock.sentinel.user_id) self.assertTrue(resp) mock_put.assert_has_calls([ mock.call(expected_url, mock.ANY, version='1.8', global_request_id=self.context.global_id)] * 2) @mock.patch('nova.scheduler.client.report.SchedulerReportClient.put') def test_put_allocations_retry_gives_up(self, mock_put): failed = mock.MagicMock() failed.status_code = 409 failed.text = "concurrently updated" mock_put.return_value = failed rp_uuid = mock.sentinel.rp consumer_uuid = mock.sentinel.consumer data = {"MEMORY_MB": 1024} expected_url = "/allocations/%s" % consumer_uuid resp = self.client.put_allocations(self.context, rp_uuid, consumer_uuid, data, mock.sentinel.project_id, mock.sentinel.user_id) self.assertFalse(resp) 
mock_put.assert_has_calls([ mock.call(expected_url, mock.ANY, version='1.8', global_request_id=self.context.global_id)] * 3) def test_claim_resources_success_with_old_version(self): get_resp_mock = mock.Mock(status_code=200) get_resp_mock.json.return_value = { 'allocations': {}, # build instance, not move } self.ks_adap_mock.get.return_value = get_resp_mock resp_mock = mock.Mock(status_code=204) self.ks_adap_mock.put.return_value = resp_mock consumer_uuid = uuids.consumer_uuid alloc_req = { 'allocations': [ { 'resource_provider': { 'uuid': uuids.cn1 }, 'resources': { 'VCPU': 1, 'MEMORY_MB': 1024, } }, ], } project_id = uuids.project_id user_id = uuids.user_id res = self.client.claim_resources( self.context, consumer_uuid, alloc_req, project_id, user_id) expected_url = "/allocations/%s" % consumer_uuid expected_payload = { 'allocations': { alloc['resource_provider']['uuid']: { 'resources': alloc['resources'] } for alloc in alloc_req['allocations'] } } expected_payload['project_id'] = project_id expected_payload['user_id'] = user_id self.ks_adap_mock.put.assert_called_once_with( expected_url, microversion='1.12', json=expected_payload, raise_exc=False, headers={'X-Openstack-Request-Id': self.context.global_id}) self.assertTrue(res) def test_claim_resources_success(self): get_resp_mock = mock.Mock(status_code=200) get_resp_mock.json.return_value = { 'allocations': {}, # build instance, not move } self.ks_adap_mock.get.return_value = get_resp_mock resp_mock = mock.Mock(status_code=204) self.ks_adap_mock.put.return_value = resp_mock consumer_uuid = uuids.consumer_uuid alloc_req = { 'allocations': { uuids.cn1: { 'resources': { 'VCPU': 1, 'MEMORY_MB': 1024, } }, }, } project_id = uuids.project_id user_id = uuids.user_id res = self.client.claim_resources(self.context, consumer_uuid, alloc_req, project_id, user_id, allocation_request_version='1.12') expected_url = "/allocations/%s" % consumer_uuid expected_payload = {'allocations': { rp_uuid: alloc for rp_uuid, alloc in alloc_req['allocations'].items()}} expected_payload['project_id'] = project_id expected_payload['user_id'] = user_id self.ks_adap_mock.put.assert_called_once_with( expected_url, microversion='1.12', json=expected_payload, raise_exc=False, headers={'X-Openstack-Request-Id': self.context.global_id}) self.assertTrue(res) def test_claim_resources_success_move_operation_no_shared(self): """Tests that when a move operation is detected (existing allocations for the same instance UUID) that we end up constructing an appropriate allocation that contains the original resources on the source host as well as the resources on the destination host. 
""" get_resp_mock = mock.Mock(status_code=200) get_resp_mock.json.return_value = { 'allocations': { uuids.source: { 'resource_provider_generation': 42, 'resources': { 'VCPU': 1, 'MEMORY_MB': 1024, }, }, }, } self.ks_adap_mock.get.return_value = get_resp_mock resp_mock = mock.Mock(status_code=204) self.ks_adap_mock.put.return_value = resp_mock consumer_uuid = uuids.consumer_uuid alloc_req = { 'allocations': { uuids.destination: { 'resources': { 'VCPU': 1, 'MEMORY_MB': 1024 } }, }, } project_id = uuids.project_id user_id = uuids.user_id res = self.client.claim_resources(self.context, consumer_uuid, alloc_req, project_id, user_id, allocation_request_version='1.12') expected_url = "/allocations/%s" % consumer_uuid # New allocation should include resources claimed on both the source # and destination hosts expected_payload = { 'allocations': { uuids.source: { 'resources': { 'VCPU': 1, 'MEMORY_MB': 1024 } }, uuids.destination: { 'resources': { 'VCPU': 1, 'MEMORY_MB': 1024 } }, }, } expected_payload['project_id'] = project_id expected_payload['user_id'] = user_id self.ks_adap_mock.put.assert_called_once_with( expected_url, microversion='1.12', json=mock.ANY, raise_exc=False, headers={'X-Openstack-Request-Id': self.context.global_id}) # We have to pull the json body from the mock call_args to validate # it separately otherwise hash seed issues get in the way. actual_payload = self.ks_adap_mock.put.call_args[1]['json'] self.assertEqual(expected_payload, actual_payload) self.assertTrue(res) def test_claim_resources_success_move_operation_with_shared(self): """Tests that when a move operation is detected (existing allocations for the same instance UUID) that we end up constructing an appropriate allocation that contains the original resources on the source host as well as the resources on the destination host but that when a shared storage provider is claimed against in both the original allocation as well as the new allocation request, we don't double that allocation resource request up. 
""" get_resp_mock = mock.Mock(status_code=200) get_resp_mock.json.return_value = { 'allocations': { uuids.source: { 'resource_provider_generation': 42, 'resources': { 'VCPU': 1, 'MEMORY_MB': 1024, }, }, uuids.shared_storage: { 'resource_provider_generation': 42, 'resources': { 'DISK_GB': 100, }, }, }, } self.ks_adap_mock.get.return_value = get_resp_mock resp_mock = mock.Mock(status_code=204) self.ks_adap_mock.put.return_value = resp_mock consumer_uuid = uuids.consumer_uuid alloc_req = { 'allocations': { uuids.destination: { 'resources': { 'VCPU': 1, 'MEMORY_MB': 1024, } }, uuids.shared_storage: { 'resources': { 'DISK_GB': 100, } }, } } project_id = uuids.project_id user_id = uuids.user_id res = self.client.claim_resources(self.context, consumer_uuid, alloc_req, project_id, user_id, allocation_request_version='1.12') expected_url = "/allocations/%s" % consumer_uuid # New allocation should include resources claimed on both the source # and destination hosts but not have a doubled-up request for the disk # resources on the shared provider expected_payload = { 'allocations': { uuids.source: { 'resources': { 'VCPU': 1, 'MEMORY_MB': 1024 } }, uuids.shared_storage: { 'resources': { 'DISK_GB': 100 } }, uuids.destination: { 'resources': { 'VCPU': 1, 'MEMORY_MB': 1024 } }, }, } expected_payload['project_id'] = project_id expected_payload['user_id'] = user_id self.ks_adap_mock.put.assert_called_once_with( expected_url, microversion='1.12', json=mock.ANY, raise_exc=False, headers={'X-Openstack-Request-Id': self.context.global_id}) # We have to pull the allocations from the json body from the # mock call_args to validate it separately otherwise hash seed # issues get in the way. actual_payload = self.ks_adap_mock.put.call_args[1]['json'] self.assertEqual(expected_payload, actual_payload) self.assertTrue(res) def test_claim_resources_success_resize_to_same_host_no_shared(self): """Tests that when a resize to the same host operation is detected (existing allocations for the same instance UUID and same resource provider) that we end up constructing an appropriate allocation that contains the original resources on the source host as well as the resources on the destination host, which in this case are the same. """ get_current_allocations_resp_mock = mock.Mock(status_code=200) get_current_allocations_resp_mock.json.return_value = { 'allocations': { uuids.same_host: { 'resource_provider_generation': 42, 'resources': { 'VCPU': 1, 'MEMORY_MB': 1024, 'DISK_GB': 20 }, }, }, } self.ks_adap_mock.get.return_value = get_current_allocations_resp_mock put_allocations_resp_mock = mock.Mock(status_code=204) self.ks_adap_mock.put.return_value = put_allocations_resp_mock consumer_uuid = uuids.consumer_uuid # This is the resize-up allocation where VCPU, MEMORY_MB and DISK_GB # are all being increased but on the same host. We also throw a custom # resource class in the new allocation to make sure it's not lost and # that we don't have a KeyError when merging the allocations. alloc_req = { 'allocations': { uuids.same_host: { 'resources': { 'VCPU': 2, 'MEMORY_MB': 2048, 'DISK_GB': 40, 'CUSTOM_FOO': 1 } }, }, } project_id = uuids.project_id user_id = uuids.user_id res = self.client.claim_resources(self.context, consumer_uuid, alloc_req, project_id, user_id, allocation_request_version='1.12') expected_url = "/allocations/%s" % consumer_uuid # New allocation should include doubled resources claimed on the same # host. 
expected_payload = { 'allocations': { uuids.same_host: { 'resources': { 'VCPU': 3, 'MEMORY_MB': 3072, 'DISK_GB': 60, 'CUSTOM_FOO': 1 } }, }, } expected_payload['project_id'] = project_id expected_payload['user_id'] = user_id self.ks_adap_mock.put.assert_called_once_with( expected_url, microversion='1.12', json=mock.ANY, raise_exc=False, headers={'X-Openstack-Request-Id': self.context.global_id}) # We have to pull the json body from the mock call_args to validate # it separately otherwise hash seed issues get in the way. actual_payload = self.ks_adap_mock.put.call_args[1]['json'] self.assertEqual(expected_payload, actual_payload) self.assertTrue(res) def test_claim_resources_success_resize_to_same_host_with_shared(self): """Tests that when a resize to the same host operation is detected (existing allocations for the same instance UUID and same resource provider) that we end up constructing an appropriate allocation that contains the original resources on the source host as well as the resources on the destination host, which in this case are the same. This test adds the fun wrinkle of throwing a shared storage provider in the mix when doing resize to the same host. """ get_current_allocations_resp_mock = mock.Mock(status_code=200) get_current_allocations_resp_mock.json.return_value = { 'allocations': { uuids.same_host: { 'resource_provider_generation': 42, 'resources': { 'VCPU': 1, 'MEMORY_MB': 1024 }, }, uuids.shared_storage: { 'resource_provider_generation': 42, 'resources': { 'DISK_GB': 20, }, }, }, } self.ks_adap_mock.get.return_value = get_current_allocations_resp_mock put_allocations_resp_mock = mock.Mock(status_code=204) self.ks_adap_mock.put.return_value = put_allocations_resp_mock consumer_uuid = uuids.consumer_uuid # This is the resize-up allocation where VCPU, MEMORY_MB and DISK_GB # are all being increased but DISK_GB is on a shared storage provider. alloc_req = { 'allocations': { uuids.same_host: { 'resources': { 'VCPU': 2, 'MEMORY_MB': 2048 } }, uuids.shared_storage: { 'resources': { 'DISK_GB': 40, } }, }, } project_id = uuids.project_id user_id = uuids.user_id res = self.client.claim_resources(self.context, consumer_uuid, alloc_req, project_id, user_id, allocation_request_version='1.12') expected_url = "/allocations/%s" % consumer_uuid # New allocation should include doubled resources claimed on the same # host. expected_payload = { 'allocations': { uuids.same_host: { 'resources': { 'VCPU': 3, 'MEMORY_MB': 3072 } }, uuids.shared_storage: { 'resources': { 'DISK_GB': 60 } }, }, } expected_payload['project_id'] = project_id expected_payload['user_id'] = user_id self.ks_adap_mock.put.assert_called_once_with( expected_url, microversion='1.12', json=mock.ANY, raise_exc=False, headers={'X-Openstack-Request-Id': self.context.global_id}) # We have to pull the json body from the mock call_args to validate # it separately otherwise hash seed issues get in the way. actual_payload = self.ks_adap_mock.put.call_args[1]['json'] self.assertEqual(expected_payload, actual_payload) self.assertTrue(res) def test_claim_resources_fail_retry_success(self): get_resp_mock = mock.Mock(status_code=200) get_resp_mock.json.return_value = { 'allocations': {}, # build instance, not move } self.ks_adap_mock.get.return_value = get_resp_mock resp_mocks = [ mock.Mock( status_code=409, text='Inventory changed while attempting to allocate: ' 'Another thread concurrently updated the data. 
' 'Please retry your update'), mock.Mock(status_code=204), ] self.ks_adap_mock.put.side_effect = resp_mocks consumer_uuid = uuids.consumer_uuid alloc_req = { 'allocations': { uuids.cn1: { 'resources': { 'VCPU': 1, 'MEMORY_MB': 1024, } }, }, } project_id = uuids.project_id user_id = uuids.user_id res = self.client.claim_resources(self.context, consumer_uuid, alloc_req, project_id, user_id, allocation_request_version='1.12') expected_url = "/allocations/%s" % consumer_uuid expected_payload = { 'allocations': {rp_uuid: res for rp_uuid, res in alloc_req['allocations'].items()} } expected_payload['project_id'] = project_id expected_payload['user_id'] = user_id # We should have exactly two calls to the placement API that look # identical since we're retrying the same HTTP request expected_calls = [ mock.call(expected_url, microversion='1.12', json=expected_payload, raise_exc=False, headers={'X-Openstack-Request-Id': self.context.global_id})] * 2 self.assertEqual(len(expected_calls), self.ks_adap_mock.put.call_count) self.ks_adap_mock.put.assert_has_calls(expected_calls) self.assertTrue(res) @mock.patch.object(report.LOG, 'warning') def test_claim_resources_failure(self, mock_log): get_resp_mock = mock.Mock(status_code=200) get_resp_mock.json.return_value = { 'allocations': {}, # build instance, not move } self.ks_adap_mock.get.return_value = get_resp_mock resp_mock = mock.Mock(status_code=409, text='not cool') self.ks_adap_mock.put.return_value = resp_mock consumer_uuid = uuids.consumer_uuid alloc_req = { 'allocations': { uuids.cn1: { 'resources': { 'VCPU': 1, 'MEMORY_MB': 1024, } }, }, } project_id = uuids.project_id user_id = uuids.user_id res = self.client.claim_resources(self.context, consumer_uuid, alloc_req, project_id, user_id, allocation_request_version='1.12') expected_url = "/allocations/%s" % consumer_uuid expected_payload = { 'allocations': {rp_uuid: res for rp_uuid, res in alloc_req['allocations'].items()} } expected_payload['project_id'] = project_id expected_payload['user_id'] = user_id self.ks_adap_mock.put.assert_called_once_with( expected_url, microversion='1.12', json=expected_payload, raise_exc=False, headers={'X-Openstack-Request-Id': self.context.global_id}) self.assertFalse(res) self.assertTrue(mock_log.called) def test_remove_provider_from_inst_alloc_no_shared(self): """Tests that the method which manipulates an existing doubled-up allocation for a move operation to remove the source host results in sending placement the proper payload to PUT /allocations/{consumer_uuid} call. """ get_resp_mock = mock.Mock(status_code=200) get_resp_mock.json.return_value = { 'allocations': { uuids.source: { 'resource_provider_generation': 42, 'resources': { 'VCPU': 1, 'MEMORY_MB': 1024, }, }, uuids.destination: { 'resource_provider_generation': 42, 'resources': { 'VCPU': 1, 'MEMORY_MB': 1024, }, }, }, } self.ks_adap_mock.get.return_value = get_resp_mock resp_mock = mock.Mock(status_code=204) self.ks_adap_mock.put.return_value = resp_mock consumer_uuid = uuids.consumer_uuid project_id = uuids.project_id user_id = uuids.user_id res = self.client.remove_provider_from_instance_allocation( self.context, consumer_uuid, uuids.source, user_id, project_id, mock.Mock()) expected_url = "/allocations/%s" % consumer_uuid # New allocations should only include the destination... 
expected_payload = { 'allocations': [ { 'resource_provider': { 'uuid': uuids.destination, }, 'resources': { 'VCPU': 1, 'MEMORY_MB': 1024, }, }, ], } expected_payload['project_id'] = project_id expected_payload['user_id'] = user_id # We have to pull the json body from the mock call_args to validate # it separately otherwise hash seed issues get in the way. actual_payload = self.ks_adap_mock.put.call_args[1]['json'] sort_by_uuid = lambda x: x['resource_provider']['uuid'] expected_allocations = sorted(expected_payload['allocations'], key=sort_by_uuid) actual_allocations = sorted(actual_payload['allocations'], key=sort_by_uuid) self.assertEqual(expected_allocations, actual_allocations) self.ks_adap_mock.put.assert_called_once_with( expected_url, microversion='1.10', json=mock.ANY, raise_exc=False, headers={'X-Openstack-Request-Id': self.context.global_id}) self.assertTrue(res) def test_remove_provider_from_inst_alloc_with_shared(self): """Tests that the method which manipulates an existing doubled-up allocation with DISK_GB being consumed from a shared storage provider for a move operation to remove the source host results in sending placement the proper payload to PUT /allocations/{consumer_uuid} call. """ get_resp_mock = mock.Mock(status_code=200) get_resp_mock.json.return_value = { 'allocations': { uuids.source: { 'resource_provider_generation': 42, 'resources': { 'VCPU': 1, 'MEMORY_MB': 1024, }, }, uuids.shared_storage: { 'resource_provider_generation': 42, 'resources': { 'DISK_GB': 100, }, }, uuids.destination: { 'resource_provider_generation': 42, 'resources': { 'VCPU': 1, 'MEMORY_MB': 1024, }, }, }, } self.ks_adap_mock.get.return_value = get_resp_mock resp_mock = mock.Mock(status_code=204) self.ks_adap_mock.put.return_value = resp_mock consumer_uuid = uuids.consumer_uuid project_id = uuids.project_id user_id = uuids.user_id res = self.client.remove_provider_from_instance_allocation( self.context, consumer_uuid, uuids.source, user_id, project_id, mock.Mock()) expected_url = "/allocations/%s" % consumer_uuid # New allocations should only include the destination... expected_payload = { 'allocations': [ { 'resource_provider': { 'uuid': uuids.shared_storage, }, 'resources': { 'DISK_GB': 100, }, }, { 'resource_provider': { 'uuid': uuids.destination, }, 'resources': { 'VCPU': 1, 'MEMORY_MB': 1024, }, }, ], } expected_payload['project_id'] = project_id expected_payload['user_id'] = user_id # We have to pull the json body from the mock call_args to validate # it separately otherwise hash seed issues get in the way. actual_payload = self.ks_adap_mock.put.call_args[1]['json'] sort_by_uuid = lambda x: x['resource_provider']['uuid'] expected_allocations = sorted(expected_payload['allocations'], key=sort_by_uuid) actual_allocations = sorted(actual_payload['allocations'], key=sort_by_uuid) self.assertEqual(expected_allocations, actual_allocations) self.ks_adap_mock.put.assert_called_once_with( expected_url, microversion='1.10', json=mock.ANY, raise_exc=False, headers={'X-Openstack-Request-Id': self.context.global_id}) self.assertTrue(res) def test_remove_provider_from_inst_alloc_no_source(self): """Tests that if remove_provider_from_instance_allocation() fails to find any allocations for the source host, it just returns True and does not attempt to rewrite the allocation for the consumer. 
""" get_resp_mock = mock.Mock(status_code=200) # Act like the allocations already did not include the source host for # some reason get_resp_mock.json.return_value = { 'allocations': { uuids.shared_storage: { 'resource_provider_generation': 42, 'resources': { 'DISK_GB': 100, }, }, uuids.destination: { 'resource_provider_generation': 42, 'resources': { 'VCPU': 1, 'MEMORY_MB': 1024, }, }, }, } self.ks_adap_mock.get.return_value = get_resp_mock consumer_uuid = uuids.consumer_uuid project_id = uuids.project_id user_id = uuids.user_id res = self.client.remove_provider_from_instance_allocation( self.context, consumer_uuid, uuids.source, user_id, project_id, mock.Mock()) self.ks_adap_mock.get.assert_called() self.ks_adap_mock.put.assert_not_called() self.assertTrue(res) def test_remove_provider_from_inst_alloc_fail_get_allocs(self): """Tests that we gracefully exit with False from remove_provider_from_instance_allocation() if the call to get the existing allocations fails for some reason """ get_resp_mock = mock.Mock(status_code=500) self.ks_adap_mock.get.return_value = get_resp_mock consumer_uuid = uuids.consumer_uuid project_id = uuids.project_id user_id = uuids.user_id res = self.client.remove_provider_from_instance_allocation( self.context, consumer_uuid, uuids.source, user_id, project_id, mock.Mock()) self.ks_adap_mock.get.assert_called() self.ks_adap_mock.put.assert_not_called() self.assertFalse(res) class TestSetAndClearAllocations(SchedulerReportClientTestCase): def setUp(self): super(TestSetAndClearAllocations, self).setUp() # We want to reuse the mock throughout the class, but with # different return values. self.mock_post = mock.patch( 'nova.scheduler.client.report.SchedulerReportClient.post').start() self.addCleanup(self.mock_post.stop) self.mock_post.return_value.status_code = 204 self.rp_uuid = mock.sentinel.rp self.consumer_uuid = mock.sentinel.consumer self.data = {"MEMORY_MB": 1024} self.project_id = mock.sentinel.project_id self.user_id = mock.sentinel.user_id self.expected_url = '/allocations' def test_url_microversion(self): expected_microversion = '1.13' resp = self.client.set_and_clear_allocations( self.context, self.rp_uuid, self.consumer_uuid, self.data, self.project_id, self.user_id) self.assertTrue(resp) self.mock_post.assert_called_once_with( self.expected_url, mock.ANY, version=expected_microversion, global_request_id=self.context.global_id) def test_payload_no_clear(self): expected_payload = { self.consumer_uuid: { 'user_id': self.user_id, 'project_id': self.project_id, 'allocations': { self.rp_uuid: { 'resources': { 'MEMORY_MB': 1024 } } } } } resp = self.client.set_and_clear_allocations( self.context, self.rp_uuid, self.consumer_uuid, self.data, self.project_id, self.user_id) self.assertTrue(resp) args, kwargs = self.mock_post.call_args payload = args[1] self.assertEqual(expected_payload, payload) def test_payload_with_clear(self): expected_payload = { self.consumer_uuid: { 'user_id': self.user_id, 'project_id': self.project_id, 'allocations': { self.rp_uuid: { 'resources': { 'MEMORY_MB': 1024 } } } }, mock.sentinel.migration_uuid: { 'user_id': self.user_id, 'project_id': self.project_id, 'allocations': {} } } resp = self.client.set_and_clear_allocations( self.context, self.rp_uuid, self.consumer_uuid, self.data, self.project_id, self.user_id, consumer_to_clear=mock.sentinel.migration_uuid) self.assertTrue(resp) args, kwargs = self.mock_post.call_args payload = args[1] self.assertEqual(expected_payload, payload) @mock.patch('time.sleep') def 
test_409_concurrent_update(self, mock_sleep): self.mock_post.return_value.status_code = 409 self.mock_post.return_value.text = 'concurrently updated' resp = self.client.set_and_clear_allocations( self.context, self.rp_uuid, self.consumer_uuid, self.data, self.project_id, self.user_id, consumer_to_clear=mock.sentinel.migration_uuid) self.assertFalse(resp) # Post was attempted four times. self.assertEqual(4, self.mock_post.call_count) @mock.patch('nova.scheduler.client.report.LOG.warning') def test_not_409_failure(self, mock_log): error_message = 'placement not there' self.mock_post.return_value.status_code = 503 self.mock_post.return_value.text = error_message resp = self.client.set_and_clear_allocations( self.context, self.rp_uuid, self.consumer_uuid, self.data, self.project_id, self.user_id, consumer_to_clear=mock.sentinel.migration_uuid) self.assertFalse(resp) args, kwargs = mock_log.call_args log_message = args[0] log_args = args[1] self.assertIn('Unable to post allocations', log_message) self.assertEqual(error_message, log_args['text']) class TestProviderOperations(SchedulerReportClientTestCase): @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' '_create_resource_provider') @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' '_get_inventory') @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' '_get_provider_aggregates') @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' '_get_provider_traits') @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' '_get_sharing_providers') @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' '_get_providers_in_tree') def test_ensure_resource_provider_get(self, get_rpt_mock, get_shr_mock, get_trait_mock, get_agg_mock, get_inv_mock, create_rp_mock): # No resource provider exists in the client's cache, so validate that # if we get the resource provider from the placement API that we don't # try to create the resource provider. get_rpt_mock.return_value = [{ 'uuid': uuids.compute_node, 'name': mock.sentinel.name, 'generation': 1, }] get_inv_mock.return_value = None get_agg_mock.return_value = set([uuids.agg1]) get_trait_mock.return_value = set(['CUSTOM_GOLD']) get_shr_mock.return_value = [] self.client._ensure_resource_provider(self.context, uuids.compute_node) get_rpt_mock.assert_called_once_with(self.context, uuids.compute_node) self.assertTrue(self.client._provider_tree.exists(uuids.compute_node)) get_agg_mock.assert_called_once_with(self.context, uuids.compute_node) self.assertTrue( self.client._provider_tree.in_aggregates(uuids.compute_node, [uuids.agg1])) self.assertFalse( self.client._provider_tree.in_aggregates(uuids.compute_node, [uuids.agg2])) get_trait_mock.assert_called_once_with(self.context, uuids.compute_node) self.assertTrue( self.client._provider_tree.has_traits(uuids.compute_node, ['CUSTOM_GOLD'])) self.assertFalse( self.client._provider_tree.has_traits(uuids.compute_node, ['CUSTOM_SILVER'])) get_shr_mock.assert_called_once_with(self.context, set([uuids.agg1])) self.assertTrue(self.client._provider_tree.exists(uuids.compute_node)) self.assertFalse(create_rp_mock.called) @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' '_create_resource_provider') @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' '_refresh_associations') @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' 
'_get_providers_in_tree') def test_ensure_resource_provider_create_fail(self, get_rpt_mock, refresh_mock, create_rp_mock): # No resource provider exists in the client's cache, and # _create_provider raises, indicating there was an error with the # create call. Ensure we don't populate the resource provider cache get_rpt_mock.return_value = [] create_rp_mock.side_effect = exception.ResourceProviderCreationFailed( name=uuids.compute_node) self.assertRaises( exception.ResourceProviderCreationFailed, self.client._ensure_resource_provider, self.context, uuids.compute_node) get_rpt_mock.assert_called_once_with(self.context, uuids.compute_node) create_rp_mock.assert_called_once_with( self.context, uuids.compute_node, uuids.compute_node, parent_provider_uuid=None) self.assertFalse(self.client._provider_tree.exists(uuids.compute_node)) self.assertFalse(refresh_mock.called) self.assertRaises( ValueError, self.client._provider_tree.in_aggregates, uuids.compute_node, []) self.assertRaises( ValueError, self.client._provider_tree.has_traits, uuids.compute_node, []) @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' '_create_resource_provider', return_value=None) @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' '_refresh_associations') @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' '_get_providers_in_tree') def test_ensure_resource_provider_create_no_placement(self, get_rpt_mock, refresh_mock, create_rp_mock): # No resource provider exists in the client's cache, and # @safe_connect on _create_resource_provider returns None because # Placement isn't running yet. Ensure we don't populate the resource # provider cache. get_rpt_mock.return_value = [] self.assertRaises( exception.ResourceProviderCreationFailed, self.client._ensure_resource_provider, self.context, uuids.compute_node) get_rpt_mock.assert_called_once_with(self.context, uuids.compute_node) create_rp_mock.assert_called_once_with( self.context, uuids.compute_node, uuids.compute_node, parent_provider_uuid=None) self.assertFalse(self.client._provider_tree.exists(uuids.compute_node)) refresh_mock.assert_not_called() self.assertRaises( ValueError, self.client._provider_tree.in_aggregates, uuids.compute_node, []) self.assertRaises( ValueError, self.client._provider_tree.has_traits, uuids.compute_node, []) @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' '_create_resource_provider') @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' '_refresh_and_get_inventory') @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' '_refresh_associations') @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' '_get_providers_in_tree') def test_ensure_resource_provider_create(self, get_rpt_mock, refresh_inv_mock, refresh_assoc_mock, create_rp_mock): # No resource provider exists in the client's cache and no resource # provider was returned from the placement API, so verify that in this # case we try to create the resource provider via the placement API. 
get_rpt_mock.return_value = [] create_rp_mock.return_value = { 'uuid': uuids.compute_node, 'name': 'compute-name', 'generation': 1, } self.assertEqual( uuids.compute_node, self.client._ensure_resource_provider(self.context, uuids.compute_node)) self._validate_provider(uuids.compute_node, name='compute-name', generation=1, parent_uuid=None, aggregates=set(), traits=set()) # We don't refresh for a just-created provider refresh_inv_mock.assert_not_called() refresh_assoc_mock.assert_not_called() get_rpt_mock.assert_called_once_with(self.context, uuids.compute_node) create_rp_mock.assert_called_once_with( self.context, uuids.compute_node, uuids.compute_node, # name param defaults to UUID if None parent_provider_uuid=None, ) self.assertTrue(self.client._provider_tree.exists(uuids.compute_node)) create_rp_mock.reset_mock() # Validate the path where we specify a name (don't default to the UUID) self.client._ensure_resource_provider( self.context, uuids.cn2, 'a-name') create_rp_mock.assert_called_once_with( self.context, uuids.cn2, 'a-name', parent_provider_uuid=None) @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' '_refresh_associations', new=mock.Mock()) @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' '_create_resource_provider') @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' '_get_providers_in_tree') def test_ensure_resource_provider_tree(self, get_rpt_mock, create_rp_mock): """Test _ensure_resource_provider with a tree of providers.""" def _create_resource_provider(context, uuid, name, parent_provider_uuid=None): """Mock side effect for creating the RP with the specified args.""" return { 'uuid': uuid, 'name': name, 'generation': 0, 'parent_provider_uuid': parent_provider_uuid } create_rp_mock.side_effect = _create_resource_provider # Not initially in the placement database, so we have to create it. get_rpt_mock.return_value = [] # Create the root root = self.client._ensure_resource_provider(self.context, uuids.root) self.assertEqual(uuids.root, root) # Now create a child child1 = self.client._ensure_resource_provider( self.context, uuids.child1, name='junior', parent_provider_uuid=uuids.root) self.assertEqual(uuids.child1, child1) # If we re-ensure the child, we get the object from the tree, not a # newly-created one - i.e. the early .find() works like it should. self.assertIs(child1, self.client._ensure_resource_provider(self.context, uuids.child1)) # Make sure we can create a grandchild grandchild = self.client._ensure_resource_provider( self.context, uuids.grandchild, parent_provider_uuid=uuids.child1) self.assertEqual(uuids.grandchild, grandchild) # Now create a second child of the root and make sure it doesn't wind # up in some crazy wrong place like under child1 or grandchild child2 = self.client._ensure_resource_provider( self.context, uuids.child2, parent_provider_uuid=uuids.root) self.assertEqual(uuids.child2, child2) # At this point we should get all the providers. self.assertEqual( set([uuids.root, uuids.child1, uuids.child2, uuids.grandchild]), set(self.client._provider_tree.get_provider_uuids())) @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' '_get_providers_in_tree') @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' '_refresh_and_get_inventory') @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' 
'_refresh_associations') def test_ensure_resource_provider_refresh_fetch(self, mock_ref_assoc, mock_ref_inv, mock_gpit): """Make sure refreshes are called with the appropriate UUIDs and flags when we fetch the provider tree from placement. """ tree_uuids = set([uuids.root, uuids.one, uuids.two]) mock_gpit.return_value = [{'uuid': u, 'name': u, 'generation': 42} for u in tree_uuids] self.assertEqual(uuids.root, self.client._ensure_resource_provider(self.context, uuids.root)) mock_gpit.assert_called_once_with(self.context, uuids.root) mock_ref_inv.assert_has_calls([mock.call(self.context, uuid) for uuid in tree_uuids]) mock_ref_assoc.assert_has_calls( [mock.call(self.context, uuid, generation=42, force=True) for uuid in tree_uuids]) self.assertEqual(tree_uuids, set(self.client._provider_tree.get_provider_uuids())) @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' '_get_providers_in_tree') @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' '_create_resource_provider') @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' '_refresh_associations') def test_ensure_resource_provider_refresh_create(self, mock_refresh, mock_create, mock_gpit): """Make sure refresh is not called when we create the RP.""" mock_gpit.return_value = [] mock_create.return_value = {'name': 'cn', 'uuid': uuids.cn, 'generation': 42} self.assertEqual(uuids.root, self.client._ensure_resource_provider(self.context, uuids.root)) mock_gpit.assert_called_once_with(self.context, uuids.root) mock_create.assert_called_once_with(self.context, uuids.root, uuids.root, parent_provider_uuid=None) mock_refresh.assert_not_called() self.assertEqual([uuids.cn], self.client._provider_tree.get_provider_uuids()) def test_get_allocation_candidates(self): resp_mock = mock.Mock(status_code=200) json_data = { 'allocation_requests': mock.sentinel.alloc_reqs, 'provider_summaries': mock.sentinel.p_sums, } resources = scheduler_utils.ResourceRequest.from_extra_specs({ 'resources:VCPU': '1', 'resources:MEMORY_MB': '1024', 'trait:HW_CPU_X86_AVX': 'required', 'trait:CUSTOM_TRAIT1': 'required', 'trait:CUSTOM_TRAIT2': 'preferred', 'trait:CUSTOM_TRAIT3': 'forbidden', 'trait:CUSTOM_TRAIT4': 'forbidden', 'resources1:DISK_GB': '30', 'trait1:STORAGE_DISK_SSD': 'required', 'resources2:VGPU': '2', 'trait2:HW_GPU_RESOLUTION_W2560H1600': 'required', 'trait2:HW_GPU_API_VULKAN': 'required', 'resources3:SRIOV_NET_VF': '1', 'resources3:CUSTOM_NET_EGRESS_BYTES_SEC': '125000', 'group_policy': 'isolate', # These are ignored because misspelled, bad value, etc. 
'resources02:CUSTOM_WIDGET': '123', 'trait:HW_NIC_OFFLOAD_LRO': 'preferred', 'group_policy3': 'none', }) resources.get_request_group(None).member_of = [ ('agg1', 'agg2', 'agg3'), ('agg1', 'agg2')] expected_path = '/allocation_candidates' expected_query = [ ('group_policy', 'isolate'), ('limit', '1000'), ('member_of', 'in:agg1,agg2'), ('member_of', 'in:agg1,agg2,agg3'), ('required', 'CUSTOM_TRAIT1,HW_CPU_X86_AVX,!CUSTOM_TRAIT3,' '!CUSTOM_TRAIT4'), ('required1', 'STORAGE_DISK_SSD'), ('required2', 'HW_GPU_API_VULKAN,HW_GPU_RESOLUTION_W2560H1600'), ('resources', 'MEMORY_MB:1024,VCPU:1'), ('resources1', 'DISK_GB:30'), ('resources2', 'VGPU:2'), ('resources3', 'CUSTOM_NET_EGRESS_BYTES_SEC:125000,SRIOV_NET_VF:1') ] resp_mock.json.return_value = json_data self.ks_adap_mock.get.return_value = resp_mock alloc_reqs, p_sums, allocation_request_version = ( self.client.get_allocation_candidates(self.context, resources)) url = self.ks_adap_mock.get.call_args[0][0] split_url = parse.urlsplit(url) query = parse.parse_qsl(split_url.query) self.assertEqual(expected_path, split_url.path) self.assertEqual(expected_query, query) expected_url = '/allocation_candidates?%s' % parse.urlencode( expected_query) self.ks_adap_mock.get.assert_called_once_with( expected_url, raise_exc=False, microversion='1.25', headers={'X-Openstack-Request-Id': self.context.global_id}) self.assertEqual(mock.sentinel.alloc_reqs, alloc_reqs) self.assertEqual(mock.sentinel.p_sums, p_sums) def test_get_ac_no_trait_bogus_group_policy_custom_limit(self): self.flags(max_placement_results=42, group='scheduler') resp_mock = mock.Mock(status_code=200) json_data = { 'allocation_requests': mock.sentinel.alloc_reqs, 'provider_summaries': mock.sentinel.p_sums, } resources = scheduler_utils.ResourceRequest.from_extra_specs({ 'resources:VCPU': '1', 'resources:MEMORY_MB': '1024', 'resources1:DISK_GB': '30', 'group_policy': 'bogus', }) expected_path = '/allocation_candidates' expected_query = [ ('limit', '42'), ('resources', 'MEMORY_MB:1024,VCPU:1'), ('resources1', 'DISK_GB:30'), ] resp_mock.json.return_value = json_data self.ks_adap_mock.get.return_value = resp_mock alloc_reqs, p_sums, allocation_request_version = ( self.client.get_allocation_candidates(self.context, resources)) url = self.ks_adap_mock.get.call_args[0][0] split_url = parse.urlsplit(url) query = parse.parse_qsl(split_url.query) self.assertEqual(expected_path, split_url.path) self.assertEqual(expected_query, query) expected_url = '/allocation_candidates?%s' % parse.urlencode( expected_query) self.assertEqual(mock.sentinel.alloc_reqs, alloc_reqs) self.ks_adap_mock.get.assert_called_once_with( expected_url, raise_exc=False, microversion='1.25', headers={'X-Openstack-Request-Id': self.context.global_id}) self.assertEqual(mock.sentinel.p_sums, p_sums) def test_get_allocation_candidates_not_found(self): # Ensure _get_resource_provider() just returns None when the placement # API doesn't find a resource provider matching a UUID resp_mock = mock.Mock(status_code=404) self.ks_adap_mock.get.return_value = resp_mock expected_path = '/allocation_candidates' expected_query = {'resources': ['MEMORY_MB:1024'], 'limit': ['100']} # Make sure we're also honoring the configured limit self.flags(max_placement_results=100, group='scheduler') resources = scheduler_utils.ResourceRequest.from_extra_specs( {'resources:MEMORY_MB': '1024'}) res = self.client.get_allocation_candidates(self.context, resources) self.ks_adap_mock.get.assert_called_once_with( mock.ANY, raise_exc=False, microversion='1.25', 
headers={'X-Openstack-Request-Id': self.context.global_id}) url = self.ks_adap_mock.get.call_args[0][0] split_url = parse.urlsplit(url) query = parse.parse_qs(split_url.query) self.assertEqual(expected_path, split_url.path) self.assertEqual(expected_query, query) self.assertIsNone(res[0]) def test_get_resource_provider_found(self): # Ensure _get_resource_provider() returns a dict of resource provider # if it finds a resource provider record from the placement API uuid = uuids.compute_node resp_mock = mock.Mock(status_code=200) json_data = { 'uuid': uuid, 'name': uuid, 'generation': 42, 'parent_provider_uuid': None, } resp_mock.json.return_value = json_data self.ks_adap_mock.get.return_value = resp_mock result = self.client._get_resource_provider(self.context, uuid) expected_provider_dict = dict( uuid=uuid, name=uuid, generation=42, parent_provider_uuid=None, ) expected_url = '/resource_providers/' + uuid self.ks_adap_mock.get.assert_called_once_with( expected_url, raise_exc=False, microversion='1.14', headers={'X-Openstack-Request-Id': self.context.global_id}) self.assertEqual(expected_provider_dict, result) def test_get_resource_provider_not_found(self): # Ensure _get_resource_provider() just returns None when the placement # API doesn't find a resource provider matching a UUID resp_mock = mock.Mock(status_code=404) self.ks_adap_mock.get.return_value = resp_mock uuid = uuids.compute_node result = self.client._get_resource_provider(self.context, uuid) expected_url = '/resource_providers/' + uuid self.ks_adap_mock.get.assert_called_once_with( expected_url, raise_exc=False, microversion='1.14', headers={'X-Openstack-Request-Id': self.context.global_id}) self.assertIsNone(result) @mock.patch.object(report.LOG, 'error') def test_get_resource_provider_error(self, logging_mock): # Ensure _get_resource_provider() sets the error flag when trying to # communicate with the placement API and not getting an error we can # deal with resp_mock = mock.Mock(status_code=503) self.ks_adap_mock.get.return_value = resp_mock self.ks_adap_mock.get.return_value.headers = { 'x-openstack-request-id': uuids.request_id} uuid = uuids.compute_node self.assertRaises( exception.ResourceProviderRetrievalFailed, self.client._get_resource_provider, self.context, uuid) expected_url = '/resource_providers/' + uuid self.ks_adap_mock.get.assert_called_once_with( expected_url, raise_exc=False, microversion='1.14', headers={'X-Openstack-Request-Id': self.context.global_id}) # A 503 Service Unavailable should trigger an error log that # includes the placement request id and return None # from _get_resource_provider() self.assertTrue(logging_mock.called) self.assertEqual(uuids.request_id, logging_mock.call_args[0][1]['placement_req_id']) def test_get_sharing_providers(self): resp_mock = mock.Mock(status_code=200) rpjson = [ { 'uuid': uuids.sharing1, 'name': 'bandwidth_provider', 'generation': 42, 'parent_provider_uuid': None, 'root_provider_uuid': None, 'links': [], }, { 'uuid': uuids.sharing2, 'name': 'storage_provider', 'generation': 42, 'parent_provider_uuid': None, 'root_provider_uuid': None, 'links': [], }, ] resp_mock.json.return_value = {'resource_providers': rpjson} self.ks_adap_mock.get.return_value = resp_mock result = self.client._get_sharing_providers( self.context, [uuids.agg1, uuids.agg2]) expected_url = ('/resource_providers?member_of=in:' + ','.join((uuids.agg1, uuids.agg2)) + '&required=MISC_SHARES_VIA_AGGREGATE') self.ks_adap_mock.get.assert_called_once_with( expected_url, microversion='1.18', raise_exc=False, 
headers={'X-Openstack-Request-Id': self.context.global_id}) self.assertEqual(rpjson, result) def test_get_sharing_providers_emptylist(self): self.assertEqual( [], self.client._get_sharing_providers(self.context, [])) self.ks_adap_mock.get.assert_not_called() @mock.patch.object(report.LOG, 'error') def test_get_sharing_providers_error(self, logging_mock): # Ensure _get_sharing_providers() logs an error and raises if the # placement API call doesn't respond 200 resp_mock = mock.Mock(status_code=503) self.ks_adap_mock.get.return_value = resp_mock self.ks_adap_mock.get.return_value.headers = { 'x-openstack-request-id': uuids.request_id} uuid = uuids.agg self.assertRaises(exception.ResourceProviderRetrievalFailed, self.client._get_sharing_providers, self.context, [uuid]) expected_url = ('/resource_providers?member_of=in:' + uuid + '&required=MISC_SHARES_VIA_AGGREGATE') self.ks_adap_mock.get.assert_called_once_with( expected_url, raise_exc=False, microversion='1.18', headers={'X-Openstack-Request-Id': self.context.global_id}) # A 503 Service Unavailable should trigger an error log that # includes the placement request id self.assertTrue(logging_mock.called) self.assertEqual(uuids.request_id, logging_mock.call_args[0][1]['placement_req_id']) def test_get_providers_in_tree(self): # Ensure _get_providers_in_tree() returns a list of resource # provider dicts if it finds a resource provider record from the # placement API root = uuids.compute_node child = uuids.child resp_mock = mock.Mock(status_code=200) rpjson = [ { 'uuid': root, 'name': 'daddy', 'generation': 42, 'parent_provider_uuid': None, }, { 'uuid': child, 'name': 'junior', 'generation': 42, 'parent_provider_uuid': root, }, ] resp_mock.json.return_value = {'resource_providers': rpjson} self.ks_adap_mock.get.return_value = resp_mock result = self.client._get_providers_in_tree(self.context, root) expected_url = '/resource_providers?in_tree=' + root self.ks_adap_mock.get.assert_called_once_with( expected_url, raise_exc=False, microversion='1.14', headers={'X-Openstack-Request-Id': self.context.global_id}) self.assertEqual(rpjson, result) @mock.patch.object(report.LOG, 'error') def test_get_providers_in_tree_error(self, logging_mock): # Ensure _get_providers_in_tree() logs an error and raises if the # placement API call doesn't respond 200 resp_mock = mock.Mock(status_code=503) self.ks_adap_mock.get.return_value = resp_mock self.ks_adap_mock.get.return_value.headers = { 'x-openstack-request-id': 'req-' + uuids.request_id} uuid = uuids.compute_node self.assertRaises(exception.ResourceProviderRetrievalFailed, self.client._get_providers_in_tree, self.context, uuid) expected_url = '/resource_providers?in_tree=' + uuid self.ks_adap_mock.get.assert_called_once_with( expected_url, raise_exc=False, microversion='1.14', headers={'X-Openstack-Request-Id': self.context.global_id}) # A 503 Service Unavailable should trigger an error log that includes # the placement request id self.assertTrue(logging_mock.called) self.assertEqual('req-' + uuids.request_id, logging_mock.call_args[0][1]['placement_req_id']) def test_create_resource_provider(self): """Test that _create_resource_provider() sends a dict of resource provider information without a parent provider UUID. 
""" uuid = uuids.compute_node name = 'computehost' resp_mock = mock.Mock(status_code=200) self.ks_adap_mock.post.return_value = resp_mock self.assertEqual( resp_mock.json.return_value, self.client._create_resource_provider(self.context, uuid, name)) expected_payload = { 'uuid': uuid, 'name': name, } expected_url = '/resource_providers' self.ks_adap_mock.post.assert_called_once_with( expected_url, json=expected_payload, raise_exc=False, microversion='1.20', headers={'X-Openstack-Request-Id': self.context.global_id}) def test_create_resource_provider_with_parent(self): """Test that when specifying a parent provider UUID, that the parent_provider_uuid part of the payload is properly specified. """ parent_uuid = uuids.parent uuid = uuids.compute_node name = 'computehost' resp_mock = mock.Mock(status_code=200) self.ks_adap_mock.post.return_value = resp_mock self.assertEqual( resp_mock.json.return_value, self.client._create_resource_provider( self.context, uuid, name, parent_provider_uuid=parent_uuid, ) ) expected_payload = { 'uuid': uuid, 'name': name, 'parent_provider_uuid': parent_uuid, } expected_url = '/resource_providers' self.ks_adap_mock.post.assert_called_once_with( expected_url, json=expected_payload, raise_exc=False, microversion='1.20', headers={'X-Openstack-Request-Id': self.context.global_id}) @mock.patch.object(report.LOG, 'info') @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' '_get_resource_provider') def test_create_resource_provider_concurrent_create(self, get_rp_mock, logging_mock): # Ensure _create_resource_provider() returns a dict of resource # provider gotten from _get_resource_provider() if the call to create # the resource provider in the placement API returned a 409 Conflict, # indicating another thread concurrently created the resource provider # record. uuid = uuids.compute_node name = 'computehost' self.ks_adap_mock.post.return_value = fake_requests.FakeResponse( 409, content='not a name conflict', headers={'x-openstack-request-id': uuids.request_id}) get_rp_mock.return_value = mock.sentinel.get_rp result = self.client._create_resource_provider(self.context, uuid, name) expected_payload = { 'uuid': uuid, 'name': name, } expected_url = '/resource_providers' self.ks_adap_mock.post.assert_called_once_with( expected_url, json=expected_payload, raise_exc=False, microversion='1.20', headers={'X-Openstack-Request-Id': self.context.global_id}) self.assertEqual(mock.sentinel.get_rp, result) # The 409 response will produce a message to the info log. self.assertTrue(logging_mock.called) self.assertEqual(uuids.request_id, logging_mock.call_args[0][1]['placement_req_id']) def test_create_resource_provider_name_conflict(self): # When the API call to create the resource provider fails 409 with a # name conflict, we raise an exception. 
        self.ks_adap_mock.post.return_value = fake_requests.FakeResponse(
            409, content='<stuff>Conflicting resource provider name: foo '
                         'already exists.</stuff>')

        self.assertRaises(
            exception.ResourceProviderCreationFailed,
            self.client._create_resource_provider, self.context,
            uuids.compute_node, 'foo')

    @mock.patch.object(report.LOG, 'error')
    def test_create_resource_provider_error(self, logging_mock):
        # Ensure _create_resource_provider() raises
        # ResourceProviderCreationFailed when the placement API returns an
        # error we can't recover from.
        uuid = uuids.compute_node
        name = 'computehost'
        self.ks_adap_mock.post.return_value = fake_requests.FakeResponse(
            503, headers={'x-openstack-request-id': uuids.request_id})

        self.assertRaises(
            exception.ResourceProviderCreationFailed,
            self.client._create_resource_provider, self.context, uuid, name)

        expected_payload = {
            'uuid': uuid,
            'name': name,
        }
        expected_url = '/resource_providers'
        self.ks_adap_mock.post.assert_called_once_with(
            expected_url, json=expected_payload, raise_exc=False,
            microversion='1.20',
            headers={'X-Openstack-Request-Id': self.context.global_id})
        # A 503 Service Unavailable should log an error that includes the
        # placement request id, and _create_resource_provider() should raise
        # ResourceProviderCreationFailed rather than return a provider.
        self.assertTrue(logging_mock.called)
        self.assertEqual(uuids.request_id,
                         logging_mock.call_args[0][1]['placement_req_id'])

    def test_put_empty(self):
        # A simple put with an empty (not None) payload should send the empty
        # payload through.
        # Bug #1744786
        url = '/resource_providers/%s/aggregates' % uuids.foo
        self.client.put(url, [])
        self.ks_adap_mock.put.assert_called_once_with(
            url, json=[], raise_exc=False, microversion=None, headers={})

    def test_delete_provider(self):
        delete_mock = fake_requests.FakeResponse(None)
        self.ks_adap_mock.delete.return_value = delete_mock

        for status_code in (204, 404):
            delete_mock.status_code = status_code
            # Seed the caches
            self.client._provider_tree.new_root('compute', uuids.root,
                                                generation=0)
            self.client._association_refresh_time[uuids.root] = 1234

            self.client._delete_provider(uuids.root, global_request_id='gri')

            self.ks_adap_mock.delete.assert_called_once_with(
                '/resource_providers/' + uuids.root,
                headers={'X-Openstack-Request-Id': 'gri'}, microversion=None,
                raise_exc=False)
            self.assertFalse(self.client._provider_tree.exists(uuids.root))
            self.assertNotIn(uuids.root,
                             self.client._association_refresh_time)

            self.ks_adap_mock.delete.reset_mock()

    def test_delete_provider_fail(self):
        delete_mock = fake_requests.FakeResponse(None)
        self.ks_adap_mock.delete.return_value = delete_mock
        resp_exc_map = {409: exception.ResourceProviderInUse,
                        503: exception.ResourceProviderDeletionFailed}

        for status_code, exc in resp_exc_map.items():
            delete_mock.status_code = status_code
            self.assertRaises(exc, self.client._delete_provider, uuids.root)
            self.ks_adap_mock.delete.assert_called_once_with(
                '/resource_providers/' + uuids.root, microversion=None,
                headers={}, raise_exc=False)

            self.ks_adap_mock.delete.reset_mock()

    def test_set_aggregates_for_provider(self):
        aggs = [uuids.agg1, uuids.agg2]
        resp_mock = mock.Mock(status_code=200)
        resp_mock.json.return_value = {
            'aggregates': aggs,
        }
        self.ks_adap_mock.put.return_value = resp_mock

        # Prime the provider tree cache
        self.client._provider_tree.new_root('rp', uuids.rp, generation=0)
        self.assertEqual(set(),
                         self.client._provider_tree.data(uuids.rp).aggregates)

        self.client.set_aggregates_for_provider(self.context, uuids.rp, aggs)

        self.ks_adap_mock.put.assert_called_once_with(
'/resource_providers/%s/aggregates' % uuids.rp, json=aggs, raise_exc=False, microversion='1.1', headers={'X-Openstack-Request-Id': self.context.global_id}) # Cache was updated self.assertEqual(set(aggs), self.client._provider_tree.data(uuids.rp).aggregates) def test_set_aggregates_for_provider_fail(self): self.ks_adap_mock.put.return_value = mock.Mock(status_code=503) # Prime the provider tree cache self.client._provider_tree.new_root('rp', uuids.rp, generation=0) self.assertRaises( exception.ResourceProviderUpdateFailed, self.client.set_aggregates_for_provider, self.context, uuids.rp, [uuids.agg]) # The cache wasn't updated self.assertEqual(set(), self.client._provider_tree.data(uuids.rp).aggregates) class TestAggregates(SchedulerReportClientTestCase): def test_get_provider_aggregates_found(self): uuid = uuids.compute_node resp_mock = mock.Mock(status_code=200) aggs = [ uuids.agg1, uuids.agg2, ] resp_mock.json.return_value = {'aggregates': aggs} self.ks_adap_mock.get.return_value = resp_mock result = self.client._get_provider_aggregates(self.context, uuid) expected_url = '/resource_providers/' + uuid + '/aggregates' self.ks_adap_mock.get.assert_called_once_with( expected_url, raise_exc=False, microversion='1.1', headers={'X-Openstack-Request-Id': self.context.global_id}) self.assertEqual(set(aggs), result) @mock.patch.object(report.LOG, 'error') def test_get_provider_aggregates_error(self, log_mock): """Test that when the placement API returns any error when looking up a provider's aggregates, we raise an exception. """ uuid = uuids.compute_node resp_mock = mock.Mock(headers={ 'x-openstack-request-id': uuids.request_id}) self.ks_adap_mock.get.return_value = resp_mock for status_code in (400, 404, 503): resp_mock.status_code = status_code self.assertRaises( exception.ResourceProviderAggregateRetrievalFailed, self.client._get_provider_aggregates, self.context, uuid) expected_url = '/resource_providers/' + uuid + '/aggregates' self.ks_adap_mock.get.assert_called_once_with( expected_url, raise_exc=False, microversion='1.1', headers={'X-Openstack-Request-Id': self.context.global_id}) self.assertTrue(log_mock.called) self.assertEqual(uuids.request_id, log_mock.call_args[0][1]['placement_req_id']) self.ks_adap_mock.get.reset_mock() log_mock.reset_mock() class TestTraits(SchedulerReportClientTestCase): trait_api_kwargs = {'raise_exc': False, 'microversion': '1.6'} def test_get_provider_traits_found(self): uuid = uuids.compute_node resp_mock = mock.Mock(status_code=200) traits = [ 'CUSTOM_GOLD', 'CUSTOM_SILVER', ] resp_mock.json.return_value = {'traits': traits} self.ks_adap_mock.get.return_value = resp_mock result = self.client._get_provider_traits(self.context, uuid) expected_url = '/resource_providers/' + uuid + '/traits' self.ks_adap_mock.get.assert_called_once_with( expected_url, headers={'X-Openstack-Request-Id': self.context.global_id}, **self.trait_api_kwargs) self.assertEqual(set(traits), result) @mock.patch.object(report.LOG, 'error') def test_get_provider_traits_error(self, log_mock): """Test that when the placement API returns any error when looking up a provider's traits, we raise an exception. 
""" uuid = uuids.compute_node resp_mock = mock.Mock(headers={ 'x-openstack-request-id': uuids.request_id}) self.ks_adap_mock.get.return_value = resp_mock for status_code in (400, 404, 503): resp_mock.status_code = status_code self.assertRaises( exception.ResourceProviderTraitRetrievalFailed, self.client._get_provider_traits, self.context, uuid) expected_url = '/resource_providers/' + uuid + '/traits' self.ks_adap_mock.get.assert_called_once_with( expected_url, headers={'X-Openstack-Request-Id': self.context.global_id}, **self.trait_api_kwargs) self.assertTrue(log_mock.called) self.assertEqual(uuids.request_id, log_mock.call_args[0][1]['placement_req_id']) self.ks_adap_mock.get.reset_mock() log_mock.reset_mock() def test_ensure_traits(self): """Successful paths, various permutations of traits existing or needing to be created. """ standard_traits = ['HW_NIC_OFFLOAD_UCS', 'HW_NIC_OFFLOAD_RDMA'] custom_traits = ['CUSTOM_GOLD', 'CUSTOM_SILVER'] all_traits = standard_traits + custom_traits get_mock = mock.Mock(status_code=200) self.ks_adap_mock.get.return_value = get_mock # Request all traits; custom traits need to be created get_mock.json.return_value = {'traits': standard_traits} self.client._ensure_traits(self.context, all_traits) self.ks_adap_mock.get.assert_called_once_with( '/traits?name=in:' + ','.join(all_traits), headers={'X-Openstack-Request-Id': self.context.global_id}, **self.trait_api_kwargs) self.ks_adap_mock.put.assert_has_calls( [mock.call('/traits/' + trait, headers={'X-Openstack-Request-Id': self.context.global_id}, **self.trait_api_kwargs) for trait in custom_traits], any_order=True) self.ks_adap_mock.reset_mock() # Request standard traits; no traits need to be created get_mock.json.return_value = {'traits': standard_traits} self.client._ensure_traits(self.context, standard_traits) self.ks_adap_mock.get.assert_called_once_with( '/traits?name=in:' + ','.join(standard_traits), headers={'X-Openstack-Request-Id': self.context.global_id}, **self.trait_api_kwargs) self.ks_adap_mock.put.assert_not_called() self.ks_adap_mock.reset_mock() # Request no traits - short circuit self.client._ensure_traits(self.context, None) self.client._ensure_traits(self.context, []) self.ks_adap_mock.get.assert_not_called() self.ks_adap_mock.put.assert_not_called() def test_ensure_traits_fail_retrieval(self): self.ks_adap_mock.get.return_value = mock.Mock(status_code=400) self.assertRaises(exception.TraitRetrievalFailed, self.client._ensure_traits, self.context, ['FOO']) self.ks_adap_mock.get.assert_called_once_with( '/traits?name=in:FOO', headers={'X-Openstack-Request-Id': self.context.global_id}, **self.trait_api_kwargs) self.ks_adap_mock.put.assert_not_called() def test_ensure_traits_fail_creation(self): get_mock = mock.Mock(status_code=200) get_mock.json.return_value = {'traits': []} self.ks_adap_mock.get.return_value = get_mock self.ks_adap_mock.put.return_value = fake_requests.FakeResponse(400) self.assertRaises(exception.TraitCreationFailed, self.client._ensure_traits, self.context, ['FOO']) self.ks_adap_mock.get.assert_called_once_with( '/traits?name=in:FOO', headers={'X-Openstack-Request-Id': self.context.global_id}, **self.trait_api_kwargs) self.ks_adap_mock.put.assert_called_once_with( '/traits/FOO', headers={'X-Openstack-Request-Id': self.context.global_id}, **self.trait_api_kwargs) def test_set_traits_for_provider(self): traits = ['HW_NIC_OFFLOAD_UCS', 'HW_NIC_OFFLOAD_RDMA'] # Make _ensure_traits succeed without PUTting get_mock = mock.Mock(status_code=200) get_mock.json.return_value = 
{'traits': traits} self.ks_adap_mock.get.return_value = get_mock # Prime the provider tree cache self.client._provider_tree.new_root('rp', uuids.rp, generation=0) # Mock the /rp/{u}/traits PUT to succeed put_mock = mock.Mock(status_code=200) put_mock.json.return_value = {'traits': traits, 'resource_provider_generation': 1} self.ks_adap_mock.put.return_value = put_mock # Invoke self.client.set_traits_for_provider(self.context, uuids.rp, traits) # Verify API calls self.ks_adap_mock.get.assert_called_once_with( '/traits?name=in:' + ','.join(traits), headers={'X-Openstack-Request-Id': self.context.global_id}, **self.trait_api_kwargs) self.ks_adap_mock.put.assert_called_once_with( '/resource_providers/%s/traits' % uuids.rp, json={'traits': traits, 'resource_provider_generation': 0}, headers={'X-Openstack-Request-Id': self.context.global_id}, **self.trait_api_kwargs) # And ensure the provider tree cache was updated appropriately self.assertFalse( self.client._provider_tree.have_traits_changed(uuids.rp, traits)) # Validate the generation self.assertEqual( 1, self.client._provider_tree.data(uuids.rp).generation) def test_set_traits_for_provider_fail(self): traits = ['HW_NIC_OFFLOAD_UCS', 'HW_NIC_OFFLOAD_RDMA'] get_mock = mock.Mock() self.ks_adap_mock.get.return_value = get_mock # Prime the provider tree cache self.client._provider_tree.new_root('rp', uuids.rp, generation=0) # _ensure_traits exception bubbles up get_mock.status_code = 400 self.assertRaises( exception.TraitRetrievalFailed, self.client.set_traits_for_provider, self.context, uuids.rp, traits) self.ks_adap_mock.put.assert_not_called() get_mock.status_code = 200 get_mock.json.return_value = {'traits': traits} # Conflict self.ks_adap_mock.put.return_value = mock.Mock(status_code=409) self.assertRaises( exception.ResourceProviderUpdateConflict, self.client.set_traits_for_provider, self.context, uuids.rp, traits) # Other error self.ks_adap_mock.put.return_value = mock.Mock(status_code=503) self.assertRaises( exception.ResourceProviderUpdateFailed, self.client.set_traits_for_provider, self.context, uuids.rp, traits) class TestAssociations(SchedulerReportClientTestCase): @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' '_get_provider_aggregates') @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' '_get_provider_traits') @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' '_get_sharing_providers') def test_refresh_associations_no_last(self, mock_shr_get, mock_trait_get, mock_agg_get): """Test that associations are refreshed when stale.""" uuid = uuids.compute_node # Seed the provider tree so _refresh_associations finds the provider self.client._provider_tree.new_root('compute', uuid, generation=1) mock_agg_get.return_value = set([uuids.agg1]) mock_trait_get.return_value = set(['CUSTOM_GOLD']) self.client._refresh_associations(self.context, uuid) mock_agg_get.assert_called_once_with(self.context, uuid) mock_trait_get.assert_called_once_with(self.context, uuid) mock_shr_get.assert_called_once_with( self.context, mock_agg_get.return_value) self.assertIn(uuid, self.client._association_refresh_time) self.assertTrue( self.client._provider_tree.in_aggregates(uuid, [uuids.agg1])) self.assertFalse( self.client._provider_tree.in_aggregates(uuid, [uuids.agg2])) self.assertTrue( self.client._provider_tree.has_traits(uuid, ['CUSTOM_GOLD'])) self.assertFalse( self.client._provider_tree.has_traits(uuid, ['CUSTOM_SILVER'])) @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' 
'_get_provider_aggregates') @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' '_get_provider_traits') @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' '_get_sharing_providers') def test_refresh_associations_no_refresh_sharing(self, mock_shr_get, mock_trait_get, mock_agg_get): """Test refresh_sharing=False.""" uuid = uuids.compute_node # Seed the provider tree so _refresh_associations finds the provider self.client._provider_tree.new_root('compute', uuid, generation=1) mock_agg_get.return_value = set([uuids.agg1]) mock_trait_get.return_value = set(['CUSTOM_GOLD']) self.client._refresh_associations(self.context, uuid, refresh_sharing=False) mock_agg_get.assert_called_once_with(self.context, uuid) mock_trait_get.assert_called_once_with(self.context, uuid) mock_shr_get.assert_not_called() self.assertIn(uuid, self.client._association_refresh_time) self.assertTrue( self.client._provider_tree.in_aggregates(uuid, [uuids.agg1])) self.assertFalse( self.client._provider_tree.in_aggregates(uuid, [uuids.agg2])) self.assertTrue( self.client._provider_tree.has_traits(uuid, ['CUSTOM_GOLD'])) self.assertFalse( self.client._provider_tree.has_traits(uuid, ['CUSTOM_SILVER'])) @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' '_get_provider_aggregates') @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' '_get_provider_traits') @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' '_get_sharing_providers') @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' '_associations_stale') def test_refresh_associations_not_stale(self, mock_stale, mock_shr_get, mock_trait_get, mock_agg_get): """Test that refresh associations is not called when the map is not stale. """ mock_stale.return_value = False uuid = uuids.compute_node self.client._refresh_associations(self.context, uuid) mock_agg_get.assert_not_called() mock_trait_get.assert_not_called() mock_shr_get.assert_not_called() self.assertFalse(self.client._association_refresh_time) @mock.patch.object(report.LOG, 'debug') @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' '_get_provider_aggregates') @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' '_get_provider_traits') @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' '_get_sharing_providers') def test_refresh_associations_time(self, mock_shr_get, mock_trait_get, mock_agg_get, log_mock): """Test that refresh associations is called when the map is stale.""" uuid = uuids.compute_node # Seed the provider tree so _refresh_associations finds the provider self.client._provider_tree.new_root('compute', uuid, generation=1) mock_agg_get.return_value = set([]) mock_trait_get.return_value = set([]) mock_shr_get.return_value = [] # Called a first time because association_refresh_time is empty. now = time.time() self.client._refresh_associations(self.context, uuid) mock_agg_get.assert_called_once_with(self.context, uuid) mock_trait_get.assert_called_once_with(self.context, uuid) mock_shr_get.assert_called_once_with(self.context, set()) log_mock.assert_has_calls([ mock.call('Refreshing aggregate associations for resource ' 'provider %s, aggregates: %s', uuid, 'None'), mock.call('Refreshing trait associations for resource ' 'provider %s, traits: %s', uuid, 'None') ]) self.assertIn(uuid, self.client._association_refresh_time) # Clear call count. 
mock_agg_get.reset_mock() mock_trait_get.reset_mock() mock_shr_get.reset_mock() with mock.patch('time.time') as mock_future: # Not called a second time because not enough time has passed. mock_future.return_value = (now + CONF.compute.resource_provider_association_refresh / 2) self.client._refresh_associations(self.context, uuid) mock_agg_get.assert_not_called() mock_trait_get.assert_not_called() mock_shr_get.assert_not_called() # Called because time has passed. mock_future.return_value = (now + CONF.compute.resource_provider_association_refresh + 1) self.client._refresh_associations(self.context, uuid) mock_agg_get.assert_called_once_with(self.context, uuid) mock_trait_get.assert_called_once_with(self.context, uuid) mock_shr_get.assert_called_once_with(self.context, set()) class TestComputeNodeToInventoryDict(test.NoDBTestCase): def test_compute_node_inventory(self): uuid = uuids.compute_node name = 'computehost' compute_node = objects.ComputeNode(uuid=uuid, hypervisor_hostname=name, vcpus=2, cpu_allocation_ratio=16.0, memory_mb=1024, ram_allocation_ratio=1.5, local_gb=10, disk_allocation_ratio=1.0) self.flags(reserved_host_memory_mb=1000) self.flags(reserved_host_disk_mb=200) self.flags(reserved_host_cpus=1) result = report._compute_node_to_inventory_dict(compute_node) expected = { 'VCPU': { 'total': compute_node.vcpus, 'reserved': CONF.reserved_host_cpus, 'min_unit': 1, 'max_unit': compute_node.vcpus, 'step_size': 1, 'allocation_ratio': compute_node.cpu_allocation_ratio, }, 'MEMORY_MB': { 'total': compute_node.memory_mb, 'reserved': CONF.reserved_host_memory_mb, 'min_unit': 1, 'max_unit': compute_node.memory_mb, 'step_size': 1, 'allocation_ratio': compute_node.ram_allocation_ratio, }, 'DISK_GB': { 'total': compute_node.local_gb, 'reserved': 1, # this is ceil(1000/1024) 'min_unit': 1, 'max_unit': compute_node.local_gb, 'step_size': 1, 'allocation_ratio': compute_node.disk_allocation_ratio, }, } self.assertEqual(expected, result) def test_compute_node_inventory_empty(self): uuid = uuids.compute_node name = 'computehost' compute_node = objects.ComputeNode(uuid=uuid, hypervisor_hostname=name, vcpus=0, cpu_allocation_ratio=16.0, memory_mb=0, ram_allocation_ratio=1.5, local_gb=0, disk_allocation_ratio=1.0) result = report._compute_node_to_inventory_dict(compute_node) self.assertEqual({}, result) class TestInventory(SchedulerReportClientTestCase): @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' '_ensure_resource_provider') @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' '_update_inventory') def test_update_compute_node(self, mock_ui, mock_erp): cn = self.compute_node self.client.update_compute_node(self.context, cn) mock_erp.assert_called_once_with(self.context, cn.uuid, cn.hypervisor_hostname) expected_inv_data = { 'VCPU': { 'total': 8, 'reserved': CONF.reserved_host_cpus, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 16.0, }, 'MEMORY_MB': { 'total': 1024, 'reserved': 512, 'min_unit': 1, 'max_unit': 1024, 'step_size': 1, 'allocation_ratio': 1.5, }, 'DISK_GB': { 'total': 10, 'reserved': 0, 'min_unit': 1, 'max_unit': 10, 'step_size': 1, 'allocation_ratio': 1.0, }, } mock_ui.assert_called_once_with( self.context, cn.uuid, expected_inv_data, ) @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' '_ensure_resource_provider') @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' 
'_update_inventory') def test_update_compute_node_no_inv(self, mock_ui, mock_erp): """Ensure that if there are no inventory records, we still call _update_inventory(). """ cn = self.compute_node cn.vcpus = 0 cn.memory_mb = 0 cn.local_gb = 0 self.client.update_compute_node(self.context, cn) mock_erp.assert_called_once_with(self.context, cn.uuid, cn.hypervisor_hostname) mock_ui.assert_called_once_with(self.context, cn.uuid, {}) @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' 'get') @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' 'put') def test_update_inventory_initial_empty(self, mock_put, mock_get): # Ensure _update_inventory() returns a list of Inventories objects # after creating or updating the existing values uuid = uuids.compute_node compute_node = self.compute_node # Make sure the resource provider exists for preventing to call the API self._init_provider_tree(resources_override={}) mock_get.return_value.json.return_value = { 'resource_provider_generation': 43, 'inventories': { 'VCPU': {'total': 16}, 'MEMORY_MB': {'total': 1024}, 'DISK_GB': {'total': 10}, } } mock_put.return_value.status_code = 200 mock_put.return_value.json.return_value = { 'resource_provider_generation': 44, 'inventories': { 'VCPU': {'total': 16}, 'MEMORY_MB': {'total': 1024}, 'DISK_GB': {'total': 10}, } } inv_data = report._compute_node_to_inventory_dict(compute_node) result = self.client._update_inventory_attempt( self.context, compute_node.uuid, inv_data ) self.assertTrue(result) exp_url = '/resource_providers/%s/inventories' % uuid mock_get.assert_called_once_with( exp_url, global_request_id=self.context.global_id) # Updated with the new inventory from the PUT call self._validate_provider(uuid, generation=44) expected = { # Called with the newly-found generation from the existing # inventory 'resource_provider_generation': 43, 'inventories': { 'VCPU': { 'total': 8, 'reserved': CONF.reserved_host_cpus, 'min_unit': 1, 'max_unit': compute_node.vcpus, 'step_size': 1, 'allocation_ratio': compute_node.cpu_allocation_ratio, }, 'MEMORY_MB': { 'total': 1024, 'reserved': CONF.reserved_host_memory_mb, 'min_unit': 1, 'max_unit': compute_node.memory_mb, 'step_size': 1, 'allocation_ratio': compute_node.ram_allocation_ratio, }, 'DISK_GB': { 'total': 10, 'reserved': 0, # reserved_host_disk_mb is 0 by default 'min_unit': 1, 'max_unit': compute_node.local_gb, 'step_size': 1, 'allocation_ratio': compute_node.disk_allocation_ratio, }, } } mock_put.assert_called_once_with( exp_url, expected, global_request_id=self.context.global_id) @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' 'get') @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' 
'put') def test_update_inventory(self, mock_put, mock_get): self.flags(reserved_host_disk_mb=1000) # Ensure _update_inventory() returns a list of Inventories objects # after creating or updating the existing values uuid = uuids.compute_node compute_node = self.compute_node # Make sure the resource provider exists for preventing to call the API self._init_provider_tree() new_vcpus_total = 240 mock_get.return_value.json.return_value = { 'resource_provider_generation': 43, 'inventories': { 'VCPU': {'total': 16}, 'MEMORY_MB': {'total': 1024}, 'DISK_GB': {'total': 10}, } } mock_put.return_value.status_code = 200 mock_put.return_value.json.return_value = { 'resource_provider_generation': 44, 'inventories': { 'VCPU': {'total': new_vcpus_total}, 'MEMORY_MB': {'total': 1024}, 'DISK_GB': {'total': 10}, } } inv_data = report._compute_node_to_inventory_dict(compute_node) # Make a change to trigger the update... inv_data['VCPU']['total'] = new_vcpus_total result = self.client._update_inventory_attempt( self.context, compute_node.uuid, inv_data ) self.assertTrue(result) exp_url = '/resource_providers/%s/inventories' % uuid mock_get.assert_called_once_with( exp_url, global_request_id=self.context.global_id) # Updated with the new inventory from the PUT call self._validate_provider(uuid, generation=44) expected = { # Called with the newly-found generation from the existing # inventory 'resource_provider_generation': 43, 'inventories': { 'VCPU': { 'total': new_vcpus_total, 'reserved': 0, 'min_unit': 1, 'max_unit': compute_node.vcpus, 'step_size': 1, 'allocation_ratio': compute_node.cpu_allocation_ratio, }, 'MEMORY_MB': { 'total': 1024, 'reserved': CONF.reserved_host_memory_mb, 'min_unit': 1, 'max_unit': compute_node.memory_mb, 'step_size': 1, 'allocation_ratio': compute_node.ram_allocation_ratio, }, 'DISK_GB': { 'total': 10, 'reserved': 1, # this is ceil for 1000MB 'min_unit': 1, 'max_unit': compute_node.local_gb, 'step_size': 1, 'allocation_ratio': compute_node.disk_allocation_ratio, }, } } mock_put.assert_called_once_with( exp_url, expected, global_request_id=self.context.global_id) @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' 'get') @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' 'put') def test_update_inventory_no_update(self, mock_put, mock_get): """Simulate situation where scheduler client is first starting up and ends up loading information from the placement API via a GET against the resource provider's inventory but has no local cached inventory information for a resource provider. 
""" uuid = uuids.compute_node compute_node = self.compute_node # Make sure the resource provider exists for preventing to call the API self._init_provider_tree(generation_override=42, resources_override={}) mock_get.return_value.json.return_value = { 'resource_provider_generation': 43, 'inventories': { 'VCPU': { 'total': 8, 'reserved': CONF.reserved_host_cpus, 'min_unit': 1, 'max_unit': compute_node.vcpus, 'step_size': 1, 'allocation_ratio': compute_node.cpu_allocation_ratio, }, 'MEMORY_MB': { 'total': 1024, 'reserved': CONF.reserved_host_memory_mb, 'min_unit': 1, 'max_unit': compute_node.memory_mb, 'step_size': 1, 'allocation_ratio': compute_node.ram_allocation_ratio, }, 'DISK_GB': { 'total': 10, 'reserved': 0, 'min_unit': 1, 'max_unit': compute_node.local_gb, 'step_size': 1, 'allocation_ratio': compute_node.disk_allocation_ratio, }, } } inv_data = report._compute_node_to_inventory_dict(compute_node) result = self.client._update_inventory_attempt( self.context, compute_node.uuid, inv_data ) self.assertTrue(result) exp_url = '/resource_providers/%s/inventories' % uuid mock_get.assert_called_once_with( exp_url, global_request_id=self.context.global_id) # No update so put should not be called self.assertFalse(mock_put.called) # Make sure we updated the generation from the inventory records self._validate_provider(uuid, generation=43) @mock.patch.object(report.LOG, 'info') @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' '_get_inventory') @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' 'put') @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' '_ensure_resource_provider') def test_update_inventory_concurrent_update(self, mock_ensure, mock_put, mock_get, mock_info): # Ensure _update_inventory() returns a list of Inventories objects # after creating or updating the existing values uuid = uuids.compute_node compute_node = self.compute_node # Make sure the resource provider exists for preventing to call the API self.client._provider_tree.new_root( compute_node.hypervisor_hostname, compute_node.uuid, generation=42, ) mock_get.return_value = { 'resource_provider_generation': 42, 'inventories': {}, } mock_put.return_value.status_code = 409 mock_put.return_value.text = 'Does not match inventory in use' mock_put.return_value.headers = {'x-openstack-request-id': uuids.request_id} inv_data = report._compute_node_to_inventory_dict(compute_node) result = self.client._update_inventory_attempt( self.context, compute_node.uuid, inv_data ) self.assertFalse(result) # Invalidated the cache self.assertFalse(self.client._provider_tree.exists(uuid)) # Refreshed our resource provider mock_ensure.assert_called_once_with(self.context, uuid) # Logged the request id in the log message self.assertEqual(uuids.request_id, mock_info.call_args[0][1]['placement_req_id']) @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' '_get_inventory') @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' 
'put') def test_update_inventory_inventory_in_use(self, mock_put, mock_get): # Ensure _update_inventory() returns a list of Inventories objects # after creating or updating the existing values uuid = uuids.compute_node compute_node = self.compute_node # Make sure the resource provider exists for preventing to call the API self.client._provider_tree.new_root( compute_node.hypervisor_hostname, compute_node.uuid, generation=42, ) mock_get.return_value = { 'resource_provider_generation': 42, 'inventories': {}, } mock_put.return_value.status_code = 409 mock_put.return_value.text = ( "update conflict: Inventory for VCPU on " "resource provider 123 in use" ) inv_data = report._compute_node_to_inventory_dict(compute_node) self.assertRaises( exception.InventoryInUse, self.client._update_inventory_attempt, self.context, compute_node.uuid, inv_data, ) # Did NOT invalidate the cache self.assertTrue(self.client._provider_tree.exists(uuid)) @mock.patch.object(report.LOG, 'info') @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' '_get_inventory') @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' 'put') def test_update_inventory_unknown_response(self, mock_put, mock_get, mock_info): # Ensure _update_inventory() returns a list of Inventories objects # after creating or updating the existing values uuid = uuids.compute_node compute_node = self.compute_node # Make sure the resource provider exists for preventing to call the API self.client._provider_tree.new_root( compute_node.hypervisor_hostname, compute_node.uuid, generation=42, ) mock_get.return_value = { 'resource_provider_generation': 42, 'inventories': {}, } mock_put.return_value.status_code = 234 mock_put.return_value.headers = {'x-openstack-request-id': uuids.request_id} inv_data = report._compute_node_to_inventory_dict(compute_node) result = self.client._update_inventory_attempt( self.context, compute_node.uuid, inv_data ) self.assertFalse(result) # No cache invalidation self.assertTrue(self.client._provider_tree.exists(uuid)) @mock.patch.object(report.LOG, 'warning') @mock.patch.object(report.LOG, 'debug') @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' '_get_inventory') @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' 'put') def test_update_inventory_failed(self, mock_put, mock_get, mock_debug, mock_warn): # Ensure _update_inventory() returns a list of Inventories objects # after creating or updating the existing values uuid = uuids.compute_node compute_node = self.compute_node # Make sure the resource provider exists for preventing to call the API self.client._provider_tree.new_root( compute_node.hypervisor_hostname, compute_node.uuid, generation=42, ) mock_get.return_value = { 'resource_provider_generation': 42, 'inventories': {}, } mock_put.return_value = fake_requests.FakeResponse( 400, headers={'x-openstack-request-id': uuids.request_id}) inv_data = report._compute_node_to_inventory_dict(compute_node) result = self.client._update_inventory_attempt( self.context, compute_node.uuid, inv_data ) self.assertFalse(result) # No cache invalidation self.assertTrue(self.client._provider_tree.exists(uuid)) # Logged the request id in the log messages self.assertEqual(uuids.request_id, mock_debug.call_args[0][1]['placement_req_id']) self.assertEqual(uuids.request_id, mock_warn.call_args[0][1]['placement_req_id']) @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' '_ensure_resource_provider') @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' 
                '_update_inventory_attempt')
    @mock.patch('time.sleep')
    def test_update_inventory_fails_and_then_succeeds(self, mock_sleep,
                                                      mock_update,
                                                      mock_ensure):
        # Ensure _update_inventory() fails if we have a conflict when updating
        # but retries correctly.
        cn = self.compute_node
        mock_update.side_effect = (False, True)

        self.client._provider_tree.new_root(
            cn.hypervisor_hostname,
            cn.uuid,
            generation=42,
        )
        result = self.client._update_inventory(
            self.context, cn.uuid, mock.sentinel.inv_data
        )
        self.assertTrue(result)

        # Only slept once
        mock_sleep.assert_called_once_with(1)

    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_ensure_resource_provider')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_update_inventory_attempt')
    @mock.patch('time.sleep')
    def test_update_inventory_never_succeeds(self, mock_sleep,
                                             mock_update,
                                             mock_ensure):
        # Ensure _update_inventory() gives up and returns False once every
        # retry attempt has failed.
        cn = self.compute_node
        mock_update.side_effect = (False, False, False)

        self.client._provider_tree.new_root(
            cn.hypervisor_hostname,
            cn.uuid,
            generation=42,
        )
        result = self.client._update_inventory(
            self.context, cn.uuid, mock.sentinel.inv_data
        )
        self.assertFalse(result)

        # Slept three times
        mock_sleep.assert_has_calls([mock.call(1), mock.call(1), mock.call(1)])

        # Three attempts to update
        mock_update.assert_has_calls([
            mock.call(self.context, cn.uuid, mock.sentinel.inv_data),
            mock.call(self.context, cn.uuid, mock.sentinel.inv_data),
            mock.call(self.context, cn.uuid, mock.sentinel.inv_data),
        ])

    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_update_inventory')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_ensure_resource_classes')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_ensure_resource_provider')
    def test_set_inventory_for_provider_no_custom(self, mock_erp, mock_erc,
                                                  mock_upd):
        """Tests that inventory records of all standard resource classes are
        passed to the report client's _update_inventory() method.
        """
        inv_data = {
            'VCPU': {
                'total': 24,
                'reserved': 0,
                'min_unit': 1,
                'max_unit': 24,
                'step_size': 1,
                'allocation_ratio': 1.0,
            },
            'MEMORY_MB': {
                'total': 1024,
                'reserved': 0,
                'min_unit': 1,
                'max_unit': 1024,
                'step_size': 1,
                'allocation_ratio': 1.0,
            },
            'DISK_GB': {
                'total': 100,
                'reserved': 0,
                'min_unit': 1,
                'max_unit': 100,
                'step_size': 1,
                'allocation_ratio': 1.0,
            },
        }
        self.client.set_inventory_for_provider(
            self.context,
            mock.sentinel.rp_uuid,
            mock.sentinel.rp_name,
            inv_data,
        )
        mock_erp.assert_called_once_with(
            self.context,
            mock.sentinel.rp_uuid,
            mock.sentinel.rp_name,
            parent_provider_uuid=None,
        )
        # No custom resource classes to ensure...
        mock_erc.assert_called_once_with(self.context,
                                         set(['VCPU', 'MEMORY_MB', 'DISK_GB']))
        mock_upd.assert_called_once_with(
            self.context,
            mock.sentinel.rp_uuid,
            inv_data,
        )

    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_update_inventory')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_ensure_resource_classes')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_ensure_resource_provider')
    def test_set_inventory_for_provider_no_inv(self, mock_erp, mock_erc,
                                               mock_upd):
        """Tests that passing empty set of inventory records triggers a
        delete of inventory for the provider.
""" inv_data = {} self.client.set_inventory_for_provider( self.context, mock.sentinel.rp_uuid, mock.sentinel.rp_name, inv_data, ) mock_erp.assert_called_once_with( self.context, mock.sentinel.rp_uuid, mock.sentinel.rp_name, parent_provider_uuid=None, ) mock_erc.assert_called_once_with(self.context, set()) mock_upd.assert_called_once_with( self.context, mock.sentinel.rp_uuid, {}) @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' '_update_inventory') @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' '_ensure_resource_classes') @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' '_ensure_resource_provider') def test_set_inventory_for_provider_with_custom(self, mock_erp, mock_erc, mock_upd): """Tests that inventory records that include a custom resource class are passed to the report client's _update_inventory() method and that the custom resource class is auto-created. """ inv_data = { 'VCPU': { 'total': 24, 'reserved': 0, 'min_unit': 1, 'max_unit': 24, 'step_size': 1, 'allocation_ratio': 1.0, }, 'MEMORY_MB': { 'total': 1024, 'reserved': 0, 'min_unit': 1, 'max_unit': 1024, 'step_size': 1, 'allocation_ratio': 1.0, }, 'DISK_GB': { 'total': 100, 'reserved': 0, 'min_unit': 1, 'max_unit': 100, 'step_size': 1, 'allocation_ratio': 1.0, }, 'CUSTOM_IRON_SILVER': { 'total': 1, 'reserved': 0, 'min_unit': 1, 'max_unit': 1, 'step_size': 1, 'allocation_ratio': 1.0, } } self.client.set_inventory_for_provider( self.context, mock.sentinel.rp_uuid, mock.sentinel.rp_name, inv_data, ) mock_erp.assert_called_once_with( self.context, mock.sentinel.rp_uuid, mock.sentinel.rp_name, parent_provider_uuid=None, ) mock_erc.assert_called_once_with( self.context, set(['VCPU', 'MEMORY_MB', 'DISK_GB', 'CUSTOM_IRON_SILVER'])) mock_upd.assert_called_once_with( self.context, mock.sentinel.rp_uuid, inv_data, ) @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' '_ensure_resource_classes', new=mock.Mock()) @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' '_ensure_resource_provider') def test_set_inventory_for_provider_with_parent(self, mock_erp): """Ensure parent UUID is sent through.""" self.client.set_inventory_for_provider( self.context, uuids.child, 'junior', {}, parent_provider_uuid=uuids.parent) mock_erp.assert_called_once_with( self.context, uuids.child, 'junior', parent_provider_uuid=uuids.parent) class TestAllocations(SchedulerReportClientTestCase): @mock.patch('nova.compute.utils.is_volume_backed_instance') def test_instance_to_allocations_dict(self, mock_vbi): mock_vbi.return_value = False inst = objects.Instance( uuid=uuids.inst, flavor=objects.Flavor(root_gb=10, swap=1023, ephemeral_gb=100, memory_mb=1024, vcpus=2, extra_specs={})) result = report._instance_to_allocations_dict(inst) expected = { 'MEMORY_MB': 1024, 'VCPU': 2, 'DISK_GB': 111, } self.assertEqual(expected, result) @mock.patch('nova.compute.utils.is_volume_backed_instance') def test_instance_to_allocations_dict_overrides(self, mock_vbi): """Test that resource overrides in an instance's flavor extra_specs are reported to placement. 
""" mock_vbi.return_value = False specs = { 'resources:CUSTOM_DAN': '123', 'resources:%s' % fields.ResourceClass.VCPU: '4', 'resources:NOTATHING': '456', 'resources:NOTEVENANUMBER': 'catfood', 'resources:': '7', 'resources:ferret:weasel': 'smelly', 'foo': 'bar', } inst = objects.Instance( uuid=uuids.inst, flavor=objects.Flavor(root_gb=10, swap=1023, ephemeral_gb=100, memory_mb=1024, vcpus=2, extra_specs=specs)) result = report._instance_to_allocations_dict(inst) expected = { 'MEMORY_MB': 1024, 'VCPU': 4, 'DISK_GB': 111, 'CUSTOM_DAN': 123, } self.assertEqual(expected, result) @mock.patch('nova.compute.utils.is_volume_backed_instance') def test_instance_to_allocations_dict_boot_from_volume(self, mock_vbi): mock_vbi.return_value = True inst = objects.Instance( uuid=uuids.inst, flavor=objects.Flavor(root_gb=10, swap=1, ephemeral_gb=100, memory_mb=1024, vcpus=2, extra_specs={})) result = report._instance_to_allocations_dict(inst) expected = { 'MEMORY_MB': 1024, 'VCPU': 2, 'DISK_GB': 101, } self.assertEqual(expected, result) @mock.patch('nova.compute.utils.is_volume_backed_instance') def test_instance_to_allocations_dict_zero_disk(self, mock_vbi): mock_vbi.return_value = True inst = objects.Instance( uuid=uuids.inst, flavor=objects.Flavor(root_gb=10, swap=0, ephemeral_gb=0, memory_mb=1024, vcpus=2, extra_specs={})) result = report._instance_to_allocations_dict(inst) expected = { 'MEMORY_MB': 1024, 'VCPU': 2, } self.assertEqual(expected, result) @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' 'put') @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' 'get') @mock.patch('nova.scheduler.client.report.' '_instance_to_allocations_dict') def test_update_instance_allocation_new(self, mock_a, mock_get, mock_put): cn = objects.ComputeNode(uuid=uuids.cn) inst = objects.Instance(uuid=uuids.inst, project_id=uuids.project, user_id=uuids.user) mock_get.return_value.json.return_value = {'allocations': {}} expected = { 'allocations': [ {'resource_provider': {'uuid': cn.uuid}, 'resources': mock_a.return_value}], 'project_id': inst.project_id, 'user_id': inst.user_id, } self.client.update_instance_allocation(self.context, cn, inst, 1) mock_put.assert_called_once_with( '/allocations/%s' % inst.uuid, expected, version='1.8', global_request_id=self.context.global_id) self.assertTrue(mock_get.called) @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' 'put') @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' 'get') @mock.patch('nova.scheduler.client.report.' '_instance_to_allocations_dict') def test_update_instance_allocation_existing(self, mock_a, mock_get, mock_put): cn = objects.ComputeNode(uuid=uuids.cn) inst = objects.Instance(uuid=uuids.inst) mock_get.return_value.json.return_value = {'allocations': { cn.uuid: { 'generation': 2, 'resources': { 'DISK_GB': 123, 'MEMORY_MB': 456, } }} } mock_a.return_value = { 'DISK_GB': 123, 'MEMORY_MB': 456, } self.client.update_instance_allocation(self.context, cn, inst, 1) self.assertFalse(mock_put.called) mock_get.assert_called_once_with( '/allocations/%s' % inst.uuid, global_request_id=self.context.global_id) @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' 'get') @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' 'put') @mock.patch('nova.scheduler.client.report.' 
'_instance_to_allocations_dict') @mock.patch.object(report.LOG, 'warning') def test_update_instance_allocation_new_failed(self, mock_warn, mock_a, mock_put, mock_get): cn = objects.ComputeNode(uuid=uuids.cn) inst = objects.Instance(uuid=uuids.inst, project_id=uuids.project, user_id=uuids.user) mock_put.return_value = fake_requests.FakeResponse(400) self.client.update_instance_allocation(self.context, cn, inst, 1) self.assertTrue(mock_warn.called) @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' 'delete') def test_update_instance_allocation_delete(self, mock_delete): cn = objects.ComputeNode(uuid=uuids.cn) inst = objects.Instance(uuid=uuids.inst) self.client.update_instance_allocation(self.context, cn, inst, -1) mock_delete.assert_called_once_with( '/allocations/%s' % inst.uuid, global_request_id=self.context.global_id) @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' 'delete') @mock.patch.object(report.LOG, 'warning') def test_update_instance_allocation_delete_failed(self, mock_warn, mock_delete): cn = objects.ComputeNode(uuid=uuids.cn) inst = objects.Instance(uuid=uuids.inst) mock_delete.return_value = fake_requests.FakeResponse(400) self.client.update_instance_allocation(self.context, cn, inst, -1) self.assertTrue(mock_warn.called) @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' 'delete') @mock.patch('nova.scheduler.client.report.LOG') def test_delete_allocation_for_instance_ignore_404(self, mock_log, mock_delete): """Tests that we don't log a warning on a 404 response when trying to delete an allocation record. """ mock_delete.return_value = fake_requests.FakeResponse(404) self.client.delete_allocation_for_instance(self.context, uuids.rp_uuid) # make sure we didn't screw up the logic or the mock mock_log.info.assert_not_called() # make sure warning wasn't called for the 404 mock_log.warning.assert_not_called() @mock.patch("nova.scheduler.client.report.SchedulerReportClient." "delete") @mock.patch("nova.scheduler.client.report.SchedulerReportClient." "delete_allocation_for_instance") @mock.patch("nova.objects.InstanceList.get_by_host_and_node") def test_delete_resource_provider_cascade(self, mock_by_host, mock_del_alloc, mock_delete): self.client._provider_tree.new_root(uuids.cn, uuids.cn, generation=1) cn = objects.ComputeNode(uuid=uuids.cn, host="fake_host", hypervisor_hostname="fake_hostname", ) inst1 = objects.Instance(uuid=uuids.inst1) inst2 = objects.Instance(uuid=uuids.inst2) mock_by_host.return_value = objects.InstanceList( objects=[inst1, inst2]) resp_mock = mock.Mock(status_code=204) mock_delete.return_value = resp_mock self.client.delete_resource_provider(self.context, cn, cascade=True) self.assertEqual(2, mock_del_alloc.call_count) exp_url = "/resource_providers/%s" % uuids.cn mock_delete.assert_called_once_with( exp_url, global_request_id=self.context.global_id) self.assertFalse(self.client._provider_tree.exists(uuids.cn)) @mock.patch("nova.scheduler.client.report.SchedulerReportClient." "delete") @mock.patch("nova.scheduler.client.report.SchedulerReportClient." 
"delete_allocation_for_instance") @mock.patch("nova.objects.InstanceList.get_by_host_and_node") def test_delete_resource_provider_no_cascade(self, mock_by_host, mock_del_alloc, mock_delete): self.client._provider_tree.new_root(uuids.cn, uuids.cn, generation=1) self.client._association_refresh_time[uuids.cn] = mock.Mock() cn = objects.ComputeNode(uuid=uuids.cn, host="fake_host", hypervisor_hostname="fake_hostname", ) inst1 = objects.Instance(uuid=uuids.inst1) inst2 = objects.Instance(uuid=uuids.inst2) mock_by_host.return_value = objects.InstanceList( objects=[inst1, inst2]) resp_mock = mock.Mock(status_code=204) mock_delete.return_value = resp_mock self.client.delete_resource_provider(self.context, cn) mock_del_alloc.assert_not_called() exp_url = "/resource_providers/%s" % uuids.cn mock_delete.assert_called_once_with( exp_url, global_request_id=self.context.global_id) self.assertNotIn(uuids.cn, self.client._association_refresh_time) @mock.patch("nova.scheduler.client.report.SchedulerReportClient." "delete") @mock.patch('nova.scheduler.client.report.LOG') def test_delete_resource_provider_log_calls(self, mock_log, mock_delete): # First, check a successful call self.client._provider_tree.new_root(uuids.cn, uuids.cn, generation=1) cn = objects.ComputeNode(uuid=uuids.cn, host="fake_host", hypervisor_hostname="fake_hostname", ) resp_mock = fake_requests.FakeResponse(204) mock_delete.return_value = resp_mock self.client.delete_resource_provider(self.context, cn) # With a 204, only the info should be called self.assertEqual(1, mock_log.info.call_count) self.assertEqual(0, mock_log.warning.call_count) # Now check a 404 response mock_log.reset_mock() resp_mock.status_code = 404 self.client.delete_resource_provider(self.context, cn) # With a 404, neither log message should be called self.assertEqual(0, mock_log.info.call_count) self.assertEqual(0, mock_log.warning.call_count) # Finally, check a 409 response mock_log.reset_mock() resp_mock.status_code = 409 self.client.delete_resource_provider(self.context, cn) # With a 409, only the error should be called self.assertEqual(0, mock_log.info.call_count) self.assertEqual(1, mock_log.error.call_count) class TestResourceClass(SchedulerReportClientTestCase): def setUp(self): super(TestResourceClass, self).setUp() _put_patch = mock.patch( "nova.scheduler.client.report.SchedulerReportClient.put") self.addCleanup(_put_patch.stop) self.mock_put = _put_patch.start() def test_ensure_resource_classes(self): rcs = ['VCPU', 'CUSTOM_FOO', 'MEMORY_MB', 'CUSTOM_BAR'] self.client._ensure_resource_classes(self.context, rcs) self.mock_put.assert_has_calls([ mock.call('/resource_classes/%s' % rc, None, version='1.7', global_request_id=self.context.global_id) for rc in ('CUSTOM_FOO', 'CUSTOM_BAR') ], any_order=True) def test_ensure_resource_classes_none(self): for empty in ([], (), set(), {}): self.client._ensure_resource_classes(self.context, empty) self.mock_put.assert_not_called() def test_ensure_resource_classes_put_fail(self): self.mock_put.return_value = fake_requests.FakeResponse(503) rcs = ['VCPU', 'MEMORY_MB', 'CUSTOM_BAD'] self.assertRaises( exception.InvalidResourceClass, self.client._ensure_resource_classes, self.context, rcs) # Only called with the "bad" one self.mock_put.assert_called_once_with( '/resource_classes/CUSTOM_BAD', None, version='1.7', global_request_id=self.context.global_id)
phenoxim/nova
nova/tests/unit/scheduler/client/test_report.py
Python
apache-2.0
143,398
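The SchedulerReportClient tests above assert a retry contract: _update_inventory() re-invokes its attempt helper when a generation conflict is reported, sleeps one second after each failed attempt, and gives up after three failures. The snippet below is a minimal, self-contained sketch of that contract for reference only; it is not the Nova implementation, and the names update_with_retries and attempt_fn are illustrative.

import time


def update_with_retries(attempt_fn, max_attempts=3, delay=1):
    # Illustrative sketch only -- not SchedulerReportClient._update_inventory().
    # Call attempt_fn() until it reports success; sleep `delay` seconds after
    # every failed attempt, matching the mock_sleep.assert_has_calls([...])
    # expectations in the tests above (one failure -> one sleep, three
    # failures -> three sleeps and a False result).
    for _ in range(max_attempts):
        if attempt_fn():
            return True
        time.sleep(delay)
    return False


if __name__ == '__main__':
    attempts = iter([False, True])                       # fail once, then succeed
    print(update_with_retries(lambda: next(attempts)))   # True, after one sleep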
# -*- coding: utf-8 -*- from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals from builtins import str import six import logging import filecmp import os import re import sys import uuid import json import time from nose.plugins.attrib import attr from nose.tools import assert_raises, assert_equals, assert_less import tempfile import shutil from mock import patch import synapseclient import synapseclient.client as client import synapseclient.utils as utils import synapseclient.__main__ as cmdline from synapseclient.evaluation import Evaluation import integration from integration import schedule_for_cleanup, QUERY_TIMEOUT_SEC if six.PY2: from StringIO import StringIO else: from io import StringIO def setup_module(module): module.syn = integration.syn module.project = integration.project module.parser = cmdline.build_parser() #used for --description and --descriptionFile tests module.upload_filename = _create_temp_file_with_cleanup() module.description_text = "'some description text'" module.desc_filename = _create_temp_file_with_cleanup(module.description_text) module.update_description_text = "'SOMEBODY ONCE TOLD ME THE WORLD WAS GONNA ROLL ME I AINT THE SHARPEST TOOL IN THE SHED'" module.other_user = integration.other_user def run(*command, **kwargs): """ Sends the given command list to the command line client. :returns: The STDOUT output of the command. """ old_stdout = sys.stdout capturedSTDOUT = StringIO() syn_client = kwargs.get('syn', syn) stream_handler = logging.StreamHandler(capturedSTDOUT) try: sys.stdout = capturedSTDOUT syn_client.logger.addHandler(stream_handler) sys.argv = [item for item in command] args = parser.parse_args() args.debug = True cmdline.perform_main(args, syn_client) except SystemExit: pass # Prevent the test from quitting prematurely finally: sys.stdout = old_stdout syn_client.logger.handlers.remove(stream_handler) capturedSTDOUT = capturedSTDOUT.getvalue() return capturedSTDOUT def parse(regex, output): """Returns the first match.""" m = re.search(regex, output) if m: if len(m.groups()) > 0: return m.group(1).strip() else: raise Exception('ERROR parsing output: "' + str(output) + '"') def test_command_line_client(): # Create a Project output = run('synapse', '--skip-checks', 'create', '-name', str(uuid.uuid4()), '-description', 'test of command line client', 'Project') project_id = parse(r'Created entity:\s+(syn\d+)\s+', output) schedule_for_cleanup(project_id) # Create a File filename = utils.make_bogus_data_file() schedule_for_cleanup(filename) output = run('synapse', '--skip-checks', 'add', '-name', 'BogusFileEntity', '-description', 'Bogus data to test file upload', '-parentid', project_id, filename) file_entity_id = parse(r'Created/Updated entity:\s+(syn\d+)\s+', output) # Verify that we stored the file in Synapse f1 = syn.get(file_entity_id) fh = syn._getFileHandle(f1.dataFileHandleId) assert fh['concreteType'] == 'org.sagebionetworks.repo.model.file.S3FileHandle' # Get File from the command line output = run('synapse', '--skip-checks', 'get', file_entity_id) downloaded_filename = parse(r'Downloaded file:\s+(.*)', output) schedule_for_cleanup(downloaded_filename) assert os.path.exists(downloaded_filename) assert filecmp.cmp(filename, downloaded_filename) # Update the File filename = utils.make_bogus_data_file() schedule_for_cleanup(filename) output = run('synapse', '--skip-checks', 'store', '--id', file_entity_id, filename) updated_entity_id = 
parse(r'Created/Updated entity:\s+(syn\d+)', output) # Get the File again output = run('synapse', '--skip-checks', 'get', file_entity_id) downloaded_filename = parse(r'Downloaded file:\s+(.*)', output) schedule_for_cleanup(downloaded_filename) assert os.path.exists(downloaded_filename) assert filecmp.cmp(filename, downloaded_filename) # Test query output = "" start_time = time.time() while not ('BogusFileEntity' in output and file_entity_id in output): assert_less(time.time() - start_time, QUERY_TIMEOUT_SEC) output = run('synapse', '--skip-checks', 'query', 'select id, name from entity where parentId=="%s"' % project_id) # Move the file to new folder folder = syn.store(synapseclient.Folder(parentId=project_id)) output = run('synapse', 'mv', '--id', file_entity_id, '--parentid', folder.id) downloaded_filename = parse(r'Moved\s+(.*)', output) movedFile = syn.get(file_entity_id, downloadFile=False) assert movedFile.parentId == folder.id # Test Provenance repo_url = 'https://github.com/Sage-Bionetworks/synapsePythonClient' output = run('synapse', '--skip-checks', 'set-provenance', '-id', file_entity_id, '-name', 'TestActivity', '-description', 'A very excellent provenance', '-used', file_entity_id, '-executed', repo_url) activity_id = parse(r'Set provenance record (\d+) on entity syn\d+', output) output = run('synapse', '--skip-checks', 'get-provenance', '--id', file_entity_id) activity = json.loads(output) assert activity['name'] == 'TestActivity' assert activity['description'] == 'A very excellent provenance' used = utils._find_used(activity, lambda used: 'reference' in used) assert used['reference']['targetId'] == file_entity_id used = utils._find_used(activity, lambda used: 'url' in used) assert used['url'] == repo_url assert used['wasExecuted'] == True # Note: Tests shouldn't have external dependencies # but this is a pretty picture of Singapore singapore_url = 'http://upload.wikimedia.org/wikipedia/commons/' \ 'thumb/3/3e/1_singapore_city_skyline_dusk_panorama_2011.jpg' \ '/1280px-1_singapore_city_skyline_dusk_panorama_2011.jpg' # Test external file handle output = run('synapse', '--skip-checks', 'add', '-name', 'Singapore', '-description', 'A nice picture of Singapore', '-parentid', project_id, singapore_url) exteral_entity_id = parse(r'Created/Updated entity:\s+(syn\d+)\s+', output) # Verify that we created an external file handle f2 = syn.get(exteral_entity_id) fh = syn._getFileHandle(f2.dataFileHandleId) assert fh['concreteType'] == 'org.sagebionetworks.repo.model.file.ExternalFileHandle' output = run('synapse', '--skip-checks', 'get', exteral_entity_id) downloaded_filename = parse(r'Downloaded file:\s+(.*)', output) schedule_for_cleanup(downloaded_filename) assert os.path.exists(downloaded_filename) # Delete the Project output = run('synapse', '--skip-checks', 'delete', project_id) def test_command_line_client_annotations(): # Create a Project output = run('synapse', '--skip-checks', 'create', '-name', str(uuid.uuid4()), '-description', 'test of command line client', 'Project') project_id = parse(r'Created entity:\s+(syn\d+)\s+', output) schedule_for_cleanup(project_id) # Create a File filename = utils.make_bogus_data_file() schedule_for_cleanup(filename) output = run('synapse', '--skip-checks', 'add', '-name', 'BogusFileEntity', '-description', 'Bogus data to test file upload', '-parentid', project_id, filename) file_entity_id = parse(r'Created/Updated entity:\s+(syn\d+)\s+', output) # Test setting annotations output = run('synapse', '--skip-checks', 'set-annotations', '--id', 
file_entity_id, '--annotations', '{"foo": 1, "bar": "1", "baz": [1, 2, 3]}', ) # Test getting annotations # check that the three things set are correct # This test should be adjusted to check for equality of the # whole annotation dictionary once the issue of other # attributes (creationDate, eTag, id, uri) being returned is resolved # See: https://sagebionetworks.jira.com/browse/SYNPY-175 output = run('synapse', '--skip-checks', 'get-annotations', '--id', file_entity_id ) annotations = json.loads(output) assert annotations['foo'] == [1] assert annotations['bar'] == [u"1"] assert annotations['baz'] == [1, 2, 3] # Test setting annotations by replacing existing ones. output = run('synapse', '--skip-checks', 'set-annotations', '--id', file_entity_id, '--annotations', '{"foo": 2}', '--replace' ) # Test that the annotation was updated output = run('synapse', '--skip-checks', 'get-annotations', '--id', file_entity_id ) annotations = json.loads(output) assert annotations['foo'] == [2] # Since this replaces the existing annotations, previous values # Should not be available. assert_raises(KeyError, lambda key: annotations[key], 'bar') assert_raises(KeyError, lambda key: annotations[key], 'baz') # Test running add command to set annotations on a new object filename2 = utils.make_bogus_data_file() schedule_for_cleanup(filename2) output = run('synapse', '--skip-checks', 'add', '-name', 'BogusData2', '-description', 'Bogus data to test file upload with add and add annotations', '-parentid', project_id, '--annotations', '{"foo": 123}', filename2) file_entity_id = parse(r'Created/Updated entity:\s+(syn\d+)\s+', output) # Test that the annotation was updated output = run('synapse', '--skip-checks', 'get-annotations', '--id', file_entity_id ) annotations = json.loads(output) assert annotations['foo'] == [123] # Test running store command to set annotations on a new object filename3 = utils.make_bogus_data_file() schedule_for_cleanup(filename3) output = run('synapse', '--skip-checks', 'store', '--name', 'BogusData3', '--description', '\"Bogus data to test file upload with store and add annotations\"', '--parentid', project_id, '--annotations', '{"foo": 456}', filename3) file_entity_id = parse(r'Created/Updated entity:\s+(syn\d+)\s+', output) # Test that the annotation was updated output = run('synapse', '--skip-checks', 'get-annotations', '--id', file_entity_id ) annotations = json.loads(output) assert annotations['foo'] == [456] def test_command_line_store_and_submit(): # Create a Project output = run('synapse', '--skip-checks', 'store', '--name', str(uuid.uuid4()), '--description', 'test of store command', '--type', 'Project') project_id = parse(r'Created/Updated entity:\s+(syn\d+)\s+', output) schedule_for_cleanup(project_id) # Create and upload a file filename = utils.make_bogus_data_file() schedule_for_cleanup(filename) output = run('synapse', '--skip-checks', 'store', '--description', 'Bogus data to test file upload', '--parentid', project_id, '--file', filename) file_entity_id = parse(r'Created/Updated entity:\s+(syn\d+)\s+', output) # Verify that we stored the file in Synapse f1 = syn.get(file_entity_id) fh = syn._getFileHandle(f1.dataFileHandleId) assert fh['concreteType'] == 'org.sagebionetworks.repo.model.file.S3FileHandle' # Test that entity is named after the file it contains assert f1.name == os.path.basename(filename) # Create an Evaluation to submit to eval = Evaluation(name=str(uuid.uuid4()), contentSource=project_id) eval = syn.store(eval) schedule_for_cleanup(eval) # Submit a bogus file 
output = run('synapse', '--skip-checks', 'submit', '--evaluation', eval.id, '--name', 'Some random name', '--entity', file_entity_id) submission_id = parse(r'Submitted \(id: (\d+)\) entity:\s+', output) #testing different commmand line options for submitting to an evaluation #. submitting to an evaluation by evaluationID output = run('synapse', '--skip-checks', 'submit', '--evalID', eval.id, '--name', 'Some random name', '--alias', 'My Team', '--entity', file_entity_id) submission_id = parse(r'Submitted \(id: (\d+)\) entity:\s+', output) # Update the file filename = utils.make_bogus_data_file() schedule_for_cleanup(filename) output = run('synapse', '--skip-checks', 'store', '--id', file_entity_id, '--file', filename) updated_entity_id = parse(r'Updated entity:\s+(syn\d+)', output) schedule_for_cleanup(updated_entity_id) # Submit an updated bogus file and this time by evaluation name output = run('synapse', '--skip-checks', 'submit', '--evaluationName', eval.name, '--entity', file_entity_id) submission_id = parse(r'Submitted \(id: (\d+)\) entity:\s+', output) # Tests shouldn't have external dependencies, but here it's required ducky_url = 'https://www.synapse.org/Portal/clear.cache.gif' # Test external file handle output = run('synapse', '--skip-checks', 'store', '--name', 'Rubber Ducky', '--description', 'I like rubber duckies', '--parentid', project_id, '--file', ducky_url) exteral_entity_id = parse(r'Created/Updated entity:\s+(syn\d+)\s+', output) schedule_for_cleanup(exteral_entity_id) # Verify that we created an external file handle f2 = syn.get(exteral_entity_id) fh = syn._getFileHandle(f2.dataFileHandleId) assert fh['concreteType'] == 'org.sagebionetworks.repo.model.file.ExternalFileHandle' #submit an external file to an evaluation and use provenance filename = utils.make_bogus_data_file() schedule_for_cleanup(filename) repo_url = 'https://github.com/Sage-Bionetworks/synapsePythonClient' output = run('synapse', '--skip-checks', 'submit', '--evalID', eval.id, '--file', filename, '--parent', project_id, '--used', exteral_entity_id, '--executed', repo_url ) submission_id = parse(r'Submitted \(id: (\d+)\) entity:\s+', output) # Delete project output = run('synapse', '--skip-checks', 'delete', project_id) def test_command_get_recursive_and_query(): """Tests the 'synapse get -r' and 'synapse get -q' functions""" project_entity = project # Create Folders in Project folder_entity = syn.store(synapseclient.Folder(name=str(uuid.uuid4()), parent=project_entity)) folder_entity2 = syn.store(synapseclient.Folder(name=str(uuid.uuid4()), parent=folder_entity)) # Create and upload two files in sub-Folder uploaded_paths = [] file_entities = [] for i in range(2): f = utils.make_bogus_data_file() uploaded_paths.append(f) schedule_for_cleanup(f) file_entity = synapseclient.File(f, parent=folder_entity2) file_entity = syn.store(file_entity) file_entities.append(file_entity) schedule_for_cleanup(f) #Add a file in the Folder as well f = utils.make_bogus_data_file() uploaded_paths.append(f) schedule_for_cleanup(f) file_entity = synapseclient.File(f, parent=folder_entity) file_entity = syn.store(file_entity) file_entities.append(file_entity) time.sleep(2) # get -r uses syncFromSynapse() which uses getChildren(), which is not immediately consistent, but faster than chunked queries. 
### Test recursive get output = run('synapse', '--skip-checks', 'get', '-r', folder_entity.id) #Verify that we downloaded files: new_paths = [os.path.join('.', folder_entity2.name, os.path.basename(f)) for f in uploaded_paths[:-1]] new_paths.append(os.path.join('.', os.path.basename(uploaded_paths[-1]))) schedule_for_cleanup(folder_entity.name) for downloaded, uploaded in zip(new_paths, uploaded_paths): assert os.path.exists(downloaded) assert filecmp.cmp(downloaded, uploaded) schedule_for_cleanup(downloaded) time.sleep(3) # get -q uses chunkedQuery which are eventually consistent ### Test query get ### Note: We're not querying on annotations because tests can fail if there ### are lots of jobs queued as happens when staging is syncing output = run('synapse', '--skip-checks', 'get', '-q', "select id from file where parentId=='%s'" % folder_entity2.id) #Verify that we downloaded files from folder_entity2 new_paths = [os.path.join('.', os.path.basename(f)) for f in uploaded_paths[:-1]] for downloaded, uploaded in zip(new_paths, uploaded_paths[:-1]): assert os.path.exists(downloaded) assert filecmp.cmp(downloaded, uploaded) schedule_for_cleanup(downloaded) schedule_for_cleanup(new_paths[0]) ### Test query get using a Table with an entity column ### This should be replaced when Table File Views are implemented in the client cols = [] cols.append(synapseclient.Column(name='id', columnType='ENTITYID')) schema1 = syn.store(synapseclient.Schema(name='Foo Table', columns=cols, parent=project_entity)) schedule_for_cleanup(schema1.id) data1 =[[x.id] for x in file_entities] row_reference_set1 = syn.store(synapseclient.RowSet(schema=schema1, rows=[synapseclient.Row(r) for r in data1])) time.sleep(3) # get -q uses chunkedQuery which are eventually consistent ### Test Table/View query get output = run('synapse', '--skip-checks', 'get', '-q', "select id from %s" % schema1.id) #Verify that we downloaded files: new_paths = [os.path.join('.', os.path.basename(f)) for f in uploaded_paths[:-1]] new_paths.append(os.path.join('.', os.path.basename(uploaded_paths[-1]))) schedule_for_cleanup(folder_entity.name) for downloaded, uploaded in zip(new_paths, uploaded_paths): assert os.path.exists(downloaded) assert filecmp.cmp(downloaded, uploaded) schedule_for_cleanup(downloaded) schedule_for_cleanup(new_paths[0]) def test_command_copy(): """Tests the 'synapse cp' function""" # Create a Project project_entity = syn.store(synapseclient.Project(name=str(uuid.uuid4()))) schedule_for_cleanup(project_entity.id) # Create a Folder in Project folder_entity = syn.store(synapseclient.Folder(name=str(uuid.uuid4()), parent=project_entity)) schedule_for_cleanup(folder_entity.id) # Create and upload a file in Folder repo_url = 'https://github.com/Sage-Bionetworks/synapsePythonClient' annots = {'test':['hello_world']} # Create, upload, and set annotations on a file in Folder filename = utils.make_bogus_data_file() schedule_for_cleanup(filename) file_entity = syn.store(synapseclient.File(filename, parent=folder_entity)) externalURL_entity = syn.store(synapseclient.File(repo_url,name='rand',parent=folder_entity,synapseStore=False)) syn.setAnnotations(file_entity,annots) syn.setAnnotations(externalURL_entity,annots) schedule_for_cleanup(file_entity.id) schedule_for_cleanup(externalURL_entity.id) ### Test cp function output = run('synapse', '--skip-checks', 'cp',file_entity.id, '--destinationId',project_entity.id) output_URL = run('synapse', '--skip-checks', 'cp',externalURL_entity.id, '--destinationId',project_entity.id) copied_id = 
parse(r'Copied syn\d+ to (syn\d+)',output) copied_URL_id = parse(r'Copied syn\d+ to (syn\d+)',output_URL) #Verify that our copied files are identical copied_ent = syn.get(copied_id) copied_URL_ent = syn.get(copied_URL_id,downloadFile=False) schedule_for_cleanup(copied_id) schedule_for_cleanup(copied_URL_id) copied_ent_annot = syn.getAnnotations(copied_id) copied_url_annot = syn.getAnnotations(copied_URL_id) copied_prov = syn.getProvenance(copied_id)['used'][0]['reference']['targetId'] copied_url_prov = syn.getProvenance(copied_URL_id)['used'][0]['reference']['targetId'] #Make sure copied files are the same assert copied_prov == file_entity.id assert copied_ent_annot == annots assert copied_ent.properties.dataFileHandleId == file_entity.properties.dataFileHandleId #Make sure copied URLs are the same assert copied_url_prov == externalURL_entity.id assert copied_url_annot == annots assert copied_URL_ent.externalURL == repo_url assert copied_URL_ent.name == 'rand' assert copied_URL_ent.properties.dataFileHandleId == externalURL_entity.properties.dataFileHandleId #Verify that errors are being thrown when a #file is copied to a folder/project that has a file with the same filename assert_raises(ValueError,run, 'synapse', '--debug', '--skip-checks', 'cp',file_entity.id, '--destinationId',project_entity.id) def test_command_line_using_paths(): # Create a Project project_entity = syn.store(synapseclient.Project(name=str(uuid.uuid4()))) schedule_for_cleanup(project_entity.id) # Create a Folder in Project folder_entity = syn.store(synapseclient.Folder(name=str(uuid.uuid4()), parent=project_entity)) # Create and upload a file in Folder filename = utils.make_bogus_data_file() schedule_for_cleanup(filename) file_entity = syn.store(synapseclient.File(filename, parent=folder_entity)) # Verify that we can use show with a filename output = run('synapse', '--skip-checks', 'show', filename) id = parse(r'File: %s\s+\((syn\d+)\)\s+' %os.path.split(filename)[1], output) assert file_entity.id == id # Verify that limitSearch works by making sure we get the file entity # that's inside the folder file_entity2 = syn.store(synapseclient.File(filename, parent=project_entity)) output = run('synapse', '--skip-checks', 'get', '--limitSearch', folder_entity.id, filename) id = parse(r'Associated file: .* with synapse ID (syn\d+)', output) name = parse(r'Associated file: (.*) with synapse ID syn\d+', output) assert_equals(file_entity.id, id) assert utils.equal_paths(name, filename) #Verify that set-provenance works with filepath repo_url = 'https://github.com/Sage-Bionetworks/synapsePythonClient' output = run('synapse', '--skip-checks', 'set-provenance', '-id', file_entity2.id, '-name', 'TestActivity', '-description', 'A very excellent provenance', '-used', filename, '-executed', repo_url, '-limitSearch', folder_entity.id) activity_id = parse(r'Set provenance record (\d+) on entity syn\d+', output) output = run('synapse', '--skip-checks', 'get-provenance', '-id', file_entity2.id) activity = json.loads(output) assert activity['name'] == 'TestActivity' assert activity['description'] == 'A very excellent provenance' #Verify that store works with provenance specified with filepath repo_url = 'https://github.com/Sage-Bionetworks/synapsePythonClient' filename2 = utils.make_bogus_data_file() schedule_for_cleanup(filename2) output = run('synapse', '--skip-checks', 'add', filename2, '-parentid', project_entity.id, '-used', filename, '-executed', '%s %s' %(repo_url, filename)) entity_id = parse(r'Created/Updated 
entity:\s+(syn\d+)\s+', output) output = run('synapse', '--skip-checks', 'get-provenance', '-id', entity_id) activity = json.loads(output) a = [a for a in activity['used'] if a['wasExecuted']==False] assert a[0]['reference']['targetId'] in [file_entity.id, file_entity2.id] #Test associate command #I have two files in Synapse filename and filename2 path = tempfile.mkdtemp() schedule_for_cleanup(path) shutil.copy(filename, path) shutil.copy(filename2, path) output = run('synapse', '--skip-checks', 'associate', path, '-r') output = run('synapse', '--skip-checks', 'show', filename) def test_table_query(): """Test command line ability to do table query. """ cols = [] cols.append(synapseclient.Column(name='name', columnType='STRING', maximumSize=1000)) cols.append(synapseclient.Column(name='foo', columnType='STRING', enumValues=['foo', 'bar', 'bat'])) cols.append(synapseclient.Column(name='x', columnType='DOUBLE')) cols.append(synapseclient.Column(name='age', columnType='INTEGER')) cols.append(synapseclient.Column(name='cartoon', columnType='BOOLEAN')) project_entity = project schema1 = syn.store(synapseclient.Schema(name=str(uuid.uuid4()), columns=cols, parent=project_entity)) schedule_for_cleanup(schema1.id) data1 =[['Chris', 'bar', 11.23, 45, False], ['Jen', 'bat', 14.56, 40, False], ['Jane', 'bat', 17.89, 6, False], ['Henry', 'bar', 10.12, 1, False]] row_reference_set1 = syn.store(synapseclient.RowSet(schema=schema1, rows=[synapseclient.Row(r) for r in data1])) # Test query output = run('synapse', '--skip-checks', 'query', 'select * from %s' % schema1.id) output_rows = output.rstrip("\n").split("\n") # Check the length of the output assert len(output_rows) == 5, "got %s rows" % (len(output_rows),) # Check that headers are correct. # Should be column names in schema plus the ROW_ID and ROW_VERSION my_headers_set = output_rows[0].split("\t") expected_headers_set = ["ROW_ID", "ROW_VERSION"] + list(map(lambda x: x.name, cols)) assert my_headers_set == expected_headers_set, "%r != %r" % (my_headers_set, expected_headers_set) def test_login(): if not other_user['username']: raise SkipTest("Skipping test for login command: No [test-authentication] in %s" % client.CONFIG_FILE) alt_syn = synapseclient.Synapse() with patch.object(alt_syn, "login") as mock_login, patch.object(alt_syn, "getUserProfile", return_value={"userName":"test_user","ownerId":"ownerId"}) as mock_get_user_profile: output = run('synapse', '--skip-checks', 'login', '-u', other_user['username'], '-p', other_user['password'], '--rememberMe', syn=alt_syn) mock_login.assert_called_once_with(other_user['username'], other_user['password'], forced=True, rememberMe=True, silent=False) mock_get_user_profile.assert_called_once_with() def test_configPath(): """Test using a user-specified configPath for Synapse configuration file. 
""" tmp_config_file = tempfile.NamedTemporaryFile(suffix='.synapseConfig', delete=False) shutil.copyfile(synapseclient.client.CONFIG_FILE, tmp_config_file.name) # Create a File filename = utils.make_bogus_data_file() schedule_for_cleanup(filename) output = run('synapse', '--skip-checks', '--configPath', tmp_config_file.name, 'add', '-name', 'BogusFileEntityTwo', '-description', 'Bogus data to test file upload', '-parentid', project.id, filename) file_entity_id = parse(r'Created/Updated entity:\s+(syn\d+)\s+', output) # Verify that we stored the file in Synapse f1 = syn.get(file_entity_id) fh = syn._getFileHandle(f1.dataFileHandleId) assert fh['concreteType'] == 'org.sagebionetworks.repo.model.file.S3FileHandle' def _description_wiki_check(run_output, expected_description): entity_id = parse(r'Created.* entity:\s+(syn\d+)\s+', run_output) wiki = syn.getWiki(entity_id) assert_equals(expected_description, wiki.markdown) def _create_temp_file_with_cleanup(specific_file_text = None): if specific_file_text: with tempfile.NamedTemporaryFile(mode="w", suffix=".txt", delete=False) as file: file.write(specific_file_text) filename = file.name else: filename = utils.make_bogus_data_file() schedule_for_cleanup(filename) return filename def test_create__with_description(): output = run('synapse', 'create', 'Folder', '-name', str(uuid.uuid4()), '-parentid', project.id, '--description', description_text ) _description_wiki_check(output, description_text) def test_store__with_description(): output = run('synapse', 'store', upload_filename, '-name', str(uuid.uuid4()), '-parentid', project.id, '--description', description_text ) _description_wiki_check(output, description_text) def test_add__with_description(): output = run('synapse', 'add', upload_filename, '-name', str(uuid.uuid4()), '-parentid', project.id, '--description', description_text ) _description_wiki_check(output, description_text) def test_create__with_descriptionFile(): output = run('synapse', 'create', 'Folder', '-name', str(uuid.uuid4()), '-parentid', project.id, '--descriptionFile', desc_filename ) _description_wiki_check(output, description_text) def test_store__with_descriptionFile(): output = run('synapse', 'store', upload_filename, '-name', str(uuid.uuid4()), '-parentid', project.id, '--descriptionFile', desc_filename ) _description_wiki_check(output, description_text) def test_add__with_descriptionFile(): output = run('synapse', 'add', upload_filename, '-name', str(uuid.uuid4()), '-parentid', project.id, '--descriptionFile', desc_filename ) _description_wiki_check(output, description_text) def test_create__update_description(): name = str(uuid.uuid4()) output = run('synapse', 'create', 'Folder', '-name', name, '-parentid', project.id, '--descriptionFile', desc_filename ) _description_wiki_check(output, description_text) output = run('synapse', 'create', 'Folder', '-name', name, '-parentid', project.id, '--description', update_description_text ) _description_wiki_check(output, update_description_text) def test_store__update_description(): name = str(uuid.uuid4()) output = run('synapse', 'store', upload_filename, '-name', name, '-parentid', project.id, '--descriptionFile', desc_filename ) _description_wiki_check(output, description_text) output = run('synapse', 'store', upload_filename, '-name', name, '-parentid', project.id, '--description', update_description_text ) _description_wiki_check(output, update_description_text) def test_add__update_description(): name = str(uuid.uuid4()) output = run('synapse', 'add', upload_filename, 
'-name', name, '-parentid', project.id, '--descriptionFile', desc_filename ) _description_wiki_check(output, description_text) output = run('synapse', 'add', upload_filename, '-name', name, '-parentid', project.id, '--description', update_description_text ) _description_wiki_check(output, update_description_text)
zimingd/synapsePythonClient
tests/integration/test_command_line_client.py
Python
apache-2.0
36,272
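The command-line integration tests above repeat one pattern throughout: invoke the CLI entry point with a patched sys.argv, capture everything written to stdout, and pull Synapse IDs out of the captured text with a regular expression (the run() and parse() helpers). The sketch below shows that pattern in isolation; it assumes Python 3 and uses contextlib.redirect_stdout instead of the logging-handler swap in the test module, and run_cli / parse_first are illustrative names, not part of the Synapse client.

import io
import re
import sys
from contextlib import redirect_stdout


def run_cli(main_func, *argv):
    # Illustrative helper: run main_func() as if it were invoked as `argv`
    # on the command line and return whatever it printed to stdout.
    buf = io.StringIO()
    old_argv = sys.argv
    try:
        sys.argv = list(argv)
        with redirect_stdout(buf):
            main_func()
    except SystemExit:
        pass  # argparse may call sys.exit(); swallow it, as the tests do
    finally:
        sys.argv = old_argv
    return buf.getvalue()


def parse_first(regex, output):
    # Return the first captured group, mirroring the tests' parse() helper.
    m = re.search(regex, output)
    if m is None or not m.groups():
        raise ValueError('could not parse output: %r' % output)
    return m.group(1).strip()


if __name__ == '__main__':
    out = run_cli(lambda: print('Created entity:  syn12345  My Project'),
                  'synapse', 'create', 'Project')
    print(parse_first(r'Created entity:\s+(syn\d+)', out))  # syn12345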
import collections import re import sys import time import traceback from functools import partial from multiprocessing import Process, Queue from unittest import skipIf from cassandra import ConsistencyLevel from cassandra.cluster import Cluster from cassandra.query import SimpleStatement # TODO add in requirements.txt from enum import Enum # Remove when switching to py3 from assertions import (assert_all, assert_crc_check_chance_equal, assert_invalid, assert_none, assert_one, assert_unavailable) from dtest import Tester, debug from nose.plugins.attrib import attr from tools import known_failure, new_node, require, since # CASSANDRA-10978. Migration wait (in seconds) to use in bootstrapping tests. Needed to handle # pathological case of flushing schema keyspace for multiple data directories. See CASSANDRA-6696 # for multiple data directory changes and CASSANDRA-10421 for compaction logging that must be # written. MIGRATION_WAIT = 5 @since('3.0') class TestMaterializedViews(Tester): """ Test materialized views implementation. @jira_ticket CASSANDRA-6477 @since 3.0 """ def prepare(self, user_table=False, rf=1, options=None, nodes=3): cluster = self.cluster cluster.populate([nodes, 0]) if options: cluster.set_configuration_options(values=options) cluster.start() node1 = cluster.nodelist()[0] session = self.patient_cql_connection(node1) self.create_ks(session, 'ks', rf) if user_table: session.execute( ("CREATE TABLE users (username varchar, password varchar, gender varchar, " "session_token varchar, state varchar, birth_year bigint, " "PRIMARY KEY (username));") ) # create a materialized view session.execute(("CREATE MATERIALIZED VIEW users_by_state AS " "SELECT * FROM users WHERE STATE IS NOT NULL AND username IS NOT NULL " "PRIMARY KEY (state, username)")) return session def _insert_data(self, session): # insert data insert_stmt = "INSERT INTO users (username, password, gender, state, birth_year) VALUES " session.execute(insert_stmt + "('user1', 'ch@ngem3a', 'f', 'TX', 1968);") session.execute(insert_stmt + "('user2', 'ch@ngem3b', 'm', 'CA', 1971);") session.execute(insert_stmt + "('user3', 'ch@ngem3c', 'f', 'FL', 1978);") session.execute(insert_stmt + "('user4', 'ch@ngem3d', 'm', 'TX', 1974);") def _replay_batchlogs(self): debug("Replaying batchlog on all nodes") for node in self.cluster.nodelist(): if node.is_running(): node.nodetool("replaybatchlog") def create_test(self): """Test the materialized view creation""" session = self.prepare(user_table=True) result = list(session.execute(("SELECT * FROM system_schema.views " "WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING"))) self.assertEqual(len(result), 1, "Expecting 1 materialized view, got" + str(result)) def test_gcgs_validation(self): """Verify that it's not possible to create or set a too low gc_grace_seconds on MVs""" session = self.prepare(user_table=True) # Shouldn't be able to alter the gc_grace_seconds of the base table to 0 assert_invalid(session, "ALTER TABLE users WITH gc_grace_seconds = 0", "Cannot alter gc_grace_seconds of the base table of a materialized view " "to 0, since this value is used to TTL undelivered updates. 
Setting " "gc_grace_seconds too low might cause undelivered updates to expire " "before being replayed.") # But can alter the gc_grace_seconds of the bease table to a value != 0 session.execute("ALTER TABLE users WITH gc_grace_seconds = 10") # Shouldn't be able to alter the gc_grace_seconds of the MV to 0 assert_invalid(session, "ALTER MATERIALIZED VIEW users_by_state WITH gc_grace_seconds = 0", "Cannot alter gc_grace_seconds of a materialized view to 0, since " "this value is used to TTL undelivered updates. Setting gc_grace_seconds " "too low might cause undelivered updates to expire before being replayed.") # Now let's drop MV session.execute("DROP MATERIALIZED VIEW ks.users_by_state;") # Now we should be able to set the gc_grace_seconds of the base table to 0 session.execute("ALTER TABLE users WITH gc_grace_seconds = 0") # Now we shouldn't be able to create a new MV on this table assert_invalid(session, "CREATE MATERIALIZED VIEW users_by_state AS " "SELECT * FROM users WHERE STATE IS NOT NULL AND username IS NOT NULL " "PRIMARY KEY (state, username)", "Cannot create materialized view 'users_by_state' for base table 'users' " "with gc_grace_seconds of 0, since this value is used to TTL undelivered " "updates. Setting gc_grace_seconds too low might cause undelivered updates" " to expire before being replayed.") def insert_test(self): """Test basic insertions""" session = self.prepare(user_table=True) self._insert_data(session) result = list(session.execute("SELECT * FROM users;")) self.assertEqual(len(result), 4, "Expecting {} users, got {}".format(4, len(result))) result = list(session.execute("SELECT * FROM users_by_state WHERE state='TX';")) self.assertEqual(len(result), 2, "Expecting {} users, got {}".format(2, len(result))) result = list(session.execute("SELECT * FROM users_by_state WHERE state='CA';")) self.assertEqual(len(result), 1, "Expecting {} users, got {}".format(1, len(result))) result = list(session.execute("SELECT * FROM users_by_state WHERE state='MA';")) self.assertEqual(len(result), 0, "Expecting {} users, got {}".format(0, len(result))) def populate_mv_after_insert_test(self): """Test that a view is OK when created with existing data""" session = self.prepare() session.execute("CREATE TABLE t (id int PRIMARY KEY, v int)") for i in xrange(1000): session.execute("INSERT INTO t (id, v) VALUES ({v}, {v})".format(v=i)) session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t WHERE v IS NOT NULL " "AND id IS NOT NULL PRIMARY KEY (v, id)")) debug("wait that all batchlogs are replayed") self._replay_batchlogs() for i in xrange(1000): assert_one(session, "SELECT * FROM t_by_v WHERE v = {}".format(i), [i, i]) def crc_check_chance_test(self): """Test that crc_check_chance parameter is properly populated after mv creation and update""" session = self.prepare() session.execute("CREATE TABLE t (id int PRIMARY KEY, v int)") session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t WHERE v IS NOT NULL " "AND id IS NOT NULL PRIMARY KEY (v, id) WITH crc_check_chance = 0.5")) assert_crc_check_chance_equal(session, "t_by_v", 0.5, view=True) session.execute("ALTER MATERIALIZED VIEW t_by_v WITH crc_check_chance = 0.3") assert_crc_check_chance_equal(session, "t_by_v", 0.3, view=True) def prepared_statement_test(self): """Test basic insertions with prepared statement""" session = self.prepare(user_table=True) insertPrepared = session.prepare( "INSERT INTO users (username, password, gender, state, birth_year) VALUES (?, ?, ?, ?, ?);" ) selectPrepared = session.prepare( 
"SELECT state, password, session_token FROM users_by_state WHERE state=?;" ) # insert data session.execute(insertPrepared.bind(('user1', 'ch@ngem3a', 'f', 'TX', 1968))) session.execute(insertPrepared.bind(('user2', 'ch@ngem3b', 'm', 'CA', 1971))) session.execute(insertPrepared.bind(('user3', 'ch@ngem3c', 'f', 'FL', 1978))) session.execute(insertPrepared.bind(('user4', 'ch@ngem3d', 'm', 'TX', 1974))) result = list(session.execute("SELECT * FROM users;")) self.assertEqual(len(result), 4, "Expecting {} users, got {}".format(4, len(result))) result = list(session.execute(selectPrepared.bind(['TX']))) self.assertEqual(len(result), 2, "Expecting {} users, got {}".format(2, len(result))) result = list(session.execute(selectPrepared.bind(['CA']))) self.assertEqual(len(result), 1, "Expecting {} users, got {}".format(1, len(result))) result = list(session.execute(selectPrepared.bind(['MA']))) self.assertEqual(len(result), 0, "Expecting {} users, got {}".format(0, len(result))) def immutable_test(self): """Test that a materialized view is immutable""" session = self.prepare(user_table=True) # cannot insert assert_invalid(session, "INSERT INTO users_by_state (state, username) VALUES ('TX', 'user1');", "Cannot directly modify a materialized view") # cannot update assert_invalid(session, "UPDATE users_by_state SET session_token='XYZ' WHERE username='user1' AND state = 'TX';", "Cannot directly modify a materialized view") # cannot delete a row assert_invalid(session, "DELETE from users_by_state where state='TX';", "Cannot directly modify a materialized view") # cannot delete a cell assert_invalid(session, "DELETE session_token from users_by_state where state='TX';", "Cannot directly modify a materialized view") # cannot alter a table assert_invalid(session, "ALTER TABLE users_by_state ADD first_name varchar", "Cannot use ALTER TABLE on Materialized View") def drop_mv_test(self): """Test that we can drop a view properly""" session = self.prepare(user_table=True) # create another materialized view session.execute(("CREATE MATERIALIZED VIEW users_by_birth_year AS " "SELECT * FROM users WHERE birth_year IS NOT NULL AND " "username IS NOT NULL PRIMARY KEY (birth_year, username)")) result = list(session.execute(("SELECT * FROM system_schema.views " "WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING"))) self.assertEqual(len(result), 2, "Expecting {} materialized view, got {}".format(2, len(result))) session.execute("DROP MATERIALIZED VIEW ks.users_by_state;") result = list(session.execute(("SELECT * FROM system_schema.views " "WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING"))) self.assertEqual(len(result), 1, "Expecting {} materialized view, got {}".format(1, len(result))) def drop_column_test(self): """Test that we cannot drop a column if it is used by a MV""" session = self.prepare(user_table=True) result = list(session.execute(("SELECT * FROM system_schema.views " "WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING"))) self.assertEqual(len(result), 1, "Expecting {} materialized view, got {}".format(1, len(result))) assert_invalid( session, "ALTER TABLE ks.users DROP state;", "Cannot drop column state, depended on by materialized views" ) def drop_table_test(self): """Test that we cannot drop a table without deleting its MVs first""" session = self.prepare(user_table=True) result = list(session.execute(("SELECT * FROM system_schema.views " "WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING"))) self.assertEqual( len(result), 1, 
"Expecting {} materialized view, got {}".format(1, len(result)) ) assert_invalid( session, "DROP TABLE ks.users;", "Cannot drop table when materialized views still depend on it" ) result = list(session.execute(("SELECT * FROM system_schema.views " "WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING"))) self.assertEqual( len(result), 1, "Expecting {} materialized view, got {}".format(1, len(result)) ) session.execute("DROP MATERIALIZED VIEW ks.users_by_state;") session.execute("DROP TABLE ks.users;") result = list(session.execute(("SELECT * FROM system_schema.views " "WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING"))) self.assertEqual( len(result), 0, "Expecting {} materialized view, got {}".format(1, len(result)) ) @known_failure(failure_source='test', jira_url='https://issues.apache.org/jira/browse/CASSANDRA-12225', flaky=True) def clustering_column_test(self): """Test that we can use clustering columns as primary key for a materialized view""" session = self.prepare() session.default_consistency_level = ConsistencyLevel.QUORUM session.execute(("CREATE TABLE users (username varchar, password varchar, gender varchar, " "session_token varchar, state varchar, birth_year bigint, " "PRIMARY KEY (username, state, birth_year));")) # create a materialized view that use a compound key session.execute(("CREATE MATERIALIZED VIEW users_by_state_birth_year " "AS SELECT * FROM users WHERE state IS NOT NULL AND birth_year IS NOT NULL " "AND username IS NOT NULL PRIMARY KEY (state, birth_year, username)")) session.cluster.control_connection.wait_for_schema_agreement() self._insert_data(session) result = list(session.execute("SELECT * FROM ks.users_by_state_birth_year WHERE state='TX'")) self.assertEqual(len(result), 2, "Expecting {} users, got {}".format(2, len(result))) result = list(session.execute("SELECT * FROM ks.users_by_state_birth_year WHERE state='TX' AND birth_year=1968")) self.assertEqual(len(result), 1, "Expecting {} users, got {}".format(1, len(result))) def _add_dc_after_mv_test(self, rf): """ @jira_ticket CASSANDRA-10978 Add datacenter with configurable replication. 
""" session = self.prepare(rf=rf) debug("Creating schema") session.execute("CREATE TABLE t (id int PRIMARY KEY, v int)") session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t " "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)")) debug("Writing 1k to base") for i in xrange(1000): session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=i, v=-i)) debug("Reading 1k from view") for i in xrange(1000): assert_one(session, "SELECT * FROM t_by_v WHERE v = {}".format(-i), [-i, i]) debug("Reading 1k from base") for i in xrange(1000): assert_one(session, "SELECT * FROM t WHERE id = {}".format(i), [i, -i]) debug("Bootstrapping new node in another dc") node4 = new_node(self.cluster, data_center='dc2') node4.start(wait_other_notice=True, wait_for_binary_proto=True, jvm_args=["-Dcassandra.migration_task_wait_in_seconds={}".format(MIGRATION_WAIT)]) debug("Bootstrapping new node in another dc") node5 = new_node(self.cluster, remote_debug_port='1414', data_center='dc2') node5.start(jvm_args=["-Dcassandra.migration_task_wait_in_seconds={}".format(MIGRATION_WAIT)]) session2 = self.patient_exclusive_cql_connection(node4) debug("Verifying data from new node in view") for i in xrange(1000): assert_one(session2, "SELECT * FROM ks.t_by_v WHERE v = {}".format(-i), [-i, i]) debug("Inserting 100 into base") for i in xrange(1000, 1100): session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=i, v=-i)) debug("Verify 100 in view") for i in xrange(1000, 1100): assert_one(session, "SELECT * FROM t_by_v WHERE v = {}".format(-i), [-i, i]) @known_failure(failure_source='test', jira_url='https://issues.apache.org/jira/browse/CASSANDRA-12140', flaky=True) def add_dc_after_mv_simple_replication_test(self): """ @jira_ticket CASSANDRA-10634 Test that materialized views work as expected when adding a datacenter with SimpleStrategy. """ self._add_dc_after_mv_test(1) @known_failure(failure_source='test', jira_url='https://issues.apache.org/jira/browse/CASSANDRA-12140', flaky=True) def add_dc_after_mv_network_replication_test(self): """ @jira_ticket CASSANDRA-10634 Test that materialized views work as expected when adding a datacenter with NetworkTopologyStrategy. """ self._add_dc_after_mv_test({'dc1': 1, 'dc2': 1}) @known_failure(failure_source='test', jira_url='https://issues.apache.org/jira/browse/CASSANDRA-12140', flaky=True) @known_failure(failure_source='test', jira_url='https://issues.apache.org/jira/browse/CASSANDRA-12446', flaky=True) def add_node_after_mv_test(self): """ @jira_ticket CASSANDRA-10978 Test that materialized views work as expected when adding a node. 
""" session = self.prepare() session.execute("CREATE TABLE t (id int PRIMARY KEY, v int)") session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t " "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)")) for i in xrange(1000): session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=i, v=-i)) for i in xrange(1000): assert_one(session, "SELECT * FROM t_by_v WHERE v = {}".format(-i), [-i, i]) node4 = new_node(self.cluster) node4.start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.migration_task_wait_in_seconds={}".format(MIGRATION_WAIT)]) session2 = self.patient_exclusive_cql_connection(node4) for i in xrange(1000): assert_one(session2, "SELECT * FROM ks.t_by_v WHERE v = {}".format(-i), [-i, i]) for i in xrange(1000, 1100): session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=i, v=-i)) for i in xrange(1000, 1100): assert_one(session, "SELECT * FROM t_by_v WHERE v = {}".format(-i), [-i, i]) @known_failure(failure_source='test', jira_url='https://issues.apache.org/jira/browse/CASSANDRA-12140', flaky=True) def add_write_survey_node_after_mv_test(self): """ @jira_ticket CASSANDRA-10621 @jira_ticket CASSANDRA-10978 Test that materialized views work as expected when adding a node in write survey mode. """ session = self.prepare() session.execute("CREATE TABLE t (id int PRIMARY KEY, v int)") session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t " "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)")) for i in xrange(1000): session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=i, v=-i)) for i in xrange(1000): assert_one(session, "SELECT * FROM t_by_v WHERE v = {}".format(-i), [-i, i]) node4 = new_node(self.cluster) node4.start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.write_survey=true", "-Dcassandra.migration_task_wait_in_seconds={}".format(MIGRATION_WAIT)]) for i in xrange(1000, 1100): session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=i, v=-i)) for i in xrange(1100): assert_one(session, "SELECT * FROM t_by_v WHERE v = {}".format(-i), [-i, i]) def allow_filtering_test(self): """Test that allow filtering works as usual for a materialized view""" session = self.prepare() session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)") session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t " "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)")) session.execute(("CREATE MATERIALIZED VIEW t_by_v2 AS SELECT * FROM t " "WHERE v2 IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v2, id)")) for i in xrange(1000): session.execute("INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)".format(v=i)) for i in xrange(1000): assert_one(session, "SELECT * FROM t_by_v WHERE v = {v}".format(v=i), [i, i, 'a', 3.0]) rows = list(session.execute("SELECT * FROM t_by_v2 WHERE v2 = 'a'")) self.assertEqual(len(rows), 1000, "Expected 1000 rows but got {}".format(len(rows))) assert_invalid(session, "SELECT * FROM t_by_v WHERE v = 1 AND v2 = 'a'") assert_invalid(session, "SELECT * FROM t_by_v2 WHERE v2 = 'a' AND v = 1") for i in xrange(1000): assert_one( session, "SELECT * FROM t_by_v WHERE v = {} AND v3 = 3.0 ALLOW FILTERING".format(i), [i, i, 'a', 3.0] ) assert_one( session, "SELECT * FROM t_by_v2 WHERE v2 = 'a' AND v = {} ALLOW FILTERING".format(i), ['a', i, i, 3.0] ) def secondary_index_test(self): """Test that secondary indexes cannot be created on a materialized view""" session = self.prepare() session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 
decimal)") session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t " "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)")) assert_invalid(session, "CREATE INDEX ON t_by_v (v2)", "Secondary indexes are not supported on materialized views") def ttl_test(self): """ Test that TTL works as expected for a materialized view @expected_result The TTL is propagated properly between tables. """ session = self.prepare() session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 int, v3 int)") session.execute(("CREATE MATERIALIZED VIEW t_by_v2 AS SELECT * FROM t " "WHERE v2 IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v2, id)")) for i in xrange(100): session.execute("INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, {v}, {v}) USING TTL 10".format(v=i)) for i in xrange(100): assert_one(session, "SELECT * FROM t_by_v2 WHERE v2 = {}".format(i), [i, i, i, i]) time.sleep(20) rows = list(session.execute("SELECT * FROM t_by_v2")) self.assertEqual(len(rows), 0, "Expected 0 rows but got {}".format(len(rows))) def query_all_new_column_test(self): """ Test that a materialized view created with a 'SELECT *' works as expected when adding a new column @expected_result The new column is present in the view. """ session = self.prepare(user_table=True) self._insert_data(session) assert_one( session, "SELECT * FROM users_by_state WHERE state = 'TX' AND username = 'user1'", ['TX', 'user1', 1968, 'f', 'ch@ngem3a', None] ) session.execute("ALTER TABLE users ADD first_name varchar;") results = list(session.execute("SELECT * FROM users_by_state WHERE state = 'TX' AND username = 'user1'")) self.assertEqual(len(results), 1) self.assertTrue(hasattr(results[0], 'first_name'), 'Column "first_name" not found') assert_one( session, "SELECT * FROM users_by_state WHERE state = 'TX' AND username = 'user1'", ['TX', 'user1', 1968, None, 'f', 'ch@ngem3a', None] ) def query_new_column_test(self): """ Test that a materialized view created with 'SELECT <col1, ...>' works as expected when adding a new column @expected_result The new column is not present in the view. 
""" session = self.prepare(user_table=True) session.execute(("CREATE MATERIALIZED VIEW users_by_state2 AS SELECT username FROM users " "WHERE STATE IS NOT NULL AND USERNAME IS NOT NULL PRIMARY KEY (state, username)")) self._insert_data(session) assert_one( session, "SELECT * FROM users_by_state2 WHERE state = 'TX' AND username = 'user1'", ['TX', 'user1'] ) session.execute("ALTER TABLE users ADD first_name varchar;") results = list(session.execute("SELECT * FROM users_by_state2 WHERE state = 'TX' AND username = 'user1'")) self.assertEqual(len(results), 1) self.assertFalse(hasattr(results[0], 'first_name'), 'Column "first_name" found in view') assert_one( session, "SELECT * FROM users_by_state2 WHERE state = 'TX' AND username = 'user1'", ['TX', 'user1'] ) def lwt_test(self): """Test that lightweight transaction behave properly with a materialized view""" session = self.prepare() session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)") session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t " "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)")) debug("Inserting initial data using IF NOT EXISTS") for i in xrange(1000): session.execute( "INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0) IF NOT EXISTS".format(v=i) ) self._replay_batchlogs() debug("All rows should have been inserted") for i in xrange(1000): assert_one( session, "SELECT * FROM t_by_v WHERE v = {}".format(i), [i, i, 'a', 3.0] ) debug("Tyring to UpInsert data with a different value using IF NOT EXISTS") for i in xrange(1000): v = i * 2 session.execute( "INSERT INTO t (id, v, v2, v3) VALUES ({id}, {v}, 'a', 3.0) IF NOT EXISTS".format(id=i, v=v) ) self._replay_batchlogs() debug("No rows should have changed") for i in xrange(1000): assert_one( session, "SELECT * FROM t_by_v WHERE v = {}".format(i), [i, i, 'a', 3.0] ) debug("Update the 10 first rows with a different value") for i in xrange(1000): v = i + 2000 session.execute( "UPDATE t SET v={v} WHERE id = {id} IF v < 10".format(id=i, v=v) ) self._replay_batchlogs() debug("Verify that only the 10 first rows changed.") results = list(session.execute("SELECT * FROM t_by_v;")) self.assertEqual(len(results), 1000) for i in xrange(1000): v = i + 2000 if i < 10 else i assert_one( session, "SELECT * FROM t_by_v WHERE v = {}".format(v), [v, i, 'a', 3.0] ) debug("Deleting the first 10 rows") for i in xrange(1000): v = i + 2000 session.execute( "DELETE FROM t WHERE id = {id} IF v = {v} ".format(id=i, v=v) ) self._replay_batchlogs() debug("Verify that only the 10 first rows have been deleted.") results = list(session.execute("SELECT * FROM t_by_v;")) self.assertEqual(len(results), 990) for i in xrange(10, 1000): assert_one( session, "SELECT * FROM t_by_v WHERE v = {}".format(i), [i, i, 'a', 3.0] ) @known_failure(failure_source='test', jira_url='https://issues.apache.org/jira/browse/CASSANDRA-11612', flaky=True, notes='flaps on Windows') def interrupt_build_process_test(self): """Test that an interupted MV build process is resumed as it should""" session = self.prepare(options={'hinted_handoff_enabled': False}) node1, node2, node3 = self.cluster.nodelist() session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)") debug("Inserting initial data") for i in xrange(10000): session.execute( "INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0) IF NOT EXISTS".format(v=i) ) debug("Create a MV") session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t " "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, 
id)")) debug("Stop the cluster. Interrupt the MV build process.") self.cluster.stop() debug("Restart the cluster") self.cluster.start(wait_for_binary_proto=True) session = self.patient_cql_connection(node1) session.execute("USE ks") debug("MV shouldn't be built yet.") assert_none(session, "SELECT * FROM t_by_v WHERE v=10000;") debug("Wait and ensure the MV build resumed. Waiting up to 2 minutes.") start = time.time() while True: try: result = list(session.execute("SELECT count(*) FROM t_by_v;")) self.assertNotEqual(result[0].count, 10000) except AssertionError: debug("MV build process is finished") break elapsed = (time.time() - start) / 60 if elapsed > 2: break time.sleep(5) debug("Verify all data") result = list(session.execute("SELECT count(*) FROM t_by_v;")) self.assertEqual(result[0].count, 10000) for i in xrange(10000): assert_one( session, "SELECT * FROM t_by_v WHERE v = {}".format(i), [i, i, 'a', 3.0], cl=ConsistencyLevel.ALL ) def view_tombstone_test(self): """ Test that a materialized views properly tombstone @jira_ticket CASSANDRA-10261 @jira_ticket CASSANDRA-10910 """ self.prepare(rf=3, options={'hinted_handoff_enabled': False}) node1, node2, node3 = self.cluster.nodelist() session = self.patient_exclusive_cql_connection(node1) session.max_trace_wait = 120 session.execute('USE ks') session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)") session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t " "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v,id)")) session.cluster.control_connection.wait_for_schema_agreement() # Set initial values TS=0, verify session.execute(SimpleStatement("INSERT INTO t (id, v, v2, v3) VALUES (1, 1, 'a', 3.0) USING TIMESTAMP 0", consistency_level=ConsistencyLevel.ALL)) self._replay_batchlogs() assert_one( session, "SELECT * FROM t_by_v WHERE v = 1", [1, 1, 'a', 3.0] ) session.execute(SimpleStatement("INSERT INTO t (id, v2) VALUES (1, 'b') USING TIMESTAMP 1", consistency_level=ConsistencyLevel.ALL)) self._replay_batchlogs() assert_one( session, "SELECT * FROM t_by_v WHERE v = 1", [1, 1, 'b', 3.0] ) # change v's value and TS=3, tombstones v=1 and adds v=0 record session.execute(SimpleStatement("UPDATE t USING TIMESTAMP 3 SET v = 0 WHERE id = 1", consistency_level=ConsistencyLevel.ALL)) self._replay_batchlogs() assert_none(session, "SELECT * FROM t_by_v WHERE v = 1") debug('Shutdown node2') node2.stop(wait_other_notice=True) session.execute(SimpleStatement("UPDATE t USING TIMESTAMP 4 SET v = 1 WHERE id = 1", consistency_level=ConsistencyLevel.QUORUM)) self._replay_batchlogs() assert_one( session, "SELECT * FROM t_by_v WHERE v = 1", [1, 1, 'b', 3.0] ) node2.start(wait_other_notice=True, wait_for_binary_proto=True) # We should get a digest mismatch query = SimpleStatement("SELECT * FROM t_by_v WHERE v = 1", consistency_level=ConsistencyLevel.ALL) result = session.execute(query, trace=True) self.check_trace_events(result.get_query_trace(), True) # We should not get a digest mismatch the second time query = SimpleStatement("SELECT * FROM t_by_v WHERE v = 1", consistency_level=ConsistencyLevel.ALL) result = session.execute(query, trace=True) self.check_trace_events(result.get_query_trace(), False) # Verify values one last time assert_one( session, "SELECT * FROM t_by_v WHERE v = 1", [1, 1, 'b', 3.0], cl=ConsistencyLevel.ALL ) def check_trace_events(self, trace, expect_digest): # we should see multiple requests get enqueued prior to index scan # execution happening # Look for messages like: # Digest mismatch: 
org.apache.cassandra.service.DigestMismatchException: Mismatch for key DecoratedKey regex = r"Digest mismatch: org.apache.cassandra.service.DigestMismatchException: Mismatch for key DecoratedKey" for event in trace.events: desc = event.description match = re.match(regex, desc) if match: if expect_digest: break else: self.fail("Encountered digest mismatch when we shouldn't") else: if expect_digest: self.fail("Didn't find digest mismatch") def simple_repair_test(self): """ Test that a materialized view are consistent after a simple repair. """ session = self.prepare(rf=3, options={'hinted_handoff_enabled': False}) node1, node2, node3 = self.cluster.nodelist() session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)") session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t " "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)")) session.cluster.control_connection.wait_for_schema_agreement() debug('Shutdown node2') node2.stop(wait_other_notice=True) for i in xrange(1000): session.execute("INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)".format(v=i)) self._replay_batchlogs() debug('Verify the data in the MV with CL=ONE') for i in xrange(1000): assert_one( session, "SELECT * FROM t_by_v WHERE v = {}".format(i), [i, i, 'a', 3.0] ) debug('Verify the data in the MV with CL=ALL. All should be unavailable.') for i in xrange(1000): statement = SimpleStatement( "SELECT * FROM t_by_v WHERE v = {}".format(i), consistency_level=ConsistencyLevel.ALL ) assert_unavailable( session.execute, statement ) debug('Start node2, and repair') node2.start(wait_other_notice=True, wait_for_binary_proto=True) node1.repair() debug('Verify the data in the MV with CL=ONE. All should be available now.') for i in xrange(1000): assert_one( session, "SELECT * FROM t_by_v WHERE v = {}".format(i), [i, i, 'a', 3.0], cl=ConsistencyLevel.ONE ) def base_replica_repair_test(self): """ Test that a materialized view are consistent after the repair of the base replica. 
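        The base replica's data is wiped and only the base table is repaired (via `nodetool repair ks t`); the materialized view on that replica should then serve the repaired rows again.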
""" self.prepare(rf=3) node1, node2, node3 = self.cluster.nodelist() session = self.patient_exclusive_cql_connection(node1) session.execute('USE ks') session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)") session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t " "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)")) session.cluster.control_connection.wait_for_schema_agreement() debug('Write initial data') for i in xrange(1000): session.execute("INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)".format(v=i)) self._replay_batchlogs() debug('Verify the data in the MV with CL=ALL') for i in xrange(1000): assert_one( session, "SELECT * FROM t_by_v WHERE v = {}".format(i), [i, i, 'a', 3.0], cl=ConsistencyLevel.ALL ) debug('Shutdown node1') node1.stop(wait_other_notice=True) debug('Delete node1 data') node1.clear(clear_all=True) debug('Restarting node1') node1.start(wait_other_notice=True, wait_for_binary_proto=True) debug('Shutdown node2 and node3') node2.stop(wait_other_notice=True) node3.stop(wait_other_notice=True) session = self.patient_exclusive_cql_connection(node1) session.execute('USE ks') debug('Verify that there is no data on node1') for i in xrange(1000): assert_none( session, "SELECT * FROM t_by_v WHERE v = {}".format(i) ) debug('Restarting node2 and node3') node2.start(wait_other_notice=True, wait_for_binary_proto=True) node3.start(wait_other_notice=True, wait_for_binary_proto=True) # Just repair the base replica node1.nodetool("repair ks t") debug('Verify data with cl=ALL') for i in xrange(1000): assert_one( session, "SELECT * FROM t_by_v WHERE v = {}".format(i), [i, i, 'a', 3.0] ) @attr("resource-intensive") def complex_repair_test(self): """ Test that a materialized view are consistent after a more complex repair. """ session = self.prepare(rf=5, options={'hinted_handoff_enabled': False}, nodes=5) node1, node2, node3, node4, node5 = self.cluster.nodelist() # we create the base table with gc_grace_seconds=5 so batchlog will expire after 5 seconds session.execute("CREATE TABLE ks.t (id int PRIMARY KEY, v int, v2 text, v3 decimal)" "WITH gc_grace_seconds = 5") session.execute(("CREATE MATERIALIZED VIEW ks.t_by_v AS SELECT * FROM t " "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)")) session.cluster.control_connection.wait_for_schema_agreement() debug('Shutdown node2 and node3') node2.stop() node3.stop(wait_other_notice=True) debug('Write initial data to node1 (will be replicated to node4 and node5)') for i in xrange(1000): session.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)".format(v=i)) debug('Verify the data in the MV on node1 with CL=ONE') for i in xrange(1000): assert_one( session, "SELECT * FROM ks.t_by_v WHERE v = {}".format(i), [i, i, 'a', 3.0] ) debug('Close connection to node1') session.cluster.shutdown() debug('Shutdown node1, node4 and node5') node1.stop() node4.stop() node5.stop() debug('Start nodes 2 and 3') node2.start() node3.start(wait_other_notice=True, wait_for_binary_proto=True) session2 = self.patient_cql_connection(node2) debug('Verify the data in the MV on node2 with CL=ONE. 
No rows should be found.') for i in xrange(1000): assert_none( session2, "SELECT * FROM ks.t_by_v WHERE v = {}".format(i) ) debug('Write new data in node2 and node3 that overlap those in node1, node4 and node5') for i in xrange(1000): # we write i*2 as value, instead of i session2.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)".format(v=i * 2)) debug('Verify the new data in the MV on node2 with CL=ONE') for i in xrange(1000): v = i * 2 assert_one( session2, "SELECT * FROM ks.t_by_v WHERE v = {}".format(v), [v, v, 'a', 3.0] ) debug('Wait for batchlogs to expire from node2 and node3') time.sleep(5) debug('Start remaining nodes') node1.start(wait_other_notice=True, wait_for_binary_proto=True) node4.start(wait_other_notice=True, wait_for_binary_proto=True) node5.start(wait_other_notice=True, wait_for_binary_proto=True) session = self.patient_cql_connection(node1) debug('Read data from MV at QUORUM (old data should be returned)') for i in xrange(1000): assert_one( session, "SELECT * FROM ks.t_by_v WHERE v = {}".format(i), [i, i, 'a', 3.0], cl=ConsistencyLevel.QUORUM ) debug('Run global repair on node1') node1.repair() debug('Read data from MV at quorum (new data should be returned after repair)') for i in xrange(1000): v = i * 2 assert_one( session, "SELECT * FROM ks.t_by_v WHERE v = {}".format(v), [v, v, 'a', 3.0], cl=ConsistencyLevel.QUORUM ) def really_complex_repair_test(self): """ Test that a materialized view are consistent after a more complex repair. """ session = self.prepare(rf=5, options={'hinted_handoff_enabled': False}, nodes=5) node1, node2, node3, node4, node5 = self.cluster.nodelist() # we create the base table with gc_grace_seconds=5 so batchlog will expire after 5 seconds session.execute("CREATE TABLE ks.t (id int, v int, v2 text, v3 decimal, PRIMARY KEY(id, v, v2))" "WITH gc_grace_seconds = 1") session.execute(("CREATE MATERIALIZED VIEW ks.t_by_v AS SELECT * FROM t " "WHERE v IS NOT NULL AND id IS NOT NULL AND v IS NOT NULL AND " "v2 IS NOT NULL PRIMARY KEY (v2, v, id)")) session.cluster.control_connection.wait_for_schema_agreement() debug('Shutdown node2 and node3') node2.stop(wait_other_notice=True) node3.stop(wait_other_notice=True) session.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (1, 1, 'a', 3.0)") session.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (2, 2, 'a', 3.0)") self._replay_batchlogs() debug('Verify the data in the MV on node1 with CL=ONE') assert_all(session, "SELECT * FROM ks.t_by_v WHERE v2 = 'a'", [['a', 1, 1, 3.0], ['a', 2, 2, 3.0]]) session.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (1, 1, 'b', 3.0)") session.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (2, 2, 'b', 3.0)") self._replay_batchlogs() debug('Verify the data in the MV on node1 with CL=ONE') assert_all(session, "SELECT * FROM ks.t_by_v WHERE v2 = 'b'", [['b', 1, 1, 3.0], ['b', 2, 2, 3.0]]) session.shutdown() debug('Shutdown node1, node4 and node5') node1.stop() node4.stop() node5.stop() debug('Start nodes 2 and 3') node2.start() node3.start(wait_other_notice=True, wait_for_binary_proto=True) session2 = self.patient_cql_connection(node2) session2.execute('USE ks') debug('Verify the data in the MV on node2 with CL=ONE. 
No rows should be found.') assert_none(session2, "SELECT * FROM ks.t_by_v WHERE v2 = 'a'") debug('Write new data in node2 that overlap those in node1') session2.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (1, 1, 'c', 3.0)") session2.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (2, 2, 'c', 3.0)") self._replay_batchlogs() assert_all(session2, "SELECT * FROM ks.t_by_v WHERE v2 = 'c'", [['c', 1, 1, 3.0], ['c', 2, 2, 3.0]]) session2.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (1, 1, 'd', 3.0)") session2.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (2, 2, 'd', 3.0)") self._replay_batchlogs() assert_all(session2, "SELECT * FROM ks.t_by_v WHERE v2 = 'd'", [['d', 1, 1, 3.0], ['d', 2, 2, 3.0]]) debug("Composite delete of everything") session2.execute("DELETE FROM ks.t WHERE id = 1 and v = 1") session2.execute("DELETE FROM ks.t WHERE id = 2 and v = 2") self._replay_batchlogs() assert_none(session2, "SELECT * FROM ks.t_by_v WHERE v2 = 'c'") assert_none(session2, "SELECT * FROM ks.t_by_v WHERE v2 = 'd'") debug('Wait for batchlogs to expire from node2 and node3') time.sleep(5) debug('Start remaining nodes') node1.start(wait_other_notice=True, wait_for_binary_proto=True) node4.start(wait_other_notice=True, wait_for_binary_proto=True) node5.start(wait_other_notice=True, wait_for_binary_proto=True) # at this point the data isn't repaired so we have an inconsistency. # this value should return None assert_all( session2, "SELECT * FROM ks.t_by_v WHERE v2 = 'a'", [['a', 1, 1, 3.0], ['a', 2, 2, 3.0]], cl=ConsistencyLevel.QUORUM ) debug('Run global repair on node1') node1.repair() assert_none(session2, "SELECT * FROM ks.t_by_v WHERE v2 = 'a'", cl=ConsistencyLevel.QUORUM) def complex_mv_select_statements_test(self): """ Test complex MV select statements @jira_ticket CASSANDRA-9664 """ cluster = self.cluster cluster.populate(3).start() node1 = cluster.nodelist()[0] session = self.patient_cql_connection(node1) session.default_consistency_level = ConsistencyLevel.QUORUM debug("Creating keyspace") session.execute("CREATE KEYSPACE mvtest WITH replication = " "{'class': 'SimpleStrategy', 'replication_factor': '3'}") session.execute('USE mvtest') mv_primary_keys = ["((a, b), c)", "((b, a), c)", "(a, b, c)", "(c, b, a)", "((c, a), b)"] for mv_primary_key in mv_primary_keys: session.execute("CREATE TABLE test (a int, b int, c int, d int, PRIMARY KEY (a, b, c))") insert_stmt = session.prepare("INSERT INTO test (a, b, c, d) VALUES (?, ?, ?, ?)") update_stmt = session.prepare("UPDATE test SET d = ? WHERE a = ? AND b = ? AND c = ?") delete_stmt1 = session.prepare("DELETE FROM test WHERE a = ? AND b = ? 
AND c = ?") delete_stmt2 = session.prepare("DELETE FROM test WHERE a = ?") session.cluster.control_connection.wait_for_schema_agreement() rows = [(0, 0, 0, 0), (0, 0, 1, 0), (0, 1, 0, 0), (0, 1, 1, 0), (1, 0, 0, 0), (1, 0, 1, 0), (1, 1, -1, 0), (1, 1, 0, 0), (1, 1, 1, 0)] for row in rows: session.execute(insert_stmt, row) debug("Testing MV primary key: {}".format(mv_primary_key)) session.execute("CREATE MATERIALIZED VIEW mv AS SELECT * FROM test WHERE " "a = 1 AND b IS NOT NULL AND c = 1 PRIMARY KEY {}".format(mv_primary_key)) time.sleep(3) assert_all( session, "SELECT a, b, c, d FROM mv", [[1, 0, 1, 0], [1, 1, 1, 0]], ignore_order=True, cl=ConsistencyLevel.QUORUM ) # insert new rows that does not match the filter session.execute(insert_stmt, (0, 0, 1, 0)) session.execute(insert_stmt, (1, 1, 0, 0)) assert_all( session, "SELECT a, b, c, d FROM mv", [[1, 0, 1, 0], [1, 1, 1, 0]], ignore_order=True, cl=ConsistencyLevel.QUORUM ) # insert new row that does match the filter session.execute(insert_stmt, (1, 2, 1, 0)) assert_all( session, "SELECT a, b, c, d FROM mv", [[1, 0, 1, 0], [1, 1, 1, 0], [1, 2, 1, 0]], ignore_order=True, cl=ConsistencyLevel.QUORUM ) # update rows that does not match the filter session.execute(update_stmt, (1, 1, -1, 0)) session.execute(update_stmt, (0, 1, 1, 0)) assert_all( session, "SELECT a, b, c, d FROM mv", [[1, 0, 1, 0], [1, 1, 1, 0], [1, 2, 1, 0]], ignore_order=True, cl=ConsistencyLevel.QUORUM ) # update a row that does match the filter session.execute(update_stmt, (2, 1, 1, 1)) assert_all( session, "SELECT a, b, c, d FROM mv", [[1, 0, 1, 0], [1, 1, 1, 2], [1, 2, 1, 0]], ignore_order=True, cl=ConsistencyLevel.QUORUM ) # delete rows that does not match the filter session.execute(delete_stmt1, (1, 1, -1)) session.execute(delete_stmt1, (2, 0, 1)) session.execute(delete_stmt2, (0,)) assert_all( session, "SELECT a, b, c, d FROM mv", [[1, 0, 1, 0], [1, 1, 1, 2], [1, 2, 1, 0]], ignore_order=True, cl=ConsistencyLevel.QUORUM ) # delete a row that does match the filter session.execute(delete_stmt1, (1, 1, 1)) assert_all( session, "SELECT a, b, c, d FROM mv", [[1, 0, 1, 0], [1, 2, 1, 0]], ignore_order=True, cl=ConsistencyLevel.QUORUM ) # delete a partition that matches the filter session.execute(delete_stmt2, (1,)) assert_all(session, "SELECT a, b, c, d FROM mv", [], cl=ConsistencyLevel.QUORUM) # Cleanup session.execute("DROP MATERIALIZED VIEW mv") session.execute("DROP TABLE test") # For read verification class MutationPresence(Enum): match = 1 extra = 2 missing = 3 excluded = 4 unknown = 5 class MM(object): mp = None def out(self): pass class Match(MM): def __init__(self): self.mp = MutationPresence.match def out(self): return None class Extra(MM): expecting = None value = None row = None def __init__(self, expecting, value, row): self.mp = MutationPresence.extra self.expecting = expecting self.value = value self.row = row def out(self): return "Extra. Expected {} instead of {}; row: {}".format(self.expecting, self.value, self.row) class Missing(MM): value = None row = None def __init__(self, value, row): self.mp = MutationPresence.missing self.value = value self.row = row def out(self): return "Missing. 
At {}".format(self.row) class Excluded(MM): def __init__(self): self.mp = MutationPresence.excluded def out(self): return None class Unknown(MM): def __init__(self): self.mp = MutationPresence.unknown def out(self): return None readConsistency = ConsistencyLevel.QUORUM writeConsistency = ConsistencyLevel.QUORUM SimpleRow = collections.namedtuple('SimpleRow', 'a b c d') def row_generate(i, num_partitions): return SimpleRow(a=i % num_partitions, b=(i % 400) / num_partitions, c=i, d=i) # Create a threaded session and execute queries from a Queue def thread_session(ip, queue, start, end, rows, num_partitions): def execute_query(session, select_gi, i): row = row_generate(i, num_partitions) if (row.a, row.b) in rows: base = rows[(row.a, row.b)] else: base = -1 gi = list(session.execute(select_gi, [row.c, row.a])) if base == i and len(gi) == 1: return Match() elif base != i and len(gi) == 1: return Extra(base, i, (gi[0][0], gi[0][1], gi[0][2], gi[0][3])) elif base == i and len(gi) == 0: return Missing(base, i) elif base != i and len(gi) == 0: return Excluded() else: return Unknown() try: cluster = Cluster([ip]) session = cluster.connect() select_gi = session.prepare("SELECT * FROM mvtest.mv1 WHERE c = ? AND a = ?") select_gi.consistency_level = readConsistency for i in range(start, end): ret = execute_query(session, select_gi, i) queue.put_nowait(ret) except Exception as e: print str(e) queue.close() @since('3.0') @skipIf(sys.platform == 'win32', 'Bug in python on Windows: https://bugs.python.org/issue10128') class TestMaterializedViewsConsistency(Tester): def prepare(self, user_table=False): cluster = self.cluster cluster.populate(3).start() node2 = cluster.nodelist()[1] # Keep the status of async requests self.exception_type = collections.Counter() self.num_request_done = 0 self.counts = {} for mp in MutationPresence: self.counts[mp] = 0 self.rows = {} self.update_stats_every = 100 debug("Set to talk to node 2") self.session = self.patient_cql_connection(node2) return self.session def _print_write_status(self, row): output = "\r{}".format(row) for key in self.exception_type.keys(): output = "{} ({}: {})".format(output, key, self.exception_type[key]) sys.stdout.write(output) sys.stdout.flush() def _print_read_status(self, row): if self.counts[MutationPresence.unknown] == 0: sys.stdout.write( "\rOn {}; match: {}; extra: {}; missing: {}".format( row, self.counts[MutationPresence.match], self.counts[MutationPresence.extra], self.counts[MutationPresence.missing]) ) else: sys.stdout.write( "\rOn {}; match: {}; extra: {}; missing: {}; WTF: {}".format( row, self.counts[MutationPresence.match], self.counts[MutationPresence.extra], self.counts[MutationPresence.missing], self.counts[MutationPresence.unkown]) ) sys.stdout.flush() def _do_row(self, insert_stmt, i, num_partitions): # Error callback for async requests def handle_errors(row, exc): self.num_request_done += 1 try: name = type(exc).__name__ self.exception_type[name] += 1 except Exception as e: print traceback.format_exception_only(type(e), e) # Success callback for async requests def success_callback(row): self.num_request_done += 1 if i % self.update_stats_every == 0: self._print_write_status(i) row = row_generate(i, num_partitions) async = self.session.execute_async(insert_stmt, row) errors = partial(handle_errors, row) async.add_callbacks(success_callback, errors) def _populate_rows(self): statement = SimpleStatement( "SELECT a, b, c FROM mvtest.test1", consistency_level=readConsistency ) data = self.session.execute(statement) for row in data: 
self.rows[(row.a, row.b)] = row.c @known_failure(failure_source='cassandra', jira_url='https://issues.apache.org/jira/browse/CASSANDRA-11290', flaky=True) @require(11290) def single_partition_consistent_reads_after_write_test(self): """ Tests consistency of multiple writes to a single partition @jira_ticket CASSANDRA-10981 """ self._consistent_reads_after_write_test(1) def multi_partition_consistent_reads_after_write_test(self): """ Tests consistency of multiple writes to a multiple partitions @jira_ticket CASSANDRA-10981 """ self._consistent_reads_after_write_test(20) def _consistent_reads_after_write_test(self, num_partitions): session = self.prepare() node1, node2, node3 = self.cluster.nodelist() # Test config lower = 0 upper = 100000 processes = 4 queues = [None] * processes eachProcess = (upper - lower) / processes debug("Creating schema") session.execute( ("CREATE KEYSPACE IF NOT EXISTS mvtest WITH replication = " "{'class': 'SimpleStrategy', 'replication_factor': '3'}") ) session.execute( "CREATE TABLE mvtest.test1 (a int, b int, c int, d int, PRIMARY KEY (a,b))" ) session.cluster.control_connection.wait_for_schema_agreement() insert1 = session.prepare("INSERT INTO mvtest.test1 (a,b,c,d) VALUES (?,?,?,?)") insert1.consistency_level = writeConsistency debug("Writing data to base table") for i in range(upper / 10): self._do_row(insert1, i, num_partitions) debug("Creating materialized view") session.execute( ('CREATE MATERIALIZED VIEW mvtest.mv1 AS ' 'SELECT a,b,c,d FROM mvtest.test1 WHERE a IS NOT NULL AND b IS NOT NULL AND ' 'c IS NOT NULL PRIMARY KEY (c,a,b)') ) session.cluster.control_connection.wait_for_schema_agreement() debug("Writing more data to base table") for i in range(upper / 10, upper): self._do_row(insert1, i, num_partitions) # Wait that all requests are done while self.num_request_done < upper: time.sleep(1) debug("Making sure all batchlogs are replayed on node1") node1.nodetool("replaybatchlog") debug("Making sure all batchlogs are replayed on node2") node2.nodetool("replaybatchlog") debug("Making sure all batchlogs are replayed on node3") node3.nodetool("replaybatchlog") debug("Finished writes, now verifying reads") self._populate_rows() for i in range(processes): start = lower + (eachProcess * i) if i == processes - 1: end = upper else: end = lower + (eachProcess * (i + 1)) q = Queue() node_ip = self.get_ip_from_node(node2) p = Process(target=thread_session, args=(node_ip, q, start, end, self.rows, num_partitions)) p.start() queues[i] = q for i in range(lower, upper): if i % 100 == 0: self._print_read_status(i) mm = queues[i % processes].get() if not mm.out() is None: sys.stdout.write("\r{}\n" .format(mm.out())) self.counts[mm.mp] += 1 self._print_read_status(upper) sys.stdout.write("\n") sys.stdout.flush()
thobbs/cassandra-dtest
materialized_views_test.py
Python
apache-2.0
61,309
# Copyright 2012 VMware, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import contextlib import copy import eventlet import mock import netaddr from oslo.config import cfg from oslo import messaging from testtools import matchers from neutron.agent.common import config as agent_config from neutron.agent.l3 import agent as l3_agent from neutron.agent.l3 import config as l3_config from neutron.agent.l3 import dvr from neutron.agent.l3 import dvr_router from neutron.agent.l3 import ha from neutron.agent.l3 import link_local_allocator as lla from neutron.agent.l3 import router_info as l3router from neutron.agent.linux import interface from neutron.agent.linux import ra from neutron.agent.metadata import driver as metadata_driver from neutron.common import config as base_config from neutron.common import constants as l3_constants from neutron.common import exceptions as n_exc from neutron.common import utils as common_utils from neutron.i18n import _LE from neutron.openstack.common import log from neutron.openstack.common import uuidutils from neutron.plugins.common import constants as p_const from neutron.tests import base _uuid = uuidutils.generate_uuid HOSTNAME = 'myhost' FAKE_ID = _uuid() FAKE_ID_2 = _uuid() FIP_PRI = 32768 class FakeDev(object): def __init__(self, name): self.name = name def router_append_interface(router, count=1, ip_version=4, ra_mode=None, addr_mode=None): if ip_version == 4: ip_pool = '35.4.%i.4' cidr_pool = '35.4.%i.0/24' gw_pool = '35.4.%i.1' elif ip_version == 6: ip_pool = 'fd01:%x::6' cidr_pool = 'fd01:%x::/64' gw_pool = 'fd01:%x::1' else: raise ValueError("Invalid ip_version: %s" % ip_version) interfaces = router[l3_constants.INTERFACE_KEY] current = sum( [netaddr.IPNetwork(p['subnet']['cidr']).version == ip_version for p in interfaces]) mac_address = netaddr.EUI('ca:fe:de:ad:be:ef') mac_address.dialect = netaddr.mac_unix for i in range(current, current + count): interfaces.append( {'id': _uuid(), 'network_id': _uuid(), 'admin_state_up': True, 'fixed_ips': [{'ip_address': ip_pool % i, 'subnet_id': _uuid()}], 'mac_address': str(mac_address), 'subnet': {'cidr': cidr_pool % i, 'gateway_ip': gw_pool % i, 'ipv6_ra_mode': ra_mode, 'ipv6_address_mode': addr_mode}}) mac_address.value += 1 def prepare_router_data(ip_version=4, enable_snat=None, num_internal_ports=1, enable_floating_ip=False, enable_ha=False, extra_routes=False): if ip_version == 4: ip_addr = '19.4.4.4' cidr = '19.4.4.0/24' gateway_ip = '19.4.4.1' elif ip_version == 6: ip_addr = 'fd00::4' cidr = 'fd00::/64' gateway_ip = 'fd00::1' else: raise ValueError("Invalid ip_version: %s" % ip_version) router_id = _uuid() ex_gw_port = {'id': _uuid(), 'mac_address': 'ca:fe:de:ad:be:ee', 'network_id': _uuid(), 'fixed_ips': [{'ip_address': ip_addr, 'subnet_id': _uuid()}], 'subnet': {'cidr': cidr, 'gateway_ip': gateway_ip}} routes = [] if extra_routes: routes = [{'destination': '8.8.8.0/24', 'nexthop': ip_addr}] router = { 'id': router_id, 'distributed': False, l3_constants.INTERFACE_KEY: [], 'routes': 
routes, 'gw_port': ex_gw_port} if enable_floating_ip: router[l3_constants.FLOATINGIP_KEY] = [{ 'id': _uuid(), 'port_id': _uuid(), 'floating_ip_address': '19.4.4.2', 'fixed_ip_address': '10.0.0.1'}] router_append_interface(router, count=num_internal_ports, ip_version=ip_version) if enable_ha: router['ha'] = True router['ha_vr_id'] = 1 router[l3_constants.HA_INTERFACE_KEY] = get_ha_interface() if enable_snat is not None: router['enable_snat'] = enable_snat return router def _get_subnet_id(port): return port['fixed_ips'][0]['subnet_id'] #TODO(jschwarz): This is a shared function with both the unit tests # and the functional tests, and should be moved elsewhere (probably # neutron/tests/common/). def get_ha_interface(ip='169.254.192.1', mac='12:34:56:78:2b:5d'): return {'admin_state_up': True, 'device_id': _uuid(), 'device_owner': 'network:router_ha_interface', 'fixed_ips': [{'ip_address': ip, 'subnet_id': _uuid()}], 'id': _uuid(), 'mac_address': mac, 'name': u'L3 HA Admin port 0', 'network_id': _uuid(), 'status': u'ACTIVE', 'subnet': {'cidr': '169.254.192.0/18', 'gateway_ip': '169.254.255.254', 'id': _uuid()}, 'tenant_id': '', 'agent_id': _uuid(), 'agent_host': 'aaa', 'priority': 1} class TestBasicRouterOperations(base.BaseTestCase): def setUp(self): super(TestBasicRouterOperations, self).setUp() self.conf = agent_config.setup_conf() self.conf.register_opts(base_config.core_opts) self.conf.register_cli_opts(log.common_cli_opts) self.conf.register_cli_opts(log.logging_cli_opts) self.conf.register_opts(l3_config.OPTS) self.conf.register_opts(ha.OPTS) agent_config.register_interface_driver_opts_helper(self.conf) agent_config.register_use_namespaces_opts_helper(self.conf) agent_config.register_root_helper(self.conf) self.conf.register_opts(interface.OPTS) self.conf.set_override('router_id', 'fake_id') self.conf.set_override('interface_driver', 'neutron.agent.linux.interface.NullDriver') self.conf.set_override('send_arp_for_ha', 1) self.conf.set_override('state_path', '') self.conf.root_helper = 'sudo' self.device_exists_p = mock.patch( 'neutron.agent.linux.ip_lib.device_exists') self.device_exists = self.device_exists_p.start() mock.patch('neutron.agent.l3.ha.AgentMixin' '._init_ha_conf_path').start() mock.patch('neutron.agent.linux.keepalived.KeepalivedNotifierMixin' '._get_full_config_file_path').start() self.utils_exec_p = mock.patch( 'neutron.agent.linux.utils.execute') self.utils_exec = self.utils_exec_p.start() self.utils_replace_file_p = mock.patch( 'neutron.agent.linux.utils.replace_file') self.utils_replace_file = self.utils_replace_file_p.start() self.external_process_p = mock.patch( 'neutron.agent.linux.external_process.ProcessManager') self.external_process = self.external_process_p.start() self.send_arp_p = mock.patch( 'neutron.agent.linux.ip_lib.send_gratuitous_arp') self.send_arp = self.send_arp_p.start() self.send_arp_proxyarp_p = mock.patch( 'neutron.agent.linux.ip_lib.send_garp_for_proxyarp') self.send_arp_proxyarp = self.send_arp_proxyarp_p.start() self.dvr_cls_p = mock.patch('neutron.agent.linux.interface.NullDriver') driver_cls = self.dvr_cls_p.start() self.mock_driver = mock.MagicMock() self.mock_driver.DEV_NAME_LEN = ( interface.LinuxInterfaceDriver.DEV_NAME_LEN) driver_cls.return_value = self.mock_driver self.ip_cls_p = mock.patch('neutron.agent.linux.ip_lib.IPWrapper') ip_cls = self.ip_cls_p.start() self.mock_ip = mock.MagicMock() ip_cls.return_value = self.mock_ip ip_rule = mock.patch('neutron.agent.linux.ip_lib.IpRule').start() self.mock_rule = mock.MagicMock() 
ip_rule.return_value = self.mock_rule ip_dev = mock.patch('neutron.agent.linux.ip_lib.IPDevice').start() self.mock_ip_dev = mock.MagicMock() ip_dev.return_value = self.mock_ip_dev self.l3pluginApi_cls_p = mock.patch( 'neutron.agent.l3.agent.L3PluginApi') l3pluginApi_cls = self.l3pluginApi_cls_p.start() self.plugin_api = mock.MagicMock() l3pluginApi_cls.return_value = self.plugin_api self.looping_call_p = mock.patch( 'neutron.openstack.common.loopingcall.FixedIntervalLoopingCall') self.looping_call_p.start() self.snat_ports = [{'subnet': {'cidr': '152.2.0.0/16', 'gateway_ip': '152.2.0.1', 'id': _uuid()}, 'network_id': _uuid(), 'device_owner': 'network:router_centralized_snat', 'ip_cidr': '152.2.0.13/16', 'mac_address': 'fa:16:3e:80:8d:80', 'fixed_ips': [{'subnet_id': _uuid(), 'ip_address': '152.2.0.13'}], 'id': _uuid(), 'device_id': _uuid()}, {'subnet': {'cidr': '152.10.0.0/16', 'gateway_ip': '152.10.0.1', 'id': _uuid()}, 'network_id': _uuid(), 'device_owner': 'network:router_centralized_snat', 'ip_cidr': '152.10.0.13/16', 'mac_address': 'fa:16:3e:80:8d:80', 'fixed_ips': [{'subnet_id': _uuid(), 'ip_address': '152.10.0.13'}], 'id': _uuid(), 'device_id': _uuid()}] def _prepare_internal_network_data(self): port_id = _uuid() router_id = _uuid() network_id = _uuid() router = prepare_router_data(num_internal_ports=2) router_id = router['id'] ri = l3router.RouterInfo(router_id, self.conf.root_helper, router=router) agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) cidr = '99.0.1.9/24' mac = 'ca:fe:de:ad:be:ef' port = {'network_id': network_id, 'id': port_id, 'ip_cidr': cidr, 'mac_address': mac} return agent, ri, port def test_periodic_sync_routers_task_raise_exception(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) self.plugin_api.get_routers.side_effect = ValueError() with mock.patch.object(agent, '_cleanup_namespaces') as f: self.assertRaises(ValueError, agent.periodic_sync_routers_task, agent.context) self.assertTrue(agent.fullsync) self.assertFalse(f.called) def test_l3_initial_full_sync_done(self): with mock.patch.object(l3_agent.L3NATAgent, 'periodic_sync_routers_task') as router_sync: with mock.patch.object(eventlet, 'spawn_n'): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) agent.after_start() router_sync.assert_called_once_with(agent.context) def test_periodic_sync_routers_task_call_clean_stale_namespaces(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) self.plugin_api.get_routers.return_value = [] with mock.patch.object(agent, '_cleanup_namespaces') as f: agent.periodic_sync_routers_task(agent.context) self.assertTrue(f.called) def test_router_info_create(self): id = _uuid() ns = "ns-" + id ri = l3router.RouterInfo(id, self.conf.root_helper, {}, ns_name=ns) self.assertTrue(ri.ns_name.endswith(id)) def test_router_info_create_with_router(self): id = _uuid() ex_gw_port = {'id': _uuid(), 'network_id': _uuid(), 'fixed_ips': [{'ip_address': '19.4.4.4', 'subnet_id': _uuid()}], 'subnet': {'cidr': '19.4.4.0/24', 'gateway_ip': '19.4.4.1'}} router = { 'id': _uuid(), 'enable_snat': True, 'routes': [], 'gw_port': ex_gw_port} ns = "ns-" + id ri = l3router.RouterInfo(id, self.conf.root_helper, router, ns_name=ns) self.assertTrue(ri.ns_name.endswith(id)) self.assertEqual(ri.router, router) def test_agent_create(self): l3_agent.L3NATAgent(HOSTNAME, self.conf) def _test_internal_network_action(self, action): agent, ri, port = self._prepare_internal_network_data() interface_name = agent.get_internal_device_name(port['id']) if action == 'add': self.device_exists.return_value = False 
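            # The device is reported as absent, so the 'add' path should plug a new interface, initialize L3 on it exactly once, and send a gratuitous ARP for the port's 99.0.1.9 address (checked below).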
agent.internal_network_added(ri, port) self.assertEqual(self.mock_driver.plug.call_count, 1) self.assertEqual(self.mock_driver.init_l3.call_count, 1) self.send_arp.assert_called_once_with(ri.ns_name, interface_name, '99.0.1.9', mock.ANY, mock.ANY) elif action == 'remove': self.device_exists.return_value = True agent.internal_network_removed(ri, port) self.assertEqual(self.mock_driver.unplug.call_count, 1) else: raise Exception("Invalid action %s" % action) def _test_internal_network_action_dist(self, action): agent, ri, port = self._prepare_internal_network_data() ri.router['distributed'] = True ri.router['gw_port_host'] = HOSTNAME agent.host = HOSTNAME agent.conf.agent_mode = 'dvr_snat' sn_port = {'fixed_ips': [{'ip_address': '20.0.0.31', 'subnet_id': _uuid()}], 'subnet': {'gateway_ip': '20.0.0.1'}, 'extra_subnets': [{'cidr': '172.16.0.0/24'}], 'id': _uuid(), 'network_id': _uuid(), 'mac_address': 'ca:fe:de:ad:be:ef', 'ip_cidr': '20.0.0.31/24'} if action == 'add': self.device_exists.return_value = False agent._map_internal_interfaces = mock.Mock(return_value=sn_port) agent._snat_redirect_add = mock.Mock() agent._set_subnet_info = mock.Mock() agent._internal_network_added = mock.Mock() agent.internal_network_added(ri, port) self.assertEqual(agent._snat_redirect_add.call_count, 1) self.assertEqual(agent._set_subnet_info.call_count, 1) self.assertEqual(agent._internal_network_added.call_count, 2) agent._internal_network_added.assert_called_with( agent.get_snat_ns_name(ri.router['id']), sn_port['network_id'], sn_port['id'], sn_port['ip_cidr'], sn_port['mac_address'], agent.get_snat_int_device_name(sn_port['id']), dvr.SNAT_INT_DEV_PREFIX) def test_agent_add_internal_network(self): self._test_internal_network_action('add') def test_agent_add_internal_network_dist(self): self._test_internal_network_action_dist('add') def test_agent_remove_internal_network(self): self._test_internal_network_action('remove') def _test_external_gateway_action(self, action, router): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) ri = l3router.RouterInfo(router['id'], self.conf.root_helper, router=router, ns_name=agent.get_ns_name(router['id'])) # Special setup for dvr routers if router.get('distributed'): agent.conf.agent_mode = 'dvr_snat' agent.host = HOSTNAME agent._create_dvr_gateway = mock.Mock() agent.get_snat_interfaces = mock.Mock(return_value=self.snat_ports) ex_gw_port = {'fixed_ips': [{'ip_address': '20.0.0.30', 'subnet_id': _uuid()}], 'subnet': {'gateway_ip': '20.0.0.1'}, 'extra_subnets': [{'cidr': '172.16.0.0/24'}], 'id': _uuid(), 'network_id': _uuid(), 'mac_address': 'ca:fe:de:ad:be:ef', 'ip_cidr': '20.0.0.30/24'} interface_name = agent.get_external_device_name(ex_gw_port['id']) if action == 'add': self.device_exists.return_value = False fake_fip = {'floatingips': [{'id': _uuid(), 'floating_ip_address': '192.168.1.34', 'fixed_ip_address': '192.168.0.1', 'port_id': _uuid()}]} router[l3_constants.FLOATINGIP_KEY] = fake_fip['floatingips'] agent.external_gateway_added(ri, ex_gw_port, interface_name) if not router.get('distributed'): self.assertEqual(self.mock_driver.plug.call_count, 1) self.assertEqual(self.mock_driver.init_l3.call_count, 1) self.send_arp.assert_called_once_with(ri.ns_name, interface_name, '20.0.0.30', mock.ANY, mock.ANY) kwargs = {'preserve_ips': ['192.168.1.34/32'], 'namespace': 'qrouter-' + router['id'], 'gateway': '20.0.0.1', 'extra_subnets': [{'cidr': '172.16.0.0/24'}]} self.mock_driver.init_l3.assert_called_with(interface_name, ['20.0.0.30/24'], **kwargs) else: 
agent._create_dvr_gateway.assert_called_once_with( ri, ex_gw_port, interface_name, self.snat_ports) elif action == 'remove': self.device_exists.return_value = True agent.external_gateway_removed(ri, ex_gw_port, interface_name) self.assertEqual(self.mock_driver.unplug.call_count, 1) else: raise Exception("Invalid action %s" % action) def _prepare_ext_gw_test(self, agent): ex_gw_port = {'fixed_ips': [{'ip_address': '20.0.0.30', 'subnet_id': _uuid()}], 'subnet': {'gateway_ip': '20.0.0.1'}, 'extra_subnets': [{'cidr': '172.16.0.0/24'}], 'id': _uuid(), 'network_id': _uuid(), 'mac_address': 'ca:fe:de:ad:be:ef', 'ip_cidr': '20.0.0.30/24'} interface_name = agent.get_external_device_name(ex_gw_port['id']) self.device_exists.return_value = True return interface_name, ex_gw_port def test_external_gateway_updated(self): router = prepare_router_data(num_internal_ports=2) agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) ri = l3router.RouterInfo(router['id'], self.conf.root_helper, router=router, ns_name=agent.get_ns_name(router['id'])) interface_name, ex_gw_port = self._prepare_ext_gw_test(agent) fake_fip = {'floatingips': [{'id': _uuid(), 'floating_ip_address': '192.168.1.34', 'fixed_ip_address': '192.168.0.1', 'port_id': _uuid()}]} router[l3_constants.FLOATINGIP_KEY] = fake_fip['floatingips'] agent.external_gateway_updated(ri, ex_gw_port, interface_name) self.assertEqual(self.mock_driver.plug.call_count, 0) self.assertEqual(self.mock_driver.init_l3.call_count, 1) self.send_arp.assert_called_once_with(ri.ns_name, interface_name, '20.0.0.30', mock.ANY, mock.ANY) kwargs = {'preserve_ips': ['192.168.1.34/32'], 'namespace': 'qrouter-' + router['id'], 'gateway': '20.0.0.1', 'extra_subnets': [{'cidr': '172.16.0.0/24'}]} self.mock_driver.init_l3.assert_called_with(interface_name, ['20.0.0.30/24'], **kwargs) def _test_ext_gw_updated_dvr_agent_mode(self, host, agent_mode, expected_call_count): router = prepare_router_data(num_internal_ports=2) ri = l3router.RouterInfo(router['id'], self.conf.root_helper, router=router) agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) interface_name, ex_gw_port = self._prepare_ext_gw_test(agent) agent._external_gateway_added = mock.Mock() # test agent mode = dvr (compute node) router['distributed'] = True router['gw_port_host'] = host agent.conf.agent_mode = agent_mode agent.external_gateway_updated(ri, ex_gw_port, interface_name) # no gateway should be added on dvr node self.assertEqual(expected_call_count, agent._external_gateway_added.call_count) def test_ext_gw_updated_dvr_agent_mode(self): # no gateway should be added on dvr node self._test_ext_gw_updated_dvr_agent_mode('any-foo', 'dvr', 0) def test_ext_gw_updated_dvr_snat_agent_mode_no_host(self): # no gateway should be added on dvr_snat node without host match self._test_ext_gw_updated_dvr_agent_mode('any-foo', 'dvr_snat', 0) def test_ext_gw_updated_dvr_snat_agent_mode_host(self): # gateway should be added on dvr_snat node self._test_ext_gw_updated_dvr_agent_mode(self.conf.host, 'dvr_snat', 1) def test_agent_add_external_gateway(self): router = prepare_router_data(num_internal_ports=2) self._test_external_gateway_action('add', router) def test_agent_add_external_gateway_dist(self): router = prepare_router_data(num_internal_ports=2) router['distributed'] = True router['gw_port_host'] = HOSTNAME self._test_external_gateway_action('add', router) def test_agent_remove_external_gateway(self): router = prepare_router_data(num_internal_ports=2) self._test_external_gateway_action('remove', router) def 
test_agent_remove_external_gateway_dist(self): router = prepare_router_data(num_internal_ports=2) router['distributed'] = True router['gw_port_host'] = HOSTNAME self._test_external_gateway_action('remove', router) def _check_agent_method_called(self, agent, calls, namespace): self.mock_ip.netns.execute.assert_has_calls( [mock.call(call, check_exit_code=False) for call in calls], any_order=True) def _test_routing_table_update(self, namespace): if not namespace: self.conf.set_override('use_namespaces', False) router_id = _uuid() ri = l3router.RouterInfo(router_id, self.conf.root_helper, {}) agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) fake_route1 = {'destination': '135.207.0.0/16', 'nexthop': '1.2.3.4'} fake_route2 = {'destination': '135.207.111.111/32', 'nexthop': '1.2.3.4'} agent._update_routing_table(ri, 'replace', fake_route1) expected = [['ip', 'route', 'replace', 'to', '135.207.0.0/16', 'via', '1.2.3.4']] self._check_agent_method_called(agent, expected, namespace) agent._update_routing_table(ri, 'delete', fake_route1) expected = [['ip', 'route', 'delete', 'to', '135.207.0.0/16', 'via', '1.2.3.4']] self._check_agent_method_called(agent, expected, namespace) agent._update_routing_table(ri, 'replace', fake_route2) expected = [['ip', 'route', 'replace', 'to', '135.207.111.111/32', 'via', '1.2.3.4']] self._check_agent_method_called(agent, expected, namespace) agent._update_routing_table(ri, 'delete', fake_route2) expected = [['ip', 'route', 'delete', 'to', '135.207.111.111/32', 'via', '1.2.3.4']] self._check_agent_method_called(agent, expected, namespace) def test_agent_routing_table_updated(self): self._test_routing_table_update(namespace=True) def test_agent_routing_table_updated_no_namespace(self): self._test_routing_table_update(namespace=False) def test_routes_updated(self): self._test_routes_updated(namespace=True) def test_routes_updated_no_namespace(self): self._test_routes_updated(namespace=False) def _test_routes_updated(self, namespace=True): if not namespace: self.conf.set_override('use_namespaces', False) agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) router_id = _uuid() ri = l3router.RouterInfo(router_id, self.conf.root_helper, {}) ri.router = {} fake_old_routes = [] fake_new_routes = [{'destination': "110.100.31.0/24", 'nexthop': "10.100.10.30"}, {'destination': "110.100.30.0/24", 'nexthop': "10.100.10.30"}] ri.routes = fake_old_routes ri.router['routes'] = fake_new_routes agent.routes_updated(ri) expected = [['ip', 'route', 'replace', 'to', '110.100.30.0/24', 'via', '10.100.10.30'], ['ip', 'route', 'replace', 'to', '110.100.31.0/24', 'via', '10.100.10.30']] self._check_agent_method_called(agent, expected, namespace) fake_new_routes = [{'destination': "110.100.30.0/24", 'nexthop': "10.100.10.30"}] ri.router['routes'] = fake_new_routes agent.routes_updated(ri) expected = [['ip', 'route', 'delete', 'to', '110.100.31.0/24', 'via', '10.100.10.30']] self._check_agent_method_called(agent, expected, namespace) fake_new_routes = [] ri.router['routes'] = fake_new_routes agent.routes_updated(ri) expected = [['ip', 'route', 'delete', 'to', '110.100.30.0/24', 'via', '10.100.10.30']] self._check_agent_method_called(agent, expected, namespace) def _verify_snat_rules(self, rules, router, negate=False): interfaces = router[l3_constants.INTERFACE_KEY] source_cidrs = [] for iface in interfaces: prefix = iface['subnet']['cidr'].split('/')[1] source_cidr = "%s/%s" % (iface['fixed_ips'][0]['ip_address'], prefix) source_cidrs.append(source_cidr) source_nat_ip = 
router['gw_port']['fixed_ips'][0]['ip_address'] interface_name = ('qg-%s' % router['gw_port']['id'])[:14] expected_rules = [ '! -i %s ! -o %s -m conntrack ! --ctstate DNAT -j ACCEPT' % (interface_name, interface_name), '-o %s -j SNAT --to-source %s' % (interface_name, source_nat_ip)] for r in rules: if negate: self.assertNotIn(r.rule, expected_rules) else: self.assertIn(r.rule, expected_rules) def test__get_snat_idx_ipv4(self): ip_cidr = '101.12.13.00/24' agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) snat_idx = agent._get_snat_idx(ip_cidr) # 0x650C0D00 is numerical value of 101.12.13.00 self.assertEqual(0x650C0D00, snat_idx) def test__get_snat_idx_ipv6(self): ip_cidr = '2620:0:a03:e100::/64' agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) snat_idx = agent._get_snat_idx(ip_cidr) # 0x3D345705 is 30 bit xor folded crc32 of the ip_cidr self.assertEqual(0x3D345705, snat_idx) def test__get_snat_idx_ipv6_below_32768(self): ip_cidr = 'd488::/30' # crc32 of this ip_cidr is 0x1BD7 agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) snat_idx = agent._get_snat_idx(ip_cidr) # 0x1BD7 + 0x3FFFFFFF = 0x40001BD6 self.assertEqual(0x40001BD6, snat_idx) def test__map_internal_interfaces(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) router = prepare_router_data(num_internal_ports=4) ri = l3router.RouterInfo(router['id'], self.conf.root_helper, router=router) test_port = { 'mac_address': '00:12:23:34:45:56', 'fixed_ips': [{'subnet_id': _get_subnet_id( router[l3_constants.INTERFACE_KEY][0]), 'ip_address': '101.12.13.14'}]} internal_ports = ri.router.get(l3_constants.INTERFACE_KEY, []) # test valid case res_port = agent._map_internal_interfaces(ri, internal_ports[0], [test_port]) self.assertEqual(test_port, res_port) # test invalid case test_port['fixed_ips'][0]['subnet_id'] = 1234 res_ip = agent._map_internal_interfaces(ri, internal_ports[0], [test_port]) self.assertNotEqual(test_port, res_ip) self.assertIsNone(res_ip) def test_get_internal_port(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) router = prepare_router_data(num_internal_ports=4) subnet_ids = [_get_subnet_id(port) for port in router[l3_constants.INTERFACE_KEY]] ri = l3router.RouterInfo(router['id'], self.conf.root_helper, router=router) # Test Basic cases port = agent.get_internal_port(ri, subnet_ids[0]) fips = port.get('fixed_ips', []) subnet_id = fips[0]['subnet_id'] self.assertEqual(subnet_ids[0], subnet_id) port = agent.get_internal_port(ri, subnet_ids[1]) fips = port.get('fixed_ips', []) subnet_id = fips[0]['subnet_id'] self.assertEqual(subnet_ids[1], subnet_id) port = agent.get_internal_port(ri, subnet_ids[3]) fips = port.get('fixed_ips', []) subnet_id = fips[0]['subnet_id'] self.assertEqual(subnet_ids[3], subnet_id) # Test miss cases no_port = agent.get_internal_port(ri, FAKE_ID) self.assertIsNone(no_port) port = agent.get_internal_port(ri, subnet_ids[0]) fips = port.get('fixed_ips', []) subnet_id = fips[0]['subnet_id'] self.assertNotEqual(subnet_ids[3], subnet_id) def test__set_subnet_arp_info(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) router = prepare_router_data(num_internal_ports=2) router['distributed'] = True ri = l3router.RouterInfo(router['id'], self.conf.root_helper, router=router) ports = ri.router.get(l3_constants.INTERFACE_KEY, []) test_ports = [{'mac_address': '00:11:22:33:44:55', 'device_owner': 'network:dhcp', 'subnet_id': _get_subnet_id(ports[0]), 'fixed_ips': [{'ip_address': '1.2.3.4'}]}] self.plugin_api.get_ports_by_subnet.return_value = test_ports # Test basic case ports[0]['subnet']['id'] = 
_get_subnet_id(ports[0]) agent._set_subnet_arp_info(ri, ports[0]) self.mock_ip_dev.neigh.add.assert_called_once_with( 4, '1.2.3.4', '00:11:22:33:44:55') # Test negative case router['distributed'] = False agent._set_subnet_arp_info(ri, ports[0]) self.mock_ip_dev.neigh.add.never_called() def test_add_arp_entry(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) router = prepare_router_data(num_internal_ports=2) subnet_id = _get_subnet_id(router[l3_constants.INTERFACE_KEY][0]) arp_table = {'ip_address': '1.7.23.11', 'mac_address': '00:11:22:33:44:55', 'subnet_id': subnet_id} payload = {'arp_table': arp_table, 'router_id': router['id']} agent._router_added(router['id'], router) agent.add_arp_entry(None, payload) agent.router_deleted(None, router['id']) self.mock_ip_dev.neigh.add.assert_called_once_with( 4, '1.7.23.11', '00:11:22:33:44:55') def test_add_arp_entry_no_routerinfo(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) router = prepare_router_data(num_internal_ports=2) subnet_id = _get_subnet_id(router[l3_constants.INTERFACE_KEY][0]) arp_table = {'ip_address': '1.7.23.11', 'mac_address': '00:11:22:33:44:55', 'subnet_id': subnet_id} payload = {'arp_table': arp_table, 'router_id': router['id']} agent._update_arp_entry = mock.Mock() agent.add_arp_entry(None, payload) self.assertFalse(agent._update_arp_entry.called) def test__update_arp_entry_with_no_subnet(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) ri = l3router.RouterInfo( 'foo_router_id', mock.ANY, {'distributed': True, 'gw_port_host': HOSTNAME}) with mock.patch.object(l3_agent.ip_lib, 'IPDevice') as f: agent._update_arp_entry(ri, mock.ANY, mock.ANY, 'foo_subnet_id', 'add') self.assertFalse(f.call_count) def test_del_arp_entry(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) router = prepare_router_data(num_internal_ports=2) subnet_id = _get_subnet_id(router[l3_constants.INTERFACE_KEY][0]) arp_table = {'ip_address': '1.5.25.15', 'mac_address': '00:44:33:22:11:55', 'subnet_id': subnet_id} payload = {'arp_table': arp_table, 'router_id': router['id']} agent._router_added(router['id'], router) # first add the entry agent.add_arp_entry(None, payload) # now delete it agent.del_arp_entry(None, payload) self.mock_ip_dev.neigh.delete.assert_called_once_with( 4, '1.5.25.15', '00:44:33:22:11:55') agent.router_deleted(None, router['id']) @mock.patch('neutron.agent.linux.ip_lib.IPDevice') def _test_scan_fip_ports(self, ri, ip_list, IPDevice): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) self.device_exists.return_value = True IPDevice.return_value = device = mock.Mock() device.addr.list.return_value = ip_list agent.scan_fip_ports(ri) def test_scan_fip_ports_restart_fips(self): router = prepare_router_data() ri = dvr_router.DvrRouter(router['id'], self.conf.root_helper, router=router) ri.router['distributed'] = True ip_list = [{'cidr': '111.2.3.4/32'}, {'cidr': '111.2.3.5/32'}] self._test_scan_fip_ports(ri, ip_list) self.assertEqual(ri.dist_fip_count, 2) def test_scan_fip_ports_restart_none(self): router = prepare_router_data() ri = dvr_router.DvrRouter(router['id'], self.conf.root_helper, router=router) ri.router['distributed'] = True ip_list = [] self._test_scan_fip_ports(ri, ip_list) self.assertEqual(ri.dist_fip_count, 0) def test_scan_fip_ports_restart_zero(self): router = prepare_router_data() ri = dvr_router.DvrRouter(router['id'], self.conf.root_helper, router=router) ri.router['distributed'] = True ri.dist_fip_count = 0 ip_list = None self._test_scan_fip_ports(ri, ip_list) self.assertEqual(ri.dist_fip_count, 0) 
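    # The process_router tests below cover both the centralized and the distributed (DVR)
    # router paths, asserting that floating-IP address handling, floating-IP NAT rules and
    # external gateway add/update calls are made (or skipped) as expected.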
def test_process_cent_router(self): router = prepare_router_data() ri = l3router.RouterInfo(router['id'], self.conf.root_helper, router=router) self._test_process_router(ri) def test_process_dist_router(self): router = prepare_router_data() ri = dvr_router.DvrRouter(router['id'], self.conf.root_helper, router=router) subnet_id = _get_subnet_id(router[l3_constants.INTERFACE_KEY][0]) ri.router['distributed'] = True ri.router['_snat_router_interfaces'] = [{ 'fixed_ips': [{'subnet_id': subnet_id, 'ip_address': '1.2.3.4'}]}] ri.router['gw_port_host'] = None self._test_process_router(ri) def _test_process_router(self, ri): router = ri.router agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) agent.host = HOSTNAME fake_fip_id = 'fake_fip_id' agent.create_dvr_fip_interfaces = mock.Mock() agent.process_router_floating_ip_addresses = mock.Mock() agent.process_router_floating_ip_nat_rules = mock.Mock() agent.process_router_floating_ip_addresses.return_value = { fake_fip_id: 'ACTIVE'} agent.external_gateway_added = mock.Mock() agent.external_gateway_updated = mock.Mock() fake_floatingips1 = {'floatingips': [ {'id': fake_fip_id, 'floating_ip_address': '8.8.8.8', 'fixed_ip_address': '7.7.7.7', 'port_id': _uuid(), 'host': HOSTNAME}]} agent.process_router(ri) ex_gw_port = agent._get_ex_gw_port(ri) agent.process_router_floating_ip_addresses.assert_called_with( ri, ex_gw_port) agent.process_router_floating_ip_addresses.reset_mock() agent.process_router_floating_ip_nat_rules.assert_called_with(ri) agent.process_router_floating_ip_nat_rules.reset_mock() agent.external_gateway_added.reset_mock() # remap floating IP to a new fixed ip fake_floatingips2 = copy.deepcopy(fake_floatingips1) fake_floatingips2['floatingips'][0]['fixed_ip_address'] = '7.7.7.8' router[l3_constants.FLOATINGIP_KEY] = fake_floatingips2['floatingips'] agent.process_router(ri) ex_gw_port = agent._get_ex_gw_port(ri) agent.process_router_floating_ip_addresses.assert_called_with( ri, ex_gw_port) agent.process_router_floating_ip_addresses.reset_mock() agent.process_router_floating_ip_nat_rules.assert_called_with(ri) agent.process_router_floating_ip_nat_rules.reset_mock() self.assertEqual(agent.external_gateway_added.call_count, 0) self.assertEqual(agent.external_gateway_updated.call_count, 0) agent.external_gateway_added.reset_mock() agent.external_gateway_updated.reset_mock() # change the ex_gw_port a bit to test gateway update new_gw_port = copy.deepcopy(ri.router['gw_port']) ri.router['gw_port'] = new_gw_port old_ip = (netaddr.IPAddress(ri.router['gw_port'] ['fixed_ips'][0]['ip_address'])) ri.router['gw_port']['fixed_ips'][0]['ip_address'] = str(old_ip + 1) agent.process_router(ri) ex_gw_port = agent._get_ex_gw_port(ri) agent.process_router_floating_ip_addresses.reset_mock() agent.process_router_floating_ip_nat_rules.reset_mock() self.assertEqual(agent.external_gateway_added.call_count, 0) self.assertEqual(agent.external_gateway_updated.call_count, 1) # remove just the floating ips del router[l3_constants.FLOATINGIP_KEY] agent.process_router(ri) ex_gw_port = agent._get_ex_gw_port(ri) agent.process_router_floating_ip_addresses.assert_called_with( ri, ex_gw_port) agent.process_router_floating_ip_addresses.reset_mock() agent.process_router_floating_ip_nat_rules.assert_called_with(ri) agent.process_router_floating_ip_nat_rules.reset_mock() # now no ports so state is torn down del router[l3_constants.INTERFACE_KEY] del router['gw_port'] agent.process_router(ri) self.assertEqual(self.send_arp.call_count, 1) distributed = ri.router.get('distributed', 
False) self.assertEqual(agent.process_router_floating_ip_addresses.called, distributed) self.assertEqual(agent.process_router_floating_ip_nat_rules.called, distributed) @mock.patch('neutron.agent.linux.ip_lib.IPDevice') def _test_process_router_floating_ip_addresses_add(self, ri, agent, IPDevice): floating_ips = agent.get_floating_ips(ri) fip_id = floating_ips[0]['id'] IPDevice.return_value = device = mock.Mock() device.addr.list.return_value = [] ri.iptables_manager.ipv4['nat'] = mock.MagicMock() ex_gw_port = {'id': _uuid()} with mock.patch.object(lla.LinkLocalAllocator, '_write'): if ri.router['distributed']: agent.create_dvr_fip_interfaces(ri, ex_gw_port) fip_statuses = agent.process_router_floating_ip_addresses( ri, ex_gw_port) self.assertEqual({fip_id: l3_constants.FLOATINGIP_STATUS_ACTIVE}, fip_statuses) device.addr.add.assert_called_once_with(4, '15.1.2.3/32', '15.1.2.3') def test_process_router_floating_ip_nat_rules_add(self): fip = { 'id': _uuid(), 'port_id': _uuid(), 'floating_ip_address': '15.1.2.3', 'fixed_ip_address': '192.168.0.1' } ri = mock.MagicMock() ri.router['distributed'].__nonzero__ = lambda self: False agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) agent.get_floating_ips = mock.Mock(return_value=[fip]) agent.process_router_floating_ip_nat_rules(ri) nat = ri.iptables_manager.ipv4['nat'] nat.clear_rules_by_tag.assert_called_once_with('floating_ip') rules = agent.floating_forward_rules('15.1.2.3', '192.168.0.1') for chain, rule in rules: nat.add_rule.assert_any_call(chain, rule, tag='floating_ip') def test_process_router_cent_floating_ip_add(self): fake_floatingips = {'floatingips': [ {'id': _uuid(), 'floating_ip_address': '15.1.2.3', 'fixed_ip_address': '192.168.0.1', 'floating_network_id': _uuid(), 'port_id': _uuid(), 'host': HOSTNAME}]} router = prepare_router_data(enable_snat=True) router[l3_constants.FLOATINGIP_KEY] = fake_floatingips['floatingips'] ri = l3router.RouterInfo(router['id'], self.conf.root_helper, router=router) ri.iptables_manager.ipv4['nat'] = mock.MagicMock() agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) self._test_process_router_floating_ip_addresses_add(ri, agent) def test_process_router_dist_floating_ip_add(self): fake_floatingips = {'floatingips': [ {'id': _uuid(), 'host': HOSTNAME, 'floating_ip_address': '15.1.2.3', 'fixed_ip_address': '192.168.0.1', 'floating_network_id': _uuid(), 'port_id': _uuid()}, {'id': _uuid(), 'host': 'some-other-host', 'floating_ip_address': '15.1.2.4', 'fixed_ip_address': '192.168.0.10', 'floating_network_id': _uuid(), 'port_id': _uuid()}]} router = prepare_router_data(enable_snat=True) router[l3_constants.FLOATINGIP_KEY] = fake_floatingips['floatingips'] router['distributed'] = True ri = dvr_router.DvrRouter(router['id'], self.conf.root_helper, router=router) ri.iptables_manager.ipv4['nat'] = mock.MagicMock() ri.dist_fip_count = 0 agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) agent.host = HOSTNAME agent.agent_gateway_port = ( {'fixed_ips': [{'ip_address': '20.0.0.30', 'subnet_id': _uuid()}], 'subnet': {'gateway_ip': '20.0.0.1'}, 'id': _uuid(), 'network_id': _uuid(), 'mac_address': 'ca:fe:de:ad:be:ef', 'ip_cidr': '20.0.0.30/24'} ) self._test_process_router_floating_ip_addresses_add(ri, agent) def test_get_router_cidrs_returns_cidrs(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) ri = mock.MagicMock() ri.is_ha = False addresses = ['15.1.2.2/24', '15.1.2.3/32'] device = mock.MagicMock() device.addr.list.return_value = [{'cidr': addresses[0]}, {'cidr': addresses[1]}] self.assertEqual(set(addresses), 
                         agent._get_router_cidrs(ri, device))

    def test_get_router_cidrs_returns_ha_cidrs(self):
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        ri = mock.MagicMock()
        ri.is_ha = True
        device = mock.MagicMock()
        device.name.return_value = 'eth2'
        addresses = ['15.1.2.2/24', '15.1.2.3/32']
        agent._ha_get_existing_cidrs = mock.MagicMock()
        agent._ha_get_existing_cidrs.return_value = addresses
        self.assertEqual(set(addresses),
                         agent._get_router_cidrs(ri, device))

    # TODO(mrsmith): refactor for DVR cases
    @mock.patch('neutron.agent.linux.ip_lib.IPDevice')
    def test_process_router_floating_ip_addresses_remove(self, IPDevice):
        IPDevice.return_value = device = mock.Mock()
        device.addr.list.return_value = [{'cidr': '15.1.2.3/32'}]

        ri = mock.MagicMock()
        ri.router.get.return_value = []
        type(ri).is_ha = mock.PropertyMock(return_value=False)
        ri.router['distributed'].__nonzero__ = lambda self: False

        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)

        fip_statuses = agent.process_router_floating_ip_addresses(
            ri, {'id': _uuid()})
        self.assertEqual({}, fip_statuses)
        device.addr.delete.assert_called_once_with(4, '15.1.2.3/32')
        self.mock_driver.delete_conntrack_state.assert_called_once_with(
            root_helper=self.conf.root_helper,
            namespace=ri.ns_name,
            ip='15.1.2.3/32')

    def test_process_router_floating_ip_nat_rules_remove(self):
        ri = mock.MagicMock()
        ri.router.get.return_value = []

        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)

        agent.process_router_floating_ip_nat_rules(ri)

        nat = ri.iptables_manager.ipv4['nat']
        nat.clear_rules_by_tag.assert_called_once_with('floating_ip')

    @mock.patch('neutron.agent.linux.ip_lib.IPDevice')
    def test_process_router_floating_ip_addresses_remap(self, IPDevice):
        fip_id = _uuid()
        fip = {
            'id': fip_id, 'port_id': _uuid(),
            'floating_ip_address': '15.1.2.3',
            'fixed_ip_address': '192.168.0.2'
        }

        IPDevice.return_value = device = mock.Mock()
        device.addr.list.return_value = [{'cidr': '15.1.2.3/32'}]
        ri = mock.MagicMock()
        ri.router['distributed'].__nonzero__ = lambda self: False
        type(ri).is_ha = mock.PropertyMock(return_value=False)

        ri.router.get.return_value = [fip]

        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)

        fip_statuses = agent.process_router_floating_ip_addresses(
            ri, {'id': _uuid()})
        self.assertEqual({fip_id: l3_constants.FLOATINGIP_STATUS_ACTIVE},
                         fip_statuses)

        self.assertFalse(device.addr.add.called)
        self.assertFalse(device.addr.delete.called)

    @mock.patch('neutron.agent.linux.ip_lib.IPDevice')
    def test_process_router_with_disabled_floating_ip(self, IPDevice):
        fip_id = _uuid()
        fip = {
            'id': fip_id, 'port_id': _uuid(),
            'floating_ip_address': '15.1.2.3',
            'fixed_ip_address': '192.168.0.2'
        }

        ri = mock.MagicMock()
        ri.floating_ips = [fip]
        ri.router.get.return_value = []

        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)

        fip_statuses = agent.process_router_floating_ip_addresses(
            ri, {'id': _uuid()})

        self.assertIsNone(fip_statuses.get(fip_id))

    @mock.patch('neutron.agent.linux.ip_lib.IPDevice')
    def test_process_router_floating_ip_with_device_add_error(self, IPDevice):
        IPDevice.return_value = device = mock.Mock()
        device.addr.add.side_effect = RuntimeError()
        device.addr.list.return_value = []
        fip_id = _uuid()
        fip = {
            'id': fip_id, 'port_id': _uuid(),
            'floating_ip_address': '15.1.2.3',
            'fixed_ip_address': '192.168.0.2'
        }
        ri = mock.MagicMock()
        type(ri).is_ha = mock.PropertyMock(return_value=False)
        ri.router.get.return_value = [fip]
        ri.router['distributed'].__nonzero__ = lambda self: False
        agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)

        fip_statuses = 
agent.process_router_floating_ip_addresses( ri, {'id': _uuid()}) self.assertEqual({fip_id: l3_constants.FLOATINGIP_STATUS_ERROR}, fip_statuses) def test_process_router_snat_disabled(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) router = prepare_router_data(enable_snat=True) ri = l3router.RouterInfo(router['id'], self.conf.root_helper, router=router) agent.external_gateway_added = mock.Mock() # Process with NAT agent.process_router(ri) orig_nat_rules = ri.iptables_manager.ipv4['nat'].rules[:] # Reprocess without NAT router['enable_snat'] = False # Reassign the router object to RouterInfo ri.router = router agent.process_router(ri) # For some reason set logic does not work well with # IpTablesRule instances nat_rules_delta = [r for r in orig_nat_rules if r not in ri.iptables_manager.ipv4['nat'].rules] self.assertEqual(len(nat_rules_delta), 2) self._verify_snat_rules(nat_rules_delta, router) self.assertEqual(self.send_arp.call_count, 1) def test_process_router_snat_enabled(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) router = prepare_router_data(enable_snat=False) ri = l3router.RouterInfo(router['id'], self.conf.root_helper, router=router) agent.external_gateway_added = mock.Mock() # Process without NAT agent.process_router(ri) orig_nat_rules = ri.iptables_manager.ipv4['nat'].rules[:] # Reprocess with NAT router['enable_snat'] = True # Reassign the router object to RouterInfo ri.router = router agent.process_router(ri) # For some reason set logic does not work well with # IpTablesRule instances nat_rules_delta = [r for r in ri.iptables_manager.ipv4['nat'].rules if r not in orig_nat_rules] self.assertEqual(len(nat_rules_delta), 2) self._verify_snat_rules(nat_rules_delta, router) self.assertEqual(self.send_arp.call_count, 1) def test_process_router_interface_added(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) router = prepare_router_data() ri = l3router.RouterInfo(router['id'], self.conf.root_helper, router=router) agent.external_gateway_added = mock.Mock() # Process with NAT agent.process_router(ri) # Add an interface and reprocess router_append_interface(router) # Reassign the router object to RouterInfo ri.router = router agent.process_router(ri) # send_arp is called both times process_router is called self.assertEqual(self.send_arp.call_count, 2) def test_process_ipv6_only_gw(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) router = prepare_router_data(ip_version=6) # Get NAT rules without the gw_port gw_port = router['gw_port'] router['gw_port'] = None ri = l3router.RouterInfo(router['id'], self.conf.root_helper, router=router) agent.external_gateway_added = mock.Mock() agent.process_router(ri) orig_nat_rules = ri.iptables_manager.ipv4['nat'].rules[:] # Get NAT rules with the gw_port router['gw_port'] = gw_port ri = l3router.RouterInfo(router['id'], self.conf.root_helper, router=router) with mock.patch.object( agent, 'external_gateway_nat_rules') as external_gateway_nat_rules: agent.process_router(ri) new_nat_rules = ri.iptables_manager.ipv4['nat'].rules[:] # There should be no change with the NAT rules self.assertFalse(external_gateway_nat_rules.called) self.assertEqual(orig_nat_rules, new_nat_rules) def _process_router_ipv6_interface_added( self, router, ra_mode=None, addr_mode=None): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) ri = l3router.RouterInfo(router['id'], self.conf.root_helper, router=router) agent.external_gateway_added = mock.Mock() # Process with NAT agent.process_router(ri) orig_nat_rules = ri.iptables_manager.ipv4['nat'].rules[:] 
# Add an IPv6 interface and reprocess router_append_interface(router, count=1, ip_version=6, ra_mode=ra_mode, addr_mode=addr_mode) # Reassign the router object to RouterInfo ri.router = router agent.process_router(ri) # IPv4 NAT rules should not be changed by adding an IPv6 interface nat_rules_delta = [r for r in ri.iptables_manager.ipv4['nat'].rules if r not in orig_nat_rules] self.assertFalse(nat_rules_delta) return ri def _expected_call_lookup_ri_process(self, ri, process): """Expected call if a process is looked up in a router instance.""" return [mock.call(cfg.CONF, ri.router['id'], self.conf.root_helper, ri.ns_name, process)] def _assert_ri_process_enabled(self, ri, process): """Verify that process was enabled for a router instance.""" expected_calls = self._expected_call_lookup_ri_process(ri, process) expected_calls.append(mock.call().enable(mock.ANY, True)) self.assertEqual(expected_calls, self.external_process.mock_calls) def _assert_ri_process_disabled(self, ri, process): """Verify that process was disabled for a router instance.""" expected_calls = self._expected_call_lookup_ri_process(ri, process) expected_calls.append(mock.call().disable()) self.assertEqual(expected_calls, self.external_process.mock_calls) def test_process_router_ipv6_interface_added(self): router = prepare_router_data() ri = self._process_router_ipv6_interface_added(router) self._assert_ri_process_enabled(ri, 'radvd') # Expect radvd configured without prefix self.assertNotIn('prefix', self.utils_replace_file.call_args[0][1].split()) def test_process_router_ipv6_slaac_interface_added(self): router = prepare_router_data() ri = self._process_router_ipv6_interface_added( router, ra_mode=l3_constants.IPV6_SLAAC) self._assert_ri_process_enabled(ri, 'radvd') # Expect radvd configured with prefix self.assertIn('prefix', self.utils_replace_file.call_args[0][1].split()) def test_process_router_ipv6v4_interface_added(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) router = prepare_router_data() ri = l3router.RouterInfo(router['id'], self.conf.root_helper, router=router) agent.external_gateway_added = mock.Mock() # Process with NAT agent.process_router(ri) # Add an IPv4 and IPv6 interface and reprocess router_append_interface(router, count=1, ip_version=4) router_append_interface(router, count=1, ip_version=6) # Reassign the router object to RouterInfo ri.router = router agent.process_router(ri) self._assert_ri_process_enabled(ri, 'radvd') def test_process_router_interface_removed(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) router = prepare_router_data(num_internal_ports=2) ri = l3router.RouterInfo(router['id'], self.conf.root_helper, router=router) agent.external_gateway_added = mock.Mock() # Process with NAT agent.process_router(ri) # Add an interface and reprocess del router[l3_constants.INTERFACE_KEY][1] # Reassign the router object to RouterInfo ri.router = router agent.process_router(ri) # send_arp is called both times process_router is called self.assertEqual(self.send_arp.call_count, 2) def test_process_router_ipv6_interface_removed(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) router = prepare_router_data() ri = l3router.RouterInfo(router['id'], self.conf.root_helper, router=router) agent.external_gateway_added = mock.Mock() ri.router = router agent.process_router(ri) # Add an IPv6 interface and reprocess router_append_interface(router, count=1, ip_version=6) agent.process_router(ri) self._assert_ri_process_enabled(ri, 'radvd') # Reset the calls so we can check for disable radvd 
self.external_process.reset_mock() # Remove the IPv6 interface and reprocess del router[l3_constants.INTERFACE_KEY][1] agent.process_router(ri) self._assert_ri_process_disabled(ri, 'radvd') def test_process_router_internal_network_added_unexpected_error(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) router = prepare_router_data() ri = l3router.RouterInfo(router['id'], self.conf.root_helper, router=router) agent.external_gateway_added = mock.Mock() with mock.patch.object( l3_agent.L3NATAgent, 'internal_network_added') as internal_network_added: # raise RuntimeError to simulate that an unexpected exception # occurs internal_network_added.side_effect = RuntimeError self.assertRaises(RuntimeError, agent.process_router, ri) self.assertNotIn( router[l3_constants.INTERFACE_KEY][0], ri.internal_ports) # The unexpected exception has been fixed manually internal_network_added.side_effect = None # periodic_sync_routers_task finds out that _rpc_loop failed to # process the router last time, it will retry in the next run. agent.process_router(ri) # We were able to add the port to ri.internal_ports self.assertIn( router[l3_constants.INTERFACE_KEY][0], ri.internal_ports) def test_process_router_internal_network_removed_unexpected_error(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) router = prepare_router_data() ri = l3router.RouterInfo(router['id'], self.conf.root_helper, router=router) agent.external_gateway_added = mock.Mock() # add an internal port agent.process_router(ri) with mock.patch.object( l3_agent.L3NATAgent, 'internal_network_removed') as internal_net_removed: # raise RuntimeError to simulate that an unexpected exception # occurs internal_net_removed.side_effect = RuntimeError ri.internal_ports[0]['admin_state_up'] = False # The above port is set to down state, remove it. self.assertRaises(RuntimeError, agent.process_router, ri) self.assertIn( router[l3_constants.INTERFACE_KEY][0], ri.internal_ports) # The unexpected exception has been fixed manually internal_net_removed.side_effect = None # periodic_sync_routers_task finds out that _rpc_loop failed to # process the router last time, it will retry in the next run. 
agent.process_router(ri) # We were able to remove the port from ri.internal_ports self.assertNotIn( router[l3_constants.INTERFACE_KEY][0], ri.internal_ports) def test_process_router_floatingip_disabled(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) with mock.patch.object( agent.plugin_rpc, 'update_floatingip_statuses') as mock_update_fip_status: fip_id = _uuid() router = prepare_router_data(num_internal_ports=1) router[l3_constants.FLOATINGIP_KEY] = [ {'id': fip_id, 'floating_ip_address': '8.8.8.8', 'fixed_ip_address': '7.7.7.7', 'port_id': router[l3_constants.INTERFACE_KEY][0]['id']}] ri = l3router.RouterInfo(router['id'], self.conf.root_helper, router=router) agent.external_gateway_added = mock.Mock() agent.process_router(ri) # Assess the call for putting the floating IP up was performed mock_update_fip_status.assert_called_once_with( mock.ANY, ri.router_id, {fip_id: l3_constants.FLOATINGIP_STATUS_ACTIVE}) mock_update_fip_status.reset_mock() # Process the router again, this time without floating IPs router[l3_constants.FLOATINGIP_KEY] = [] ri.router = router agent.process_router(ri) # Assess the call for putting the floating IP up was performed mock_update_fip_status.assert_called_once_with( mock.ANY, ri.router_id, {fip_id: l3_constants.FLOATINGIP_STATUS_DOWN}) def test_process_router_floatingip_exception(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) agent.process_router_floating_ip_addresses = mock.Mock() agent.process_router_floating_ip_addresses.side_effect = RuntimeError with mock.patch.object( agent.plugin_rpc, 'update_floatingip_statuses') as mock_update_fip_status: fip_id = _uuid() router = prepare_router_data(num_internal_ports=1) router[l3_constants.FLOATINGIP_KEY] = [ {'id': fip_id, 'floating_ip_address': '8.8.8.8', 'fixed_ip_address': '7.7.7.7', 'port_id': router[l3_constants.INTERFACE_KEY][0]['id']}] ri = l3router.RouterInfo(router['id'], self.conf.root_helper, router=router) agent.external_gateway_added = mock.Mock() agent.process_router(ri) # Assess the call for putting the floating IP into Error # was performed mock_update_fip_status.assert_called_once_with( mock.ANY, ri.router_id, {fip_id: l3_constants.FLOATINGIP_STATUS_ERROR}) def test_handle_router_snat_rules_distributed_without_snat_manager(self): ri = dvr_router.DvrRouter( 'foo_router_id', mock.ANY, {'distributed': True}) ri.iptables_manager = mock.Mock() agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) with mock.patch.object(l3_agent.LOG, 'debug') as log_debug: agent._handle_router_snat_rules( ri, mock.ANY, mock.ANY, mock.ANY) self.assertIsNone(ri.snat_iptables_manager) self.assertFalse(ri.iptables_manager.called) self.assertTrue(log_debug.called) def test_handle_router_snat_rules_add_back_jump(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) ri = mock.MagicMock() port = {'fixed_ips': [{'ip_address': '192.168.1.4'}]} ri.router = {'distributed': False} agent._handle_router_snat_rules(ri, port, "iface", "add_rules") nat = ri.iptables_manager.ipv4['nat'] nat.empty_chain.assert_any_call('snat') nat.add_rule.assert_any_call('snat', '-j $float-snat') for call in nat.mock_calls: name, args, kwargs = call if name == 'add_rule': self.assertEqual(args, ('snat', '-j $float-snat')) self.assertEqual(kwargs, {}) break def test_handle_router_snat_rules_add_rules(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) ri = l3router.RouterInfo(_uuid(), self.conf.root_helper, {}) ex_gw_port = {'fixed_ips': [{'ip_address': '192.168.1.4'}]} ri.router = {'distributed': False} agent._handle_router_snat_rules(ri, 
ex_gw_port, "iface", "add_rules") nat_rules = map(str, ri.iptables_manager.ipv4['nat'].rules) wrap_name = ri.iptables_manager.wrap_name jump_float_rule = "-A %s-snat -j %s-float-snat" % (wrap_name, wrap_name) snat_rule = ("-A %s-snat -o iface -j SNAT --to-source %s") % ( wrap_name, ex_gw_port['fixed_ips'][0]['ip_address']) self.assertIn(jump_float_rule, nat_rules) self.assertIn(snat_rule, nat_rules) self.assertThat(nat_rules.index(jump_float_rule), matchers.LessThan(nat_rules.index(snat_rule))) def test_process_router_delete_stale_internal_devices(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) stale_devlist = [FakeDev('qr-a1b2c3d4-e5'), FakeDev('qr-b2c3d4e5-f6')] stale_devnames = [dev.name for dev in stale_devlist] get_devices_return = [] get_devices_return.extend(stale_devlist) self.mock_ip.get_devices.return_value = get_devices_return router = prepare_router_data(enable_snat=True, num_internal_ports=1) ri = l3router.RouterInfo(router['id'], self.conf.root_helper, router=router) internal_ports = ri.router.get(l3_constants.INTERFACE_KEY, []) self.assertEqual(len(internal_ports), 1) internal_port = internal_ports[0] with contextlib.nested(mock.patch.object(l3_agent.L3NATAgent, 'internal_network_removed'), mock.patch.object(l3_agent.L3NATAgent, 'internal_network_added'), mock.patch.object(l3_agent.L3NATAgent, 'external_gateway_removed'), mock.patch.object(l3_agent.L3NATAgent, 'external_gateway_added') ) as (internal_network_removed, internal_network_added, external_gateway_removed, external_gateway_added): agent.process_router(ri) self.assertEqual(external_gateway_added.call_count, 1) self.assertFalse(external_gateway_removed.called) self.assertFalse(internal_network_removed.called) internal_network_added.assert_called_once_with( ri, internal_port) self.assertEqual(self.mock_driver.unplug.call_count, len(stale_devnames)) calls = [mock.call(stale_devname, namespace=ri.ns_name, prefix=l3_agent.INTERNAL_DEV_PREFIX) for stale_devname in stale_devnames] self.mock_driver.unplug.assert_has_calls(calls, any_order=True) def test_process_router_delete_stale_external_devices(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) stale_devlist = [FakeDev('qg-a1b2c3d4-e5')] stale_devnames = [dev.name for dev in stale_devlist] router = prepare_router_data(enable_snat=True, num_internal_ports=1) del router['gw_port'] ri = l3router.RouterInfo(router['id'], self.conf.root_helper, router=router) self.mock_ip.get_devices.return_value = stale_devlist agent.process_router(ri) self.mock_driver.unplug.assert_called_with( stale_devnames[0], bridge="br-ex", namespace=ri.ns_name, prefix=l3_agent.EXTERNAL_DEV_PREFIX) def test_router_deleted(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) agent._queue = mock.Mock() agent.router_deleted(None, FAKE_ID) self.assertEqual(1, agent._queue.add.call_count) def test_routers_updated(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) agent._queue = mock.Mock() agent.routers_updated(None, [FAKE_ID]) self.assertEqual(1, agent._queue.add.call_count) def test_removed_from_agent(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) agent._queue = mock.Mock() agent.router_removed_from_agent(None, {'router_id': FAKE_ID}) self.assertEqual(1, agent._queue.add.call_count) def test_added_to_agent(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) agent._queue = mock.Mock() agent.router_added_to_agent(None, [FAKE_ID]) self.assertEqual(1, agent._queue.add.call_count) def test_destroy_fip_namespace(self): namespaces = ['qrouter-foo', 'qrouter-bar'] 
self.mock_ip.get_namespaces.return_value = namespaces self.mock_ip.get_devices.return_value = [FakeDev('fpr-aaaa'), FakeDev('fg-aaaa')] agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) agent._destroy_fip_namespace(namespaces[0]) self.mock_driver.unplug.assert_called_once_with('fg-aaaa', bridge='br-ex', prefix='fg-', namespace='qrouter' '-foo') self.mock_ip.del_veth.assert_called_once_with('fpr-aaaa') def test_destroy_namespace(self): namespace = 'qrouter-bar' self.mock_ip.get_namespaces.return_value = [namespace] self.mock_ip.get_devices.return_value = [FakeDev('qr-aaaa'), FakeDev('rfp-aaaa')] agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) agent._destroy_namespace(namespace) self.mock_driver.unplug.assert_called_once_with('qr-aaaa', prefix='qr-', namespace='qrouter' '-bar') self.mock_ip.del_veth.assert_called_once_with('rfp-aaaa') def test_destroy_router_namespace_skips_ns_removal(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) agent._destroy_router_namespace("fakens") self.assertEqual(self.mock_ip.netns.delete.call_count, 0) def test_destroy_router_namespace_removes_ns(self): self.conf.set_override('router_delete_namespaces', True) agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) agent._destroy_router_namespace("fakens") self.mock_ip.netns.delete.assert_called_once_with("fakens") def _configure_metadata_proxy(self, enableflag=True): if not enableflag: self.conf.set_override('enable_metadata_proxy', False) agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) router_id = _uuid() router = {'id': router_id, 'external_gateway_info': {}, 'routes': [], 'distributed': False} driver = metadata_driver.MetadataDriver with mock.patch.object( driver, '_destroy_metadata_proxy') as destroy_proxy: with mock.patch.object( driver, '_spawn_metadata_proxy') as spawn_proxy: agent._process_added_router(router) if enableflag: spawn_proxy.assert_called_with(router_id, mock.ANY, mock.ANY) else: self.assertFalse(spawn_proxy.call_count) agent._router_removed(router_id) if enableflag: destroy_proxy.assert_called_with(router_id, mock.ANY, mock.ANY) else: self.assertFalse(destroy_proxy.call_count) def test_enable_metadata_proxy(self): self._configure_metadata_proxy() def test_disable_metadata_proxy_spawn(self): self._configure_metadata_proxy(enableflag=False) def test_router_id_specified_in_conf(self): self.conf.set_override('use_namespaces', False) self.conf.set_override('router_id', '') self.assertRaises(SystemExit, l3_agent.L3NATAgent, HOSTNAME, self.conf) self.conf.set_override('router_id', '1234') agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) self.assertEqual('1234', agent.conf.router_id) self.assertFalse(agent._clean_stale_namespaces) def test_process_router_if_compatible_with_no_ext_net_in_conf(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) self.plugin_api.get_external_network_id.return_value = 'aaa' router = {'id': _uuid(), 'routes': [], 'admin_state_up': True, 'external_gateway_info': {'network_id': 'aaa'}} agent._process_router_if_compatible(router) self.assertIn(router['id'], agent.router_info) self.plugin_api.get_external_network_id.assert_called_with( agent.context) def test_process_router_if_compatible_with_cached_ext_net(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) self.plugin_api.get_external_network_id.return_value = 'aaa' agent.target_ex_net_id = 'aaa' router = {'id': _uuid(), 'routes': [], 'admin_state_up': True, 'external_gateway_info': {'network_id': 'aaa'}} agent._process_router_if_compatible(router) self.assertIn(router['id'], agent.router_info) 
self.assertFalse(self.plugin_api.get_external_network_id.called) def test_process_router_if_compatible_with_stale_cached_ext_net(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) self.plugin_api.get_external_network_id.return_value = 'aaa' agent.target_ex_net_id = 'bbb' router = {'id': _uuid(), 'routes': [], 'admin_state_up': True, 'external_gateway_info': {'network_id': 'aaa'}} agent._process_router_if_compatible(router) self.assertIn(router['id'], agent.router_info) self.plugin_api.get_external_network_id.assert_called_with( agent.context) def test_process_router_if_compatible_w_no_ext_net_and_2_net_plugin(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) router = {'id': _uuid(), 'routes': [], 'admin_state_up': True, 'external_gateway_info': {'network_id': 'aaa'}} agent.router_info = {} self.plugin_api.get_external_network_id.side_effect = ( n_exc.TooManyExternalNetworks()) self.assertRaises(n_exc.TooManyExternalNetworks, agent._process_router_if_compatible, router) self.assertNotIn(router['id'], agent.router_info) def test_process_router_if_compatible_with_ext_net_in_conf(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) self.plugin_api.get_external_network_id.return_value = 'aaa' router = {'id': _uuid(), 'routes': [], 'admin_state_up': True, 'external_gateway_info': {'network_id': 'bbb'}} agent.router_info = {} self.conf.set_override('gateway_external_network_id', 'aaa') self.assertRaises(n_exc.RouterNotCompatibleWithAgent, agent._process_router_if_compatible, router) self.assertNotIn(router['id'], agent.router_info) def test_process_router_if_compatible_with_no_bridge_no_ext_net(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) self.plugin_api.get_external_network_id.return_value = 'aaa' router = {'id': _uuid(), 'routes': [], 'admin_state_up': True, 'external_gateway_info': {'network_id': 'aaa'}} agent.router_info = {} self.conf.set_override('external_network_bridge', '') agent._process_router_if_compatible(router) self.assertIn(router['id'], agent.router_info) def test_nonexistent_interface_driver(self): self.conf.set_override('interface_driver', None) with mock.patch.object(l3_agent, 'LOG') as log: self.assertRaises(SystemExit, l3_agent.L3NATAgent, HOSTNAME, self.conf) msg = 'An interface driver must be specified' log.error.assert_called_once_with(msg) self.conf.set_override('interface_driver', 'wrong_driver') with mock.patch.object(l3_agent, 'LOG') as log: self.assertRaises(SystemExit, l3_agent.L3NATAgent, HOSTNAME, self.conf) msg = _LE("Error importing interface driver '%s'") log.error.assert_called_once_with(msg, 'wrong_driver') def _cleanup_namespace_test(self, stale_namespace_list, router_list, other_namespaces): self.conf.set_override('router_delete_namespaces', True) good_namespace_list = [l3_agent.NS_PREFIX + r['id'] for r in router_list] good_namespace_list += [dvr.SNAT_NS_PREFIX + r['id'] for r in router_list] self.mock_ip.get_namespaces.return_value = (stale_namespace_list + good_namespace_list + other_namespaces) agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) self.assertTrue(agent._clean_stale_namespaces) pm = self.external_process.return_value pm.reset_mock() agent._destroy_router_namespace = mock.MagicMock() agent._destroy_snat_namespace = mock.MagicMock() ns_list = agent._list_namespaces() agent._cleanup_namespaces(ns_list, [r['id'] for r in router_list]) # Expect process manager to disable metadata proxy per qrouter ns qrouters = [n for n in stale_namespace_list if n.startswith(l3_agent.NS_PREFIX)] 
self.assertEqual(agent._destroy_router_namespace.call_count, len(qrouters)) self.assertEqual(agent._destroy_snat_namespace.call_count, len(stale_namespace_list) - len(qrouters)) expected_args = [mock.call(ns) for ns in qrouters] agent._destroy_router_namespace.assert_has_calls(expected_args, any_order=True) self.assertFalse(agent._clean_stale_namespaces) def test_cleanup_namespace(self): self.conf.set_override('router_id', None) stale_namespaces = [l3_agent.NS_PREFIX + 'foo', l3_agent.NS_PREFIX + 'bar', dvr.SNAT_NS_PREFIX + 'foo'] other_namespaces = ['unknown'] self._cleanup_namespace_test(stale_namespaces, [], other_namespaces) def test_cleanup_namespace_with_registered_router_ids(self): self.conf.set_override('router_id', None) stale_namespaces = [l3_agent.NS_PREFIX + 'cccc', l3_agent.NS_PREFIX + 'eeeee', dvr.SNAT_NS_PREFIX + 'fffff'] router_list = [{'id': 'foo', 'distributed': False}, {'id': 'aaaa', 'distributed': False}] other_namespaces = ['qdhcp-aabbcc', 'unknown'] self._cleanup_namespace_test(stale_namespaces, router_list, other_namespaces) def test_cleanup_namespace_with_conf_router_id(self): self.conf.set_override('router_id', 'bbbbb') stale_namespaces = [l3_agent.NS_PREFIX + 'cccc', l3_agent.NS_PREFIX + 'eeeee', l3_agent.NS_PREFIX + self.conf.router_id] router_list = [{'id': 'foo', 'distributed': False}, {'id': 'aaaa', 'distributed': False}] other_namespaces = ['qdhcp-aabbcc', 'unknown'] self._cleanup_namespace_test(stale_namespaces, router_list, other_namespaces) def test_create_dvr_gateway(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) router = prepare_router_data() ri = l3router.RouterInfo(router['id'], self.conf.root_helper, router=router) port_id = _uuid() dvr_gw_port = {'fixed_ips': [{'ip_address': '20.0.0.30', 'subnet_id': _uuid()}], 'subnet': {'gateway_ip': '20.0.0.1'}, 'id': port_id, 'network_id': _uuid(), 'mac_address': 'ca:fe:de:ad:be:ef', 'ip_cidr': '20.0.0.30/24'} interface_name = agent.get_snat_int_device_name(port_id) self.device_exists.return_value = False agent._create_dvr_gateway(ri, dvr_gw_port, interface_name, self.snat_ports) # check 2 internal ports are plugged # check 1 ext-gw-port is plugged self.assertEqual(self.mock_driver.plug.call_count, 3) self.assertEqual(self.mock_driver.init_l3.call_count, 3) def test_agent_gateway_added(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) network_id = _uuid() port_id = _uuid() agent_gw_port = {'fixed_ips': [{'ip_address': '20.0.0.30', 'subnet_id': _uuid()}], 'subnet': {'gateway_ip': '20.0.0.1'}, 'id': port_id, 'network_id': network_id, 'mac_address': 'ca:fe:de:ad:be:ef', 'ip_cidr': '20.0.0.30/24'} fip_ns_name = ( agent.get_fip_ns_name(str(network_id))) interface_name = ( agent.get_fip_ext_device_name(port_id)) self.device_exists.return_value = False agent.agent_gateway_added(fip_ns_name, agent_gw_port, interface_name) self.assertEqual(self.mock_driver.plug.call_count, 1) self.assertEqual(self.mock_driver.init_l3.call_count, 1) if self.conf.use_namespaces: self.send_arp.assert_called_once_with(fip_ns_name, interface_name, '20.0.0.30', mock.ANY, mock.ANY) else: self.utils_exec.assert_any_call( check_exit_code=True, root_helper=self.conf.root_helper) def test_create_rtr_2_fip_link(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) router = prepare_router_data() fip = {'id': _uuid(), 'host': HOSTNAME, 'floating_ip_address': '15.1.2.3', 'fixed_ip_address': '192.168.0.1', 'floating_network_id': _uuid(), 'port_id': _uuid()} ri = dvr_router.DvrRouter(router['id'], self.conf.root_helper, router=router) 
rtr_2_fip_name = agent.get_rtr_int_device_name(ri.router_id) fip_2_rtr_name = agent.get_fip_int_device_name(ri.router_id) fip_ns_name = agent.get_fip_ns_name(str(fip['floating_network_id'])) with mock.patch.object(lla.LinkLocalAllocator, '_write'): self.device_exists.return_value = False agent.create_rtr_2_fip_link(ri, fip['floating_network_id']) self.mock_ip.add_veth.assert_called_with(rtr_2_fip_name, fip_2_rtr_name, fip_ns_name) # TODO(mrsmith): add more aasserts - self.mock_ip_dev.route.add_gateway.assert_called_once_with( '169.254.31.29', table=16) # TODO(mrsmith): test _create_agent_gateway_port def test_create_rtr_2_fip_link_already_exists(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) router = prepare_router_data() ri = dvr_router.DvrRouter(router['id'], self.conf.root_helper, router=router) self.device_exists.return_value = True with mock.patch.object(lla.LinkLocalAllocator, '_write'): agent.create_rtr_2_fip_link(ri, {}) self.assertFalse(self.mock_ip.add_veth.called) def test_floating_ip_added_dist(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) router = prepare_router_data() ri = dvr_router.DvrRouter(router['id'], self.conf.root_helper, router=router) agent_gw_port = {'fixed_ips': [{'ip_address': '20.0.0.30', 'subnet_id': _uuid()}], 'subnet': {'gateway_ip': '20.0.0.1'}, 'id': _uuid(), 'network_id': _uuid(), 'mac_address': 'ca:fe:de:ad:be:ef', 'ip_cidr': '20.0.0.30/24'} fip = {'id': _uuid(), 'host': HOSTNAME, 'floating_ip_address': '15.1.2.3', 'fixed_ip_address': '192.168.0.1', 'floating_network_id': _uuid(), 'port_id': _uuid()} agent.agent_gateway_port = agent_gw_port ri.rtr_fip_subnet = lla.LinkLocalAddressPair('169.254.30.42/31') ri.dist_fip_count = 0 ip_cidr = common_utils.ip_to_cidr(fip['floating_ip_address']) agent.floating_ip_added_dist(ri, fip, ip_cidr) self.mock_rule.add_rule_from.assert_called_with('192.168.0.1', 16, FIP_PRI) # TODO(mrsmith): add more asserts @mock.patch.object(l3_agent.L3NATAgent, '_fip_ns_unsubscribe') @mock.patch.object(lla.LinkLocalAllocator, '_write') def test_floating_ip_removed_dist(self, write, unsubscribe): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) router = prepare_router_data() agent_gw_port = {'fixed_ips': [{'ip_address': '20.0.0.30', 'subnet_id': _uuid()}], 'subnet': {'gateway_ip': '20.0.0.1'}, 'id': _uuid(), 'network_id': _uuid(), 'mac_address': 'ca:fe:de:ad:be:ef', 'ip_cidr': '20.0.0.30/24'} fip_cidr = '11.22.33.44/24' ri = dvr_router.DvrRouter(router['id'], self.conf.root_helper, router=router) ri.dist_fip_count = 2 agent.fip_ns_subscribers.add(ri.router_id) ri.floating_ips_dict['11.22.33.44'] = FIP_PRI ri.fip_2_rtr = '11.22.33.42' ri.rtr_2_fip = '11.22.33.40' agent.agent_gateway_port = agent_gw_port s = lla.LinkLocalAddressPair('169.254.30.42/31') ri.rtr_fip_subnet = s agent.floating_ip_removed_dist(ri, fip_cidr) self.mock_rule.delete_rule_priority.assert_called_with(FIP_PRI) self.mock_ip_dev.route.delete_route.assert_called_with(fip_cidr, str(s.ip)) self.assertFalse(unsubscribe.called, '_fip_ns_unsubscribe called!') with mock.patch.object(agent, '_destroy_fip_namespace') as f: ri.dist_fip_count = 1 fip_ns_name = agent.get_fip_ns_name( str(agent._fetch_external_net_id())) ri.rtr_fip_subnet = agent.local_subnets.allocate(ri.router_id) _, fip_to_rtr = ri.rtr_fip_subnet.get_pair() agent.floating_ip_removed_dist(ri, fip_cidr) self.mock_ip.del_veth.assert_called_once_with( agent.get_fip_int_device_name(router['id'])) self.mock_ip_dev.route.delete_gateway.assert_called_once_with( str(fip_to_rtr.ip), table=16) 
f.assert_called_once_with(fip_ns_name) unsubscribe.assert_called_once_with(ri.router_id) def test_get_service_plugin_list(self): service_plugins = [p_const.L3_ROUTER_NAT] self.plugin_api.get_service_plugin_list.return_value = service_plugins agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) self.assertEqual(service_plugins, agent.neutron_service_plugins) self.assertTrue(self.plugin_api.get_service_plugin_list.called) def test_get_service_plugin_list_failed(self): raise_rpc = messaging.RemoteError() self.plugin_api.get_service_plugin_list.side_effect = raise_rpc agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) self.assertIsNone(agent.neutron_service_plugins) self.assertTrue(self.plugin_api.get_service_plugin_list.called) def test_get_service_plugin_list_retried(self): raise_timeout = messaging.MessagingTimeout() # Raise a timeout the first 2 times it calls # get_service_plugin_list then return a empty tuple self.plugin_api.get_service_plugin_list.side_effect = ( raise_timeout, raise_timeout, tuple() ) agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) self.assertEqual(agent.neutron_service_plugins, tuple()) def test_get_service_plugin_list_retried_max(self): raise_timeout = messaging.MessagingTimeout() # Raise a timeout 5 times self.plugin_api.get_service_plugin_list.side_effect = ( (raise_timeout, ) * 5 ) self.assertRaises(messaging.MessagingTimeout, l3_agent.L3NATAgent, HOSTNAME, self.conf) def test__fip_ns_subscribe_is_first_true(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) router_id = _uuid() is_first = agent._fip_ns_subscribe(router_id) self.assertTrue(is_first) self.assertEqual(len(agent.fip_ns_subscribers), 1) def test__fip_ns_subscribe_is_first_false(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) router_id = _uuid() router2_id = _uuid() agent._fip_ns_subscribe(router_id) is_first = agent._fip_ns_subscribe(router2_id) self.assertFalse(is_first) self.assertEqual(len(agent.fip_ns_subscribers), 2) def test__fip_ns_unsubscribe_is_last_true(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) router_id = _uuid() agent.fip_ns_subscribers.add(router_id) is_last = agent._fip_ns_unsubscribe(router_id) self.assertTrue(is_last) self.assertEqual(len(agent.fip_ns_subscribers), 0) def test__fip_ns_unsubscribe_is_last_false(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) router_id = _uuid() router2_id = _uuid() agent.fip_ns_subscribers.add(router_id) agent.fip_ns_subscribers.add(router2_id) is_last = agent._fip_ns_unsubscribe(router_id) self.assertFalse(is_last) self.assertEqual(len(agent.fip_ns_subscribers), 1) def test_external_gateway_removed_ext_gw_port_and_fip(self): self.conf.set_override('state_path', '/tmp') self.conf.set_override('router_delete_namespaces', True) agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) agent.conf.agent_mode = 'dvr' agent.agent_gateway_port = {'fixed_ips': [{'ip_address': '20.0.0.30', 'subnet_id': _uuid()}], 'subnet': {'gateway_ip': '20.0.0.1'}, 'id': _uuid(), 'network_id': _uuid(), 'mac_address': 'ca:fe:de:ad:be:ef', 'ip_cidr': '20.0.0.30/24'} external_net_id = _uuid() agent._fetch_external_net_id = mock.Mock(return_value=external_net_id) router = prepare_router_data(num_internal_ports=2) router['distributed'] = True router['gw_port_host'] = HOSTNAME ri = dvr_router.DvrRouter(router['id'], self.conf.root_helper, router) vm_floating_ip = '19.4.4.2' ri.floating_ips_dict[vm_floating_ip] = FIP_PRI ri.dist_fip_count = 1 ri.ex_gw_port = ri.router['gw_port'] del ri.router['gw_port'] ri.rtr_fip_subnet = 
agent.local_subnets.allocate(ri.router_id) _, fip_to_rtr = ri.rtr_fip_subnet.get_pair() nat = ri.iptables_manager.ipv4['nat'] nat.clear_rules_by_tag = mock.Mock() nat.add_rule = mock.Mock() self.mock_ip.get_devices.return_value = [ FakeDev(agent.get_fip_ext_device_name(_uuid()))] self.mock_ip_dev.addr.list.return_value = [ {'cidr': vm_floating_ip + '/32'}, {'cidr': '19.4.4.1/24'}] self.device_exists.return_value = True agent.external_gateway_removed( ri, ri.ex_gw_port, agent.get_external_device_name(ri.ex_gw_port['id'])) self.mock_ip.del_veth.assert_called_once_with( agent.get_fip_int_device_name(ri.router['id'])) self.mock_ip_dev.route.delete_gateway.assert_called_once_with( str(fip_to_rtr.ip), table=dvr.FIP_RT_TBL) self.assertEqual(ri.dist_fip_count, 0) self.assertEqual(len(agent.fip_ns_subscribers), 0) self.assertEqual(self.mock_driver.unplug.call_count, 1) self.assertIsNone(agent.agent_gateway_port) self.mock_ip.netns.delete.assert_called_once_with( agent.get_fip_ns_name(external_net_id)) self.assertFalse(nat.add_rule.called) nat.clear_rules_by_tag.assert_called_once_with('floating_ip') def test_spawn_radvd(self): router = prepare_router_data() conffile = '/fake/radvd.conf' pidfile = '/fake/radvd.pid' agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) # we don't want the whole process manager to be mocked to be # able to catch execute() calls self.external_process_p.stop() self.ip_cls_p.stop() get_pid_file_name = ('neutron.agent.linux.external_process.' 'ProcessManager.get_pid_file_name') with mock.patch('neutron.agent.linux.utils.execute') as execute: with mock.patch(get_pid_file_name) as get_pid: get_pid.return_value = pidfile ra._spawn_radvd(router['id'], conffile, agent.get_ns_name(router['id']), self.conf.root_helper) cmd = execute.call_args[0][0] self.assertIn('radvd', cmd) _join = lambda *args: ' '.join(args) cmd = _join(*cmd) self.assertIn(_join('-C', conffile), cmd) self.assertIn(_join('-p', pidfile), cmd) self.assertIn(_join('-m', 'syslog'), cmd) def test_generate_radvd_conf_other_flag(self): # we don't check other flag for stateful since it's redundant # for this mode and can be ignored by clients, as per RFC4861 expected = {l3_constants.IPV6_SLAAC: False, l3_constants.DHCPV6_STATELESS: True} for ra_mode, flag_set in expected.iteritems(): router = prepare_router_data() ri = self._process_router_ipv6_interface_added(router, ra_mode=ra_mode) ra._generate_radvd_conf(ri.router['id'], router[l3_constants.INTERFACE_KEY], mock.Mock()) asserter = self.assertIn if flag_set else self.assertNotIn asserter('AdvOtherConfigFlag on;', self.utils_replace_file.call_args[0][1]) def test__put_fips_in_error_state(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) ri = mock.Mock() ri.router.get.return_value = [{'id': mock.sentinel.id1}, {'id': mock.sentinel.id2}] statuses = agent._put_fips_in_error_state(ri) expected = [{mock.sentinel.id1: l3_constants.FLOATINGIP_STATUS_ERROR, mock.sentinel.id2: l3_constants.FLOATINGIP_STATUS_ERROR}] self.assertNotEqual(expected, statuses) def test__process_snat_dnat_for_fip(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) agent.process_router_floating_ip_nat_rules = mock.Mock( side_effect=Exception) self.assertRaises(n_exc.FloatingIpSetupException, agent._process_snat_dnat_for_fip, mock.sentinel.ri) agent.process_router_floating_ip_nat_rules.assert_called_with( mock.sentinel.ri) def test__configure_fip_addresses(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) agent.process_router_floating_ip_addresses = mock.Mock( side_effect=Exception) 
self.assertRaises(n_exc.FloatingIpSetupException, agent._configure_fip_addresses, mock.sentinel.ri, mock.sentinel.ex_gw_port) agent.process_router_floating_ip_addresses.assert_called_with( mock.sentinel.ri, mock.sentinel.ex_gw_port)
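

# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the original test suite): the SNAT test
# test_handle_router_snat_rules_add_rules above asserts on plain iptables rule
# strings built from the iptables manager's wrap_name.  The hypothetical
# helper below only restates that string format so it is easier to read in
# isolation; the real rules are generated by the L3 agent and its
# IptablesManager, not by this function, and the wrap name in the usage note
# is an assumed example value.
def _example_expected_snat_rules(wrap_name, out_iface, gw_ip):
    """Return (jump_float_rule, snat_rule) in the format the tests assert."""
    jump_float_rule = "-A %s-snat -j %s-float-snat" % (wrap_name, wrap_name)
    snat_rule = "-A %s-snat -o %s -j SNAT --to-source %s" % (
        wrap_name, out_iface, gw_ip)
    return jump_float_rule, snat_rule

# Usage (assumed wrap name):
#   _example_expected_snat_rules('neutron-l3-agent', 'iface', '192.168.1.4')
#   -> ('-A neutron-l3-agent-snat -j neutron-l3-agent-float-snat',
#       '-A neutron-l3-agent-snat -o iface -j SNAT --to-source 192.168.1.4')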
blueboxgroup/neutron
neutron/tests/unit/test_l3_agent.py
Python
apache-2.0
100,926
# Copyright 2011 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import stubout import webob from nova import exception from nova import context from nova import test from nova import log as logging from nova.api.openstack.contrib import volumetypes from nova.volume import volume_types from nova.tests.api.openstack import fakes LOG = logging.getLogger('nova.tests.api.openstack.test_volume_types') last_param = {} def stub_volume_type(id): specs = { "key1": "value1", "key2": "value2", "key3": "value3", "key4": "value4", "key5": "value5"} return dict(id=id, name='vol_type_%s' % str(id), extra_specs=specs) def return_volume_types_get_all_types(context): return dict(vol_type_1=stub_volume_type(1), vol_type_2=stub_volume_type(2), vol_type_3=stub_volume_type(3)) def return_empty_volume_types_get_all_types(context): return {} def return_volume_types_get_volume_type(context, id): if id == "777": raise exception.VolumeTypeNotFound(volume_type_id=id) return stub_volume_type(int(id)) def return_volume_types_destroy(context, name): if name == "777": raise exception.VolumeTypeNotFoundByName(volume_type_name=name) pass def return_volume_types_create(context, name, specs): pass def return_volume_types_get_by_name(context, name): if name == "777": raise exception.VolumeTypeNotFoundByName(volume_type_name=name) return stub_volume_type(int(name.split("_")[2])) class VolumeTypesApiTest(test.TestCase): def setUp(self): super(VolumeTypesApiTest, self).setUp() fakes.stub_out_key_pair_funcs(self.stubs) self.controller = volumetypes.VolumeTypesController() def tearDown(self): self.stubs.UnsetAll() super(VolumeTypesApiTest, self).tearDown() def test_volume_types_index(self): self.stubs.Set(volume_types, 'get_all_types', return_volume_types_get_all_types) req = fakes.HTTPRequest.blank('/v1.1/123/os-volume-types') res_dict = self.controller.index(req) self.assertEqual(3, len(res_dict)) for name in ['vol_type_1', 'vol_type_2', 'vol_type_3']: self.assertEqual(name, res_dict[name]['name']) self.assertEqual('value1', res_dict[name]['extra_specs']['key1']) def test_volume_types_index_no_data(self): self.stubs.Set(volume_types, 'get_all_types', return_empty_volume_types_get_all_types) req = fakes.HTTPRequest.blank('/v1.1/123/os-volume-types') res_dict = self.controller.index(req) self.assertEqual(0, len(res_dict)) def test_volume_types_show(self): self.stubs.Set(volume_types, 'get_volume_type', return_volume_types_get_volume_type) req = fakes.HTTPRequest.blank('/v1.1/123/os-volume-types/1') res_dict = self.controller.show(req, 1) self.assertEqual(1, len(res_dict)) self.assertEqual('vol_type_1', res_dict['volume_type']['name']) def test_volume_types_show_not_found(self): self.stubs.Set(volume_types, 'get_volume_type', return_volume_types_get_volume_type) req = fakes.HTTPRequest.blank('/v1.1/123/os-volume-types/777') self.assertRaises(webob.exc.HTTPNotFound, self.controller.show, req, '777') def test_volume_types_delete(self): self.stubs.Set(volume_types, 'get_volume_type', 
return_volume_types_get_volume_type) self.stubs.Set(volume_types, 'destroy', return_volume_types_destroy) req = fakes.HTTPRequest.blank('/v1.1/123/os-volume-types/1') self.controller.delete(req, 1) def test_volume_types_delete_not_found(self): self.stubs.Set(volume_types, 'get_volume_type', return_volume_types_get_volume_type) self.stubs.Set(volume_types, 'destroy', return_volume_types_destroy) req = fakes.HTTPRequest.blank('/v1.1/123/os-volume-types/777') self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete, req, '777') def test_create(self): self.stubs.Set(volume_types, 'create', return_volume_types_create) self.stubs.Set(volume_types, 'get_volume_type_by_name', return_volume_types_get_by_name) body = {"volume_type": {"name": "vol_type_1", "extra_specs": {"key1": "value1"}}} req = fakes.HTTPRequest.blank('/v1.1/123/os-volume-types') res_dict = self.controller.create(req, body) self.assertEqual(1, len(res_dict)) self.assertEqual('vol_type_1', res_dict['volume_type']['name']) def test_create_empty_body(self): self.stubs.Set(volume_types, 'create', return_volume_types_create) self.stubs.Set(volume_types, 'get_volume_type_by_name', return_volume_types_get_by_name) req = fakes.HTTPRequest.blank('/v1.1/123/os-volume-types') self.assertRaises(webob.exc.HTTPUnprocessableEntity, self.controller.create, req, '')
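

# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the original tests): the stubs above encode
# the volume type id in the name ("vol_type_<id>") and recover it with
# name.split("_")[2].  The standalone helper below simply mirrors that naming
# convention so the round trip is easy to see; it restates the stub logic only
# and does not describe nova API behaviour.
def _example_vol_type_id_from_name(name):
    """Extract the integer id from a 'vol_type_<id>' style stub name."""
    return int(name.split("_")[2])

# Usage:
#   _example_vol_type_id_from_name('vol_type_2') == 2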
salv-orlando/MyRepo
nova/tests/api/openstack/contrib/test_volume_types.py
Python
apache-2.0
5,886
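# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the original file): test_MixedMok_Kgg below
# checks the linear-coregionalization identity
#     Kff[n, p, m, q] = sum_l W[p, l] * Kgg[l, n, m] * W[q, l]
# i.e. Kff[n, :, m, :] equals W @ diag(Kgg[:, n, m]) @ W.T for every input
# pair (n, m).  The self-contained NumPy check below verifies that the einsum
# used in that test matches an explicit per-pair loop on random data; the
# shapes L, N, P here are arbitrary example values, not the test constants.
def _example_check_coregionalization_identity(L=2, N=4, P=3, seed=0):
    import numpy as np

    rng = np.random.RandomState(seed)
    Kgg = rng.randn(L, N, N)  # one N x N covariance block per latent GP
    W = rng.randn(P, L)  # mixing matrix

    Kff_einsum = np.einsum("lnm,pl,ql->npmq", Kgg, W, W)

    Kff_loop = np.zeros((N, P, N, P))
    for n in range(N):
        for m in range(N):
            Kff_loop[n, :, m, :] = W @ np.diag(Kgg[:, n, m]) @ W.T

    np.testing.assert_allclose(Kff_einsum, Kff_loop)
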
from typing import Callable, List, Sequence, Tuple import numpy as np import pytest import scipy import tensorflow as tf from _pytest.fixtures import SubRequest import gpflow import gpflow.inducing_variables.multioutput as mf import gpflow.kernels.multioutput as mk from gpflow import set_trainable from gpflow.base import AnyNDArray, RegressionData from gpflow.conditionals import sample_conditional from gpflow.conditionals.util import ( fully_correlated_conditional, fully_correlated_conditional_repeat, independent_interdomain_conditional, sample_mvn, ) from gpflow.config import default_float, default_jitter from gpflow.inducing_variables import InducingPoints from gpflow.kernels import SquaredExponential from gpflow.likelihoods import Gaussian from gpflow.models import SVGP float_type = default_float() rng = np.random.RandomState(99201) # ------------------------------------------ # Helpers # ------------------------------------------ def predict_all( models: Sequence[SVGP], Xnew: tf.Tensor, full_cov: bool, full_output_cov: bool ) -> Tuple[List[tf.Tensor], List[tf.Tensor]]: """ Returns the mean and variance of f(Xnew) for each model in `models`. """ ms, vs = [], [] for model in models: m, v = model.predict_f(Xnew, full_cov=full_cov, full_output_cov=full_output_cov) ms.append(m) vs.append(v) return ms, vs def assert_all_array_elements_almost_equal(arr: Sequence[tf.Tensor]) -> None: """ Check if consecutive elements of `arr` are almost equal. """ for i in range(len(arr) - 1): np.testing.assert_allclose(arr[i], arr[i + 1], atol=1e-5) def check_equality_predictions( data: RegressionData, models: Sequence[SVGP], decimal: int = 3 ) -> None: """ Executes a couple of checks to compare the equality of predictions of different models. The models should be configured with the same training data (X, Y). The following checks are done: - check if elbo is (almost) equal for all models - check if predicted mean is (almost) equal - check if predicted variance is (almost) equal. All possible variances over the inputs and outputs are calculated and equality is checked. - check if variances within model are consistent. Parts of the covariance matrices should overlap, and this is tested. """ elbos = [m.elbo(data) for m in models] # Check equality of log likelihood assert_all_array_elements_almost_equal(elbos) # Predict: full_cov = True and full_output_cov = True means_tt, vars_tt = predict_all(models, Data.Xs, full_cov=True, full_output_cov=True) # Predict: full_cov = True and full_output_cov = False means_tf, vars_tf = predict_all(models, Data.Xs, full_cov=True, full_output_cov=False) # Predict: full_cov = False and full_output_cov = True means_ft, vars_ft = predict_all(models, Data.Xs, full_cov=False, full_output_cov=True) # Predict: full_cov = False and full_output_cov = False means_ff, vars_ff = predict_all(models, Data.Xs, full_cov=False, full_output_cov=False) # check equality of all the means all_means = means_tt + means_tf + means_ft + means_ff assert_all_array_elements_almost_equal(all_means) # check equality of all the variances within a category # (e.g. full_cov=True and full_output_cov=False) for var in [vars_tt, vars_tf, vars_ft, vars_ff]: assert_all_array_elements_almost_equal(var) # Here we check that the variance in different categories are equal # after transforming to the right shape. 
var_tt = vars_tt[0] # N x P x N x P var_tf = vars_tf[0] # P x N x c var_ft = vars_ft[0] # N x P x P var_ff = vars_ff[0] # N x P np.testing.assert_almost_equal( np.diagonal(var_tt, axis1=1, axis2=3), np.transpose(var_tf, [1, 2, 0]), decimal=decimal, ) np.testing.assert_almost_equal( np.diagonal(var_tt, axis1=0, axis2=2), np.transpose(var_ft, [1, 2, 0]), decimal=decimal, ) np.testing.assert_almost_equal( np.diagonal(np.diagonal(var_tt, axis1=0, axis2=2)), var_ff, decimal=decimal ) def expand_cov(q_sqrt: tf.Tensor, W: tf.Tensor) -> tf.Tensor: """ :param G: cholesky of covariance matrices, L x M x M :param W: mixing matrix (square), L x L :return: cholesky of 1 x LM x LM covariance matrix """ q_cov = np.matmul(q_sqrt, q_sqrt.transpose([0, 2, 1])) # [L, M, M] q_cov_expanded = scipy.linalg.block_diag(*q_cov) # [LM, LM] q_sqrt_expanded = np.linalg.cholesky(q_cov_expanded) # [LM, LM] return q_sqrt_expanded[None, ...] def create_q_sqrt(M: int, L: int) -> AnyNDArray: """ returns an array of L lower triangular matrices of size M x M """ return np.array([np.tril(rng.randn(M, M)) for _ in range(L)]) # [L, M, M] # ------------------------------------------ # Data classes: storing constants # ------------------------------------------ class Data: N, Ntest = 20, 5 D = 1 # input dimension M = 3 # inducing points L = 2 # latent gps P = 3 # output dimension MAXITER = int(15e2) X = tf.random.normal((N,), dtype=tf.float64)[:, None] * 10 - 5 G = np.hstack((0.5 * np.sin(3 * X) + X, 3.0 * np.cos(X) - X)) Ptrue = np.array([[0.5, -0.3, 1.5], [-0.4, 0.43, 0.0]]) # [L, P] Y = tf.convert_to_tensor(G @ Ptrue) G = tf.convert_to_tensor(np.hstack((0.5 * np.sin(3 * X) + X, 3.0 * np.cos(X) - X))) Ptrue = tf.convert_to_tensor(np.array([[0.5, -0.3, 1.5], [-0.4, 0.43, 0.0]])) # [L, P] Y += tf.random.normal(Y.shape, dtype=tf.float64) * [0.2, 0.2, 0.2] Xs = tf.convert_to_tensor(np.linspace(-6, 6, Ntest)[:, None]) data = (X, Y) class DataMixedKernelWithEye(Data): """ Note in this class L == P """ M, L = 4, 3 W = np.eye(L) G = np.hstack( [0.5 * np.sin(3 * Data.X) + Data.X, 3.0 * np.cos(Data.X) - Data.X, 1.0 + Data.X] ) # [N, P] mu_data = tf.random.uniform((M, L), dtype=tf.float64) # [M, L] sqrt_data = create_q_sqrt(M, L) # [L, M, M] mu_data_full = tf.reshape(mu_data @ W, [-1, 1]) # [L, 1] sqrt_data_full = expand_cov(sqrt_data, W) # [1, LM, LM] Y = tf.convert_to_tensor(G @ W) G = tf.convert_to_tensor(G) W = tf.convert_to_tensor(W) sqrt_data = tf.convert_to_tensor(sqrt_data) sqrt_data_full = tf.convert_to_tensor(sqrt_data_full) Y += tf.random.normal(Y.shape, dtype=tf.float64) * tf.ones((L,), dtype=tf.float64) * 0.2 data = (Data.X, Y) class DataMixedKernel(Data): M = 5 L = 2 P = 3 W = rng.randn(P, L) G = np.hstack([0.5 * np.sin(3 * Data.X) + Data.X, 3.0 * np.cos(Data.X) - Data.X]) # [N, L] mu_data = tf.random.normal((M, L), dtype=tf.float64) # [M, L] sqrt_data = create_q_sqrt(M, L) # [L, M, M] Y = tf.convert_to_tensor(G @ W.T) G = tf.convert_to_tensor(G) W = tf.convert_to_tensor(W) sqrt_data = tf.convert_to_tensor(sqrt_data) Y += tf.random.normal(Y.shape, dtype=tf.float64) * tf.ones((P,), dtype=tf.float64) * 0.1 data = (Data.X, Y) # ------------------------------------------ # Test sample conditional # ------------------------------------------ def test_sample_mvn(full_cov: bool) -> None: """ Draws 10,000 samples from a distribution with known mean and covariance. The test checks if the mean and covariance of the samples is close to the true mean and covariance. 
""" N, D = 10000, 2 means = tf.ones((N, D), dtype=float_type) if full_cov: covs = tf.eye(D, batch_shape=[N], dtype=float_type) else: covs = tf.ones((N, D), dtype=float_type) samples = sample_mvn(means, covs, full_cov) samples_mean = np.mean(samples, axis=0) samples_cov = np.cov(samples, rowvar=False) np.testing.assert_array_almost_equal(samples_mean, [1.0, 1.0], decimal=1) np.testing.assert_array_almost_equal(samples_cov, [[1.0, 0.0], [0.0, 1.0]], decimal=1) def test_sample_conditional(whiten: bool, full_cov: bool, full_output_cov: bool) -> None: if full_cov and full_output_cov: return q_mu = tf.random.uniform((Data.M, Data.P), dtype=tf.float64) # [M, P] q_sqrt = tf.convert_to_tensor( [np.tril(tf.random.uniform((Data.M, Data.M), dtype=tf.float64)) for _ in range(Data.P)] ) # [P, M, M] Z = Data.X[: Data.M, ...] # [M, D] Xs: AnyNDArray = np.ones((Data.N, Data.D), dtype=float_type) inducing_variable = InducingPoints(Z) kernel = SquaredExponential() # Path 1 value_f, mean_f, var_f = sample_conditional( Xs, inducing_variable, kernel, q_mu, q_sqrt=q_sqrt, white=whiten, full_cov=full_cov, full_output_cov=full_output_cov, num_samples=int(1e5), ) value_f = value_f.numpy().reshape((-1,) + value_f.numpy().shape[2:]) # Path 2 if full_output_cov: pytest.skip( "sample_conditional with X instead of inducing_variable does not support full_output_cov" ) value_x, mean_x, var_x = sample_conditional( Xs, Z, kernel, q_mu, q_sqrt=q_sqrt, white=whiten, full_cov=full_cov, full_output_cov=full_output_cov, num_samples=int(1e5), ) value_x = value_x.numpy().reshape((-1,) + value_x.numpy().shape[2:]) # check if mean and covariance of samples are similar np.testing.assert_array_almost_equal( np.mean(value_x, axis=0), np.mean(value_f, axis=0), decimal=1 ) np.testing.assert_array_almost_equal( np.cov(value_x, rowvar=False), np.cov(value_f, rowvar=False), decimal=1 ) np.testing.assert_allclose(mean_x, mean_f) np.testing.assert_allclose(var_x, var_f) def test_sample_conditional_mixedkernel() -> None: q_mu = tf.random.uniform((Data.M, Data.L), dtype=tf.float64) # M x L q_sqrt = tf.convert_to_tensor( [np.tril(tf.random.uniform((Data.M, Data.M), dtype=tf.float64)) for _ in range(Data.L)] ) # L x M x M Z = Data.X[: Data.M, ...] 
# M x D N = int(10e5) Xs: AnyNDArray = np.ones((N, Data.D), dtype=float_type) # Path 1: mixed kernel: most efficient route W = np.random.randn(Data.P, Data.L) mixed_kernel = mk.LinearCoregionalization([SquaredExponential() for _ in range(Data.L)], W) optimal_inducing_variable = mf.SharedIndependentInducingVariables(InducingPoints(Z)) value, mean, var = sample_conditional( Xs, optimal_inducing_variable, mixed_kernel, q_mu, q_sqrt=q_sqrt, white=True ) # Path 2: independent kernels, mixed later separate_kernel = mk.SeparateIndependent([SquaredExponential() for _ in range(Data.L)]) fallback_inducing_variable = mf.SharedIndependentInducingVariables(InducingPoints(Z)) value2, mean2, var2 = sample_conditional( Xs, fallback_inducing_variable, separate_kernel, q_mu, q_sqrt=q_sqrt, white=True ) value2 = np.matmul(value2, W.T) # check if mean and covariance of samples are similar np.testing.assert_array_almost_equal(np.mean(value, axis=0), np.mean(value2, axis=0), decimal=1) np.testing.assert_array_almost_equal( np.cov(value, rowvar=False), np.cov(value2, rowvar=False), decimal=1 ) QSqrtFactory = Callable[[tf.Tensor, int], tf.Tensor] @pytest.fixture( name="fully_correlated_q_sqrt_factory", params=[lambda _, __: None, lambda LM, R: tf.eye(LM, batch_shape=(R,))], ) def _q_sqrt_factory_fixture(request: SubRequest) -> QSqrtFactory: return request.param @pytest.mark.parametrize("R", [1, 2, 5]) def test_fully_correlated_conditional_repeat_shapes_fc_and_foc( R: int, fully_correlated_q_sqrt_factory: QSqrtFactory, full_cov: bool, full_output_cov: bool, whiten: bool, ) -> None: L, M, N, P = Data.L, Data.M, Data.N, Data.P Kmm = tf.ones((L * M, L * M)) + default_jitter() * tf.eye(L * M) Kmn = tf.ones((L * M, N, P)) if full_cov and full_output_cov: Knn = tf.ones((N, P, N, P)) expected_v_shape = [R, N, P, N, P] elif not full_cov and full_output_cov: Knn = tf.ones((N, P, P)) expected_v_shape = [R, N, P, P] elif full_cov and not full_output_cov: Knn = tf.ones((P, N, N)) expected_v_shape = [R, P, N, N] else: Knn = tf.ones((N, P)) expected_v_shape = [R, N, P] f = tf.ones((L * M, R)) q_sqrt = fully_correlated_q_sqrt_factory(L * M, R) m, v = fully_correlated_conditional_repeat( Kmn, Kmm, Knn, f, full_cov=full_cov, full_output_cov=full_output_cov, q_sqrt=q_sqrt, white=whiten, ) assert m.shape.as_list() == [R, N, P] assert v.shape.as_list() == expected_v_shape def test_fully_correlated_conditional_repeat_whiten(whiten: bool) -> None: """ This test checks the effect of the `white` flag, which changes the projection matrix `A`. The impact of the flag on the value of `A` can be easily verified by its effect on the predicted mean. While the predicted covariance is also a function of `A` this test does not inspect that value. 
""" N, P = Data.N, Data.P Lm = np.random.randn(1, 1).astype(np.float32) ** 2 Kmm = Lm * Lm + default_jitter() Kmn = tf.ones((1, N, P)) Knn = tf.ones((N, P)) f = np.random.randn(1, 1).astype(np.float32) mean, _ = fully_correlated_conditional_repeat( Kmn, Kmm, Knn, f, white=whiten, ) if whiten: expected_mean = (f * Kmn) / Lm else: expected_mean = (f * Kmn) / Kmm np.testing.assert_allclose(mean, expected_mean, rtol=1e-3) def test_fully_correlated_conditional_shapes_fc_and_foc( fully_correlated_q_sqrt_factory: QSqrtFactory, full_cov: bool, full_output_cov: bool, whiten: bool, ) -> None: L, M, N, P = Data.L, Data.M, Data.N, Data.P Kmm = tf.ones((L * M, L * M)) + default_jitter() * tf.eye(L * M) Kmn = tf.ones((L * M, N, P)) if full_cov and full_output_cov: Knn = tf.ones((N, P, N, P)) expected_v_shape = [N, P, N, P] elif not full_cov and full_output_cov: Knn = tf.ones((N, P, P)) expected_v_shape = [N, P, P] elif full_cov and not full_output_cov: Knn = tf.ones((P, N, N)) expected_v_shape = [P, N, N] else: Knn = tf.ones((N, P)) expected_v_shape = [N, P] f = tf.ones((L * M, 1)) q_sqrt = fully_correlated_q_sqrt_factory(L * M, 1) m, v = fully_correlated_conditional( Kmn, Kmm, Knn, f, full_cov=full_cov, full_output_cov=full_output_cov, q_sqrt=q_sqrt, white=whiten, ) assert m.shape.as_list() == [N, P] assert v.shape.as_list() == expected_v_shape # ------------------------------------------ # Test Mok Output Dims # ------------------------------------------ def test_shapes_of_mok() -> None: data = DataMixedKernel kern_list = [SquaredExponential() for _ in range(data.L)] k1 = mk.LinearCoregionalization(kern_list, W=data.W) assert k1.num_latent_gps == data.L k2 = mk.SeparateIndependent(kern_list) assert k2.num_latent_gps == data.L dims = 5 k3 = mk.SharedIndependent(SquaredExponential(), dims) assert k3.num_latent_gps == dims # ------------------------------------------ # Test Mixed Mok Kgg # ------------------------------------------ def test_MixedMok_Kgg() -> None: data = DataMixedKernel kern_list = [SquaredExponential() for _ in range(data.L)] kernel = mk.LinearCoregionalization(kern_list, W=data.W) Kgg = kernel.Kgg(Data.X, Data.X) # L x N x N Kff = kernel.K(Data.X, Data.X) # N x P x N x P # Kff = W @ Kgg @ W^T Kff_infered = np.einsum("lnm,pl,ql->npmq", Kgg, data.W, data.W) np.testing.assert_array_almost_equal(Kff, Kff_infered, decimal=5) # ------------------------------------------ # Integration tests # ------------------------------------------ def test_shared_independent_mok() -> None: """ In this test we use the same kernel and the same inducing inducing for each of the outputs. The outputs are considered to be uncorrelated. This is how GPflow handled multiple outputs before the multioutput framework was added. We compare three models here: 1) an ineffient one, where we use a SharedIndepedentMok with InducingPoints. This combination will uses a Kff of size N x P x N x P, Kfu if size N x P x M x P which is extremely inefficient as most of the elements are zero. 2) efficient: SharedIndependentMok and SharedIndependentMof This combinations uses the most efficient form of matrices 3) the old way, efficient way: using Kernel and InducingPoints Model 2) and 3) follow more or less the same code path. """ np.random.seed(0) # Model 1 q_mu_1 = np.random.randn(Data.M * Data.P, 1) # MP x 1 q_sqrt_1 = np.tril(np.random.randn(Data.M * Data.P, Data.M * Data.P))[None, ...] 
# 1 x MP x MP kernel_1 = mk.SharedIndependent(SquaredExponential(variance=0.5, lengthscales=1.2), Data.P) inducing_variable = InducingPoints(Data.X[: Data.M, ...]) model_1 = SVGP( kernel_1, Gaussian(), inducing_variable, q_mu=q_mu_1, q_sqrt=q_sqrt_1, num_latent_gps=Data.Y.shape[-1], ) set_trainable(model_1, False) set_trainable(model_1.q_sqrt, True) gpflow.optimizers.Scipy().minimize( model_1.training_loss_closure(Data.data), variables=model_1.trainable_variables, options=dict(maxiter=500), method="BFGS", compile=True, ) # Model 2 q_mu_2 = np.reshape(q_mu_1, [Data.M, Data.P]) # M x P q_sqrt_2 = np.array( [np.tril(np.random.randn(Data.M, Data.M)) for _ in range(Data.P)] ) # P x M x M kernel_2 = SquaredExponential(variance=0.5, lengthscales=1.2) inducing_variable_2 = InducingPoints(Data.X[: Data.M, ...]) model_2 = SVGP( kernel_2, Gaussian(), inducing_variable_2, num_latent_gps=Data.P, q_mu=q_mu_2, q_sqrt=q_sqrt_2, ) set_trainable(model_2, False) set_trainable(model_2.q_sqrt, True) gpflow.optimizers.Scipy().minimize( model_2.training_loss_closure(Data.data), variables=model_2.trainable_variables, options=dict(maxiter=500), method="BFGS", compile=True, ) # Model 3 q_mu_3 = np.reshape(q_mu_1, [Data.M, Data.P]) # M x P q_sqrt_3 = np.array( [np.tril(np.random.randn(Data.M, Data.M)) for _ in range(Data.P)] ) # P x M x M kernel_3 = mk.SharedIndependent(SquaredExponential(variance=0.5, lengthscales=1.2), Data.P) inducing_variable_3 = mf.SharedIndependentInducingVariables( InducingPoints(Data.X[: Data.M, ...]) ) model_3 = SVGP( kernel_3, Gaussian(), inducing_variable_3, num_latent_gps=Data.P, q_mu=q_mu_3, q_sqrt=q_sqrt_3, ) set_trainable(model_3, False) set_trainable(model_3.q_sqrt, True) gpflow.optimizers.Scipy().minimize( model_3.training_loss_closure(Data.data), variables=model_3.trainable_variables, options=dict(maxiter=500), method="BFGS", compile=True, ) check_equality_predictions(Data.data, [model_1, model_2, model_3]) def test_separate_independent_mok() -> None: """ We use different independent kernels for each of the output dimensions. We can achieve this in two ways: 1) efficient: SeparateIndependentMok with Shared/SeparateIndependentMof 2) inefficient: SeparateIndependentMok with InducingPoints However, both methods should return the same conditional, and after optimization return the same log likelihood. """ # Model 1 (Inefficient) q_mu_1 = np.random.randn(Data.M * Data.P, 1) q_sqrt_1 = np.tril(np.random.randn(Data.M * Data.P, Data.M * Data.P))[None, ...] 
# 1 x MP x MP kern_list_1 = [SquaredExponential(variance=0.5, lengthscales=1.2) for _ in range(Data.P)] kernel_1 = mk.SeparateIndependent(kern_list_1) inducing_variable_1 = InducingPoints(Data.X[: Data.M, ...]) model_1 = SVGP( kernel_1, Gaussian(), inducing_variable_1, num_latent_gps=1, q_mu=q_mu_1, q_sqrt=q_sqrt_1, ) set_trainable(model_1, False) set_trainable(model_1.q_sqrt, True) set_trainable(model_1.q_mu, True) gpflow.optimizers.Scipy().minimize( model_1.training_loss_closure(Data.data), variables=model_1.trainable_variables, method="BFGS", compile=True, ) # Model 2 (efficient) q_mu_2 = np.random.randn(Data.M, Data.P) q_sqrt_2 = np.array( [np.tril(np.random.randn(Data.M, Data.M)) for _ in range(Data.P)] ) # P x M x M kern_list_2 = [SquaredExponential(variance=0.5, lengthscales=1.2) for _ in range(Data.P)] kernel_2 = mk.SeparateIndependent(kern_list_2) inducing_variable_2 = mf.SharedIndependentInducingVariables( InducingPoints(Data.X[: Data.M, ...]) ) model_2 = SVGP( kernel_2, Gaussian(), inducing_variable_2, num_latent_gps=Data.P, q_mu=q_mu_2, q_sqrt=q_sqrt_2, ) set_trainable(model_2, False) set_trainable(model_2.q_sqrt, True) set_trainable(model_2.q_mu, True) gpflow.optimizers.Scipy().minimize( model_2.training_loss_closure(Data.data), variables=model_2.trainable_variables, method="BFGS", compile=True, ) check_equality_predictions(Data.data, [model_1, model_2]) def test_separate_independent_mof() -> None: """ Same test as above but we use different (i.e. separate) inducing inducing for each of the output dimensions. """ np.random.seed(0) # Model 1 (INefficient) q_mu_1 = np.random.randn(Data.M * Data.P, 1) q_sqrt_1 = np.tril(np.random.randn(Data.M * Data.P, Data.M * Data.P))[None, ...] # 1 x MP x MP kernel_1 = mk.SharedIndependent(SquaredExponential(variance=0.5, lengthscales=1.2), Data.P) inducing_variable_1 = InducingPoints(Data.X[: Data.M, ...]) model_1 = SVGP(kernel_1, Gaussian(), inducing_variable_1, q_mu=q_mu_1, q_sqrt=q_sqrt_1) set_trainable(model_1, False) set_trainable(model_1.q_sqrt, True) set_trainable(model_1.q_mu, True) gpflow.optimizers.Scipy().minimize( model_1.training_loss_closure(Data.data), variables=model_1.trainable_variables, method="BFGS", compile=True, ) # Model 2 (efficient) q_mu_2 = np.random.randn(Data.M, Data.P) q_sqrt_2 = np.array( [np.tril(np.random.randn(Data.M, Data.M)) for _ in range(Data.P)] ) # P x M x M kernel_2 = mk.SharedIndependent(SquaredExponential(variance=0.5, lengthscales=1.2), Data.P) inducing_variable_list_2 = [InducingPoints(Data.X[: Data.M, ...]) for _ in range(Data.P)] inducing_variable_2 = mf.SeparateIndependentInducingVariables(inducing_variable_list_2) model_2 = SVGP(kernel_2, Gaussian(), inducing_variable_2, q_mu=q_mu_2, q_sqrt=q_sqrt_2) set_trainable(model_2, False) set_trainable(model_2.q_sqrt, True) set_trainable(model_2.q_mu, True) gpflow.optimizers.Scipy().minimize( model_2.training_loss_closure(Data.data), variables=model_2.trainable_variables, method="BFGS", compile=True, ) # Model 3 (Inefficient): an idenitical inducing variable is used P times, # and treated as a separate one. 
q_mu_3 = np.random.randn(Data.M, Data.P) q_sqrt_3 = np.array( [np.tril(np.random.randn(Data.M, Data.M)) for _ in range(Data.P)] ) # P x M x M kern_list = [SquaredExponential(variance=0.5, lengthscales=1.2) for _ in range(Data.P)] kernel_3 = mk.SeparateIndependent(kern_list) inducing_variable_list_3 = [InducingPoints(Data.X[: Data.M, ...]) for _ in range(Data.P)] inducing_variable_3 = mf.SeparateIndependentInducingVariables(inducing_variable_list_3) model_3 = SVGP(kernel_3, Gaussian(), inducing_variable_3, q_mu=q_mu_3, q_sqrt=q_sqrt_3) set_trainable(model_3, False) set_trainable(model_3.q_sqrt, True) set_trainable(model_3.q_mu, True) gpflow.optimizers.Scipy().minimize( model_3.training_loss_closure(Data.data), variables=model_3.trainable_variables, method="BFGS", compile=True, ) check_equality_predictions(Data.data, [model_1, model_2, model_3]) def test_mixed_mok_with_Id_vs_independent_mok() -> None: data = DataMixedKernelWithEye # Independent model k1 = mk.SharedIndependent(SquaredExponential(variance=0.5, lengthscales=1.2), data.L) f1 = InducingPoints(data.X[: data.M, ...]) model_1 = SVGP(k1, Gaussian(), f1, q_mu=data.mu_data_full, q_sqrt=data.sqrt_data_full) set_trainable(model_1, False) set_trainable(model_1.q_sqrt, True) gpflow.optimizers.Scipy().minimize( model_1.training_loss_closure(Data.data), variables=model_1.trainable_variables, method="BFGS", compile=True, ) # Mixed Model kern_list = [SquaredExponential(variance=0.5, lengthscales=1.2) for _ in range(data.L)] k2 = mk.LinearCoregionalization(kern_list, data.W) f2 = InducingPoints(data.X[: data.M, ...]) model_2 = SVGP(k2, Gaussian(), f2, q_mu=data.mu_data_full, q_sqrt=data.sqrt_data_full) set_trainable(model_2, False) set_trainable(model_2.q_sqrt, True) gpflow.optimizers.Scipy().minimize( model_2.training_loss_closure(Data.data), variables=model_2.trainable_variables, method="BFGS", compile=True, ) check_equality_predictions(Data.data, [model_1, model_2]) def test_compare_mixed_kernel() -> None: data = DataMixedKernel kern_list = [SquaredExponential() for _ in range(data.L)] k1 = mk.LinearCoregionalization(kern_list, W=data.W) f1 = mf.SharedIndependentInducingVariables(InducingPoints(data.X[: data.M, ...])) model_1 = SVGP(k1, Gaussian(), inducing_variable=f1, q_mu=data.mu_data, q_sqrt=data.sqrt_data) kern_list = [SquaredExponential() for _ in range(data.L)] k2 = mk.LinearCoregionalization(kern_list, W=data.W) f2 = mf.SharedIndependentInducingVariables(InducingPoints(data.X[: data.M, ...])) model_2 = SVGP(k2, Gaussian(), inducing_variable=f2, q_mu=data.mu_data, q_sqrt=data.sqrt_data) check_equality_predictions(Data.data, [model_1, model_2]) def test_multioutput_with_diag_q_sqrt() -> None: data = DataMixedKernel q_sqrt_diag = np.ones((data.M, data.L)) * 2 q_sqrt = np.repeat(np.eye(data.M)[None, ...], data.L, axis=0) * 2 # L x M x M kern_list = [SquaredExponential() for _ in range(data.L)] k1 = mk.LinearCoregionalization(kern_list, W=data.W) f1 = mf.SharedIndependentInducingVariables(InducingPoints(data.X[: data.M, ...])) model_1 = SVGP( k1, Gaussian(), inducing_variable=f1, q_mu=data.mu_data, q_sqrt=q_sqrt_diag, q_diag=True, ) kern_list = [SquaredExponential() for _ in range(data.L)] k2 = mk.LinearCoregionalization(kern_list, W=data.W) f2 = mf.SharedIndependentInducingVariables(InducingPoints(data.X[: data.M, ...])) model_2 = SVGP( k2, Gaussian(), inducing_variable=f2, q_mu=data.mu_data, q_sqrt=q_sqrt, q_diag=False, ) check_equality_predictions(Data.data, [model_1, model_2]) def test_MixedKernelSeparateMof() -> None: data = 
DataMixedKernel kern_list = [SquaredExponential() for _ in range(data.L)] inducing_variable_list = [InducingPoints(data.X[: data.M, ...]) for _ in range(data.L)] k1 = mk.LinearCoregionalization(kern_list, W=data.W) f1 = mf.SeparateIndependentInducingVariables(inducing_variable_list) model_1 = SVGP(k1, Gaussian(), inducing_variable=f1, q_mu=data.mu_data, q_sqrt=data.sqrt_data) kern_list = [SquaredExponential() for _ in range(data.L)] inducing_variable_list = [InducingPoints(data.X[: data.M, ...]) for _ in range(data.L)] k2 = mk.LinearCoregionalization(kern_list, W=data.W) f2 = mf.SeparateIndependentInducingVariables(inducing_variable_list) model_2 = SVGP(k2, Gaussian(), inducing_variable=f2, q_mu=data.mu_data, q_sqrt=data.sqrt_data) check_equality_predictions(Data.data, [model_1, model_2]) def test_separate_independent_conditional_with_q_sqrt_none() -> None: """ In response to bug #1523, this test checks that separate_independent_condtional does not fail when q_sqrt=None. """ q_sqrt = None data = DataMixedKernel kern_list = [SquaredExponential() for _ in range(data.L)] kernel = gpflow.kernels.SeparateIndependent(kern_list) inducing_variable_list = [InducingPoints(data.X[: data.M, ...]) for _ in range(data.L)] inducing_variable = mf.SeparateIndependentInducingVariables(inducing_variable_list) mu_1, var_1 = gpflow.conditionals.conditional( data.X, inducing_variable, kernel, data.mu_data, full_cov=False, full_output_cov=False, q_sqrt=q_sqrt, white=True, ) def test_independent_interdomain_conditional_bug_regression() -> None: """ Regression test for https://github.com/GPflow/GPflow/issues/818 Not an exhaustive test """ M = 31 N = 11 D_lat = 5 D_inp = D_lat * 7 L = 2 P = 3 X = np.random.randn(N, D_inp) Zs = [np.random.randn(M, D_lat) for _ in range(L)] k = gpflow.kernels.SquaredExponential(lengthscales=np.ones(D_lat)) def compute_Kmn(Z: tf.Tensor, X: tf.Tensor) -> tf.Tensor: return tf.stack([k(Z, X[:, i * D_lat : (i + 1) * D_lat]) for i in range(P)]) def compute_Knn(X: tf.Tensor) -> tf.Tensor: return tf.stack([k(X[:, i * D_lat : (i + 1) * D_lat], full_cov=False) for i in range(P)]) Kmm = tf.stack([k(Z) for Z in Zs]) # L x M x M Kmn = tf.stack([compute_Kmn(Z, X) for Z in Zs]) # L x P x M x N Kmn = tf.transpose(Kmn, [2, 0, 3, 1]) # -> M x L x N x P Knn = tf.transpose(compute_Knn(X)) # N x P q_mu = tf.convert_to_tensor(np.zeros((M, L))) q_sqrt = tf.convert_to_tensor(np.stack([np.eye(M) for _ in range(L)])) tf.debugging.assert_shapes( [ (Kmm, ["L", "M", "M"]), (Kmn, ["M", "L", "N", "P"]), (Knn, ["N", "P"]), (q_mu, ["M", "L"]), (q_sqrt, ["L", "M", "M"]), ] ) _, _ = independent_interdomain_conditional( Kmn, Kmm, Knn, q_mu, q_sqrt=q_sqrt, full_cov=False, full_output_cov=False ) def test_independent_interdomain_conditional_whiten(whiten: bool) -> None: """ This test checks the effect of the `white` flag, which changes the projection matrix `A`. The impact of the flag on the value of `A` can be easily verified by its effect on the predicted mean. While the predicted covariance is also a function of `A` this test does not inspect that value. """ N, P = Data.N, Data.P Lm = np.random.randn(1, 1, 1).astype(np.float32) ** 2 Kmm = Lm * Lm + default_jitter() Kmn = tf.ones((1, 1, N, P)) Knn = tf.ones((N, P)) f = np.random.randn(1, 1).astype(np.float32) mean, _ = independent_interdomain_conditional( Kmn, Kmm, Knn, f, white=whiten, ) if whiten: expected_mean = (f * Kmn) / Lm else: expected_mean = (f * Kmn) / Kmm np.testing.assert_allclose(mean, expected_mean[0][0], rtol=1e-2)
GPflow/GPflow
tests/gpflow/conditionals/test_multioutput.py
Python
apache-2.0
31,704
from __future__ import unicode_literals import pygst pygst.require('0.10') import gst # noqa from mopidy.audio import output import logging logger = logging.getLogger(__name__) # This variable is a global that is set by the Backend # during initialization from the extension properties encoder = 'identity' class RtpSink(gst.Bin): def __init__(self): super(RtpSink, self).__init__() # These elements are 'always on' even if nobody is # subscribed to listen. It streamlines the process # of adding/removing listeners. queue = gst.element_factory_make('queue') rate = gst.element_factory_make('audiorate') enc = gst.element_factory_make(encoder) pay = gst.element_factory_make('rtpgstpay') # Re-use of the audio output bin which handles # dynamic element addition/removal nicely self.tee = output.AudioOutput() self.add_many(queue, rate, enc, pay, self.tee) gst.element_link_many(queue, rate, enc, pay, self.tee) pad = queue.get_pad('sink') ghost_pad = gst.GhostPad('sink', pad) self.add_pad(ghost_pad) def add(self, host, port): b = gst.Bin() queue = gst.element_factory_make('queue') udpsink = gst.element_factory_make('udpsink') udpsink.set_property('host', host) udpsink.set_property('port', port) # Both async and sync must be true to avoid seek # timestamp sync problems udpsink.set_property('sync', True) udpsink.set_property('async', True) b.add_many(queue, udpsink) gst.element_link_many(queue, udpsink) pad = queue.get_pad('sink') ghost_pad = gst.GhostPad('sink', pad) b.add_pad(ghost_pad) ident = str(port) + '@' + host self.tee.add_sink(ident, b) def remove(self, host, port): ident = str(port) + '@' + host self.tee.remove_sink(ident)
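
# Hedged usage sketch (not part of the original module): once the `encoder`
# global has been set by the backend extension, a sink might be wired up and
# listeners added/removed roughly as follows. Host/port values are made up
# for illustration.
#
#     sink = RtpSink()
#     sink.add('192.168.1.50', 5004)     # start streaming RTP to a listener
#     sink.remove('192.168.1.50', 5004)  # stop streaming to that listener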
liamw9534/mopidy-rtp
mopidy_rtp/sink.py
Python
apache-2.0
1,944
# -*- coding: utf-8 -*- # Copyright (c) 2015-2016 MIT Probabilistic Computing Project # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. def load_docstrings(module): module.State.__init__.__func__.__doc__ = """ Construct a State. Parameters ---------- X : np.ndarray Data matrix, each row is an observation and each column a variable. outputs : list<int>, optional Unique non-negative ID for each column in X, and used to refer to the column for all future queries. Defaults to range(0, X.shape[1]) inputs : list<int>, optional Currently unsupported. cctypes : list<str> Data type of each column, see `utils.config` for valid cctypes. distargs : list<dict>, optional See the documentation for each DistributionGpm for its distargs. Zv : dict(int:int), optional Assignment of output columns to views, where Zv[k] is the view assignment for column k. Defaults to sampling from CRP. Zrv : dict(int:list<int>), optional Assignment of rows to clusters in each view, where Zrv[k] is the Zr for View k. If specified, then Zv must also be specified. Defaults to sampling from CRP. Cd : list(list<int>), optional List of marginal dependence constraints for columns. Each element in the list is a list of columns which are to be in the same view. Each column can only be in one such list i.e. [[1,2,5],[1,5]] is not allowed. Ci : list(tuple<int>), optional List of marginal independence constraints for columns. Each element in the list is a 2-tuple of columns that must be independent, i.e. [(1,2),(1,3)]. Rd : dict(int:Cd), optional Dictionary of dependence constraints for rows, wrt. Each entry is (col: Cd), where col is a column number and Cd is a list of dependence constraints for the rows with respect to that column (see doc for Cd). Ri : dict(int:Cid), optional Dictionary of independence constraints for rows, wrt. Each entry is (col: Ci), where col is a column number and Ci is a list of independence constraints for the rows with respect to that column (see doc for Ci). iterations : dict(str:int), optional Metadata holding the number of iters each kernel has been run. loom_path: str, optional Path to a loom project compatible with this State. rng : np.random.RandomState, optional. Source of entropy. """ # -------------------------------------------------------------------------- # Observe module.State.incorporate_dim.__func__.__doc__ = """ Incorporate a new Dim into this State. Parameters ---------- T : list Data with length self.n_rows(). outputs : list[int] Identity of the variable modeled by this dim, must be non-negative and cannot collide with State.outputs. Only univariate outputs currently supported, so the list be a singleton. cctype, distargs: refer to State.__init__ v : int, optional Index of the view to assign the data. If 0 <= v < len(state.views) then insert into an existing View. If v = len(state.views) then singleton view will be created with a partition from the CRP prior. If unspecified, will be sampled. """ # -------------------------------------------------------------------------- # Schema updates. 
module.State.update_cctype.__func__.__doc__ = """ Update the distribution type of self.dims[col] to cctype. Parameters ---------- col : int Index of column to update. cctype, distargs: refer to State.__init__ """ # -------------------------------------------------------------------------- # Compositions module.State.compose_cgpm.__func__.__doc__ = """ Compose a CGPM with this object. Parameters ---------- cgpm : cgpm.cgpm.CGpm object The `CGpm` object to compose. Returns ------- token : int A unique token representing the composed cgpm, to be used by `State.decompose_cgpm`. """ module.State.decompose_cgpm.__func__.__doc__ = """ Decompose a previously composed CGPM. Parameters ---------- token : int The unique token representing the composed cgpm, returned from `State.compose_cgpm`. """ # -------------------------------------------------------------------------- # logpdf_score module.State.logpdf_score.__func__.__doc__ = """ Compute joint density of all latents and the incorporated data. Returns ------- logpdf_score : float The log score is P(X,Z) = P(X|Z)P(Z) where X is the observed data and Z is the entirety of the latent state in the CGPM. """ # -------------------------------------------------------------------------- # Mutual information module.State.mutual_information.__func__.__doc__ = """ Computes the mutual information MI(col0:col1|constraints). Mutual information with constraints can be of the form: - MI(X:Y|Z=z): CMI at a fixed conditioning value. - MI(X:Y|Z): expected CMI E_Z[MI(X:Y|Z)] under Z. - MI(X:Y|Z, W=w): expected CMI E_Z[MI(X:Y|Z,W=w)] under Z. This function supports all three forms. The CMI is computed under the posterior predictive joint distributions. Parameters ---------- col0, col1 : list<int> Columns to comptue MI. If all columns in `col0` are equivalent to columns in `col` then entropy is returned, otherwise they must be disjoint and the CMI is returned constraints : list(tuple), optional A list of pairs (col, val) of observed values to condition on. If `val` is None, then `col` is marginalized over. T : int, optional. Number of samples to use in the outer (marginalization) estimator. N : int, optional. Number of samples to use in the inner Monte Carlo estimator. Returns ------- mi : float A point estimate of the mutual information. Examples ------- # Compute MI(X:Y) >>> State.mutual_information(col_x, col_y) # Compute MI(X:Y|Z=1) >>> State.mutual_information(col_x, col_y, {col_z: 1}) # Compute MI(X:Y|W) >>> State.mutual_information(col_x, col_y, {col_w:None}) # Compute MI(X:Y|Z=1, W) >>> State.mutual_information(col_x, col_y, {col_z: 1, col_w:None}) """ # -------------------------------------------------------------------------- # Inference module.State.transition.__func__.__doc__ = """ Run targeted inference kernels. Parameters ---------- N : int, optional Number of iterations to transition. Default 1. S : float, optional Number of seconds to transition. If both N and S set then min used. kernels : list<{'alpha', 'view_alphas', 'column_params', 'column_hypers' 'rows', 'columns'}>, optional List of inference kernels to run in this transition. Default all. views, rows, cols : list<int>, optional View, row and column numbers to apply the kernels. Default all. checkpoint : int, optional Number of transitions between recording inference diagnostics from the latent state (such as logscore and row/column partitions). Defaults to no checkpointing. progress : boolean, optional Show a progress bar for number of target iterations or elapsed time. """
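
# Hedged usage sketch (not part of the original module): the docstrings above
# document an API that might be exercised roughly as follows, assuming a
# `State` instance named `state` built from a data matrix `X` (names and
# arguments here are illustrative only):
#
#     state = State(X, cctypes=['normal'] * X.shape[1])
#     state.transition(N=100, kernels=['rows', 'columns'])
#     mi = state.mutual_information([0], [1], constraints=[(2, None)])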
probcomp/cgpm
src/crosscat/statedoc.py
Python
apache-2.0
8,646
import os

import cloudstorage as gcs
from google.appengine.api import app_identity


class FileServer():
    """Reads files from the application's Google Cloud Storage bucket."""

    def __init__(self):
        # Use the bucket named in the environment, falling back to the
        # application's default GCS bucket.
        bucket_name = os.environ.get('BUCKET_NAME', app_identity.get_default_gcs_bucket_name())
        self.bucket = '/' + bucket_name

    def GetFileForPath(self, path):
        """Return the contents of `path` in the bucket, or None if missing."""
        try:
            full_path = self.bucket + '/' + path
            file_obj = gcs.open(full_path)
            data = file_obj.read()
            file_obj.close()
            return data
        except gcs.NotFoundError:
            return None
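
# Hedged usage sketch (not part of the original module); assumes the
# cloudstorage library and a default GCS bucket are available to the app:
#
#     server = FileServer()
#     data = server.GetFileForPath('static/index.html')
#     if data is None:
#         pass  # object not found in the bucket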
benmorss/excalibur
cloudserver.py
Python
apache-2.0
563
# Copyright (c) 2014 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from six.moves import range from murano.dsl import helpers class Object(object): def __init__(self, __name, **kwargs): self.data = { '?': { 'type': __name, 'id': helpers.generate_id() } } self.data.update(kwargs) @property def id(self): return self.data['?']['id'] @property def type_name(self): return self.data['?']['type'] class Attribute(object): def __init__(self, obj, key, value): self._value = value self._key = key self._obj = obj @property def obj(self): return self._obj @property def key(self): return self._key @property def value(self): return self._value class Ref(object): def __init__(self, obj): self._id = obj.id @property def id(self): return self._id def build_model(root): if isinstance(root, dict): for key, value in root.items(): root[key] = build_model(value) elif isinstance(root, list): for i in range(len(root)): root[i] = build_model(root[i]) elif isinstance(root, Object): return build_model(root.data) elif isinstance(root, Ref): return root.id elif isinstance(root, Attribute): return [root.obj.id, root.obj.type_name, root.key, root.value] return root
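
# Hedged usage sketch (not part of the original module): how these helpers
# might be combined when describing an object model in tests. The class names
# and property values below are made up for illustration.
#
#     instance = Object('io.murano.resources.Instance', name='vm-1')
#     env = Object('io.murano.Environment', instance=Ref(instance))
#     model = build_model({
#         'env': env,
#         'attr': Attribute(instance, 'ip', '10.0.0.1'),
#     })
#     # `model` is now a plain dict/list structure with generated object ids.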
olivierlemasle/murano
murano/tests/unit/dsl/foundation/object_model.py
Python
apache-2.0
2,019
#!/astro/apps/pkg/python/bin/python
import pyfits
import SDSSfits
import numpy
from tools import create_fits
import os

def main(OUT_DIR = "/astro/net/scratch1/vanderplas/SDSS_GAL_RESTFRAME/",
         DIR_ROOT = "/astro/net/scratch1/sdssspec/spectro/1d_26/*/1d",
         LINES_FILE = "LINES_SHORT.TXT",
         z_min = 0.0,   #zmax is set such that SII lines will
         z_max = 0.36,  # fall in range of 3830 to 9200 angstroms
         rebin_coeff0 = 3.583,     # rebin parameters give a wavelength
         rebin_coeff1 = 0.0002464, # range from 3830A to 9200A
         rebin_length = 1000,
         remove_sky_absorption = True,
         normalize = True):
    LINES = []
    KEYS = ['TARGET','Z','Z_ERR','SPEC_CLN','MAG_G','MAG_R','MAG_I','N_BAD_PIX']

    if LINES_FILE is not None:
        for line in open(LINES_FILE):
            line = line.split()
            if len(line)==0:continue
            W = float(line[0])
            if W<3000 or W>7000:continue
            LINES.append('%.2f'%W)
            for info in ('flux','dflux','width','dwidth','nsigma'):
                KEYS.append('%.2f_%s' % (W,info) )

    for SET in os.listdir(DIR_ROOT.split('*')[0]):
        if not SET.isdigit(): continue
        DIR = DIR_ROOT.replace('*',SET)
        if not os.path.exists(DIR): continue

        OUT_FILE = os.path.join(OUT_DIR,SET+'.dat')

        print 'writing %s' % os.path.join(OUT_DIR,SET+'.dat')

        col_dict = dict([(KEY,[]) for KEY in KEYS])
        spec_list = []
        NUMS = []

        for F in os.listdir(DIR):
            if not F.endswith('.fit'): continue
            num = int( F.strip('.fit').split('-')[-1] )
            if num in NUMS:
                #print " - already measured: skipping %s" % F
                continue

            #open hdu file and glean necessary info
            SPEC = SDSSfits.SDSSfits(os.path.join(DIR,F),LINES)

            if SPEC.D['SPEC_CLN'] not in (1,2,3,4):
                continue

            if SPEC.z<z_min:
                #print " - negative z: skipping %s" % F
                continue

            if SPEC.z>z_max:
                #print " - z>z_max: skipping %s" % F
                continue

            if SPEC.numlines == 0:
                #print " - no line measurements: skipping %s" % F
                continue

            if remove_sky_absorption:
                #cover up strong oxygen absorption
                SPEC.remove_O_lines()

            #move to restframe, rebin, and normalize
            SPEC.move_to_restframe()

            try:
                SPEC = SPEC.rebin(rebin_coeff0,rebin_coeff1,rebin_length)
            except:
                print " rebin failed. Skipping %s" % F
                continue

            if normalize:
                try:
                    SPEC.normalize()
                except:
                    print " normalize failed. Skipping %s" % F
                    continue

            if min(SPEC.spectrum) < -4*max(SPEC.spectrum):
                print " goes too far negative. Skipping %s" % F
                continue

            NUMS.append(num)
            spec_list.append(SPEC.spectrum.tolist())
            for KEY in KEYS:
                col_dict[KEY].append(SPEC.D[KEY])

            del SPEC

        if os.path.exists(OUT_FILE):
            os.system('rm %s' % OUT_FILE)

        col_dict['coeff0'] = rebin_coeff0
        col_dict['coeff1'] = rebin_coeff1

        create_fits(OUT_FILE,numpy.asarray( spec_list ),**col_dict)
        print " - wrote %i spectra" % len(NUMS)

if __name__ == '__main__':
    main(OUT_DIR = "/astro/net/scratch1/vanderplas/SDSS_GAL_RESTFRAME/",
         DIR_ROOT = "/astro/net/scratch1/sdssspec/spectro/1d_26/*/1d",
         #LINES_FILE = "LINES_SHORT.TXT",
         LINES_FILE = None,
         z_min = 0.0,   #zmax is set such that SII lines will
         z_max = 0.36,  # fall in range of 3830 to 9200 angstroms
         rebin_coeff0 = 3.583,     # rebin parameters give a wavelength
         rebin_coeff1 = 0.0002464, # range from 3830A to 9200A
         rebin_length = 1000,
         remove_sky_absorption = False,
         normalize = False)
excelly/xpy-ml
sdss/jake_lib/make_condensed_fits.py
Python
apache-2.0
4,199
# -*- coding: utf-8 -*-
'''
Common data structures shared across the package.

container.py :
    NetInterface : describes one virtual network interface of a container
        -name : name of the virtual interface
        -hostVeth : name of the host-side veth paired with the interface
        -ip : IP address
        -mac : MAC address
        -vethMac : MAC address of the host-side veth
        + NetInterface::create : create a virtual interface and return a NetInterface object
            container : target container
            vName : name of the container-side peer
            h_vName : name of the host-side peer
    Container : describes a container; can be persisted
        -host : host the container belongs to
        -pid : pid of the container on the host
        -id : ID assigned to the container by the docker daemon
        -ifaces [list] : the container's virtual interfaces, a collection of Interface objects
        -netns : the container's network namespace, a NetworkNamespace instance
        -image : name of the image the container was created from
        -dataDirectory : path where the container's data is stored
        -createTime : creation time
        -state : current running state
        -belongsTo : owning user
        +attachToNetworkNamespace : join a namespace
            netns : namespace object to join
        +detachNetworkNamespace : leave a namespace
            netns : namespace object to leave
net.py :
    NetworkNamespace : describes a network namespace
        -uid : unique ID, generated with a uuid function at initialization
        -addrs [list] : IP addresses of the namespace (possibly several), as CIDR addresses
        -containers : containers that have joined the namespace
        -initHost : host the namespace belonged to when it was initialized
        -createTime : creation time
        -belongsTo : owning user
utils.py:
    Host : describes a host
        -mac : MAC address
        -transportIp : IP used for data transport
        -containers : containers on the host, a list of Container objects
        -proxys : list of container-creation proxies on the host
        +getConcreteProxy : get a container-creation proxy of a specific type
            ProxyClass : proxy type
    Switch : describes a virtual switch installed on a host
        -host : host it belongs to
        -portsToContainers : mapping between switch ports and containers
        -portsInfo : information about each port
        -bridgeName : bridge name
exceptions.py :
    ContainerCreatorTypeInvalidError : the container creator does not match the container-creation proxy type
tools.py :
'''
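
# Hedged illustration (not part of the original package): skeletons matching
# the structures described above, to make the attribute lists concrete. The
# real definitions live in container.py / net.py / utils.py; the signatures
# below are assumptions based solely on this docstring.
#
#     class NetInterface(object):
#         def __init__(self, name, hostVeth, ip, mac, vethMac):
#             ...
#
#         @classmethod
#         def create(cls, container, vName, h_vName):
#             # create a veth pair and return the container-side NetInterface
#             ...
#
#     class Container(object):
#         def attachToNetworkNamespace(self, netns):
#             # join the given NetworkNamespace
#             ...
#
#         def detachNetworkNamespace(self, netns):
#             # leave the given NetworkNamespace
#             ...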
onlysheep5200/NetnsEx
lib/__init__.py
Python
apache-2.0
2,175
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import logging
import unittest

import apache_beam as beam
from apache_beam.runners.portability import fn_api_runner
from apache_beam.runners.portability import maptask_executor_runner_test
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to


class FnApiRunnerTest(
    maptask_executor_runner_test.MapTaskExecutorRunnerTest):

  def create_pipeline(self):
    return beam.Pipeline(
        runner=fn_api_runner.FnApiRunner())

  def test_combine_per_key(self):
    # TODO(BEAM-1348): Enable once Partial GBK is supported in fn API.
    pass

  def test_pardo_side_inputs(self):
    # TODO(BEAM-1348): Enable once side inputs are supported in fn API.
    pass

  def test_pardo_unfusable_side_inputs(self):
    # TODO(BEAM-1348): Enable once side inputs are supported in fn API.
    pass

  def test_assert_that(self):
    # TODO: figure out a way for fn_api_runner to parse and raise the
    # underlying exception.
    with self.assertRaisesRegexp(Exception, 'Failed assert'):
      with self.create_pipeline() as p:
        assert_that(p | beam.Create(['a', 'b']), equal_to(['a']))

  # Inherits all tests from maptask_executor_runner.MapTaskExecutorRunner


if __name__ == '__main__':
  logging.getLogger().setLevel(logging.INFO)
  unittest.main()
eljefe6a/incubator-beam
sdks/python/apache_beam/runners/portability/fn_api_runner_test.py
Python
apache-2.0
2,196
# Copyright (c) 2016. Mount Sinai School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function, division, absolute_import

from .effect_collection import EffectCollection

from .effect_ordering import (
    effect_priority,
    top_priority_effect,
)

from .effect_prediction import (
    predict_variant_effects,
    predict_variant_effect_on_transcript,
    predict_variant_effect_on_transcript_or_failure,
)

from .effect_classes import (
    MutationEffect,
    TranscriptMutationEffect,
    NonsilentCodingMutation,
    Failure,
    IncompleteTranscript,
    Intergenic,
    Intragenic,
    NoncodingTranscript,
    Intronic,
    ThreePrimeUTR,
    FivePrimeUTR,
    Silent,
    Substitution,
    Insertion,
    Deletion,
    ComplexSubstitution,
    AlternateStartCodon,
    IntronicSpliceSite,
    ExonicSpliceSite,
    StopLoss,
    SpliceDonor,
    SpliceAcceptor,
    PrematureStop,
    FrameShiftTruncation,
    StartLoss,
    FrameShift,
    ExonLoss,
)


__all__ = [
    "EffectCollection",
    # effect ordering
    "effect_priority",
    "top_priority_effect",
    # prediction functions
    "predict_variant_effects",
    "predict_variant_effect_on_transcript",
    "predict_variant_effect_on_transcript_or_failure",
    # effect classes
    "MutationEffect",
    "TranscriptMutationEffect",
    "Failure",
    "IncompleteTranscript",
    "Intergenic",
    "Intragenic",
    "NoncodingTranscript",
    "ThreePrimeUTR",
    "FivePrimeUTR",
    "Intronic",
    "Silent",
    "NonsilentCodingMutation",
    "Substitution",
    "Insertion",
    "Deletion",
    "ComplexSubstitution",
    "AlternateStartCodon",
    "IntronicSpliceSite",
    "ExonicSpliceSite",
    "StopLoss",
    "SpliceDonor",
    "SpliceAcceptor",
    "PrematureStop",
    "FrameShiftTruncation",
    "StartLoss",
    "FrameShift",
    "ExonLoss",
]
hammerlab/varcode
varcode/effects/__init__.py
Python
apache-2.0
2,401