the-stack_106_20836
|
import asyncio
from dataclasses import dataclass, field
from dateutil import parser
from enum import Enum
import json
from typing import Dict, List, Optional
from functools import cached_property
from pydantic import BaseSettings
from rich.progress import (
BarColumn,
DownloadColumn,
TextColumn,
TransferSpeedColumn,
TimeRemainingColumn,
Progress,
Task,
)
from ..settings import get_settings
from ..ws_client import WebsocketClient
from ..console import console
class StatusColors:
starting = "gold3"
initialized = ""
finished = "green"
postprocessing = "royal_blue1"
crashed = "red"
ended = "dodger_blue1"
class MessageTypes(Enum):
initial = 'active-simulations'
progress = 'progress'
status = 'status'
new = "active-simulation"
progress = Progress(
TextColumn("[bold blue]ID {task.fields[sim_id]} | {task.fields[status]}", justify="left"),
BarColumn(bar_width=None),
"[progress.percentage]{task.percentage:>3.1f}%",
"•",
TextColumn("[bold cyan]{task.fields[name]}", justify="left"),
"•",
TextColumn("[bold]{task.fields[user]}", justify="left"),
"•",
TextColumn("[bold dark_goldenrod]{task.fields[organisation]}", justify="left"),
"•",
TimeRemainingColumn(),
console=console,
auto_refresh=False,
# transient=True
)
@dataclass
class ProgressInfo:
# defaults are used for the header,
# instance overrides these values
sim_id: str = "ID"
progress: str = "PROGRESS"
name: str = "NAME"
user: str = "USER"
organisation: str = "ORGANISATION"
time_remaining: str = "TIME REMAINING"
@classmethod
def header(cls):
return f"Currently running simulations [{cls.sim_id} • {cls.progress} • {cls.name} • {cls.user} • {cls.organisation} • {cls.time_remaining}]" # noqa
@dataclass
class ProgressTask(ProgressInfo):
"""
every progress bar is managed through a rich.progress.Task.
This class holds all task related info, plus the actual
rich.progress.Task instance called 'task'
"""
description: str = "running"
start: bool = field(init=False)
total: int = 100
status: str = ""
task: Optional[Task] = None
is_live: bool = False
_kwarg_fields = {
"description",
"sim_id",
"name",
"user",
"organisation",
"start",
"total",
"status",
}
def __post_init__(self):
self.start = bool(self.progress)
@property
def kwargs(self) -> Dict:
d = {
field_name: getattr(self, field_name)
for field_name in self._kwarg_fields
}
return {k: v for k, v in d.items() if not k.startswith("_")}
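# Illustrative sketch (not part of the original module): a ProgressTask carries
# the keyword arguments that Progress.add_task() expects via its .kwargs
# property; the field values below are made-up example data.
#
#     pt = ProgressTask(sim_id="42", progress="10", name="demo run",
#                       user="jane", organisation="acme", status="starting")
#     progress.add_task(**pt.kwargs)   # same call add_progress_bar() makes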
class ActiveSimulations:
STATUS_COLORS = {
"starting": "gold3",
"initialized": "",
"finished": "green",
"postprocessing": "royal_blue1",
"crashed": "red",
"ended": "dodger_blue1",
}
def __init__(self, env_file: str):
self.env_file = env_file
# local cache {simulation_id: ProgressTask}
self.tasks = {}
@cached_property
def websocket_client(self):
settings = get_settings(self.env_file)
return WebsocketClient(
host=settings.host_name,
token=settings.token,
proto=settings.proto,
api_version=settings.api_version
)
async def add_progress_bar(self, progress_task: ProgressTask, progress: Progress) -> None:
"""creates a new progress task and adds a ProgressTask instance to the internal registry"""
task_id = progress.add_task(**progress_task.kwargs)
if progress_task.progress:
progress.update(task_id, advance=progress_task.progress)
task = [t for t in progress.tasks if t.id == task_id][0]
progress_task.task = task
self.tasks[int(progress_task.sim_id)] = progress_task
async def advance_progress(self, data: Dict, progress: Progress) -> None:
"""advances the progress bar for the given task by the current simulation progress"""
simulation_id = data["data"]["simulation_id"]
sim_progress = data["data"]["progress"]
progress_task = self.tasks.get(int(simulation_id))
if not progress_task.task.started:
progress.start_task(progress_task.task.id)
# progress.console.print(f">>>>>>>>>> {sim_progress}")
if int(sim_progress) <= progress_task.task.percentage:
return
advance_by = int(sim_progress) - int(progress_task.task.percentage)
progress.update(progress_task.task.id, advance=advance_by)
# auto_refresh is False so do this manually
progress.refresh()
@staticmethod
def is_live_finished(status, progress_task: ProgressTask) -> bool:
return all(
(status == 'crashed',
progress_task.is_live,
progress_task.status in ("ended", "postprocessing"))
)
async def update_status(self, data: Dict, progress: Progress):
status = data["data"]["status"]
simulation_id = data["data"]["simulation_id"]
progress_task = self.tasks.get(int(simulation_id))
if self.is_live_finished(status, progress_task):
status = "finished"
status_txt = status
color = self.STATUS_COLORS.get(status)
if color:
status_txt = f"[bold {color}]{status_txt}[/bold {color}]"
update_kwargs = {"status": status_txt}
advance_by = 100 if status in ("finished", "ended", "postprocessing") else None
if advance_by and not progress_task.task.started:
progress.start_task(progress_task.task.id)
update_kwargs.update({"advance": advance_by})
progress.update(progress_task.task.id, **update_kwargs)
# auto_refresh is False so do this manually
progress.refresh()
progress_task.status = status
async def run_monitor(self) -> None:
asyncio.ensure_future(self.websocket_client.listen('active-simulations/'))
q = self.websocket_client.get_queue()
try:
await asyncio.wait_for(
self.websocket_client.is_connected(), timeout=5)
except asyncio.TimeoutError:
console.print("Could not establish WS connection", style="error")
return
with progress:
progress.console.rule(ProgressInfo.header())
while True:
data = await q.get()
message_type = await self.get_msg_type(data)
if not message_type:
continue
if message_type == MessageTypes.initial:
progress_tasks = await self.get_initial_progress_tasks(data)
for progress_task in progress_tasks:
await self.add_progress_bar(progress_task, progress)
elif message_type == MessageTypes.progress:
await self.advance_progress(data, progress)
elif message_type == MessageTypes.status:
await self.update_status(data, progress)
elif message_type == MessageTypes.new:
progress_task = await self.get_new_progress_task(data)
await self.add_progress_bar(progress_task, progress)
@staticmethod
async def get_msg_type(data) -> Optional[MessageTypes]:
try:
return MessageTypes(data["type"])
except (KeyError, ValueError):
return
@staticmethod
async def get_new_progress_task(data):
"""
message from "active-simulations" URI
type declaration "active-simulation"
"""
data = data["data"]
simulation_id = list(data.keys())[0]
data_json = list(data.values())[0]
return await ActiveSimulations._get_progress_task(simulation_id, data_json)
@staticmethod
async def _get_progress_task(simulation_id, simulation_details) -> ProgressTask:
details = json.loads(simulation_details)
name = details["name"]
is_live = int(details["duration"]) == 3153600000
if is_live:
dt = parser.parse(details["date_created"])
name = f"{name} [bold red][LIVE][/bold red]" # since {dt.strftime('%Y-%m-%d %H:%M')}]"
sim_progress = details["progress"]
return ProgressTask(
sim_id=simulation_id,
name=name,
user=details["user_name"],
organisation=details["organisation_name"],
progress=sim_progress,
status=details["status"],
is_live=is_live
)
@staticmethod
async def get_initial_progress_tasks(data: Dict) -> List[ProgressTask]:
progress_tasks = []
for simulation_id, json_details in data["data"].items():
task_info = await ActiveSimulations._get_progress_task(
simulation_id, json_details
)
progress_tasks.append(task_info)
return progress_tasks
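# Illustrative usage sketch (not part of the original module): the monitor is
# driven from an asyncio entry point; the ".env" path below is a hypothetical
# example value.
#
#     async def main() -> None:
#         monitor = ActiveSimulations(env_file=".env")
#         await monitor.run_monitor()
#
#     asyncio.run(main())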
|
the-stack_106_20837
|
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A federation sender that forwards things to be sent across replication to
a worker process.
It assumes there is a single worker process feeding off of it.
Each row in the replication stream consists of a type and some json, where the
types indicate whether they are presence, or edus, etc.
Ephemeral or non-event data are queued up in-memory. When the worker requests
updates since a particular point, all in-memory data since before that point is
dropped. We also expire things in the queue after 5 minutes, to ensure that a
dead worker doesn't cause the queues to grow limitlessly.
Events are replicated via a separate events stream.
"""
import logging
from collections import namedtuple
from six import iteritems
from sortedcontainers import SortedDict
from synapse.metrics import LaterGauge
from synapse.storage.presence import UserPresenceState
from synapse.util.metrics import Measure
from .units import Edu
logger = logging.getLogger(__name__)
class FederationRemoteSendQueue(object):
"""A drop in replacement for FederationSender"""
def __init__(self, hs):
self.server_name = hs.hostname
self.clock = hs.get_clock()
self.notifier = hs.get_notifier()
self.is_mine_id = hs.is_mine_id
self.presence_map = {} # Pending presence map user_id -> UserPresenceState
self.presence_changed = SortedDict() # Stream position -> user_id
self.keyed_edu = {} # (destination, key) -> EDU
self.keyed_edu_changed = SortedDict() # stream position -> (destination, key)
self.edus = SortedDict() # stream position -> Edu
self.device_messages = SortedDict() # stream position -> destination
self.pos = 1
self.pos_time = SortedDict()
# EVERYTHING IS SAD. In particular, python only makes new scopes when
# we make a new function, so we need to make a new function so the inner
# lambda binds to the queue rather than to the name of the queue which
# changes. ARGH.
def register(name, queue):
LaterGauge("synapse_federation_send_queue_%s_size" % (name,),
"", [], lambda: len(queue))
for queue_name in [
"presence_map", "presence_changed", "keyed_edu", "keyed_edu_changed",
"edus", "device_messages", "pos_time",
]:
register(queue_name, getattr(self, queue_name))
self.clock.looping_call(self._clear_queue, 30 * 1000)
def _next_pos(self):
pos = self.pos
self.pos += 1
self.pos_time[self.clock.time_msec()] = pos
return pos
def _clear_queue(self):
"""Clear the queues for anything older than N minutes"""
FIVE_MINUTES_AGO = 5 * 60 * 1000
now = self.clock.time_msec()
keys = self.pos_time.keys()
time = self.pos_time.bisect_left(now - FIVE_MINUTES_AGO)
if not keys[:time]:
return
position_to_delete = max(keys[:time])
for key in keys[:time]:
del self.pos_time[key]
self._clear_queue_before_pos(position_to_delete)
def _clear_queue_before_pos(self, position_to_delete):
"""Clear all the queues from before a given position"""
with Measure(self.clock, "send_queue._clear"):
# Delete things out of presence maps
keys = self.presence_changed.keys()
i = self.presence_changed.bisect_left(position_to_delete)
for key in keys[:i]:
del self.presence_changed[key]
user_ids = set(
user_id
for uids in self.presence_changed.values()
for user_id in uids
)
to_del = [
user_id for user_id in self.presence_map if user_id not in user_ids
]
for user_id in to_del:
del self.presence_map[user_id]
# Delete things out of keyed edus
keys = self.keyed_edu_changed.keys()
i = self.keyed_edu_changed.bisect_left(position_to_delete)
for key in keys[:i]:
del self.keyed_edu_changed[key]
live_keys = set()
for edu_key in self.keyed_edu_changed.values():
live_keys.add(edu_key)
to_del = [edu_key for edu_key in self.keyed_edu if edu_key not in live_keys]
for edu_key in to_del:
del self.keyed_edu[edu_key]
# Delete things out of edu map
keys = self.edus.keys()
i = self.edus.bisect_left(position_to_delete)
for key in keys[:i]:
del self.edus[key]
# Delete things out of device map
keys = self.device_messages.keys()
i = self.device_messages.bisect_left(position_to_delete)
for key in keys[:i]:
del self.device_messages[key]
def notify_new_events(self, current_id):
"""As per FederationSender"""
# We don't need to replicate this as it gets sent down a different
# stream.
pass
def build_and_send_edu(self, destination, edu_type, content, key=None):
"""As per FederationSender"""
if destination == self.server_name:
logger.info("Not sending EDU to ourselves")
return
pos = self._next_pos()
edu = Edu(
origin=self.server_name,
destination=destination,
edu_type=edu_type,
content=content,
)
if key:
assert isinstance(key, tuple)
self.keyed_edu[(destination, key)] = edu
self.keyed_edu_changed[pos] = (destination, key)
else:
self.edus[pos] = edu
self.notifier.on_new_replication_data()
def send_read_receipt(self, receipt):
"""As per FederationSender
Args:
receipt (synapse.types.ReadReceipt):
"""
# nothing to do here: the replication listener will handle it.
pass
def send_presence(self, states):
"""As per FederationSender
Args:
states (list(UserPresenceState))
"""
pos = self._next_pos()
# We only want to send presence for our own users, so let's always
# filter here, just in case.
local_states = list(filter(lambda s: self.is_mine_id(s.user_id), states))
self.presence_map.update({state.user_id: state for state in local_states})
self.presence_changed[pos] = [state.user_id for state in local_states]
self.notifier.on_new_replication_data()
def send_device_messages(self, destination):
"""As per FederationSender"""
pos = self._next_pos()
self.device_messages[pos] = destination
self.notifier.on_new_replication_data()
def get_current_token(self):
return self.pos - 1
def federation_ack(self, token):
self._clear_queue_before_pos(token)
def get_replication_rows(self, from_token, to_token, limit, federation_ack=None):
"""Get rows to be sent over federation between the two tokens
Args:
from_token (int)
to_token(int)
limit (int)
federation_ack (int): Optional. The position the worker has
explicitly acknowledged it has handled. Allows us to drop
data from before that point.
"""
# TODO: Handle limit.
# To handle restarts where we wrap around
if from_token > self.pos:
from_token = -1
# list of tuple(int, BaseFederationRow), where the first is the position
# of the federation stream.
rows = []
# There should be only one reader, so let's delete everything it has
# acknowledged it has seen.
if federation_ack:
self._clear_queue_before_pos(federation_ack)
# Fetch changed presence
i = self.presence_changed.bisect_right(from_token)
j = self.presence_changed.bisect_right(to_token) + 1
dest_user_ids = [
(pos, user_id)
for pos, user_id_list in self.presence_changed.items()[i:j]
for user_id in user_id_list
]
for (key, user_id) in dest_user_ids:
rows.append((key, PresenceRow(
state=self.presence_map[user_id],
)))
# Fetch changed keyed edus
i = self.keyed_edu_changed.bisect_right(from_token)
j = self.keyed_edu_changed.bisect_right(to_token) + 1
# We purposefully clobber based on the key here, python dict comprehensions
# always use the last value, so this will correctly point to the last
# stream position.
keyed_edus = {v: k for k, v in self.keyed_edu_changed.items()[i:j]}
for ((destination, edu_key), pos) in iteritems(keyed_edus):
rows.append((pos, KeyedEduRow(
key=edu_key,
edu=self.keyed_edu[(destination, edu_key)],
)))
# Fetch changed edus
i = self.edus.bisect_right(from_token)
j = self.edus.bisect_right(to_token) + 1
edus = self.edus.items()[i:j]
for (pos, edu) in edus:
rows.append((pos, EduRow(edu)))
# Fetch changed device messages
i = self.device_messages.bisect_right(from_token)
j = self.device_messages.bisect_right(to_token) + 1
device_messages = {v: k for k, v in self.device_messages.items()[i:j]}
for (destination, pos) in iteritems(device_messages):
rows.append((pos, DeviceRow(
destination=destination,
)))
# Sort rows based on pos
rows.sort()
return [(pos, row.TypeId, row.to_data()) for pos, row in rows]
class BaseFederationRow(object):
"""Base class for rows to be sent in the federation stream.
Specifies how to identify, serialize and deserialize the different types.
"""
TypeId = None # Unique string that ids the type. Must be overridden in subclasses.
@staticmethod
def from_data(data):
"""Parse the data from the federation stream into a row.
Args:
data: The value of ``data`` from FederationStreamRow.data, type
depends on the type of stream
"""
raise NotImplementedError()
def to_data(self):
"""Serialize this row to be sent over the federation stream.
Returns:
The value to be sent in FederationStreamRow.data. The type depends
on the type of stream.
"""
raise NotImplementedError()
def add_to_buffer(self, buff):
"""Add this row to the appropriate field in the buffer ready for this
to be sent over federation.
We use a buffer so that we can batch up events that have come in at
the same time and send them all at once.
Args:
buff (BufferedToSend)
"""
raise NotImplementedError()
class PresenceRow(BaseFederationRow, namedtuple("PresenceRow", (
"state", # UserPresenceState
))):
TypeId = "p"
@staticmethod
def from_data(data):
return PresenceRow(
state=UserPresenceState.from_dict(data)
)
def to_data(self):
return self.state.as_dict()
def add_to_buffer(self, buff):
buff.presence.append(self.state)
class KeyedEduRow(BaseFederationRow, namedtuple("KeyedEduRow", (
"key", # tuple(str) - the edu key passed to send_edu
"edu", # Edu
))):
"""Streams EDUs that have an associated key that is used to clobber. For example,
typing EDUs clobber based on room_id.
"""
TypeId = "k"
@staticmethod
def from_data(data):
return KeyedEduRow(
key=tuple(data["key"]),
edu=Edu(**data["edu"]),
)
def to_data(self):
return {
"key": self.key,
"edu": self.edu.get_internal_dict(),
}
def add_to_buffer(self, buff):
buff.keyed_edus.setdefault(
self.edu.destination, {}
)[self.key] = self.edu
class EduRow(BaseFederationRow, namedtuple("EduRow", (
"edu", # Edu
))):
"""Streams EDUs that don't have keys. See KeyedEduRow
"""
TypeId = "e"
@staticmethod
def from_data(data):
return EduRow(Edu(**data))
def to_data(self):
return self.edu.get_internal_dict()
def add_to_buffer(self, buff):
buff.edus.setdefault(self.edu.destination, []).append(self.edu)
class DeviceRow(BaseFederationRow, namedtuple("DeviceRow", (
"destination", # str
))):
"""Streams the fact that either a) there are pending to-device messages for
users on the remote, or b) a local user's device has changed and needs to
be sent to the remote.
"""
TypeId = "d"
@staticmethod
def from_data(data):
return DeviceRow(destination=data["destination"])
def to_data(self):
return {"destination": self.destination}
def add_to_buffer(self, buff):
buff.device_destinations.add(self.destination)
TypeToRow = {
Row.TypeId: Row
for Row in (
PresenceRow,
KeyedEduRow,
EduRow,
DeviceRow,
)
}
ParsedFederationStreamData = namedtuple("ParsedFederationStreamData", (
"presence", # list(UserPresenceState)
"keyed_edus", # dict of destination -> { key -> Edu }
"edus", # dict of destination -> [Edu]
"device_destinations", # set of destinations
))
def process_rows_for_federation(transaction_queue, rows):
"""Parse a list of rows from the federation stream and put them in the
transaction queue ready for sending to the relevant homeservers.
Args:
transaction_queue (FederationSender)
rows (list(synapse.replication.tcp.streams.FederationStreamRow))
"""
# The federation stream contains a bunch of different types of
# rows that need to be handled differently. We parse the rows, put
# them into the appropriate collection and then send them off.
buff = ParsedFederationStreamData(
presence=[],
keyed_edus={},
edus={},
device_destinations=set(),
)
# Parse the rows in the stream and add to the buffer
for row in rows:
if row.type not in TypeToRow:
logger.error("Unrecognized federation row type %r", row.type)
continue
RowType = TypeToRow[row.type]
parsed_row = RowType.from_data(row.data)
parsed_row.add_to_buffer(buff)
if buff.presence:
transaction_queue.send_presence(buff.presence)
for destination, edu_map in iteritems(buff.keyed_edus):
for key, edu in edu_map.items():
transaction_queue.send_edu(edu, key)
for destination, edu_list in iteritems(buff.edus):
for edu in edu_list:
transaction_queue.send_edu(edu, None)
for destination in buff.device_destinations:
transaction_queue.send_device_messages(destination)
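# Illustrative sketch (not from the original module): every row type round-trips
# through (TypeId, to_data()) and back via the TypeToRow registry; the Edu field
# values below are made-up example data.
#
#     edu = Edu(origin="origin.example", destination="remote.example",
#               edu_type="m.typing", content={"room_id": "!abc:origin.example"})
#     row = KeyedEduRow(key=("!abc:origin.example",), edu=edu)
#     type_id, data = row.TypeId, row.to_data()
#     parsed = TypeToRow[type_id].from_data(data)  # an equivalent KeyedEduRow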
|
the-stack_106_20839
|
# Copyright 2014 Hewlett-Packard Development Company, L.P.
# Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib.common.utils import data_utils
from tempest.lib import exceptions as lib_exc
from tempest import test
from neutron.tests.api import base
from neutron.tests.tempest import config
CONF = config.CONF
class FloatingIPNegativeTestJSON(base.BaseNetworkTest):
@classmethod
def resource_setup(cls):
super(FloatingIPNegativeTestJSON, cls).resource_setup()
if not test.is_extension_enabled('router', 'network'):
msg = "router extension not enabled."
raise cls.skipException(msg)
cls.ext_net_id = CONF.network.public_network_id
# Create a network with a subnet connected to a router.
cls.network = cls.create_network()
cls.subnet = cls.create_subnet(cls.network)
cls.router = cls.create_router(data_utils.rand_name('router'))
cls.create_router_interface(cls.router['id'], cls.subnet['id'])
cls.port = cls.create_port(cls.network)
@test.attr(type=['negative', 'smoke'])
@test.idempotent_id('0b5b8797-6de7-4191-905c-a48b888eb429')
def test_associate_floatingip_with_port_with_floatingip(self):
net = self.create_network()
subnet = self.create_subnet(net)
r = self.create_router('test')
self.create_router_interface(r['id'], subnet['id'])
self.client.update_router(
r['id'],
external_gateway_info={
'network_id': self.ext_net_id})
self.addCleanup(self.client.update_router, r['id'],
external_gateway_info={})
port = self.create_port(net)
body1 = self.client.create_floatingip(
floating_network_id=self.ext_net_id)
floating_ip1 = body1['floatingip']
self.addCleanup(self.client.delete_floatingip, floating_ip1['id'])
body2 = self.client.create_floatingip(
floating_network_id=self.ext_net_id)
floating_ip2 = body2['floatingip']
self.addCleanup(self.client.delete_floatingip, floating_ip2['id'])
self.client.update_floatingip(floating_ip1['id'],
port_id=port['id'])
self.assertRaises(lib_exc.Conflict, self.client.update_floatingip,
floating_ip2['id'], port_id=port['id'])
|
the-stack_106_20843
|
"""This module contains the virtual motion glove interface."""
import contextlib
import io
import ipaddress
import struct
import time
from typing import ContextManager, Sequence
import serial
from .data import GloveSample, IMUSample
from .error import GloveConnectionError, GloveError, GlovePacketError, GloveTimeoutError
__all__ = ('Glove',)
class Glove:
"""Virtual motion glove (VMG30) interface."""
def __init__(self, port: str = '/dev/ttyUSB0'):
"""Connect to the glove.
Keyword Arguments:
port {str} -- serial device name (default: {'/dev/ttyUSB0'})
"""
try:
self._conn = serial.Serial(
port, baudrate=230400, timeout=2.0, write_timeout=2.0)
self._buffer = b''
self.stop_sampling()
self._label = self._exec(0x11).decode().split('\0', 1)[0]
self._firmware = '{}.{}.{}'.format(*self._exec(0x13))
info = struct.unpack('>BBHIIIBB', self._exec(0x0C))
self._device_type = info[0]
self._device_id = info[2]
self._address = ipaddress.ip_address(info[3])
self._netmask = ipaddress.ip_address(info[4])
self._gateway = ipaddress.ip_address(info[5])
self._dhcp = info[6]
except serial.SerialException as ex:
raise GloveConnectionError(ex.strerror)
except GloveTimeoutError:
raise GloveConnectionError(
f'The glove on "{port}" is not responding, ensure it is turned on.')
@property
def device_id(self) -> int:
"""Device identifier.
Returns:
int -- id
"""
return self._device_id
@device_id.setter
def device_id(self, device_id: int) -> None:
"""Update device identifier.
Arguments:
device_id {int} -- new id
"""
echo = self._exec(0x0D, struct.pack('>H', device_id))
self._device_id = struct.unpack('>H', echo)[0]
@property
def label(self) -> str:
"""Device string identifier.
Returns:
str -- label
"""
return self._label
@label.setter
def label(self, label: str) -> None:
"""Update label.
Arguments:
label {str} -- new label
"""
echo = self._exec(0x11, struct.pack(
'16s', label.ljust(16, '\0').encode()))
self._label = echo.decode().split('\0', 1)[0]
@property
def firmware(self) -> str:
"""Firmware version.
Returns:
str -- version string x.y.z
"""
return self._firmware
@property
def has_wifi_module(self) -> bool:
"""Device has WIFI module.
Returns:
bool -- True if has module
"""
return self._device_type == 0x02
def calibration(self):
"""Start self calibration of the dataglove orientation module.
Yields:
int -- calibration status (from 0 to 100)
"""
self._send(0x31)
stage = 0
while stage != 100:
stage, = self._recv(0x31)
if stage == 255:
raise GloveError('IMU calibration failed')
yield stage
def start_sampling(self, raw=False) -> None:
"""Start data sampling.
Keyword Arguments:
raw {bool} -- return IMU data instead of quaternions (default: {False})
"""
self._send(0x0A, bytes([0x03 if raw else 0x01]))
def stop_sampling(self) -> None:
"""Stop data sampling."""
self._send(0x0A, bytes([0x00]))
time.sleep(0.1)
self._send(0x0B)
time.sleep(0.1)
def next_sample(self) -> GloveSample:
"""Receive next sample."""
data = io.BytesIO(self._recv(0x0A))
sample_type, device_id, clock = struct.unpack('>BHI', data.read(7))
raw = sample_type == 0x03
if raw:
values = struct.unpack('>' + 'h' * 18, data.read(36))
wrist_imu = IMUSample(
angular_velocity=(*[v / 0x8000 * 10 for v in values[0:3]],),
acceleration=(*[v / 0x8000 * 4 for v in values[3:6]],),
magnetic_field=(*values[6:9],))
hand_imu = IMUSample(
angular_velocity=(*[v / 0x8000 * 10 for v in values[9:12]],),
acceleration=(*[v / 0x8000 * 4 for v in values[12:15]],),
magnetic_field=(*values[15:18],))
else:
values = struct.unpack('>' + 'i' * 8, data.read(32))
wrist_quat = (*[v / 0x10000 for v in values[:4]],)
hand_quat = (*[v / 0x10000 for v in values[4:]],)
values = struct.unpack('>' + 'H' * 24, data.read(48))
return GloveSample(
device_id=device_id,
clock=clock / 1000,
wrist_imu=wrist_imu if raw else None,
hand_imu=hand_imu if raw else None,
wrist_quat=wrist_quat if not raw else None,
hand_quat=hand_quat if not raw else None,
pip_joints=(*[v / 1000 for v in values[1:10:2]],),
dip_joints=(*[v / 1000 for v in values[0:10:2]],),
palm_arch=values[10] / 1000,
thumb_cross_over=values[12] / 1000,
pressures=(*[1.0 - v / 999 for v in values[14:19]],),
abductions=(*[v / 1000 for v in values[19:23]],),
battery_charge=values[23] / 1000)
@contextlib.contextmanager
def sampling(self, raw=False) -> ContextManager:
"""Start data sampling.
Keyword Arguments:
raw {bool} -- return IMU data instead of quaternions (default: {False})
"""
def _sample_iterator():
while True:
yield self.next_sample()
self.start_sampling(raw)
yield _sample_iterator()
self.stop_sampling()
def set_vibro_feedback(self, levels: Sequence[float]) -> None:
"""Set vibrotactile feedback.
Arguments:
levels {Sequence[float]} -- vibro intensity on tips of the fingers [0..1]
"""
values = [int(min(max(i, 0.0), 1.0) * 140 + 110) for i in levels]
self._send(0x60, bytes(values + [0x00]))
def reboot(self) -> None:
"""Reboot the dataglove."""
self._send(0x0E)
def turn_off(self) -> None:
"""Turn off the dataglove."""
self._send(0x40)
self.disconnect()
def disconnect(self) -> None:
"""Close connection."""
self._conn.close()
def __enter__(self):
"""Enter context guard.
Returns:
Glove -- this glove
"""
return self
def __exit__(self, *args, **kwargs):
"""Disconnect the glove at context guard exit."""
self.disconnect()
def __repr__(self):
"""String representation.
Returns:
str -- string representing the glove
"""
return f'Glove(port="{self._conn.name}", id={self.device_id}, label="{self.label}")'
def _read(self, size=1) -> bytes:
if size > len(self._buffer):
self._buffer += self._conn.read(
max(size - len(self._buffer), self._conn.in_waiting))
if size > len(self._buffer):
raise GloveTimeoutError('Read timeout')
data, self._buffer = self._buffer[:size], self._buffer[size:]
return data
def _recv(self, package_type: int) -> bytes:
while True:
package = self._read(1)
if package[0] == 0x24:
package += self._read(2)
package += self._read(package[2] - 2)
crc, end = self._read(2)
if crc != sum(package) % 256 or end != 0x23:
raise GlovePacketError()
if package[1] == package_type:
package_data = package[3:]
return package_data
def _send(self, package_type: int, package_data: bytes = None) -> int:
try:
package = bytes([0x24, package_type])
if package_data is not None:
package += bytes([len(package_data) + 2]) + package_data
else:
package += bytes([2])
crc = sum(package) % 256
return self._conn.write(package + bytes([crc, 0x23]))
except serial.SerialTimeoutException:
raise GloveTimeoutError('Write timeout')
def _exec(self, package_type: int, package_data: bytes = None) -> bytes:
self._send(package_type, package_data)
return self._recv(package_type)
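# Illustrative usage sketch (not part of the original module): the glove is
# intended to be used as a context manager, with sampling() wrapping the
# start/stop calls; the serial port below is a hypothetical example value.
#
#     with Glove(port='/dev/ttyUSB0') as glove:
#         print(glove)                      # Glove(port=..., id=..., label=...)
#         with glove.sampling() as samples:
#             for sample in samples:
#                 print(sample.hand_quat)   # quaternions when raw=False
#                 break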
|
the-stack_106_20844
|
from __future__ import unicode_literals
from functools import total_ordering
from operator import attrgetter
from django import VERSION
from django.conf import settings
from django.contrib.contenttypes.fields import GenericRelation
from django.contrib.contenttypes.models import ContentType
from django.db import connections, models, router
from django.db.models import signals
from django.db.models.fields.related import (
ManyToManyRel,
OneToOneRel,
RelatedField,
lazy_related_operation,
)
from django.db.models.query_utils import PathInfo
from django.utils import six
from django.utils.text import capfirst
from django.utils.translation import ugettext_lazy as _
from taggit.forms import TagField
from taggit.models import CommonGenericTaggedItemBase, TaggedItem
from taggit.utils import require_instance_manager
class ExtraJoinRestriction(object):
"""
An extra restriction used for contenttype restriction in joins.
"""
contains_aggregate = False
def __init__(self, alias, col, content_types):
self.alias = alias
self.col = col
self.content_types = content_types
def as_sql(self, compiler, connection):
qn = compiler.quote_name_unless_alias
if len(self.content_types) == 1:
extra_where = "%s.%s = %%s" % (qn(self.alias), qn(self.col))
else:
extra_where = "%s.%s IN (%s)" % (
qn(self.alias),
qn(self.col),
",".join(["%s"] * len(self.content_types)),
)
return extra_where, self.content_types
def relabel_aliases(self, change_map):
self.alias = change_map.get(self.alias, self.alias)
def clone(self):
return self.__class__(self.alias, self.col, self.content_types[:])
class _TaggableManager(models.Manager):
def __init__(self, through, model, instance, prefetch_cache_name):
super(_TaggableManager, self).__init__()
self.through = through
self.model = model
self.instance = instance
self.prefetch_cache_name = prefetch_cache_name
def is_cached(self, instance):
return self.prefetch_cache_name in instance._prefetched_objects_cache
def get_queryset(self, extra_filters=None):
try:
return self.instance._prefetched_objects_cache[self.prefetch_cache_name]
except (AttributeError, KeyError):
kwargs = extra_filters if extra_filters else {}
return self.through.tags_for(self.model, self.instance, **kwargs)
def get_prefetch_queryset(self, instances, queryset=None):
if queryset is not None:
raise ValueError("Custom queryset can't be used for this lookup.")
instance = instances[0]
db = self._db or router.db_for_read(instance.__class__, instance=instance)
fieldname = (
"object_id"
if issubclass(self.through, CommonGenericTaggedItemBase)
else "content_object"
)
fk = self.through._meta.get_field(fieldname)
query = {
"%s__%s__in"
% (self.through.tag_relname(), fk.name): {
obj._get_pk_val() for obj in instances
}
}
join_table = self.through._meta.db_table
source_col = fk.column
connection = connections[db]
qn = connection.ops.quote_name
qs = (
self.get_queryset(query)
.using(db)
.extra(
select={
"_prefetch_related_val": "%s.%s" % (qn(join_table), qn(source_col))
}
)
)
if VERSION < (2, 0):
return (
qs,
attrgetter("_prefetch_related_val"),
lambda obj: obj._get_pk_val(),
False,
self.prefetch_cache_name,
)
else:
return (
qs,
attrgetter("_prefetch_related_val"),
lambda obj: obj._get_pk_val(),
False,
self.prefetch_cache_name,
False,
)
def _lookup_kwargs(self):
return self.through.lookup_kwargs(self.instance)
@require_instance_manager
def add(self, *tags):
db = router.db_for_write(self.through, instance=self.instance)
tag_objs = self._to_tag_model_instances(tags)
new_ids = {t.pk for t in tag_objs}
# NOTE: can we hardcode 'tag_id' here or should the column name be obtained
# dynamically from somewhere?
vals = (
self.through._default_manager.using(db)
.values_list("tag_id", flat=True)
.filter(**self._lookup_kwargs())
)
new_ids = new_ids - set(vals)
signals.m2m_changed.send(
sender=self.through,
action="pre_add",
instance=self.instance,
reverse=False,
model=self.through.tag_model(),
pk_set=new_ids,
using=db,
)
for tag in tag_objs:
self.through._default_manager.using(db).get_or_create(
tag=tag, **self._lookup_kwargs()
)
signals.m2m_changed.send(
sender=self.through,
action="post_add",
instance=self.instance,
reverse=False,
model=self.through.tag_model(),
pk_set=new_ids,
using=db,
)
def _to_tag_model_instances(self, tags):
"""
Takes an iterable containing either strings, tag objects, or a mixture
of both, and returns a set of tag objects.
"""
db = router.db_for_write(self.through, instance=self.instance)
str_tags = set()
tag_objs = set()
for t in tags:
if isinstance(t, self.through.tag_model()):
tag_objs.add(t)
elif isinstance(t, six.string_types):
str_tags.add(t)
else:
raise ValueError(
"Cannot add {0} ({1}). Expected {2} or str.".format(
t, type(t), type(self.through.tag_model())
)
)
case_insensitive = getattr(settings, "TAGGIT_CASE_INSENSITIVE", False)
manager = self.through.tag_model()._default_manager.using(db)
if case_insensitive:
# Some databases can do case-insensitive comparison with IN, which
# would be faster, but we can't rely on it or easily detect it.
existing = []
tags_to_create = []
for name in str_tags:
try:
tag = manager.get(name__iexact=name)
existing.append(tag)
except self.through.tag_model().DoesNotExist:
tags_to_create.append(name)
else:
# If str_tags has 0 elements Django actually optimizes that to not
# do a query. Malcolm is very smart.
existing = manager.filter(name__in=str_tags)
tags_to_create = str_tags - {t.name for t in existing}
tag_objs.update(existing)
for new_tag in tags_to_create:
if case_insensitive:
tag, created = manager.get_or_create(
name__iexact=new_tag, defaults={"name": new_tag}
)
else:
tag, created = manager.get_or_create(name=new_tag)
tag_objs.add(tag)
return tag_objs
@require_instance_manager
def names(self):
return self.get_queryset().values_list("name", flat=True)
@require_instance_manager
def slugs(self):
return self.get_queryset().values_list("slug", flat=True)
@require_instance_manager
def set(self, *tags, **kwargs):
"""
Set the object's tags to the given n tags. If the clear kwarg is True
then all existing tags are removed (using `.clear()`) and the new tags
added. Otherwise, only those tags that are not present in the args are
removed and any new tags added.
"""
db = router.db_for_write(self.through, instance=self.instance)
clear = kwargs.pop("clear", False)
if clear:
self.clear()
self.add(*tags)
else:
# make sure we're working with a collection of a uniform type
objs = self._to_tag_model_instances(tags)
# get the existing tag strings
old_tag_strs = set(
self.through._default_manager.using(db)
.filter(**self._lookup_kwargs())
.values_list("tag__name", flat=True)
)
new_objs = []
for obj in objs:
if obj.name in old_tag_strs:
old_tag_strs.remove(obj.name)
else:
new_objs.append(obj)
self.remove(*old_tag_strs)
self.add(*new_objs)
@require_instance_manager
def remove(self, *tags):
if not tags:
return
db = router.db_for_write(self.through, instance=self.instance)
qs = (
self.through._default_manager.using(db)
.filter(**self._lookup_kwargs())
.filter(tag__name__in=tags)
)
old_ids = set(qs.values_list("tag_id", flat=True))
signals.m2m_changed.send(
sender=self.through,
action="pre_remove",
instance=self.instance,
reverse=False,
model=self.through.tag_model(),
pk_set=old_ids,
using=db,
)
qs.delete()
signals.m2m_changed.send(
sender=self.through,
action="post_remove",
instance=self.instance,
reverse=False,
model=self.through.tag_model(),
pk_set=old_ids,
using=db,
)
@require_instance_manager
def clear(self):
db = router.db_for_write(self.through, instance=self.instance)
signals.m2m_changed.send(
sender=self.through,
action="pre_clear",
instance=self.instance,
reverse=False,
model=self.through.tag_model(),
pk_set=None,
using=db,
)
self.through._default_manager.using(db).filter(**self._lookup_kwargs()).delete()
signals.m2m_changed.send(
sender=self.through,
action="post_clear",
instance=self.instance,
reverse=False,
model=self.through.tag_model(),
pk_set=None,
using=db,
)
def most_common(self, min_count=None, extra_filters=None):
queryset = (
self.get_queryset(extra_filters)
.annotate(num_times=models.Count(self.through.tag_relname()))
.order_by("-num_times")
)
if min_count:
queryset = queryset.filter(num_times__gte=min_count)
return queryset
@require_instance_manager
def similar_objects(self):
lookup_kwargs = self._lookup_kwargs()
lookup_keys = sorted(lookup_kwargs)
qs = self.through.objects.values(*six.iterkeys(lookup_kwargs))
qs = qs.annotate(n=models.Count("pk"))
qs = qs.exclude(**lookup_kwargs)
qs = qs.filter(tag__in=self.all())
qs = qs.order_by("-n")
# TODO: This all feels like a bit of a hack.
items = {}
if len(lookup_keys) == 1:
# Can we do this without a second query by using a select_related()
# somehow?
f = self.through._meta.get_field(lookup_keys[0])
remote_field = f.remote_field
rel_model = remote_field.model
objs = rel_model._default_manager.filter(
**{
"%s__in"
% remote_field.field_name: [r["content_object"] for r in qs]
}
)
actual_remote_field_name = f.target_field.get_attname()
for obj in objs:
# items[(getattr(obj, remote_field.field_name),)] = obj
items[(getattr(obj, actual_remote_field_name),)] = obj
else:
preload = {}
for result in qs:
preload.setdefault(result["content_type"], set())
preload[result["content_type"]].add(result["object_id"])
for ct, obj_ids in preload.items():
ct = ContentType.objects.get_for_id(ct)
for obj in ct.model_class()._default_manager.filter(pk__in=obj_ids):
items[(ct.pk, obj.pk)] = obj
results = []
for result in qs:
obj = items[tuple(result[k] for k in lookup_keys)]
obj.similar_tags = result["n"]
results.append(obj)
return results
@total_ordering
class TaggableManager(RelatedField):
# Field flags
many_to_many = True
many_to_one = False
one_to_many = False
one_to_one = False
_related_name_counter = 0
def __init__(
self,
verbose_name=_("Tags"),
help_text=_("A comma-separated list of tags."),
through=None,
blank=False,
related_name=None,
to=None,
manager=_TaggableManager,
):
self.through = through or TaggedItem
rel = ManyToManyRel(self, to, related_name=related_name, through=self.through)
super(TaggableManager, self).__init__(
verbose_name=verbose_name,
help_text=help_text,
blank=blank,
null=True,
serialize=False,
rel=rel,
)
self.swappable = False
self.manager = manager
def __get__(self, instance, model):
if instance is not None and instance.pk is None:
raise ValueError(
"%s objects need to have a primary key value "
"before you can access their tags." % model.__name__
)
manager = self.manager(
through=self.through,
model=model,
instance=instance,
prefetch_cache_name=self.name,
)
return manager
def deconstruct(self):
"""
Deconstruct the object, used with migrations.
"""
name, path, args, kwargs = super(TaggableManager, self).deconstruct()
# Remove forced kwargs.
for kwarg in ("serialize", "null"):
del kwargs[kwarg]
# Add arguments related to relations.
# Ref: https://github.com/jazzband/django-taggit/issues/206#issuecomment-37578676
rel = self.remote_field
if isinstance(rel.through, six.string_types):
kwargs["through"] = rel.through
elif not rel.through._meta.auto_created:
kwargs["through"] = "%s.%s" % (
rel.through._meta.app_label,
rel.through._meta.object_name,
)
related_model = rel.model
if isinstance(related_model, six.string_types):
kwargs["to"] = related_model
else:
kwargs["to"] = "%s.%s" % (
related_model._meta.app_label,
related_model._meta.object_name,
)
return name, path, args, kwargs
def contribute_to_class(self, cls, name):
self.set_attributes_from_name(name)
self.model = cls
self.opts = cls._meta
cls._meta.add_field(self)
setattr(cls, name, self)
if not cls._meta.abstract:
if isinstance(self.remote_field.model, six.string_types):
def resolve_related_class(cls, model, field):
field.remote_field.model = model
lazy_related_operation(
resolve_related_class, cls, self.remote_field.model, field=self
)
if isinstance(self.through, six.string_types):
def resolve_related_class(cls, model, field):
self.through = model
self.remote_field.through = model
self.post_through_setup(cls)
lazy_related_operation(
resolve_related_class, cls, self.through, field=self
)
else:
self.post_through_setup(cls)
def get_internal_type(self):
return "ManyToManyField"
def post_through_setup(self, cls):
self.use_gfk = self.through is None or issubclass(
self.through, CommonGenericTaggedItemBase
)
if not self.remote_field.model:
self.remote_field.model = self.through._meta.get_field(
"tag"
).remote_field.model
if self.use_gfk:
tagged_items = GenericRelation(self.through)
tagged_items.contribute_to_class(cls, "tagged_items")
for rel in cls._meta.local_many_to_many:
if rel == self or not isinstance(rel, TaggableManager):
continue
if rel.through == self.through:
raise ValueError(
"You can't have two TaggableManagers with the"
" same through model."
)
def save_form_data(self, instance, value):
getattr(instance, self.name).set(*value)
def formfield(self, form_class=TagField, **kwargs):
defaults = {
"label": capfirst(self.verbose_name),
"help_text": self.help_text,
"required": not self.blank,
}
defaults.update(kwargs)
return form_class(**defaults)
def value_from_object(self, instance):
if instance.pk:
return self.through.objects.filter(**self.through.lookup_kwargs(instance))
return self.through.objects.none()
def related_query_name(self):
return self.model._meta.model_name
def m2m_reverse_name(self):
return self.through._meta.get_field("tag").column
def m2m_reverse_field_name(self):
return self.through._meta.get_field("tag").name
def m2m_target_field_name(self):
return self.model._meta.pk.name
def m2m_reverse_target_field_name(self):
return self.remote_field.model._meta.pk.name
def m2m_column_name(self):
if self.use_gfk:
return self.through._meta.virtual_fields[0].fk_field
return self.through._meta.get_field("content_object").column
def m2m_db_table(self):
return self.through._meta.db_table
def bulk_related_objects(self, new_objs, using):
return []
def _get_mm_case_path_info(self, direct=False, filtered_relation=None):
pathinfos = []
linkfield1 = self.through._meta.get_field("content_object")
linkfield2 = self.through._meta.get_field(self.m2m_reverse_field_name())
if direct:
if VERSION < (2, 0):
join1infos = linkfield1.get_reverse_path_info()
join2infos = linkfield2.get_path_info()
else:
join1infos = linkfield1.get_reverse_path_info(
filtered_relation=filtered_relation
)
join2infos = linkfield2.get_path_info(
filtered_relation=filtered_relation
)
else:
if VERSION < (2, 0):
join1infos = linkfield2.get_reverse_path_info()
join2infos = linkfield1.get_path_info()
else:
join1infos = linkfield2.get_reverse_path_info(
filtered_relation=filtered_relation
)
join2infos = linkfield1.get_path_info(
filtered_relation=filtered_relation
)
pathinfos.extend(join1infos)
pathinfos.extend(join2infos)
return pathinfos
def _get_gfk_case_path_info(self, direct=False, filtered_relation=None):
pathinfos = []
from_field = self.model._meta.pk
opts = self.through._meta
linkfield = self.through._meta.get_field(self.m2m_reverse_field_name())
if direct:
if VERSION < (2, 0):
join1infos = [
PathInfo(
self.model._meta,
opts,
[from_field],
self.remote_field,
True,
False,
)
]
join2infos = linkfield.get_path_info()
else:
join1infos = [
PathInfo(
self.model._meta,
opts,
[from_field],
self.remote_field,
True,
False,
filtered_relation,
)
]
join2infos = linkfield.get_path_info(
filtered_relation=filtered_relation
)
else:
if VERSION < (2, 0):
join1infos = linkfield.get_reverse_path_info()
join2infos = [
PathInfo(opts, self.model._meta, [from_field], self, True, False)
]
else:
join1infos = linkfield.get_reverse_path_info(
filtered_relation=filtered_relation
)
join2infos = [
PathInfo(
opts,
self.model._meta,
[from_field],
self,
True,
False,
filtered_relation,
)
]
pathinfos.extend(join1infos)
pathinfos.extend(join2infos)
return pathinfos
def get_path_info(self, filtered_relation=None):
if self.use_gfk:
return self._get_gfk_case_path_info(
direct=True, filtered_relation=filtered_relation
)
else:
return self._get_mm_case_path_info(
direct=True, filtered_relation=filtered_relation
)
def get_reverse_path_info(self, filtered_relation=None):
if self.use_gfk:
return self._get_gfk_case_path_info(
direct=False, filtered_relation=filtered_relation
)
else:
return self._get_mm_case_path_info(
direct=False, filtered_relation=filtered_relation
)
def get_joining_columns(self, reverse_join=False):
if reverse_join:
return ((self.model._meta.pk.column, "object_id"),)
else:
return (("object_id", self.model._meta.pk.column),)
def get_extra_restriction(self, where_class, alias, related_alias):
extra_col = self.through._meta.get_field("content_type").column
content_type_ids = [
ContentType.objects.get_for_model(subclass).pk
for subclass in _get_subclasses(self.model)
]
return ExtraJoinRestriction(related_alias, extra_col, content_type_ids)
def get_reverse_joining_columns(self):
return self.get_joining_columns(reverse_join=True)
@property
def related_fields(self):
return [(self.through._meta.get_field("object_id"), self.model._meta.pk)]
@property
def foreign_related_fields(self):
return [self.related_fields[0][1]]
def _get_subclasses(model):
subclasses = [model]
for field in model._meta.get_fields():
if isinstance(field, OneToOneRel) and getattr(
field.field.remote_field, "parent_link", None
):
subclasses.extend(_get_subclasses(field.related_model))
return subclasses
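# Illustrative sketch (not from the original module): TaggableManager is declared
# on a model and used through the per-instance manager; "Food" and the tag names
# below are hypothetical example values.
#
#     class Food(models.Model):
#         name = models.CharField(max_length=50)
#         tags = TaggableManager()
#
#     apple = Food.objects.create(name="apple")
#     apple.tags.add("red", "green", "fruit")    # _TaggableManager.add()
#     apple.tags.set("red", "delicious")         # drops "green"/"fruit", adds "delicious"
#     similar = apple.tags.similar_objects()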
|
the-stack_106_20845
|
from bs4 import BeautifulSoup
import requests
import random
HEADERS_LIST = [
'Mozilla/5.0 (Windows; U; Windows NT 6.1; x64; fr; rv:1.9.2.13) Gecko/20101203 Firebird/3.6.13',
'Mozilla/5.0 (compatible, MSIE 11, Windows NT 6.3; Trident/7.0; rv:11.0) like Gecko',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; rv:2.2) Gecko/20110201',
'Opera/9.80 (X11; Linux i686; Ubuntu/14.10) Presto/2.12.388 Version/12.16',
'Mozilla/5.0 (Windows NT 5.2; RW; rv:7.0a1) Gecko/20091211 SeaMonkey/9.23a1pre'
]
HEADER = {'User-Agent': random.choice(HEADERS_LIST)}
def parsegit(url, lineno):
response = requests.get(url, headers=HEADER)
soup = BeautifulSoup(response.text, "lxml")
div = soup.find_all("div", {"itemprop" : "text"})
div[0].find('details').decompose()
if lineno is not None:
trs = div[0].find_all('tr')
# split line by comma separated
# 1,5 - display 1 and 5 lines
# 1,3-5,8 - display 1,3,4,5 and 8 lines
vlines = linestodisplay(lineno, len(trs))
index = 1
for tr in trs:
if index not in vlines:
tr.decompose()
index = index + 1
return str(div[0])
def linestodisplay(linestring, maxlines):
lines = linestring.split(",")
vlines = []
for line in lines:
#find if there is any range
rlines = line.split("-")
if len(rlines) > 1:
vlines.extend(range(int(rlines[0]), int(rlines[1])))
vlines.append(int(rlines[1]))
else:
vlines.append(int(rlines[0]))
vlines.sort()
while vlines[-1] > int(maxlines):
vlines.remove(vlines[-1])
return vlines
def github_contribution(url):
response = requests.get(url, headers=HEADER)
soup = BeautifulSoup(response.text, "lxml")
div = soup.find("div", {"class": "js-yearly-contributions"})
details = soup.find("details")
if details is not None:
details.decompose()
footer = soup.find("div", {"class": "contrib-footer"})
if footer is not None:
tdiv = footer.find_all("div")
if tdiv is not None:
tdiv[0].decompose()
return str(div)
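# Illustrative examples (not part of the original module) of the comma/range
# notation that linestodisplay() expands:
#
#     linestodisplay("1,3-5,8", 10)   # -> [1, 3, 4, 5, 8]
#     linestodisplay("2,9-12", 10)    # -> [2, 9, 10] (lines past maxlines dropped)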
|
the-stack_106_20846
|
import pytest
from commitizen.cz.conventional_commits.conventional_commits import (
ConventionalCommitsCz,
parse_scope,
parse_subject,
)
from commitizen.cz.exceptions import AnswerRequiredError
valid_scopes = ["", "simple", "dash-separated", "camelCase", "UPPERCASE"]
scopes_transformations = [["with spaces", "with-spaces"], [None, ""]]
valid_subjects = ["this is a normal text", "aword"]
subjects_transformations = [["with dot.", "with dot"]]
invalid_subjects = ["", " ", ".", " .", "", None]
def test_parse_scope_valid_values():
for valid_scope in valid_scopes:
assert valid_scope == parse_scope(valid_scope)
def test_scopes_transformations():
for scopes_transformation in scopes_transformations:
invalid_scope, transformed_scope = scopes_transformation
assert transformed_scope == parse_scope(invalid_scope)
def test_parse_subject_valid_values():
for valid_subject in valid_subjects:
assert valid_subject == parse_subject(valid_subject)
def test_parse_subject_invalid_values():
for invalid_subject in invalid_subjects:
with pytest.raises(AnswerRequiredError):
parse_subject(invalid_subject)
def test_subject_transformations():
for subject_transformation in subjects_transformations:
invalid_subject, transformed_subject = subject_transformation
assert transformed_subject == parse_subject(invalid_subject)
def test_questions(config):
conventional_commits = ConventionalCommitsCz(config)
questions = conventional_commits.questions()
assert isinstance(questions, list)
assert isinstance(questions[0], dict)
def test_choices_all_have_keyboard_shortcuts(config):
conventional_commits = ConventionalCommitsCz(config)
questions = conventional_commits.questions()
list_questions = (q for q in questions if q["type"] == "list")
for select in list_questions:
assert all("key" in choice for choice in select["choices"])
def test_small_answer(config):
conventional_commits = ConventionalCommitsCz(config)
answers = {
"prefix": "fix",
"scope": "users",
"subject": "email pattern corrected",
"is_breaking_change": False,
"body": "",
"footer": "",
}
message = conventional_commits.message(answers)
assert message == "fix(users): email pattern corrected"
def test_long_answer(config):
conventional_commits = ConventionalCommitsCz(config)
answers = {
"prefix": "fix",
"scope": "users",
"subject": "email pattern corrected",
"is_breaking_change": False,
"body": "complete content",
"footer": "closes #24",
}
message = conventional_commits.message(answers)
assert (
message
== "fix(users): email pattern corrected\n\ncomplete content\n\ncloses #24" # noqa
)
def test_breaking_change_in_footer(config):
conventional_commits = ConventionalCommitsCz(config)
answers = {
"prefix": "fix",
"scope": "users",
"subject": "email pattern corrected",
"is_breaking_change": True,
"body": "complete content",
"footer": "migrate by renaming user to users",
}
message = conventional_commits.message(answers)
print(message)
assert (
message
== "fix(users): email pattern corrected\n\ncomplete content\n\nBREAKING CHANGE: migrate by renaming user to users" # noqa
)
def test_example(config):
"""just testing a string is returned. not the content"""
conventional_commits = ConventionalCommitsCz(config)
example = conventional_commits.example()
assert isinstance(example, str)
def test_schema(config):
"""just testing a string is returned. not the content"""
conventional_commits = ConventionalCommitsCz(config)
schema = conventional_commits.schema()
assert isinstance(schema, str)
def test_info(config):
"""just testing a string is returned. not the content"""
conventional_commits = ConventionalCommitsCz(config)
info = conventional_commits.info()
assert isinstance(info, str)
@pytest.mark.parametrize(
("commit_message", "expected_message"),
[
(
"test(test_scope): this is test msg",
"this is test msg",
),
(
"test(test_scope)!: this is test msg",
"this is test msg",
),
(
"test!(test_scope): this is test msg",
"",
),
],
)
def test_process_commit(commit_message, expected_message, config):
conventional_commits = ConventionalCommitsCz(config)
message = conventional_commits.process_commit(commit_message)
assert message == expected_message
|
the-stack_106_20847
|
#Import Libraries
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
dataset = pd.read_csv('BankNote_Authentication.csv')
X = dataset.iloc[:, [0,1]].values
y = dataset.iloc[:, 4].values
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0)
from sklearn.ensemble import RandomForestClassifier
rf_c = RandomForestClassifier(n_estimators = 200, random_state = 2)
rf_c.fit(X_train, y_train)
from sklearn.metrics import accuracy_score
y_pred_test = rf_c.predict(X_test)
test_acc = accuracy_score(y_test, y_pred_test)
print(test_acc)
from matplotlib.colors import ListedColormap
import numpy as np
#Define Variables
clf = rf_c
h = 0.01
X_plot, z_plot = X_test, y_test
#Standard Template to draw graph
x_min, x_max = X_plot[:, 0].min() - 1, X_plot[:, 0].max() + 1
y_min, y_max = X_plot[:, 1].min() - 1, X_plot[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh
Z = clf.predict(np.array([xx.ravel(), yy.ravel()]).T)
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z,
alpha = 0.7, cmap = ListedColormap(('red', 'green')))
for i, j in enumerate(np.unique(z_plot)):
plt.scatter(X_plot[z_plot == j, 0], X_plot[z_plot == j, 1],
c = ['red', 'green'][i], cmap = ListedColormap(('red', 'green')), label = j)
#X[:, 0], X[:, 1]
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.title('Random Forest Classification')
plt.xlabel('variance')
plt.ylabel('skewness')
plt.legend()
plt.show()
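# Illustrative follow-up (not part of the original script): the fitted classifier
# can score a new (variance, skewness) pair directly; the values below are
# made-up example inputs.
new_note = np.array([[2.3, 6.9]])
print(rf_c.predict(new_note))          # predicted class label
print(rf_c.predict_proba(new_note))    # class probabilities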
|
the-stack_106_20851
|
"""
Checks the latest release from the "Ace" project
(https://github.com/ajaxorg/ace-builds) which provides builds of the Ace code
editor.
"""
import click
import glob
import json
import logging
import os
import requests
import shutil
import tempfile
import zipfile
# Refers to the root of the project.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# The log file (for all the details).
LOGFILE = os.path.join(BASE_DIR, "get_ace.log")
# Ace directory in source tree.
ACE_DIR = os.path.join(BASE_DIR, "mu", "js", "ace")
# Version tag file (keeps track of tags of latest release).
TAG_FILE = os.path.join(BASE_DIR, "versions.json")
# Setup logging.
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
logfile_handler = logging.FileHandler(LOGFILE)
log_formatter = logging.Formatter("%(levelname)s: %(message)s")
logfile_handler.setFormatter(log_formatter)
logger.addHandler(logfile_handler)
def get_latest_tag():
"""
Find the value of the latest tag for the Ace editor.
:return: The most recent tag value for the project.
"""
url = "https://api.github.com/repos/ajaxorg/ace-builds/tags"
logger.info("Requesting tag information: {}".format(url))
response = requests.get(url)
logger.info("Response url: {}".format(response.url))
tag = response.json()[0]["name"]
logger.info(f"Remote tag: {tag}")
return tag
def download_file(url, tmpdir):
"""
Download a build into the tmpdir.
"""
click.echo(f"Downloading {url}")
logger.info(f"Downloading {url}")
r = requests.get(url, stream=True)
if r.status_code != requests.codes.ok:
logger.warning(f"Unable to connect to {url}")
r.raise_for_status()
total_size = int(r.headers.get("Content-Length", "20000000"))
tmp_file = os.path.join(tmpdir, "ace.zip")
with click.progressbar(
r.iter_content(1024), length=total_size
) as bar, open(tmp_file, "wb") as f:
for chunk in bar:
f.write(chunk)
bar.update(len(chunk))
logger.info(f"Saved to {tmp_file}")
click.secho("OK", fg="green")
def get_zipfile(tag):
"""
Get the URL for the zip file of the referenced tag.
"""
return f"https://github.com/ajaxorg/ace-builds/archive/{tag}.zip"
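# For example (illustrative only, with a hypothetical tag value):
# get_zipfile("v1.4.12") returns
# "https://github.com/ajaxorg/ace-builds/archive/v1.4.12.zip".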
def unzip(path):
"""
Unzips the file into the right place in the repository.
"""
logger.info(f"Unzipping {path}.")
click.echo(f"Unzipping {path}.")
zipdir = os.path.join(os.path.dirname(path), "ace")
with zipfile.ZipFile(path) as zf:
zf.extractall(zipdir)
release_dir = glob.glob(os.path.join(zipdir, "*", ""))[0]
source = os.path.join(zipdir, release_dir, "src-min-noconflict")
target = ACE_DIR
logger.info(f"Copying from {source} to {target}.")
click.echo(f"Copying from {source} to {target}.")
try:
shutil.rmtree(target)
except FileNotFoundError:
pass
shutil.move(source, target)
def run():
logger.info("Checking and updating Ace assets.")
click.echo("Starting...")
# Check current local version with remote version.
local_tag_info = {}
if os.path.exists(TAG_FILE):
with open(TAG_FILE) as tf:
local_tag_info = json.load(tf)
logger.info(local_tag_info)
else:
local_tag_info["ace"] = "0"
remote_tag = get_latest_tag()
# Force a (re)download if the Ace directory does not exist yet.
force_download = not os.path.exists(ACE_DIR)
try:
if force_download:
local_tag_info["ace"] = "0"
if remote_tag > local_tag_info.get("ace", "0"):
logger.info(f"Updating to {remote_tag}.")
click.echo(f"Updating to {remote_tag}.")
to_download = get_zipfile(remote_tag)
with tempfile.TemporaryDirectory() as tmpdir:
# Download the assets:
download_file(to_download, tmpdir)
asset = os.path.join(tmpdir, "ace.zip")
unzip(asset)
local_tag_info["ace"] = remote_tag
with open(TAG_FILE, "w") as tf:
json.dump(local_tag_info, tf, indent=2)
click.secho(
f"Finished. Updated to release {remote_tag}.", fg="green"
)
else:
# Nothing to do.
logger.info("Nothing to do.")
click.secho("Already at the latest version.", fg="green")
except Exception as ex:
logger.exception(ex)
click.secho(
f"Something went wrong: {ex}.\n\nCheck the logs: {LOGFILE}",
fg="red",
)
if __name__ == "__main__":
run()
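# For reference, after a successful update TAG_FILE (versions.json) holds a
# single mapping from "ace" to the downloaded release tag, e.g. (the tag value
# is illustrative only):
#
#   {
#     "ace": "v1.4.12"
#   }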
|
the-stack_106_20852
|
import time
import struct
import pytest
import pysoem
class El1259ConfigFunction:
def __init__(self, device):
self._device = device
def fn(self, slave_pos):
"""
struct format characters
B - uint8
x - pad byte
H - uint16
"""
self._device.sdo_write(0x8001, 2, struct.pack('B', 1))
rx_map_obj = [0x1603, 0x1607, 0x160B, 0x160F, 0x1613, 0x1617, 0x161B, 0x161F,
0x1620, 0x1621, 0x1622, 0x1623, 0x1624, 0x1625, 0x1626, 0x1627]
pack_fmt = 'Bx' + ''.join(['H' for _ in range(len(rx_map_obj))])
rx_map_obj_bytes = struct.pack(pack_fmt, len(rx_map_obj), *rx_map_obj)
self._device.sdo_write(0x1c12, 0, rx_map_obj_bytes, True)
tx_map_obj = [0x1A00, 0x1A01, 0x1A02, 0x1A03, 0x1A04, 0x1A05, 0x1A06, 0x1A07, 0x1A08,
0x1A0C, 0x1A10, 0x1A14, 0x1A18, 0x1A1C, 0x1A20, 0x1A24]
pack_fmt = 'Bx' + ''.join(['H' for _ in range(len(tx_map_obj))])
tx_map_obj_bytes = struct.pack(pack_fmt, len(tx_map_obj), *tx_map_obj)
self._device.sdo_write(0x1c13, 0, tx_map_obj_bytes, True)
self._device.dc_sync(1, 1000000)
@pytest.mark.parametrize('overlapping_enable', [False, True])
def test_io_toggle(pysoem_env, overlapping_enable):
pysoem_env.config_init()
el1259 = pysoem_env.get_el1259()
pysoem_env.el1259_config_func = El1259ConfigFunction(el1259).fn
pysoem_env.config_map(overlapping_enable)
pysoem_env.go_to_op_state()
output_len = len(el1259.output)
tmp = bytearray([0 for _ in range(output_len)])
for i in range(8):
out_offset = 12 * i
in_offset = 4 * i
tmp[out_offset] = 0x02
el1259.output = bytes(tmp)
time.sleep(0.1)
assert el1259.input[in_offset] & 0x04 == 0x04
tmp[out_offset] = 0x00
el1259.output = bytes(tmp)
time.sleep(0.1)
assert el1259.input[in_offset] & 0x04 == 0x00
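# Illustrative sketch (not part of the test): how the SDO payloads above are
# built. 'Bx' packs the number of mapped objects as a uint8 followed by a pad
# byte, then one 'H' (uint16) per PDO index - the "entry count + object list"
# layout written to 0x1C12/0x1C13.
#
#   objs = [0x1603, 0x1607]
#   struct.pack('Bx' + 'H' * len(objs), len(objs), *objs)
#   # -> b'\x02\x00\x03\x16\x07\x16' on a little-endian build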
|
the-stack_106_20853
|
# coding: utf-8
from __future__ import unicode_literals
from debparse import utils
from . import paragraphs, classes
def parse(path=None, data=None):
"""
Main deb_control package api method.
Takes path to debian control file or its contents.
"""
assert path or data, 'path or data should be given'
if path:
data = utils.get_file_contents(path)
raw_paragraphs = paragraphs.get_raw_paragraphs(data)
parsed_paragraphs = map(paragraphs.parse_paragraph, raw_paragraphs)
return classes.ControlData(
_raw=data,
_path=path,
packages=parsed_paragraphs,
)
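# Illustrative usage (the path is a placeholder, and the attribute access
# assumes ControlData exposes its constructor keyword arguments as fields):
#
#   control = parse(path='debian/control')
#   control.packages  # the parsed paragraphs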
|
the-stack_106_20854
|
"""Base class for encoders and generic multi encoders."""
import torch.nn as nn
from onmt.utils.misc import aeq
class EncoderBase(nn.Module):
"""
Base encoder class. Specifies the interface used by different encoder types
and required by :class:`onmt.Models.NMTModel`.
.. mermaid::
graph BT
A[Input]
subgraph RNN
C[Pos 1]
D[Pos 2]
E[Pos N]
end
F[Memory_Bank]
G[Final]
A-->C
A-->D
A-->E
C-->F
D-->F
E-->F
E-->G
"""
@classmethod
def from_opt(cls, opt, embeddings=None):
raise NotImplementedError
def _check_args(self, src, lengths=None, hidden=None):
if isinstance(src, tuple):
src = src[0]
_, n_batch, _ = src.size()
if lengths is not None:
n_batch_, = lengths.size()
aeq(n_batch, n_batch_)
def forward(self, src, lengths=None):
"""
Args:
src (LongTensor):
padded sequences of sparse indices ``(src_len, batch, nfeat)``
lengths (LongTensor): length of each sequence ``(batch,)``
Returns:
(FloatTensor, FloatTensor):
* final encoder state, used to initialize decoder
* memory bank for attention, ``(src_len, batch, hidden)``
"""
raise NotImplementedError
|
the-stack_106_20855
|
# Copyright (c) 2022 F5, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fetch all agents for a given organization and write the results to .csv
Additional resources:
https://apidocs.threatstack.com/v2/rest-api-v2/authentication
https://github.com/threatstack/rest-api-examples
"""
import requests
import argparse
import configparser
import os
import re
import sys
import csv
from mohawk import Sender
from pprint import pprint
from datetime import date
def get_args():
"""
Get arguments from the CLI as well as the configuration file.
Returns:
user_id, api_key, org_id, org_name (str)
debug, quiet (bool)
"""
parser = argparse.ArgumentParser(
description="Fetch all Threat Stack agents for a given organization and write the results to CSV."
)
parser.add_argument(
"--config_file",
dest="config_file",
help="Override the default threatstack.cfg file.",
required=False,
default="threatstack.cfg",
)
parser.add_argument(
"--org",
dest="org_config",
help="Which organization's configuration to load from the config file.",
required=False,
default="DEFAULT",
)
# Mutually exclusive verbosity
verbosity = parser.add_mutually_exclusive_group()
verbosity.add_argument("--quiet", action="store_true", help="Disable CLI logging.")
verbosity.add_argument(
"--debug", action="store_true", help="Enable additional debug CLI logging."
)
cli_args = parser.parse_args()
config_file = cli_args.config_file
org_config = cli_args.org_config
quiet = cli_args.quiet
debug = cli_args.debug
if not os.path.isfile(config_file):
print("Unable to find config file: " + config_file + ", exiting.")
sys.exit(-1)
config = configparser.ConfigParser()
config.read(config_file)
if org_config not in config:
print("Config file does not contain config name: " + org_config + ", exiting.")
sys.exit(-1)
user_opts = config["USER_INFO"]
org_opts = config[org_config]
for config_val in ["TS_USER_ID", "TS_API_KEY"]:
if config_val not in user_opts:
print(
"Config file is missing necessary value: " + config_val + ", exiting."
)
sys.exit(-1)
for config_val in ["TS_ORGANIZATION_ID", "TS_ORGANIZATION_NAME"]:
if config_val not in org_opts:
print(
"Config file is missing necessary value: " + config_val + ", exiting."
)
sys.exit(-1)
user_id = user_opts["TS_USER_ID"]
api_key = user_opts["TS_API_KEY"]
org_id = org_opts["TS_ORGANIZATION_ID"]
# sanitize the provided organization name for use in the CSV filename
tmp_org_name = re.sub(r"[\W_]+", "_", org_opts["TS_ORGANIZATION_NAME"])
org_name = re.sub("[^A-Za-z0-9]+", "", tmp_org_name)
return user_id, api_key, org_id, org_name, debug, quiet
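# Illustrative layout of the config file parsed above (all values are
# placeholders; the section name matches the --org argument, "DEFAULT" by
# default):
#
#   [USER_INFO]
#   TS_USER_ID = <user id>
#   TS_API_KEY = <api key>
#
#   [DEFAULT]
#   TS_ORGANIZATION_ID = <organization id>
#   TS_ORGANIZATION_NAME = Example Org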
def get_agents(
credentials,
BASE_PATH,
org_id,
OUTPUT_FILE,
debug=False,
quiet=False,
token=None,
):
CONTENT_TYPE = "application/json"
METHOD = "GET"
if token is None:
URI_PATH = "agents?status=online"
else:
URI_PATH = "agents?status=online" + "&token=" + token
URL = BASE_PATH + URI_PATH
try:
sender = Sender(credentials, URL, METHOD, always_hash_content=False, ext=org_id)
except:
print("Unexpected error:", sys.exc_info()[0])
sys.exit(-1)
response = requests.get(
URL,
headers={"Authorization": sender.request_header, "Content-Type": CONTENT_TYPE,},
)
if not response.ok:
print("Request returned status: " + str(response.status_code) + ", exiting.")
pprint(response)
sys.exit(-1)
try:
agent_json = response.json()
except:
print("Failed to decode API JSON response, exiting.")
pprint(response)
sys.exit(-1)
if not "agents" in agent_json:
print(
"Malformed JSON object received - expected 'agents' key in response. Exiting."
)
pprint(agent_json)
sys.exit(-1)
agents = agent_json["agents"]
num_agents = len(agents)
if not num_agents >= 1:
print("0 agents found, exiting.")
sys.exit()
if not quiet:
print("Returned", num_agents, "agents.")
AGENT_KEYS = [
"id",
"instanceId",
"status",
"createdAt",
"lastReportedAt",
"version",
"name",
"description",
"hostname",
"tags",
"agentType",
"osVersion",
"kernel",
]
# Write the agents out to the CSV file
with open(OUTPUT_FILE, "a") as f:
w = csv.writer(f)
agents_list = []
for agent in agents:
agent_info = {}
ipAddressList = []
for key, val in agent.items():
if key == "ipAddresses":
for addrType, ipAddresses in agent["ipAddresses"].items():
# Exclude link_local
if addrType == "private" or addrType == "public":
for addr in ipAddresses:
# Exclude localhost
if addr != "127.0.0.1/8" and addr != "::1/128":
ipAddressList.append(addr)
agent_info[key] = ipAddressList
elif key == "agentModuleHealth":
if debug:
print(key, ":", val)
agent_info[key] = key + ":" + str(val)
else:
if val is None:
agent_info[key] = ""
else:
agent_info[key] = val["isHealthy"]
else:
if key in AGENT_KEYS:
if debug:
print(key, ":", val)
agent_info[key] = key + ":" + str(val)
else:
agent_info[key] = val
else:
print("Unexpected key,val pair: ", key, val)
if agent_info:
agents_list.append(agent_info)
w.writerow(agent_info.values())
if agents_list and not quiet:
print(len(agents_list), "agents written to file.")
if "paginationToken" in agent_json:
if agent_json["paginationToken"] != None:
if debug:
print("Found pagination token.")
paginationToken = agent_json["paginationToken"]
return paginationToken
else:
return None
if "token" in agent_json:
if agent_json["token"] != None:
if debug:
print("Found pagination token.")
paginationToken = agent_json["token"]
return paginationToken
else:
return None
return None
def main():
timestamp = date.today().isoformat()
user_id, api_key, org_id, org_name, debug, quiet = get_args()
OUTPUT_FILE = "agents" + "-" + org_name + "-" + timestamp + ".csv"
BASE_PATH = "https://api.threatstack.com/v2/"
credentials = {"id": user_id, "key": api_key, "algorithm": "sha256"}
with open(OUTPUT_FILE, "w") as f:
w = csv.writer(f)
# Write header
HEADER = [
"agentId",
"instanceId",
"status",
"CreatedAt",
"LastReportedAt",
"version",
"name",
"description",
"hostname",
"ipAddresses",
"tags",
"agentType",
"osVersion",
"kernel",
"isHealthy",
]
w.writerow(HEADER)
token = get_agents(
credentials, BASE_PATH, org_id, OUTPUT_FILE, debug, quiet
)
while token is not None:
token = get_agents(
credentials, BASE_PATH, org_id, OUTPUT_FILE, debug, quiet, token
)
if __name__ == "__main__":
main()
|
the-stack_106_20856
|
# Copyright 2019 Arie Bregman
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import crayons
def general_usage():
"""Returns general usage string."""
message = """
Usage Examples:
Analyze the latest build of a job
$ {}
List all jobs
$ {}
List jobs with substring 'neutron'
$ {}
Show job information
$ {}
""".format(crayons.yellow('pykins analyze <job_name>'),
crayons.yellow('pykins job list'),
crayons.yellow('pykins job list neutron'),
crayons.yellow('pykins show my_job'))
return message
|
the-stack_106_20857
|
# Copyright 2020- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import re
import shutil
import string
import sys
import time
from concurrent.futures._base import Future
from datetime import timedelta
from pathlib import Path
from typing import Dict, List, Optional, Set, Union
from assertionengine import AssertionOperator
from overrides import overrides
from robot.libraries.BuiltIn import EXECUTION_CONTEXTS, BuiltIn # type: ignore
from robot.result.model import TestCase as TestCaseResult # type: ignore
from robot.running.model import TestCase as TestCaseRunning # type: ignore
from robot.utils import secs_to_timestr, timestr_to_secs # type: ignore
from robotlibcore import DynamicCore # type: ignore
from .base import ContextCache, LibraryComponent
from .generated.playwright_pb2 import Request
from .keywords import (
Control,
Cookie,
Devices,
Evaluation,
Getters,
Interaction,
Network,
PlaywrightState,
Promises,
RunOnFailureKeywords,
Waiter,
WebAppState,
)
from .playwright import Playwright
from .utils import AutoClosingLevel, is_falsy, is_same_keyword, keyword, logger
# Importing this directly from .utils break the stub type checks
from .utils.data_types import DelayedKeyword, SupportedBrowsers
from .version import __version__ as VERSION
class Browser(DynamicCore):
"""Browser library is a browser automation library for Robot Framework.
This is the keyword documentation for Browser library. For information
about installation, support, and more please visit the
[https://github.com/MarketSquare/robotframework-playwright|project pages].
For more information about Robot Framework itself, see [https://robotframework.org|robotframework.org].
Browser library uses
[https://github.com/microsoft/playwright|Playwright Node module]
to automate [https://www.chromium.org/Home|Chromium],
[https://www.mozilla.org/en-US/firefox/new/|Firefox]
and [https://webkit.org/|WebKit] with a single library.
== Table of contents ==
%TOC%
= Browser, Context and Page =
Browser library works with three different layers that build on each other:
*Browser*, *Context* and *Page*.
== Browsers ==
A *browser* can be started with one of the three
different engines Chromium, Firefox or Webkit.
=== Supported Browsers ===
| Browser | Browser with this engine |
| ``chromium`` | Google Chrome, Microsoft Edge (since 2020), Opera |
| ``firefox`` | Mozilla Firefox |
| ``webkit`` | Apple Safari, Mail, AppStore on MacOS and iOS |
Since [https://github.com/microsoft/playwright|Playwright] comes with a pack of builtin
binaries for all browsers, no additional drivers e.g. geckodriver are needed.
All these browsers, which cover more than 85% of the browsers used worldwide,
can be tested on Windows, Linux and MacOS.
There is no need for dedicated machines anymore.
A browser process is started ``headless`` (without a GUI) by default.
Run `New Browser` with specified arguments if a browser with a GUI is requested
or if a proxy has to be configured.
A browser process can contain several contexts.
== Contexts ==
A *context* corresponds to set of independent incognito pages in a browser
that share cookies, sessions or profile settings. Pages in two separate
contexts do not share cookies, sessions or profile settings.
Compared to Selenium, these do *not* require their own browser process.
To get a clean environment a test can just open a new context.
Due to this new independent browser sessions can be opened with
Robot Framework Browser about 10 times faster than with Selenium by
just opening a `New Context` within the opened browser.
The context layer is useful e.g. for testing different user sessions on the
same webpage without opening a whole new browser context.
Contexts can also have detailed configurations, such as geo-location, language settings,
the viewport size or color scheme.
Contexts do also support http credentials to be set, so that basic authentication
can also be tested. To be able to download files within the test,
the ``acceptDownloads`` argument must be set to ``True`` in `New Context` keyword.
A context can contain different pages.
== Pages ==
A *page* does contain the content of the loaded web site and has a browsing history.
Pages and browser tabs are the same.
Typical usage could be:
| *** Test Cases ***
| Starting a browser with a page
| New Browser chromium headless=false
| New Context viewport={'width': 1920, 'height': 1080}
| New Page https://marketsquare.github.io/robotframework-browser/Browser.html
| Get Title == Browser
The `Open Browser` keyword opens a new browser, a new context and a new page.
This keyword is useful for quick experiments or debugging sessions.
When a `New Page` is called without an open browser, `New Browser`
and `New Context` are executed with default values first.
Each Browser, Context and Page has a unique ID with which they can be addressed.
A full catalog of what is open can be received by `Get Browser Catalog` as dictionary.
= Finding elements =
All keywords in the library that need to interact with an element
on a web page take an argument typically named ``selector`` that specifies
how to find the element.
Selector strategies that are supported by default are listed in the table
below.
| = Strategy = | = Match based on = | = Example = |
| ``css`` | CSS selector. | ``css=.class > #login_btn`` |
| ``xpath`` | XPath expression. | ``xpath=//input[@id="login_btn"]`` |
| ``text`` | Browser text engine. | ``text=Login`` |
| ``id`` | Element ID Attribute. | ``id=login_btn`` |
CSS Selectors can also be recorded with `Record selector` keyword.
== Explicit Selector Strategy ==
The explicit selector strategy is specified with a prefix using syntax
``strategy=value``. Spaces around the separator are ignored, so
``css=foo``, ``css= foo`` and ``css = foo`` are all equivalent.
== Implicit Selector Strategy ==
*The default selector strategy is `css`.*
If the selector does not contain one of the known explicit selector strategies, it is
assumed to contain a css selector.
Selectors that are starting with ``//`` or ``..`` are considered as xpath selectors.
Selectors that are in quotes are considered as text selectors.
Examples:
| # CSS selectors are default.
| `Click` span > button.some_class # This is equivalent
| `Click` css=span > button.some_class # to this.
|
| # // or .. leads to xpath selector strategy
| `Click` //span/button[@class="some_class"]
| `Click` xpath=//span/button[@class="some_class"]
|
| # "text" in quotes leads to exact text selector strategy
| `Click` "Login"
| `Click` text="Login"
== CSS ==
As written before, the default selector strategy is `css`. See
[https://developer.mozilla.org/en-US/docs/Web/CSS/CSS_Selectors | css selector]
for more information.
Any malformed selector not starting with ``//`` or ``..`` nor starting and ending
with a quote is assumed to be a css selector.
Example:
| `Click` span > button.some_class
== XPath ==
XPath engine is equivalent to [https://developer.mozilla.org/en/docs/Web/API/Document/evaluate|Document.evaluate].
Example: ``xpath=//html/body//span[text()="Hello World"]``.
Malformed selector starting with ``//`` or ``..`` is assumed to be an xpath selector.
For example, ``//html/body`` is converted to ``xpath=//html/body``. More
examples are displayed in `Examples`.
Note that xpath does not pierce [https://developer.mozilla.org/en-US/docs/Web/Web_Components/Using_shadow_DOM|shadow_roots].
== Text ==
Text engine finds an element that contains a text node with the passed text.
For example, ``Click text=Login`` clicks on a login button, and
``Wait For Elements State text="lazy loaded text"`` waits for the "lazy loaded text"
to appear in the page.
Text engine finds fields based on their labels in text inserting keywords.
Malformed selector starting and ending with a quote (either ``"`` or ``'``) is assumed
to be a text selector. For example, ``Click "Login"`` is converted to ``Click text="Login"``.
Be aware that these lead to exact matches only!
More examples are displayed in `Examples`.
=== Insensitive match ===
By default, the match is case-insensitive, ignores leading/trailing whitespace and
searches for a substring. This means ``text= Login`` matches
``<button>Button loGIN (click me)</button>``.
=== Exact match ===
Text body can be escaped with single or double quotes for precise matching,
insisting on exact match, including specified whitespace and case.
This means ``text="Login "`` will only match ``<button>Login </button>`` with exactly
one space after "Login". Quoted text follows the usual escaping rules, e.g.
use ``\\"`` to escape double quote in a double-quoted string: ``text="foo\\"bar"``.
=== RegEx ===
Text body can also be a JavaScript-like regex wrapped in / symbols.
This means ``text=/^hello .*!$/i`` or ``text=/^Hello .*!$/`` will match ``<span>Hello Peter Parker!</span>``
with any name after ``Hello``, ending with ``!``.
The first one flagged with ``i`` for case-insensitive.
See [https://regex101.com/|https://regex101.com] for more information about RegEx.
=== Button and Submit Values ===
Input elements of the type button and submit are rendered with their value as text,
and text engine finds them. For example, ``text=Login`` matches
``<input type=button value="Login">``.
== Cascaded selector syntax ==
Browser library supports the same selector strategies as the underlying
Playwright node module: xpath, css, id and text. The strategy can either
be explicitly specified with a prefix or the strategy can be implicit.
A major advantage of Browser is, that multiple selector engines can be used
within one selector. It is possible to mix XPath, CSS and Text selectors while
selecting a single element.
Selectors are strings that consists of one or more clauses separated by
``>>`` token, e.g. ``clause1 >> clause2 >> clause3``. When multiple clauses
are present, next one is queried relative to the previous one's result.
Browser library supports concatenation of different selectors separated by ``>>``.
For example:
| `Highlight Elements` "Hello" >> ../.. >> .select_button
| `Highlight Elements` text=Hello >> xpath=../.. >> css=.select_button
Each clause contains a selector engine name and selector body, e.g.
``engine=body``. Here ``engine`` is one of the supported engines (e.g. css or
a custom one). Selector ``body`` follows the format of the particular engine,
e.g. for css engine it should be a [https://developer.mozilla.org/en-US/docs/Web/CSS/CSS_Selectors | css selector].
Body format is assumed to ignore leading and trailing white spaces,
so that extra whitespace can be added for readability. If selector
engine needs to include ``>>`` in the body, it should be escaped
inside a string to not be confused with clause separator,
e.g. ``text="some >> text"``.
Selector engine name can be prefixed with ``*`` to capture element that
matches the particular clause instead of the last one. For example,
``css=article >> text=Hello`` captures the element with the text ``Hello``,
and ``*css=article >> text=Hello`` (note the *) captures the article element
that contains some element with the text Hello.
For convenience, selectors in the wrong format are heuristically converted
to the right format. See `Implicit Selector Strategy`
== Examples ==
| # queries 'div' css selector
| Get Element css=div
|
| # queries '//html/body/div' xpath selector
| Get Element //html/body/div
|
| # queries '"foo"' text selector
| Get Element text=foo
|
| # queries 'span' css selector inside the result of '//html/body/div' xpath selector
| Get Element xpath=//html/body/div >> css=span
|
| # converted to 'css=div'
| Get Element div
|
| # converted to 'xpath=//html/body/div'
| Get Element //html/body/div
|
| # converted to 'text="foo"'
| Get Element "foo"
|
| # queries the div element of every 2nd span element inside an element with the id foo
| Get Element \\#foo >> css=span:nth-child(2n+1) >> div
| Get Element id=foo >> css=span:nth-child(2n+1) >> div
Be aware that using ``#`` as a starting character in Robot Framework would be interpreted as comment.
Due to that fact a ``#id`` must be escaped as ``\\#id``.
== Frames ==
By default, selector chains do not cross frame boundaries. It means that a
simple CSS selector is not able to select an element located inside an iframe
or a frameset. For this use case, there is a special selector ``>>>`` which can
be used to combine a selector for the frame and a selector for an element
inside a frame.
Given this simple pseudo html snippet:
| <iframe id="iframe" src="src.html">
| #document
| <!DOCTYPE html>
| <html>
| <head></head>
| <body>
| <button id="btn">Click Me</button>
| </body>
| </html>
| </iframe>
Here's a keyword call that clicks the button inside the frame.
| Click id=iframe >>> id=btn
The selectors on the left and right side of ``>>>`` can be any valid selectors.
The selector clause directly before the frame opener ``>>>`` must select the frame element.
== WebComponents and Shadow DOM ==
Playwright and so also Browser are able to do automatic piercing of Shadow DOMs
and therefore are the best automation technology when working with WebComponents.
Also other technologies claim that they can handle
[https://developer.mozilla.org/en-US/docs/Web/Web_Components/Using_shadow_DOM|Shadow DOM and Web Components].
However, none of them pierces shadow roots automatically,
which may be inconvenient when working with Shadow DOM and Web Components.
For that reason, css engine pierces shadow roots. More specifically, every
[https://developer.mozilla.org/en-US/docs/Web/CSS/Descendant_combinator|Descendant combinator]
pierces an arbitrary number of open shadow roots, including the implicit descendant combinator
at the start of the selector.
That means it is not necessary to select each shadow host, open its shadow root and
select the next shadow host until you reach the element that should be controlled.
=== CSS:light ===
``css:light`` engine is equivalent to [https://developer.mozilla.org/en/docs/Web/API/Document/querySelector | Document.querySelector]
and behaves according to the CSS spec.
However, it does not pierce shadow roots.
``css`` engine first searches for elements in the light dom in the iteration order,
and then recursively inside open shadow roots in the iteration order. It does not
search inside closed shadow roots or iframes.
Examples:
| <article>
| <div>In the light dom</div>
| <div slot='myslot'>In the light dom, but goes into the shadow slot</div>
| <open mode shadow root>
| <div class='in-the-shadow'>
| <span class='content'>
| In the shadow dom
| <open mode shadow root>
| <li id='target'>Deep in the shadow</li>
| </open mode shadow root>
| </span>
| </div>
| <slot name='myslot'></slot>
| </open mode shadow root>
| </article>
Note that ``<open mode shadow root>`` is not an html element, but rather a shadow root
created with ``element.attachShadow({mode: 'open'})``.
- Both ``"css=article div"`` and ``"css:light=article div"`` match the first ``<div>In the light dom</div>``.
- Both ``"css=article > div"`` and ``"css:light=article > div"`` match two ``div`` elements that are direct children of the ``article``.
- ``"css=article .in-the-shadow"`` matches the ``<div class='in-the-shadow'>``, piercing the shadow root, while ``"css:light=article .in-the-shadow"`` does not match anything.
- ``"css:light=article div > span"`` does not match anything, because both light-dom ``div`` elements do not contain a ``span``.
- ``"css=article div > span"`` matches the ``<span class='content'>``, piercing the shadow root.
- ``"css=article > .in-the-shadow"`` does not match anything, because ``<div class='in-the-shadow'>`` is not a direct child of ``article``
- ``"css:light=article > .in-the-shadow"`` does not match anything.
- ``"css=article li#target"`` matches the ``<li id='target'>Deep in the shadow</li>``, piercing two shadow roots.
=== text:light ===
``text`` engine pierces open shadow roots similarly to ``css``, while ``text:light`` does not.
Text engine first searches for elements in the light dom in the iteration order, and then
recursively inside open shadow roots in the iteration order. It does not search inside
closed shadow roots or iframes.
=== id, data-testid, data-test-id, data-test and their :light counterparts ===
Attribute engines are selecting based on the corresponding attribute value.
For example: ``data-test-id=foo`` is equivalent to ``css=[data-test-id="foo"]``,
and ``id:light=foo`` is equivalent to ``css:light=[id="foo"]``.
== Element reference syntax ==
It is possible to get a reference to an element by using `Get Element` keyword. This
reference can be used as a *first* part of a selector by using a special selector
syntax `element=` like this:
| ${ref}= Get Element .some_class
| Click element=${ref} >> .some_child
The `.some_child` selector in the example is relative to the element referenced by ${ref}.
= Assertions =
Keywords that accept arguments ``assertion_operator`` <`AssertionOperator`> and ``assertion_expected``
can optionally assert.
%ASSERTION_TABLE%
By default the keywords will provide an error message if the assertion fails,
but the default error message can be overwritten with a ``message`` argument. The
``message`` argument accepts `{value}`, `{value_type}`, `{expected}` and
`{expected_type}` [https://docs.python.org/3/library/stdtypes.html#str.format|format]
options. The `{value}` is the value returned by the keyword and the `{expected}`
is the expected value defined by the user, usually value in the
``assertion_expected`` argument. The `{value_type}` and
`{expected_type}` are the type definitions from `{value}` and `{expected}`
arguments. In similar fashion as Python
[https://docs.python.org/3/library/functions.html#type|type] returns type definition.
Assertions will retry until ``timeout`` has expired if they do not pass.
The assertion ``assertion_expected`` value is not converted by the library and
is used as is. Therefore when assertion is made, the ``assertion_expected``
argument value and value returned the keyword must have same type. If types
are not the same, the assertion will fail. For example, `Get Text` always returns a string
and has to be compared with a string, even if the returned value might look like
a number.
Other Keywords have other specific types they return.
`Get Element Count` always returns an integer.
`Get Bounding Box` and `Get Viewport Size` can be filtered.
They return a dictionary without filter and a number when filtered.
These Keywords do autoconvert the expected value if a number is returned.
*< less or greater > With Strings*
Comparisons of strings with ``greater than`` or ``less than`` compare each character,
starting from 0, regarding where it stands in the code page.
Example: ``A < Z``, ``Z < a``, ``ac < dc``
It does never compare the length of elements. Neither lists nor strings.
The comparison stops at the first character that is different.
Examples: ``'abcde' < 'abd'``, ``'100.000' < '2'``
In Python 3 and therefore also in Browser it is not possible to compare numbers
with strings with a greater or less operator.
On keywords that return numbers, the given expected value is automatically
converted to a number before comparison.
The getters `Get Page State` and `Get Browser Catalog` return a dictionary. Values of the dictionary can be directly asserted.
Pay attention to possible types because they are evaluated in Python. For example:
| Get Page State validate 2020 >= value['year'] # Comparison of numbers
| Get Page State validate "IMPORTANT MESSAGE!" == value['message'] # Comparison of strings
== The 'then' or 'evaluate' closure ==
Keywords that accept arguments ``assertion_operator`` and ``assertion_expected``
can optionally also use ``then`` or ``evaluate`` closure to modify the returned value with
BuiltIn Evaluate. Actual value can be accessed with ``value``.
For example ``Get Title then 'TITLE: '+value``.
See
[https://robotframework.org/robotframework/latest/libraries/BuiltIn.html#Evaluating%20expressions|
Builtin Evaluating expressions]
for more info on the syntax.
== Examples ==
| # *Keyword* *Selector* *Key* *Assertion Operator* *Assertion Expected*
| Get Title equal Page Title
| Get Title ^= Page
| Get Style //*[@id="div-element"] width > 100
| Get Title matches \\\\w+\\\\s\\\\w+
| Get Title validate value == "Login Page"
| Get Title evaluate value if value == "some value" else "something else"
= Automatic page and context closing =
%AUTO_CLOSING_LEVEL%
= Experimental: Re-using same node process =
Browser library integrated nodejs and python. NodeJS side can be also executed as a standalone process.
Browser libraries running on the same machine can talk to that instead of starting new node processes.
This can speed execution when running tests parallel.
To start node side run on the directory when Browser package is
``PLAYWRIGHT_BROWSERS_PATH=0 node Browser/wrapper/index.js PORT``.
``PORT`` is port you want to use for the node process.
To execute tests then with pabot for example do ``ROBOT_FRAMEWORK_BROWSER_NODE_PORT=PORT pabot ..``.
= Extending Browser library with a JavaScript module =
Browser library can be extended with JavaScript. Module must be in CommonJS format that Node.js uses.
You can translate your ES6 module to Node.js CommonJS style with Babel. Many other languages
can be also translated to modules that can be used from Node.js. For example TypeScript, PureScript and
ClojureScript just to mention few.
| async function myGoToKeyword(page, args, logger, playwright) {
| logger(args.toString())
| playwright.coolNewFeature()
| return await page.goto(args[0]);
| }
``page``: [https://playwright.dev/docs/api/class-page|the playwright Page object].
``args``: list of strings from Robot Framework keyword call.
!! A BIT UNSTABLE AND SUBJECT TO API CHANGES !!
``logger``: callback function that takes strings as arguments and writes them to robot log. Can be called multiple times.
``playwright``: playwright module (* from 'playwright'). Useful for integrating with Playwright features that Browser library doesn't support with it's own keywords. [https://playwright.dev/docs/api/class-playwright| API docs]
== Example module.js ==
| async function myGoToKeyword(page, args) {
| await page.goto(args[0]);
| return await page.title();
| }
| exports.__esModule = true;
| exports.myGoToKeyword = myGoToKeyword;
== Example Robot Framework side ==
| *** Settings ***
| Library Browser jsextension=${CURDIR}/module.js
|
| *** Test Cases ***
| Hello
| New Page
| ${title}= myGoToKeyword https://playwright.dev
| Should be equal ${title} Playwright
Also the selector syntax can be extended with a custom selector using a js module.
== Example module keyword for custom selector registering ==
| async function registerMySelector(page, args, log, playwright) {
| playwright.selectors.register("myselector", () => ({
| // Returns the first element matching given selector in the root's subtree.
| query(root, selector) {
| return root.querySelector(`a[data-title="${selector}"]`);
| },
|
| // Returns all elements matching given selector in the root's subtree.
| queryAll(root, selector) {
| return Array.from(root.querySelectorAll(`a[data-title="${selector}"]`));
| }
| }));
| return 1;
| }
| exports.__esModule = true;
| exports.registerMySelector = registerMySelector;
"""
ROBOT_LIBRARY_VERSION = VERSION
ROBOT_LISTENER_API_VERSION = 3
ROBOT_LIBRARY_LISTENER: "Browser"
ROBOT_LIBRARY_SCOPE = "GLOBAL"
_context_cache = ContextCache()
_suite_cleanup_done = False
run_on_failure_keyword: Optional[DelayedKeyword] = None
def __init__(
self,
timeout: timedelta = timedelta(seconds=10),
enable_playwright_debug: bool = False,
auto_closing_level: AutoClosingLevel = AutoClosingLevel.TEST,
retry_assertions_for: timedelta = timedelta(seconds=1),
run_on_failure: str = "Take Screenshot",
external_browser_executable: Optional[Dict[SupportedBrowsers, str]] = None,
jsextension: Optional[str] = None,
enable_presenter_mode: bool = False,
):
"""Browser library can be taken into use with optional arguments:
- ``timeout`` <str>
Timeout for keywords that operate on elements. The keywords will wait
for this time for the element to appear into the page. Defaults to "10s" => 10 seconds.
- ``enable_playwright_debug`` <bool>
Enable low level debug information from the playwright tool. Mainly
Useful for the library developers and for debugging purposes.
- ``auto_closing_level`` < ``TEST`` | ``SUITE`` | ``MANUAL`` >
Configure context and page automatic closing. Default is ``TEST``,
for more details, see `AutoClosingLevel`
- ``retry_assertions_for`` <str>
Timeout for retrying assertions on keywords before failing the keywords.
This timeout starts counting from the first failure.
Global ``timeout`` will still be in effect.
This allows stopping execution faster to assertion failure when element is found fast.
- ``run_on_failure`` <str>
Sets the keyword to execute in case of a failing Browser keyword.
It can be the name of any keyword that does not have any mandatory argument.
If no extra action should be done after a failure, set it to ``None`` or any other robot falsy value.
- ``external_browser_executable`` <Dict <SupportedBrowsers, Path>>
Dict mapping name of browser to path of executable of a browser.
Will make opening new browsers of the given type use the set executablePath.
Currently only configuring of `chromium` to a separate executable (chrome,
chromium and Edge executables all work with recent versions) works.
- ``jsextension`` <str>
Path to Javascript module exposed as extra keywords. Module must be in CommonJS.
- ``enable_presenter_mode`` <bool>
Automatic highlights to interacted components, slowMo and a small pause at the end.
"""
self.timeout = self.convert_timeout(timeout)
self.retry_assertions_for = self.convert_timeout(retry_assertions_for)
self.ROBOT_LIBRARY_LISTENER = self
self._execution_stack: List[dict] = []
self._running_on_failure_keyword = False
self._pause_on_failure: Set["Browser"] = set()
self.run_on_failure_keyword = (
None if is_falsy(run_on_failure) else {"name": run_on_failure, "args": ()}
)
self.external_browser_executable: Dict[SupportedBrowsers, str] = (
external_browser_executable or {}
)
self._unresolved_promises: Set[Future] = set()
self._playwright_state = PlaywrightState(self)
libraries = [
self._playwright_state,
Control(self),
Cookie(self),
Devices(self),
Evaluation(self),
Interaction(self),
Getters(self),
Network(self),
RunOnFailureKeywords(self),
Promises(self),
Waiter(self),
WebAppState(self),
]
self.playwright = Playwright(self, enable_playwright_debug)
self._auto_closing_level = auto_closing_level
self.current_arguments = ()
if jsextension is not None:
libraries.append(self._initialize_jsextension(jsextension))
self.presenter_mode = enable_presenter_mode
DynamicCore.__init__(self, libraries)
def _initialize_jsextension(self, jsextension: str) -> LibraryComponent:
component = LibraryComponent(self)
with self.playwright.grpc_channel() as stub:
response = stub.InitializeExtension(
Request().FilePath(path=os.path.abspath(jsextension))
)
for name in response.keywords:
setattr(component, name, self._jskeyword_call(name))
return component
def _jskeyword_call(self, name: str):
@keyword
def func(*args):
with self.playwright.grpc_channel() as stub:
responses = stub.CallExtensionKeyword(
Request().KeywordCall(name=name, arguments=args)
)
for response in responses:
logger.info(response.log)
if response.json == "":
return
return json.loads(response.json)
return func
@property
def outputdir(self) -> str:
if EXECUTION_CONTEXTS.current:
return BuiltIn().get_variable_value("${OUTPUTDIR}")
else:
return "."
@property
def browser_output(self) -> Path:
return Path(self.outputdir, "browser")
def _start_suite(self, suite, result):
if not self._suite_cleanup_done and self.browser_output.is_dir():
self._suite_cleanup_done = True
logger.debug(f"Removing: {self.browser_output}")
shutil.rmtree(str(self.browser_output), ignore_errors=True)
if self._auto_closing_level != AutoClosingLevel.MANUAL:
try:
self._execution_stack.append(self.get_browser_catalog())
except ConnectionError as e:
logger.debug(f"Browser._start_suite connection problem: {e}")
def _start_test(self, test, result):
if self._auto_closing_level == AutoClosingLevel.TEST:
try:
self._execution_stack.append(self.get_browser_catalog())
except ConnectionError as e:
logger.debug(f"Browser._start_test connection problem: {e}")
def _end_test(self, test: TestCaseRunning, result: TestCaseResult):
if len(self._unresolved_promises) > 0:
logger.warn(f"Waiting unresolved promises at the end of test '{test.name}'")
self.wait_for_all_promises()
if self._auto_closing_level == AutoClosingLevel.TEST:
if self.presenter_mode:
logger.debug("Presenter mode: Wait for 5 seconds before pruning pages")
time.sleep(5.0)
if len(self._execution_stack) == 0:
logger.debug("Browser._end_test empty execution stack")
return
try:
catalog_before_test = self._execution_stack.pop()
self._prune_execution_stack(catalog_before_test)
except AssertionError as e:
logger.debug(f"Test Case: {test.name}, End Test: {e}")
except ConnectionError as e:
logger.debug(f"Browser._end_test connection problem: {e}")
def _end_suite(self, suite, result):
if self._auto_closing_level != AutoClosingLevel.MANUAL:
if len(self._execution_stack) == 0:
logger.debug("Browser._end_suite empty execution stack")
return
try:
catalog_before_suite = self._execution_stack.pop()
self._prune_execution_stack(catalog_before_suite)
except AssertionError as e:
logger.debug(f"Test Suite: {suite.name}, End Suite: {e}")
except ConnectionError as e:
logger.debug(f"Browser._end_suite connection problem: {e}")
def _prune_execution_stack(self, catalog_before: dict) -> None:
catalog_after = self.get_browser_catalog()
ctx_before_ids = [c["id"] for b in catalog_before for c in b["contexts"]]
ctx_after_ids = [c["id"] for b in catalog_after for c in b["contexts"]]
new_ctx_ids = [c for c in ctx_after_ids if c not in ctx_before_ids]
for ctx_id in new_ctx_ids:
self._playwright_state.switch_context(ctx_id)
self._playwright_state.close_context()
pages_before = [
(p["id"], c["id"])
for b in catalog_before
for c in b["contexts"]
for p in c["pages"]
]
pages_after = [
(p["id"], c["id"])
for b in catalog_after
for c in b["contexts"]
for p in c["pages"]
if c["id"] not in new_ctx_ids
]
new_page_ids = [p for p in pages_after if p not in pages_before]
for page_id, ctx_id in new_page_ids:
self._playwright_state.close_page(page_id, ctx_id)
def run_keyword(self, name, args, kwargs=None):
try:
return DynamicCore.run_keyword(self, name, args, kwargs)
except AssertionError as e:
self.keyword_error()
if self._pause_on_failure:
sys.__stdout__.write(f"\n[ FAIL ] {e}")
sys.__stdout__.write(
"\n[Paused on failure] Press Enter to continue..\n"
)
sys.__stdout__.flush()
input()
raise e
def start_keyword(self, name, attrs):
"""Take screenshot of tests that have failed due to timeout.
This method is part of the Listener API implemented by the library.
This can be done with BuiltIn keyword `Run Keyword If Timeout
Occurred`, but the problem there is that you have to remember to
put it into your Suite/Test Teardown. Since taking screenshot is
the most obvious thing to do on failure, let's do it automatically.
This cannot be implemented as a `end_test` listener method, since at
that time, the teardown has already been executed and browser may have
been closed already. This implementation will take the screenshot
before the teardown begins to execute.
"""
self.current_arguments = tuple(attrs["args"])
if attrs["type"] == "Teardown":
timeout_pattern = "Test timeout .* exceeded."
test = EXECUTION_CONTEXTS.current.test
if (
test is not None
and test.status == "FAIL"
and re.match(timeout_pattern, test.message)
):
self.screenshot_on_failure(test.name)
def keyword_error(self):
"""Sends screenshot command to Playwright.
Only works during testing since this uses robot's outputdir for output.
"""
if self._running_on_failure_keyword or not self.run_on_failure_keyword:
return
try:
self._running_on_failure_keyword = True
if is_same_keyword(self.run_on_failure_keyword["name"], "Take Screenshot"):
args = self.run_on_failure_keyword["args"]
path = args[0] if args else self._failure_screenshot_path()
self.take_screenshot(path)
else:
BuiltIn().run_keyword(
self.run_on_failure_keyword["name"],
*self.run_on_failure_keyword["args"],
)
except Exception as err:
logger.warn(
f"Keyword '{self.run_on_failure_keyword['name']}' could not be run on failure:\n{err}"
)
finally:
self._running_on_failure_keyword = False
def _failure_screenshot_path(self):
valid_chars = "-_.() %s%s" % (string.ascii_letters, string.digits)
test_name = BuiltIn().get_variable_value("${TEST NAME}", "GENERIC")
return os.path.join(
self.outputdir,
"".join(c for c in test_name if c in valid_chars).replace(" ", "_")
+ "_FAILURE_SCREENSHOT_{index}",
)
def get_timeout(self, timeout: Union[timedelta, None]) -> float:
if timeout is None:
return self.timeout
return self.convert_timeout(timeout)
def convert_timeout(
self, timeout: Union[timedelta, float], to_ms: bool = True
) -> float:
convert = 1000 if to_ms else 1
if isinstance(timeout, timedelta):
return timeout.total_seconds() * convert
return timestr_to_secs(timeout) * convert
def millisecs_to_timestr(self, timeout: float) -> str:
return secs_to_timestr(timeout / 1000)
@overrides
def get_keyword_documentation(self, name):
doc = DynamicCore.get_keyword_documentation(self, name)
if name == "__intro__":
doc = doc.replace("%ASSERTION_TABLE%", AssertionOperator.__doc__)
doc = doc.replace("%AUTO_CLOSING_LEVEL%", AutoClosingLevel.__doc__)
return doc
|
the-stack_106_20858
|
import boto3
import json
import os,sys,time
from opereto.helpers.services import ServiceTemplate
from opereto.utils.validations import JsonSchemeValidator, validate_dict
from opereto.exceptions import *
class ServiceRunner(ServiceTemplate):
def __init__(self, **kwargs):
ServiceTemplate.__init__(self, **kwargs)
def setup(self):
raise_if_not_ubuntu()
def validate_input(self):
input_scheme = {
"type": "object",
"properties" : {
"bucket_name": {
"type" : "string",
"minLength": 1
},
"source_path": {
"type" : "string",
"minLength": 1
},
"target_path": {
"type" : ["string", "null"]
},
"presigned_url_expiry": {
"type": "integer"
},
"create_bucket": {
"type" : "boolean"
},
"make_public": {
"type" : "boolean"
},
"content_type": {
"type": "string"
},
"aws_access_key": {
"type" : "string",
"minLength": 1
},
"aws_secret_key": {
"type" : "string",
"minLength": 1
},
},
"required": ['bucket_name', 'source_path', 'target_path', 'aws_access_key', 'aws_secret_key', 'content_type'],
"additionalProperties": True
}
validator = JsonSchemeValidator(self.input, input_scheme)
validator.validate()
self.source_path = self.input['source_path']
self.target_path = self.input['target_path'].lstrip('/')
self.session = boto3.session.Session(
aws_access_key_id=self.input['aws_access_key'],
aws_secret_access_key=self.input['aws_secret_key']
)
self.s3_client = self.session.client('s3')
self.s3_res = self.session.resource('s3')
def process(self):
self.acl = 'private'
if self.input['make_public']:
self.acl = 'public-read'
if self.input['create_bucket']:
self._print_step_title('Creating bucket {} if it does not exist..'.format(self.input['bucket_name']))
response = self.s3_client.create_bucket(
ACL=self.acl,
Bucket=self.input['bucket_name']
)
if validate_dict(response):
print(json.dumps(response, indent=4))
time.sleep(1) ## for logs to appear in right order
self._print_step_title('Copying local data to s3..')
if not os.path.exists(self.input['source_path']):
raise OperetoRuntimeError('Source path does not exist')
if os.path.isdir(self.input['source_path']):
print('Saving the content of directory {} to {} in bucket {}..'.format(self.source_path, self.target_path, self.input['bucket_name']))
main_root_dir=os.path.basename(os.path.normpath(self.source_path))
for root, dirs, files in os.walk(self.source_path):
for name in files:
path = root[root.find(main_root_dir):].split(os.path.sep)
path.append(name)
target_id = '/'.join(path)
if self.target_path:
target_id = '{}/{}'.format(self.target_path, target_id)
self.s3_res.meta.client.upload_file(os.path.join(root, name), self.input['bucket_name'], target_id, ExtraArgs={'ACL':self.acl, 'ContentType': self.input['content_type']})
else:
print('Saving local file {} to {} in bucket {}..'.format(self.source_path, self.target_path, self.input['bucket_name']))
self.s3_res.meta.client.upload_file(self.source_path, self.input['bucket_name'], self.target_path, ExtraArgs={'ACL':self.acl, 'ContentType': self.input['content_type']})
print('Operation completed successfully.')
if self.input['presigned_url_expiry']:
print('Generating pre-signed url expired in {} seconds..'.format(self.input['presigned_url_expiry']))
presigned_url = self.s3_client.generate_presigned_url('get_object', Params={'Bucket': self.input['bucket_name'], 'Key': self.target_path}, ExpiresIn=self.input['presigned_url_expiry'])
print('[OPERETO_HTML]<a target="_blank" href="' + presigned_url + '">'+ presigned_url +'</a>')
print('\n\n')
self.client.modify_process_property('storage_url', presigned_url)
return self.client.SUCCESS
def teardown(self):
pass
if __name__ == "__main__":
exit(ServiceRunner().run())
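# Illustrative input properties validated by validate_input() above (all values
# are placeholders):
#
#   {
#     "bucket_name": "my-bucket",
#     "source_path": "/tmp/artifacts",
#     "target_path": "builds/123",
#     "presigned_url_expiry": 3600,
#     "create_bucket": True,
#     "make_public": False,
#     "content_type": "application/octet-stream",
#     "aws_access_key": "<access key>",
#     "aws_secret_key": "<secret key>"
#   }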
|
the-stack_106_20860
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
from neutron.agent.common import config
cisco_plugins_opts = [
cfg.StrOpt('vswitch_plugin',
default='neutron.plugins.openvswitch.ovs_neutron_plugin.'
'OVSNeutronPluginV2',
help=_("Virtual Switch to use")),
cfg.StrOpt('nexus_plugin',
default='neutron.plugins.cisco.nexus.cisco_nexus_plugin_v2.'
'NexusPlugin',
help=_("Nexus Switch to use")),
]
cisco_opts = [
cfg.StrOpt('vlan_name_prefix', default='q-',
help=_("VLAN Name prefix")),
cfg.StrOpt('provider_vlan_name_prefix', default='p-',
help=_("VLAN Name prefix for provider vlans")),
cfg.BoolOpt('provider_vlan_auto_create', default=True,
help=_('Provider VLANs are automatically created as needed '
'on the Nexus switch')),
cfg.BoolOpt('provider_vlan_auto_trunk', default=True,
help=_('Provider VLANs are automatically trunked as needed '
'on the ports of the Nexus switch')),
cfg.BoolOpt('svi_round_robin', default=False,
help=_("Distribute SVI interfaces over all switches")),
cfg.StrOpt('model_class',
default='neutron.plugins.cisco.models.virt_phy_sw_v2.'
'VirtualPhysicalSwitchModelV2',
help=_("Model Class")),
cfg.StrOpt('nexus_driver',
default='neutron.plugins.cisco.test.nexus.'
'fake_nexus_driver.CiscoNEXUSFakeDriver',
help=_("Nexus Driver Name")),
]
cisco_n1k_opts = [
cfg.StrOpt('integration_bridge', default='br-int',
help=_("N1K Integration Bridge")),
cfg.BoolOpt('enable_tunneling', default=True,
help=_("N1K Enable Tunneling")),
cfg.StrOpt('tunnel_bridge', default='br-tun',
help=_("N1K Tunnel Bridge")),
cfg.StrOpt('local_ip', default='10.0.0.3',
help=_("N1K Local IP")),
cfg.StrOpt('tenant_network_type', default='local',
help=_("N1K Tenant Network Type")),
cfg.StrOpt('bridge_mappings', default='',
help=_("N1K Bridge Mappings")),
cfg.StrOpt('vxlan_id_ranges', default='5000:10000',
help=_("N1K VXLAN ID Ranges")),
cfg.StrOpt('network_vlan_ranges', default='vlan:1:4095',
help=_("N1K Network VLAN Ranges")),
cfg.StrOpt('default_network_profile', default='default_network_profile',
help=_("N1K default network profile")),
cfg.StrOpt('default_policy_profile', default='service_profile',
help=_("N1K default policy profile")),
cfg.StrOpt('network_node_policy_profile', default='dhcp_pp',
help=_("N1K policy profile for network node")),
cfg.StrOpt('poll_duration', default='10',
help=_("N1K Policy profile polling duration in seconds")),
]
cfg.CONF.register_opts(cisco_opts, "CISCO")
cfg.CONF.register_opts(cisco_n1k_opts, "CISCO_N1K")
cfg.CONF.register_opts(cisco_plugins_opts, "CISCO_PLUGINS")
config.register_root_helper(cfg.CONF)
# shortcuts
CONF = cfg.CONF
CISCO = cfg.CONF.CISCO
CISCO_N1K = cfg.CONF.CISCO_N1K
CISCO_PLUGINS = cfg.CONF.CISCO_PLUGINS
#
# device_dictionary - Contains all external device configuration.
#
# When populated the device dictionary format is:
# {('<device ID>', '<device ipaddr>', '<keyword>'): '<value>', ...}
#
# Example:
# {('NEXUS_SWITCH', '1.1.1.1', 'username'): 'admin',
# ('NEXUS_SWITCH', '1.1.1.1', 'password'): 'mySecretPassword',
# ('NEXUS_SWITCH', '1.1.1.1', 'compute1'): '1/1', ...}
#
device_dictionary = {}
class CiscoConfigOptions():
"""Cisco Configuration Options Class."""
def __init__(self):
self._create_device_dictionary()
def _create_device_dictionary(self):
"""
Create the device dictionary from the cisco_plugins.ini
device supported sections. Ex. NEXUS_SWITCH, N1KV.
"""
multi_parser = cfg.MultiConfigParser()
read_ok = multi_parser.read(CONF.config_file)
if len(read_ok) != len(CONF.config_file):
raise cfg.Error(_("Some config files were not parsed properly"))
for parsed_file in multi_parser.parsed:
for parsed_item in parsed_file.keys():
dev_id, sep, dev_ip = parsed_item.partition(':')
if dev_id.lower() in ['nexus_switch', 'n1kv']:
for dev_key, value in parsed_file[parsed_item].items():
device_dictionary[dev_id, dev_ip, dev_key] = value[0]
def get_device_dictionary():
return device_dictionary
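# Illustrative cisco_plugins.ini section that _create_device_dictionary()
# translates into the device_dictionary entries shown in the example above
# (values are placeholders):
#
#   [NEXUS_SWITCH:1.1.1.1]
#   username = admin
#   password = mySecretPassword
#   compute1 = 1/1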
|
the-stack_106_20861
|
import typing
class UnionFind():
def __init__(
self,
n: int,
) -> typing.NoReturn:
self.__a = [-1] * n
def find(
self,
u: int,
) -> int:
a = self.__a
au = a[u]
if au < 0: return u
au = self.find(au)
a[u] = au
return au
def unite(
self,
u: int,
v: int,
) -> typing.NoReturn:
u = self.find(u)
v = self.find(v)
if u == v: return
a = self.__a
if a[u] > a[v]: u, v = v, u
a[u] += a[v]
a[v] = u
def same(
self,
u: int,
v: int,
) -> bool:
u = self.find(u)
v = self.find(v)
return u == v
import sys
def main() -> typing.NoReturn:
n, q = map(
int, input().split(),
)
uf = UnionFind(n)
q = map(
int,
sys.stdin.read().split(),
)
for t, u, v in zip(*[q] * 3):
if t == 0:
uf.unite(u, v)
continue
print(uf.same(u, v) * 1)
main()
|
the-stack_106_20863
|
# WE SHOULD PUT MORE STRUCTURE ON THESE TAGS SO WE CAN ACCESS DOCUMENT
# FIELDS ELEGANTLY
# These are common variable tags that we'll want to access
INSTANCE_DOC_NAME = u"_name"
ID = u"_id"
UUID = u"_uuid"
SITE = u"fs_site"
FS_STATUS = u"fs_status"
FS_UUID = u"fs_uuid"
FS_PROJECT_UUID = u"fs_project_uuid"
FS_SITE_IDENTIFIER = u"identifier"
FS_SITE_SUPERVISOR = u"_submitted_by"
FS_SITE_NAME = u"site_name"
FS_SITE_ADDRESS = u"address"
FS_SITE_PHONE = u"phone"
PICTURE = u"picture"
GPS = u"location/gps"
SURVEY_TYPE = u'_survey_type_slug'
# Phone IMEI:
DEVICE_ID = u"device_id" # This tag was used in Phase I
IMEI = u"imei" # This tag was used in Phase II
# Survey start time:
START_TIME = u"start_time" # This tag was used in Phase I
START = u"start" # This tag was used in Phase II
END_TIME = u"end_time"
END = u"end"
# value of INSTANCE_DOC_NAME that indicates a regisration form
REGISTRATION = u"registration"
# keys that we'll look for in the registration form
NAME = u"name"
# extra fields that we're adding to our mongo doc
XFORM_ID_STRING = u"_xform_id_string"
STATUS = u"_status"
ATTACHMENTS = u"_attachments"
UUID = u"_uuid"
USERFORM_ID = u"_userform_id"
DATE = u"_date"
GEOLOCATION = u"_geolocation"
SUBMISSION_TIME = u'_submission_time'
DELETEDAT = u"_deleted_at" # marker for delete surveys
BAMBOO_DATASET_ID = u"_bamboo_dataset_id"
SUBMITTED_BY = u"_submitted_by"
INSTANCE_ID = u"instanceID"
META_INSTANCE_ID = u"meta/instanceID"
INDEX = u"_index"
PARENT_INDEX = u"_parent_index"
PARENT_TABLE_NAME = u"_parent_table_name"
# datetime format that we store in mongo
MONGO_STRFTIME = '%Y-%m-%dT%H:%M:%S'
# how to represent N/A in exports
NA_REP = 'n/a'
# hold tags
TAGS = u"_tags"
NOTES = u"_notes"
# statistics
MEAN = u"mean"
MIN = u"min"
MAX = u"max"
RANGE = u"range"
MEDIAN = u"median"
MODE = u"mode"
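# Hypothetical sketch of how these tags might appear on a parsed submission
# document (the field values below are invented for illustration only):
#
#     {UUID: u"7f9c0b1e", STATUS: u"submitted_via_web", SUBMITTED_BY: u"enumerator1",
#      SUBMISSION_TIME: u"2014-01-31T09:30:00"}  # formatted with MONGO_STRFTIME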
|
the-stack_106_20865
|
"""
The GROVER models for pretraining, finetuning and fingerprint generating.
"""
from argparse import Namespace
from typing import List, Dict, Callable
import numpy as np
import torch
from torch import nn as nn
from grover.data import get_atom_fdim, get_bond_fdim
from grover.model.layers import Readout, GTransEncoder
from grover.util.nn_utils import get_activation_function
class GROVEREmbedding(nn.Module):
"""
The GROVER Embedding class. It contains the GTransEncoder.
    This GTransEncoder can be replaced by any valid encoder.
"""
def __init__(self, args: Namespace):
"""
Initialize the GROVEREmbedding class.
:param args:
"""
super(GROVEREmbedding, self).__init__()
self.embedding_output_type = args.embedding_output_type
edge_dim = get_bond_fdim() + get_atom_fdim()
node_dim = get_atom_fdim()
if not hasattr(args, "backbone"):
print("No backbone specified in args, use gtrans backbone.")
args.backbone = "gtrans"
if args.backbone == "gtrans" or args.backbone == "dualtrans":
# dualtrans is the old name.
self.encoders = GTransEncoder(args,
hidden_size=args.hidden_size,
edge_fdim=edge_dim,
node_fdim=node_dim,
dropout=args.dropout,
activation=args.activation,
num_mt_block=args.num_mt_block,
num_attn_head=args.num_attn_head,
atom_emb_output=self.embedding_output_type,
bias=args.bias,
cuda=args.cuda)
def forward(self, graph_batch: List) -> Dict:
"""
        The forward function takes graph_batch as input and outputs a dict. The content of the dict is decided by
self.embedding_output_type.
:param graph_batch: the input graph batch generated by MolCollator.
:return: a dict containing the embedding results.
"""
output = self.encoders(graph_batch)
if self.embedding_output_type == 'atom':
return {"atom_from_atom": output[0], "atom_from_bond": output[1],
"bond_from_atom": None, "bond_from_bond": None} # atom_from_atom, atom_from_bond
elif self.embedding_output_type == 'bond':
return {"atom_from_atom": None, "atom_from_bond": None,
"bond_from_atom": output[0], "bond_from_bond": output[1]} # bond_from_atom, bond_from_bond
elif self.embedding_output_type == "both":
return {"atom_from_atom": output[0][0], "bond_from_atom": output[0][1],
"atom_from_bond": output[1][0], "bond_from_bond": output[1][1]}
class AtomVocabPrediction(nn.Module):
"""
The atom-wise vocabulary prediction task. The atom vocabulary is constructed by the context.
"""
def __init__(self, args, vocab_size, hidden_size=None):
"""
:param args: the argument.
:param vocab_size: the size of atom vocabulary.
"""
super(AtomVocabPrediction, self).__init__()
if not hidden_size:
hidden_size = args.hidden_size
self.linear = nn.Linear(hidden_size, vocab_size)
self.logsoftmax = nn.LogSoftmax(dim=1)
def forward(self, embeddings):
"""
        If embeddings is None, skip the forward pass and return None.
:param embeddings: the atom embeddings, num_atom X fea_dim.
:return: the prediction for each atom, num_atom X vocab_size.
"""
if embeddings is None:
return None
return self.logsoftmax(self.linear(embeddings))
class BondVocabPrediction(nn.Module):
"""
The bond-wise vocabulary prediction task. The bond vocabulary is constructed by the context.
"""
def __init__(self, args, vocab_size, hidden_size=None):
"""
Might need to use different architecture for bond vocab prediction.
:param args:
:param vocab_size: size of bond vocab.
:param hidden_size: hidden size
"""
super(BondVocabPrediction, self).__init__()
if not hidden_size:
hidden_size = args.hidden_size
self.linear = nn.Linear(hidden_size, vocab_size)
# ad-hoc here
        # If TWO_FC_4_BOND_VOCAB, we will use two distinct fc layers to deal with the bond and rev bond.
self.TWO_FC_4_BOND_VOCAB = True
if self.TWO_FC_4_BOND_VOCAB:
self.linear_rev = nn.Linear(hidden_size, vocab_size)
self.logsoftmax = nn.LogSoftmax(dim=1)
def forward(self, embeddings):
"""
        If embeddings is None, skip the forward pass and return None.
        :param embeddings: the bond embeddings, num_bond X fea_dim.
        :return: the prediction for each bond, num_bond X vocab_size.
"""
if embeddings is None:
return None
nm_bonds = embeddings.shape[0] # must be an odd number
# The bond and rev bond have odd and even ids respectively. See definition in molgraph.
ids1 = [0] + list(range(1, nm_bonds, 2))
ids2 = list(range(0, nm_bonds, 2))
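        # Illustrative sketch of the indexing above (assuming, as in MolGraph, that
        # index 0 is a padding row followed by bond/rev-bond pairs): with nm_bonds = 5,
        # ids1 = [0, 1, 3] and ids2 = [0, 2, 4], so the linear layers below always
        # receive paired embeddings of equal length.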
if self.TWO_FC_4_BOND_VOCAB:
logits = self.linear(embeddings[ids1]) + self.linear_rev(embeddings[ids2])
else:
logits = self.linear(embeddings[ids1] + embeddings[ids2])
return self.logsoftmax(logits)
class FunctionalGroupPrediction(nn.Module):
"""
The functional group (semantic motifs) prediction task. This is a graph-level task.
"""
def __init__(self, args, fg_size):
"""
:param args: The arguments.
:param fg_size: The size of semantic motifs.
"""
super(FunctionalGroupPrediction, self).__init__()
first_linear_dim = args.hidden_size
hidden_size = args.hidden_size
# In order to retain maximal information in the encoder, we use a simple readout function here.
self.readout = Readout(rtype="mean", hidden_size=hidden_size)
# We have four branches here. But the input with less than four branch is OK.
# Since we use BCEWithLogitsLoss as the loss function, we only need to output logits here.
self.linear_atom_from_atom = nn.Linear(first_linear_dim, fg_size)
self.linear_atom_from_bond = nn.Linear(first_linear_dim, fg_size)
self.linear_bond_from_atom = nn.Linear(first_linear_dim, fg_size)
self.linear_bond_from_bond = nn.Linear(first_linear_dim, fg_size)
def forward(self, embeddings: Dict, ascope: List, bscope: List) -> Dict:
"""
        The forward function of semantic motif prediction. It takes the node/bond embeddings and the corresponding
        atom/bond scopes as input and produces the prediction logits for the different branches.
        :param embeddings: The input embeddings organized as a dict, i.e. the output of GROVEREmbedding.
        :param ascope: The scope for atoms. Please refer to BatchMolGraph for more details.
        :param bscope: The scope for bonds. Please refer to BatchMolGraph for more details.
        :return: a dict containing the predicted logits.
"""
preds_atom_from_atom, preds_atom_from_bond, preds_bond_from_atom, preds_bond_from_bond = \
None, None, None, None
if embeddings["bond_from_atom"] is not None:
preds_bond_from_atom = self.linear_bond_from_atom(self.readout(embeddings["bond_from_atom"], bscope))
if embeddings["bond_from_bond"] is not None:
preds_bond_from_bond = self.linear_bond_from_bond(self.readout(embeddings["bond_from_bond"], bscope))
if embeddings["atom_from_atom"] is not None:
preds_atom_from_atom = self.linear_atom_from_atom(self.readout(embeddings["atom_from_atom"], ascope))
if embeddings["atom_from_bond"] is not None:
preds_atom_from_bond = self.linear_atom_from_bond(self.readout(embeddings["atom_from_bond"], ascope))
return {"atom_from_atom": preds_atom_from_atom, "atom_from_bond": preds_atom_from_bond,
"bond_from_atom": preds_bond_from_atom, "bond_from_bond": preds_bond_from_bond}
class GroverTask(nn.Module):
"""
The pretrain module.
"""
def __init__(self, args, grover, atom_vocab_size, bond_vocab_size, fg_size):
super(GroverTask, self).__init__()
self.grover = grover
self.av_task_atom = AtomVocabPrediction(args, atom_vocab_size)
self.av_task_bond = AtomVocabPrediction(args, atom_vocab_size)
self.bv_task_atom = BondVocabPrediction(args, bond_vocab_size)
self.bv_task_bond = BondVocabPrediction(args, bond_vocab_size)
self.fg_task_all = FunctionalGroupPrediction(args, fg_size)
self.embedding_output_type = args.embedding_output_type
@staticmethod
def get_loss_func(args: Namespace) -> Callable:
"""
The loss function generator.
:param args: the arguments.
        :return: the loss function for GroverTask.
"""
def loss_func(preds, targets, dist_coff=args.dist_coff):
"""
The loss function for GroverTask.
:param preds: the predictions.
:param targets: the targets.
:param dist_coff: the default disagreement coefficient for the distances between different branches.
:return:
"""
av_task_loss = nn.NLLLoss(ignore_index=0, reduction="mean") # same for av and bv
fg_task_loss = nn.BCEWithLogitsLoss(reduction="mean")
# av_task_dist_loss = nn.KLDivLoss(reduction="mean")
av_task_dist_loss = nn.MSELoss(reduction="mean")
fg_task_dist_loss = nn.MSELoss(reduction="mean")
sigmoid = nn.Sigmoid()
av_atom_loss, av_bond_loss, av_dist_loss = 0.0, 0.0, 0.0
fg_atom_from_atom_loss, fg_atom_from_bond_loss, fg_atom_dist_loss = 0.0, 0.0, 0.0
bv_atom_loss, bv_bond_loss, bv_dist_loss = 0.0, 0.0, 0.0
fg_bond_from_atom_loss, fg_bond_from_bond_loss, fg_bond_dist_loss = 0.0, 0.0, 0.0
if preds["av_task"][0] is not None:
av_atom_loss = av_task_loss(preds['av_task'][0], targets["av_task"])
fg_atom_from_atom_loss = fg_task_loss(preds["fg_task"]["atom_from_atom"], targets["fg_task"])
if preds["av_task"][1] is not None:
av_bond_loss = av_task_loss(preds['av_task'][1], targets["av_task"])
fg_atom_from_bond_loss = fg_task_loss(preds["fg_task"]["atom_from_bond"], targets["fg_task"])
if preds["bv_task"][0] is not None:
bv_atom_loss = av_task_loss(preds['bv_task'][0], targets["bv_task"])
fg_bond_from_atom_loss = fg_task_loss(preds["fg_task"]["bond_from_atom"], targets["fg_task"])
if preds["bv_task"][1] is not None:
bv_bond_loss = av_task_loss(preds['bv_task'][1], targets["bv_task"])
fg_bond_from_bond_loss = fg_task_loss(preds["fg_task"]["bond_from_bond"], targets["fg_task"])
if preds["av_task"][0] is not None and preds["av_task"][1] is not None:
av_dist_loss = av_task_dist_loss(preds['av_task'][0], preds['av_task'][1])
fg_atom_dist_loss = fg_task_dist_loss(sigmoid(preds["fg_task"]["atom_from_atom"]),
sigmoid(preds["fg_task"]["atom_from_bond"]))
if preds["bv_task"][0] is not None and preds["bv_task"][1] is not None:
bv_dist_loss = av_task_dist_loss(preds['bv_task'][0], preds['bv_task'][1])
fg_bond_dist_loss = fg_task_dist_loss(sigmoid(preds["fg_task"]["bond_from_atom"]),
sigmoid(preds["fg_task"]["bond_from_bond"]))
av_loss = av_atom_loss + av_bond_loss
bv_loss = bv_atom_loss + bv_bond_loss
fg_atom_loss = fg_atom_from_atom_loss + fg_atom_from_bond_loss
fg_bond_loss = fg_bond_from_atom_loss + fg_bond_from_bond_loss
fg_loss = fg_atom_loss + fg_bond_loss
fg_dist_loss = fg_atom_dist_loss + fg_bond_dist_loss
# dist_loss = av_dist_loss + bv_dist_loss + fg_dist_loss
# print("%.4f %.4f %.4f %.4f %.4f %.4f"%(av_atom_loss,
# av_bond_loss,
# fg_atom_loss,
# fg_bond_loss,
# av_dist_loss,
# fg_dist_loss))
# return av_loss + fg_loss + dist_coff * dist_loss
overall_loss = av_loss + bv_loss + fg_loss + dist_coff * av_dist_loss + \
dist_coff * bv_dist_loss + fg_dist_loss
return overall_loss, av_loss, bv_loss, fg_loss, av_dist_loss, bv_dist_loss, fg_dist_loss
return loss_func
def forward(self, graph_batch: List):
"""
The forward function.
:param graph_batch:
:return:
"""
_, _, _, _, _, a_scope, b_scope, _ = graph_batch
a_scope = a_scope.data.cpu().numpy().tolist()
embeddings = self.grover(graph_batch)
av_task_pred_atom = self.av_task_atom(
            embeddings["atom_from_atom"])  # None means this branch's forward pass is skipped
av_task_pred_bond = self.av_task_bond(embeddings["atom_from_bond"])
bv_task_pred_atom = self.bv_task_atom(embeddings["bond_from_atom"])
bv_task_pred_bond = self.bv_task_bond(embeddings["bond_from_bond"])
fg_task_pred_all = self.fg_task_all(embeddings, a_scope, b_scope)
return {"av_task": (av_task_pred_atom, av_task_pred_bond),
"bv_task": (bv_task_pred_atom, bv_task_pred_bond),
"fg_task": fg_task_pred_all}
class GroverFpGeneration(nn.Module):
"""
GroverFpGeneration class.
    It loads the pre-trained model and produces the fingerprints for the input molecules.
"""
def __init__(self, args):
"""
Init function.
:param args: the arguments.
"""
super(GroverFpGeneration, self).__init__()
self.fingerprint_source = args.fingerprint_source
self.iscuda = args.cuda
self.grover = GROVEREmbedding(args)
self.readout = Readout(rtype="mean", hidden_size=args.hidden_size)
def forward(self, batch, features_batch):
"""
The forward function.
        It takes the graph batch and molecular feature batch as input and produces the fingerprints of these molecules.
:param batch:
:param features_batch:
:return:
"""
_, _, _, _, _, a_scope, b_scope, _ = batch
output = self.grover(batch)
# Share readout
mol_atom_from_bond_output = self.readout(output["atom_from_bond"], a_scope)
mol_atom_from_atom_output = self.readout(output["atom_from_atom"], a_scope)
if self.fingerprint_source == "bond" or self.fingerprint_source == "both":
mol_bond_from_atom_output = self.readout(output["bond_from_atom"], b_scope)
            mol_bond_from_bond_output = self.readout(output["bond_from_bond"], b_scope)
if features_batch[0] is not None:
features_batch = torch.from_numpy(np.stack(features_batch)).float()
if self.iscuda:
features_batch = features_batch.cuda()
features_batch = features_batch.to(output["atom_from_atom"])
if len(features_batch.shape) == 1:
features_batch = features_batch.view([1, features_batch.shape[0]])
else:
features_batch = None
if self.fingerprint_source == "atom":
fp = torch.cat([mol_atom_from_atom_output, mol_atom_from_bond_output], 1)
elif self.fingerprint_source == "bond":
            fp = torch.cat([mol_bond_from_atom_output, mol_bond_from_bond_output], 1)
        else:
            # the both case.
            fp = torch.cat([mol_atom_from_atom_output, mol_atom_from_bond_output,
                            mol_bond_from_atom_output, mol_bond_from_bond_output], 1)
if features_batch is not None:
fp = torch.cat([fp, features_batch], 1)
return fp
class GroverFinetuneTask(nn.Module):
"""
    The finetune task. It combines the GROVER embedding with readout and FFN heads
    for downstream property prediction.
"""
def __init__(self, args):
super(GroverFinetuneTask, self).__init__()
self.hidden_size = args.hidden_size
self.iscuda = args.cuda
self.grover = GROVEREmbedding(args)
if args.self_attention:
self.readout = Readout(rtype="self_attention", hidden_size=self.hidden_size,
attn_hidden=args.attn_hidden,
attn_out=args.attn_out)
else:
self.readout = Readout(rtype="mean", hidden_size=self.hidden_size)
self.mol_atom_from_atom_ffn = self.create_ffn(args)
self.mol_atom_from_bond_ffn = self.create_ffn(args)
#self.ffn = nn.ModuleList()
#self.ffn.append(self.mol_atom_from_atom_ffn)
#self.ffn.append(self.mol_atom_from_bond_ffn)
self.classification = args.dataset_type == 'classification'
if self.classification:
self.sigmoid = nn.Sigmoid()
def create_ffn(self, args: Namespace):
"""
Creates the feed-forward network for the model.
:param args: Arguments.
"""
        # Note: args.features_dim is set according to the actually loaded features data
if args.features_only:
first_linear_dim = args.features_size + args.features_dim
else:
if args.self_attention:
first_linear_dim = args.hidden_size * args.attn_out
# TODO: Ad-hoc!
# if args.use_input_features:
first_linear_dim += args.features_dim
else:
first_linear_dim = args.hidden_size + args.features_dim
dropout = nn.Dropout(args.dropout)
activation = get_activation_function(args.activation)
# TODO: ffn_hidden_size
# Create FFN layers
if args.ffn_num_layers == 1:
ffn = [
dropout,
nn.Linear(first_linear_dim, args.output_size)
]
else:
ffn = [
dropout,
nn.Linear(first_linear_dim, args.ffn_hidden_size)
]
for _ in range(args.ffn_num_layers - 2):
ffn.extend([
activation,
dropout,
nn.Linear(args.ffn_hidden_size, args.ffn_hidden_size),
])
ffn.extend([
activation,
dropout,
nn.Linear(args.ffn_hidden_size, args.output_size),
])
# Create FFN model
return nn.Sequential(*ffn)
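    # Illustrative sketch of the layer sequence produced by create_ffn above,
    # assuming args.ffn_num_layers == 2, no self-attention and input features enabled:
    #   Dropout -> Linear(hidden_size + features_dim, ffn_hidden_size)
    #   -> activation -> Dropout -> Linear(ffn_hidden_size, output_size)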
@staticmethod
def get_loss_func(args):
def loss_func(preds, targets,
dt=args.dataset_type,
dist_coff=args.dist_coff):
if dt == 'classification':
pred_loss = nn.BCEWithLogitsLoss(reduction='none')
elif dt == 'regression':
pred_loss = nn.MSELoss(reduction='none')
else:
raise ValueError(f'Dataset type "{args.dataset_type}" not supported.')
# print(type(preds))
# TODO: Here, should we need to involve the model status? Using len(preds) is just a hack.
if type(preds) is not tuple:
# in eval mode.
return pred_loss(preds, targets)
# in train mode.
dist_loss = nn.MSELoss(reduction='none')
# dist_loss = nn.CosineSimilarity(dim=0)
# print(pred_loss)
dist = dist_loss(preds[0], preds[1])
pred_loss1 = pred_loss(preds[0], targets)
pred_loss2 = pred_loss(preds[1], targets)
return pred_loss1 + pred_loss2 + dist_coff * dist
return loss_func
def forward(self, batch, features_batch):
_, _, _, _, _, a_scope, _, _ = batch
output = self.grover(batch)
# Share readout
mol_atom_from_bond_output = self.readout(output["atom_from_bond"], a_scope)
mol_atom_from_atom_output = self.readout(output["atom_from_atom"], a_scope)
if features_batch[0] is not None:
features_batch = torch.from_numpy(np.stack(features_batch)).float()
if self.iscuda:
features_batch = features_batch.cuda()
features_batch = features_batch.to(output["atom_from_atom"])
if len(features_batch.shape) == 1:
features_batch = features_batch.view([1, features_batch.shape[0]])
else:
features_batch = None
if features_batch is not None:
mol_atom_from_atom_output = torch.cat([mol_atom_from_atom_output, features_batch], 1)
mol_atom_from_bond_output = torch.cat([mol_atom_from_bond_output, features_batch], 1)
if self.training:
atom_ffn_output = self.mol_atom_from_atom_ffn(mol_atom_from_atom_output)
bond_ffn_output = self.mol_atom_from_bond_ffn(mol_atom_from_bond_output)
return atom_ffn_output, bond_ffn_output
else:
atom_ffn_output = self.mol_atom_from_atom_ffn(mol_atom_from_atom_output)
bond_ffn_output = self.mol_atom_from_bond_ffn(mol_atom_from_bond_output)
if self.classification:
atom_ffn_output = self.sigmoid(atom_ffn_output)
bond_ffn_output = self.sigmoid(bond_ffn_output)
output = (atom_ffn_output + bond_ffn_output) / 2
return output
|
the-stack_106_20866
|
from textwrap import wrap
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
__all__ = ["likert_bar_plot"]
DEFAULT_GROUPED_CHOICES = [
{
"left": ["Yes"],
"center": ["I don’t know", "I don’t want to answer this question", "No Answer"],
"right": ["No"],
},
{
"left": ["Very satisfied", "Satisfied"],
"center": [
"Neither/nor",
"Does not apply",
"I don’t want to answer this question",
"No Answer",
],
"right": ["Dissatisfied", "Very dissatisfied"],
},
{
"left": ["Very attractive", "Attractive"],
"center": ["Neutral", "I don’t want to answer this question", "No Answer"],
"right": ["Unattractive", "Very unattractive"],
},
{
"left": ["Very much", "To some extent"],
"center": [
"Does not apply",
"I don’t want to answer this question",
"No Answer",
],
"right": [
"Rather not",
"Not at all",
],
},
{
"left": ["Very much", "Moderately"],
"center": ["I don’t want to answer this question", "No Answer"],
"right": ["Somewhat", "Not at all"],
},
{
"left": ["Not at all"],
"center": ["I don’t want to answer this question", "No Answer"],
"right": ["Several days", "More than half the days", "Nearly every day"],
},
{
"left": ["Male"],
"center": [
"Diverse",
"I don’t know",
"Does not apply",
"I don’t want to answer this question",
"No Answer",
],
"right": ["Female"],
},
{
"left": ["Fully agree", "Partially agree"],
"center": [
"Neither agree nor disagree",
"I don’t know",
"I don’t want to answer this question",
"No Answer",
],
"right": ["Partially disagree", "Fully disagree"],
},
{
"left": ["Very much", "Rather yes"],
"center": [
"Indifferent",
"I don’t know",
"I don’t want to answer this question",
"No Answer",
],
"right": ["Rather not", "Not at all"],
},
{
"left": ["Yes, to a great extent", "Yes, to some extent"],
"center": ["I don’t know", "I don’t want to answer this question", "No Answer"],
"right": ["No"],
},
{
"left": ["very positively", "positively"],
"center": [
"neutral",
"no base for comparison in my case",
"I don’t know",
"I don’t want to answer this question",
"No Answer",
],
"right": ["negatively", "very negatively"],
},
{
"left": ["never", "rarely"],
"center": [
"sometimes",
"I don’t want to answer this question",
"Does not apply",
"No Answer",
],
"right": ["often", "always"],
},
{
"left": ["not at all", "rather not"],
"center": [
"I never had the option",
"I don’t know",
"I don’t want to answer this question",
"No Answer",
],
"right": ["to some extent", "very much"],
},
]
def get_grouped_default_choices(responses):
"""
Get a dict of presorted choices (potential responses)
Args:
responses (list): responses given in the data
Returns:
dict: dict of sorted choices
"""
for choices in DEFAULT_GROUPED_CHOICES:
responses_lower = [response.lower() for response in responses]
choices_lower = [choice.lower() for choice in choices_to_list(choices.values())]
if len(set(responses_lower).difference(set(choices_lower))) == 0:
return remove_unused_default_choices(responses, choices)
raise AssertionError(
f"No sorted default choices found for responses {responses} - "
f"please provide your own sorting as follows: {DEFAULT_GROUPED_CHOICES[0]}"
)
def remove_unused_default_choices(responses, choices):
"""
Filter out default choices that are not occurring in the responses
Args:
responses (list): responses given in the data
choices (dict): sorted choices (potential responses)
Returns:
dict: of sorted choices with choices that are not available in the responses removed
"""
filtered_choices = dict()
responses_lower = [response.lower() for response in responses]
    for location, grouped_choices in choices.items():
        filtered_choices[location] = [
            choice for choice in grouped_choices if choice.lower() in responses_lower
]
return filtered_choices
def choices_to_list(choices):
return [choice for grouped_choices in choices for choice in grouped_choices]
def clean_up_apostrophes_in_responses(data_df):
"""
Replace apostrophes in the responses
Args:
data_df (df): Dataframe of data to plot
Returns:
df: Dataframe of data to plot
"""
for response in data_df.index:
if isinstance(response, str):
data_df = data_df.rename(index={response: response.replace("'", "’")})
return data_df
def get_colors(palette, grouped_choices):
"""
Get colors for plotting
Args:
        palette (str): if None a default palette is used, otherwise a seaborn palette of this name is applied
grouped_choices (dict): choices sorted by location where they will be plotted ("left", "center", "right")
Returns:
dict: for each plot location ("left", "center", "right") a list of colors
"""
if palette is None:
color_palette = {"left": "Blues", "center": "Greys", "right": "Reds"}
colors = {
position: sns.color_palette(color_map, len(grouped_choices[position]))
for position, color_map in color_palette.items()
}
colors["left"] = colors["left"][
::-1
        ]  # flip color scale so the darkest colors sit on the outer ends of the bars
else:
n_colors = sum([len(choices) for choices in grouped_choices.values()])
color_palette = sns.color_palette(palette, n_colors=n_colors)
colors = {
"left": color_palette[: len(grouped_choices["left"])],
"center": color_palette[
len(grouped_choices["left"]) : -len(grouped_choices["right"])
],
"right": color_palette[-len(grouped_choices["right"]) :],
}
return colors
def calc_y_bar_position(grouped_questions, bar_thickness, bar_spacing, group_spacing):
"""
Calculate y axis position of bar plots
Args:
grouped_questions (list): each sub-list is a group of questions which will be plotted together
bar_thickness (float): thickness of a single bar
bar_spacing (float): horizontal spacing between bars belonging to the same group
group_spacing (float): horizontal spacing between different question groups
Returns:
list: bar locations for plotting
"""
bar_positions = []
prev_position = 0
for i_group, question_group in enumerate(grouped_questions):
group_positions = (bar_thickness + bar_spacing) * np.array(
(range(len(question_group)))
) + prev_position
if i_group > 0:
group_positions = group_positions + group_spacing
bar_positions.extend(group_positions)
prev_position = bar_positions[-1]
return bar_positions
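# Worked example of the positioning logic above (hand-computed): for
# grouped_questions=[["q1", "q2"], ["q3"]], bar_thickness=0.2, bar_spacing=0.5 and
# group_spacing=1 the returned positions are [0.0, 0.7, 1.7] -- bars within the
# first group sit 0.7 apart and the second group starts after an extra gap of 1.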
def wrap_ticks_label(labels, wrap_text):
if not wrap_text:
return labels
return ["\n".join(wrap(label, 30)) for label in labels]
def format_labels(labels):
"""
Format labels to be first letter upper case, all other characters lowercase.
Args:
labels (list): labels to be formatted
Returns:
list: formatted labels
"""
edited_labels = []
for label in labels:
label = label.lower()
label = label[0].upper() + label[1:]
label = label.replace(" i ", " I ")
edited_labels.append(label)
return edited_labels
def sort_questions(data_df, grouped_questions, grouped_choices, sort_by_choices):
"""
    Sort the questions in the dataframe so they are plotted within their group, ordered inside each group by the responses.
Args:
data_df (df): Dataframe of data to plot
grouped_questions (list): each sub-list is a group of questions which will be plotted together
grouped_choices (dict): choices sorted by location where they will be plotted ("left", "center", "right")
sort_by_choices (str): choices by which to sort the questions ("left", "right", "no_sorting")
Returns:
df: Dataframe with reordered columns (columns represent the questions)
"""
if sort_by_choices == "no_sorting":
return data_df
# sort questions by the selected sorting (left / right)
# and add half of the "centered" responses as well since they shift the bars outside as well
sorted_questions = (
(
data_df.loc[grouped_choices[sort_by_choices]].sum(axis=0)
+ data_df.loc[grouped_choices["center"]].sum(axis=0) / 2
)
.sort_values(ascending=False)
.index
)
# resort data frame according to the groups so the questions are sorted within each group by the selected sorting
sorted_group_questions = [
question
for question_group in grouped_questions
for question in sorted_questions
if question in question_group
]
return data_df.reindex(columns=sorted_group_questions)
def absolute_responses_to_percentage(data_df):
total_responses = data_df.sum(axis=0)
data_df = data_df / total_responses * 100
return data_df.round(1)
def check_question_sorting(sort_questions_by):
available_sortings = ["left", "right", "no_sorting"]
if sort_questions_by not in available_sortings:
raise AssertionError(
f"Unknown sorting of responses received {sort_questions_by}, allowed options: {available_sortings}"
)
def likert_bar_plot(
data_df,
grouped_choices=None,
grouped_questions=None,
sort_questions_by_choices="left",
title_question=None,
theme=None,
bar_thickness=0.2,
bar_spacing=0.5,
group_spacing=1,
calc_fig_size=True,
x_axis_max_values=None,
wrap_text=True,
**kwargs,
):
"""
Plot the responses to an array question as a Likert plot
Args:
        data_df (df): dataframe containing response counts (rows) and questions (columns)
grouped_choices (dict, optional): choices sorted by location where they will be plotted ("left", "center", "right"). If None provided, a set of default pre-sorted choices is selected.
grouped_questions (list, optional): each sub-list is a group of questions which will be plotted together. If None provided all columns are assigned to a single group.
        sort_questions_by_choices (str, optional): choices by which to sort the questions ("left", "right", "no_sorting").
title_question (str, optional): The title question. If None provided, no title is added to the plot.
theme(dict, optional): plot theme. If None is provided a default theme is selected.
bar_thickness (float): thickness of a single bar
bar_spacing (float): horizontal spacing between bars belonging to the same group
group_spacing (float): horizontal spacing between different question groups
calc_fig_size (bool, optional): Calculate a figure size from the provided data and bar parameters, if False a default figure size is used
x_axis_max_values (float, optional): Crop the x-Axis to [N%...N%], if None provided the x-axis is [100%...100%]
wrap_text (bool, optional): Add line breaks to long questions
"""
palette = None
check_question_sorting(sort_questions_by_choices)
total_answers = data_df.loc["Total", "Total"]
data_df = data_df.drop("Total", axis=0)
data_df = data_df.drop("Total", axis=1)
data_df.index = data_df.index.map(str)
data_df = clean_up_apostrophes_in_responses(data_df)
if grouped_questions is None:
grouped_questions = [data_df.columns.values]
if grouped_choices is None:
grouped_choices = get_grouped_default_choices(data_df.index)
if theme is not None:
sns.set_theme(**theme)
palette = theme.get("palette", None)
if calc_fig_size:
rc = {
"figure.figsize": (
12,
len(data_df.columns) * (bar_thickness + bar_spacing)
+ len(grouped_questions) * group_spacing
+ 0.5,
)
}
sns.set_theme(rc=rc)
fig, ax = plt.subplots()
# rearrange responses in dataframe according to the selected sorting so they are plotted in the correct order
location_and_response = [
(position, choice)
for position in ["left", "center", "right"]
for choice in grouped_choices[position]
]
_, sorted_responses = list(zip(*location_and_response))
data_df = data_df.reindex(sorted_responses)
# reorder dataframe columns and the questions for plotting according to the sorting
n_responses_absolute = data_df.sum(axis=0)
data_df = absolute_responses_to_percentage(data_df)
data_df = sort_questions(
data_df, grouped_questions, grouped_choices, sort_questions_by_choices
)
responses_cumulated = data_df.cumsum(axis=0)
# dict of colors for the bar parts ["left", "center", "right"]
colors = get_colors(palette, grouped_choices)
bar_positions = calc_y_bar_position(
grouped_questions, bar_thickness, bar_spacing, group_spacing
)
# own size + half of the size of the central box
# have it centered around the choices assigned to the "center" location
bar_offsets = (
data_df.loc[grouped_choices["left"]].sum(axis=0)
+ data_df.loc[grouped_choices["center"]].sum(axis=0) / 2
)
sorted_questions = data_df.columns.values
for response in location_and_response:
position, response_name = response
index_response = grouped_choices[position].index(response_name)
widths = data_df.loc[response_name].values
starts = responses_cumulated.loc[response_name].values - widths - bar_offsets
ax.barh(
bar_positions,
widths,
left=starts,
height=bar_thickness,
label=format_labels([response_name])[0],
color=colors[position][index_response],
**kwargs,
)
plt.yticks(bar_positions, wrap_ticks_label(data_df.columns.values, wrap_text))
# add counts per question
for question, bar_position in zip(sorted_questions, bar_positions):
n_counts = "(" + str(int(n_responses_absolute[question])) + ")"
ax.text(0, bar_position + bar_thickness * 0.25, n_counts)
# add zero reference line
ax.axvline(0, linestyle="--", color="black", alpha=0.25)
# x axis in percent
if x_axis_max_values is not None:
ax_min_max = x_axis_max_values
else:
ax_min_max = 100
ax.set_xlim(-ax_min_max, ax_min_max)
ax.set_xticks(np.arange(-ax_min_max, ax_min_max + 1, 10))
ax.xaxis.set_major_formatter(lambda x, pos: str(abs(int(x))) + "%")
# add total answers and answers per question
ax.text(ax_min_max, -0.05, f"Total: {total_answers}")
# y axis
ax.invert_yaxis()
# remove spines
ax.spines["right"].set_visible(False)
ax.spines["top"].set_visible(False)
ax.spines["left"].set_visible(False)
# legend
if len(location_and_response) > 4:
n_columns = int(np.ceil(len(location_and_response) / 2))
else:
n_columns = len(location_and_response)
ax.legend(
ncol=n_columns,
bbox_to_anchor=(0, 1, 1, 1),
mode="expand",
loc="lower left",
fontsize="small",
framealpha=0.5,
)
# add title
if title_question is not None:
n_rows = int(np.ceil(len(location_and_response) / n_columns))
ax.set_title(title_question, pad=(n_rows + 1.5) * plt.rcParams["font.size"])
plt.tight_layout()
return fig, ax
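# A minimal usage sketch (hedged): the DataFrame below is invented for
# illustration; the plotted frame must contain a "Total" row and column,
# response labels as the index and questions as the columns.
#
#     import pandas as pd
#     counts = pd.DataFrame(
#         {"Q1": [10, 5, 3, 18], "Q2": [7, 6, 5, 18], "Total": [17, 11, 8, 36]},
#         index=["Yes", "I don't know", "No", "Total"],
#     )
#     fig, ax = likert_bar_plot(counts, title_question="Overall satisfaction?")
#     fig.savefig("likert.png")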
|
the-stack_106_20867
|
# -*- coding: utf-8 -*-
import pytest
def test_local(pepper_cli, session_minion_id):
'''Sanity-check: Has at least one minion - /run - /login query type is parameterized'''
ret = pepper_cli('*', 'test.ping')
assert ret[session_minion_id] is True
@pytest.mark.xfail(
pytest.config.getoption("--salt-api-backend") == "rest_tornado",
reason="this is broken in rest_tornado until future release",
)
def test_long_local(pepper_cli, session_minion_id):
'''Test a long call blocks until the return'''
ret = pepper_cli('--timeout=60', '*', 'test.sleep', '30')
assert ret[session_minion_id] is True
|
the-stack_106_20868
|
#
# Copyright 2014 Infoxchange Australia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Test services API.
"""
import unittest
import forklift.services
from tests.base import docker_image_available
class ServiceTestCase(unittest.TestCase):
"""
Generic service tests.
"""
service_class = None
def test_api_conformance(self):
"""
Test that the service has the correct API.
"""
# assert we have at least one provider
self.assertGreaterEqual(len(self.service_class.providers), 1)
# assert those providers exist on the class
for provider in self.service_class.providers:
assert hasattr(self.service_class, provider)
# assert can build a provider
service = getattr(self.service_class,
self.service_class.providers[0])('fake')
# assert we can set the host
#
# Only the Docker driver uses the host property, and it is
# currently optional. However this test is useful because the
# property is useful. If it turns out there are services for
# which host is not useful, then this test should be changed :)
assert hasattr(service, 'host')
service.host = 'badger'
assert hasattr(service, 'environment')
assert hasattr(service, 'available')
# Test all attributes in allow_override exist
for attr in service.allow_override:
value = getattr(service, attr)
setattr(service, attr, value)
def test_available(self):
"""
Test that the service provided by the image is available.
"""
image = self.service_class.CONTAINER_IMAGE
if image and not docker_image_available(image):
raise unittest.SkipTest(
"Docker image {0} is required.".format(image))
service = self.service_class.provide('fake', transient=True)
self.assertTrue(service.available())
service.cleanup()
def load_tests(loader, tests, pattern):
"""
Generate a test class for each service.
"""
suite = unittest.TestSuite()
for cls in forklift.services.register.values():
test_class = type(ServiceTestCase)(
cls.__name__ + 'TestCase',
(ServiceTestCase,),
{
'service_class': cls,
}
)
suite.addTests(loader.loadTestsFromTestCase(test_class))
return suite
|
the-stack_106_20869
|
"""Load & convert data from CSV file using Python built-in csv module"""
import bz2
import csv
from collections import namedtuple
from datetime import datetime
Column = namedtuple('Column', 'src dest convert')
def parse_timestamp(text):
return datetime.strptime(text, '%Y-%m-%d %H:%M:%S')
columns = [
Column('VendorID', 'vendor_id', int),
Column('passenger_count', 'num_passengers', int),
Column('tip_amount', 'tip', float),
Column('total_amount', 'price', float),
Column('tpep_dropoff_datetime', 'dropoff_time', parse_timestamp),
Column('tpep_pickup_datetime', 'pickup_time', parse_timestamp),
Column('trip_distance', 'distance', float),
]
def iter_records(file_name):
with bz2.open(file_name, 'rt') as fp:
reader = csv.DictReader(fp)
for csv_record in reader:
record = {}
for col in columns:
value = csv_record[col.src]
record[col.dest] = col.convert(value)
yield record
def example():
from pprint import pprint
for i, record in enumerate(iter_records('taxi.csv.bz2')):
if i >= 10:
break
pprint(record)
example()
|
the-stack_106_20871
|
import numpy as np
from time import time
import datetime
from keras import layers
from keras.layers import Input, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D
from keras.layers import AveragePooling2D, MaxPooling2D, Dropout, GlobalMaxPooling2D, GlobalAveragePooling2D
from keras.models import Model, load_model
from keras.preprocessing import image
from keras.utils import layer_utils
from keras.utils.data_utils import get_file
from keras.applications.imagenet_utils import preprocess_input
from keras.optimizers import Adam, SGD
from keras.callbacks import TensorBoard
import pydot
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot
from keras.initializers import glorot_uniform
from keras.utils import plot_model
from sklearn.model_selection import train_test_split
import glob
import os
from PIL import Image, ImageOps
import json
import keras.backend as K
# K.set_image_data_format('channels_last')
import matplotlib.pyplot as plt
from matplotlib.pyplot import imshow
def mean_pred(y_true, y_pred):
return K.mean(y_pred)
def load_data(path: str='./data', project_id: str=None, binary: bool=True, resize: tuple=(144, 144), test_size=0.1):
# data:
x = []
# classifications:
y = []
# hand-picked test data:
x_test = []
y_test = []
# get json file with project metadata
project_meta_json = os.path.join(path, f'{project_id}.json')
with open(project_meta_json) as f:
project_meta = json.load(f)
# print(project_meta)
classes_list = project_meta['classes']
# if it's a binary problem, do {'class1': 0, 'class2': 1}
# if multi-class (N), do {'class1': np.array([1, 0, ..., 0]), 'class2': np.array([0, 1, ..., 0]),
# 'classN': np.array([0, 0, ..., 1])}
if binary:
classes = {classes_list[0]: 0, classes_list[1]: 1}
else:
classes = {}
n_c = len(classes_list)
for ci, cls in enumerate(classes_list):
classes[cls] = np.zeros(n_c)
classes[cls][ci] = 1
# print(classes)
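    # Illustrative sketch of the resulting label encoding (class names invented):
    #   binary:      {'streak': 0, 'bogus': 1}
    #   multi-class: {'streak': array([1., 0., 0.]), 'bogus': array([0., 1., 0.]),
    #                 'artifact': array([0., 0., 1.])}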
path_project = os.path.join(path, project_id)
# FIXME:
for dataset_id in project_meta['datasets'][:3]:
print(f'Loading dataset {dataset_id}')
dataset_json = glob.glob(os.path.join(path_project, f'{dataset_id}.*.json'))[0]
with open(dataset_json) as f:
classifications = json.load(f)
# print(classifications)
path_dataset = glob.glob(os.path.join(path_project, f'{dataset_id}.*'))[0]
# print(path_dataset)
nnn = 1
for k, v in classifications.items():
image_class = classes[v[0]]
if dataset_id == '5b96ecf05ec848000c70a870' and image_class == 1 and nnn <= 50:
# FIXME: use streak examples from Zooniverse as test cases
y_test.append(image_class)
# resize and normalize:
image_path = os.path.join(path_dataset, k)
# the assumption is that images are grayscale
image = np.expand_dims(np.array(ImageOps.grayscale(Image.open(image_path)).resize(resize,
Image.BILINEAR)) / 255.,
2)
x_test.append(image)
nnn += 1
else:
y.append(image_class)
# resize and normalize:
image_path = os.path.join(path_dataset, k)
# the assumption is that images are grayscale
image = np.expand_dims(np.array(ImageOps.grayscale(Image.open(image_path)).resize(resize,
Image.BILINEAR)) / 255.,
2)
x.append(image)
# numpy-fy and split to test/train
x = np.array(x)
y = np.array(y)
print(x.shape)
print(y.shape)
# check statistics on different classes
if not binary:
print('\n')
        for ci, cls in enumerate(classes.keys()):
            print(f'{cls}:', np.sum(y[:, ci]))
print('\n')
else:
print('\n')
cs = list(classes.keys())
print(f'{cs[0]}:', len(y) - np.sum(y))
print(f'{cs[1]}:', np.sum(y))
print('\n')
# # X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.1, random_state=42)
# X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=test_size)
# print(X_train.shape, X_test.shape, y_train.shape, y_test.shape)
# FIXME:
x_test = np.array(x_test)
y_test = np.array(y_test)
X_train, X_test, y_train, y_test = x, x_test, y, y_test
return X_train, y_train, X_test, y_test, classes
# def VGGModel(input_shape, nf: tuple=(16, 32), f: int=3, s: int=1, nfc: int=128, n_classes: int=8):
# """
# Implementation of the HappyModel.
#
# Arguments:
# input_shape -- shape of the images of the dataset
# f -- filter size
# s -- stride
#
# padding is always 'same'
#
# Returns:
# model -- a Model() instance in Keras
# """
#
# # Define the input placeholder as a tensor with shape input_shape. Think of this as your input image!
# X_input = Input(input_shape)
#
# nf1, nf2 = nf
#
# # CONV -> BN -> RELU Block applied to X
# X = Conv2D(nf1, (f, f), strides=(s, s), padding='same', name='conv0')(X_input)
# # X = Conv2D(nf1, (f, f), strides=(s, s), padding='same', name='conv0', data_format=K.image_data_format())(X_input)
# X = BatchNormalization(axis=-1, name='bn0')(X, training=1)
# # X = BatchNormalization(axis=-1, name='bn0')(X)
# X = Activation('relu')(X)
# # X = Activation('sigmoid')(X)
#
# # MAXPOOL
# X = MaxPooling2D((2, 2), strides=(2, 2), name='max_pool0')(X)
#
# # CONV -> BN -> RELU Block applied to X
# X = Conv2D(nf2, (f, f), strides=(s, s), padding='same', name='conv1')(X)
# # X = Conv2D(nf2, (f, f), strides=(s, s), padding='same', name='conv1', data_format=K.image_data_format())(X)
# X = BatchNormalization(axis=-1, name='bn1')(X, training=1)
# # X = BatchNormalization(axis=-1, name='bn1')(X)
# X = Activation('relu')(X)
# # X = Activation('sigmoid')(X)
#
# # MAXPOOL
# X = MaxPooling2D((2, 2), strides=(2, 2), name='max_pool1')(X)
#
# # FLATTEN X (means convert it to a vector)
# X = Flatten()(X)
#
# # FULLYCONNECTED
# X = Dense(nfc, activation='sigmoid', name='fc2')(X)
#
# # FULLYCONNECTED
# # X = Dense(nfc, activation='sigmoid', name='fc3')(X)
#
# # output layer
# activation = 'sigmoid' if n_classes == 1 else 'softmax'
# X = Dense(n_classes, activation=activation, name='fcOUT', kernel_initializer=glorot_uniform(seed=0))(X)
#
# # Create model. This creates your Keras model instance, you'll use this instance to train/test the model.
# model = Model(inputs=X_input, outputs=X, name='vgg_model_v1')
#
# return model
def VGGModel_v2(input_shape, nf: tuple=(16, 32, 64), f: int=3, s: int=1, nfc: tuple=(128,), n_classes: int=8):
"""
    Implementation of a small VGG-style convolutional model (v2).
    Arguments:
    input_shape -- shape of the images of the dataset
    nf -- number of filters in the conv blocks
    f -- filter size
    s -- stride
    nfc -- number of neurons in the FC layers
padding is always 'same'
Returns:
model -- a Model() instance in Keras
"""
# Define the input placeholder as a tensor with shape input_shape. Think of this as your input image!
X_input = Input(input_shape)
''' first convolutional block: [CONV] -> [BATCH_NORM] -> [RELU] -> [MAXPOOL] '''
# CONV -> BN -> RELU Block applied to X
X = Conv2D(nf[0], (f, f), strides=(s, s), padding='same', name='conv0')(X_input)
X = BatchNormalization(axis=-1, name='bn0')(X, training=1)
X = Activation('relu')(X)
# X = Activation('sigmoid')(X)
# MAXPOOL
X = MaxPooling2D((2, 2), strides=(2, 2), name='max_pool0')(X)
''' convolutional blocks: [CONV] -> [BATCH_NORM] -> [RELU] -> [MAXPOOL] '''
for i in range(1, len(nf)):
# CONV -> BN -> RELU Block applied to X
X = Conv2D(nf[i], (f, f), strides=(s, s), padding='same', name=f'conv{i}')(X)
X = BatchNormalization(axis=-1, name=f'bn{i}')(X, training=1)
X = Activation('relu')(X)
# X = Activation('sigmoid')(X)
# MAXPOOL
X = MaxPooling2D((2, 2), strides=(2, 2), name=f'max_pool{i}')(X)
''' FLATTEN X (means convert it to a vector) '''
X = Flatten()(X)
''' FULLYCONNECTED layers '''
for i, nfc_i in enumerate(nfc):
X = Dense(nfc_i, activation='sigmoid', name=f'fc{i+len(nf)}')(X)
''' FULLYCONNECTED output layer '''
activation = 'sigmoid' if n_classes == 1 else 'softmax'
X = Dense(n_classes, activation=activation, name='fcOUT', kernel_initializer=glorot_uniform(seed=0))(X)
# Create model. This creates your Keras model instance, you'll use this instance to train/test the model.
model = Model(inputs=X_input, outputs=X, name='vgg_model_v2')
return model
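# A minimal construction sketch (hedged): the input shape assumes the 144x144
# grayscale images produced by load_data above; all hyper-parameters are examples.
#
#     model = VGGModel_v2((144, 144, 1), nf=(16, 32), f=3, s=1, nfc=(128,), n_classes=1)
#     model.summary()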
def main():
K.clear_session()
# streak / not streak? or with subclasses of bogus?
binary_classification = True
# binary_classification = False
n_classes = 1 if binary_classification else 2
n_fc = 32 if binary_classification else 128
loss = 'binary_crossentropy' if binary_classification else 'categorical_crossentropy'
# load data
X_train, Y_train, X_test, Y_test, classes = load_data(path='./data',
project_id='5b96af9c0354c9000b0aea36',
binary=binary_classification,
test_size=0.1)
# image shape:
image_shape = X_train.shape[1:]
print('image shape:', image_shape)
print("number of training examples = " + str(X_train.shape[0]))
print("number of test examples = " + str(X_test.shape[0]))
print("X_train shape: " + str(X_train.shape))
print("Y_train shape: " + str(Y_train.shape))
print("X_test shape: " + str(X_test.shape))
print("Y_test shape: " + str(Y_test.shape))
# build model
# model = VGGModel(image_shape, n_classes=n_classes)
# model = VGGModel(image_shape, nf=(16, 32), f=3, s=1, nfc=128, n_classes=n_classes)
# model = VGGModel(image_shape, nf=(16, 32), f=3, s=1, nfc=32, n_classes=n_classes)
# model = VGGModel(image_shape, nf=(16, 32), f=3, s=1, nfc=n_fc, n_classes=n_classes)
model = VGGModel_v2(image_shape, nf=(16, 32), f=3, s=1, nfc=(128,), n_classes=n_classes)
# set up optimizer:
# adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
# sgd = SGD(lr=0.01, momentum=0.0, decay=0.0)
sgd = SGD(lr=0.01, momentum=0.9, decay=1e-6)
# model.compile(optimizer='adam', loss='binary_crossentropy', metrics=["accuracy"])
# model.compile(optimizer=adam, loss=loss, metrics=['accuracy'])
model.compile(optimizer=sgd, loss=loss, metrics=['accuracy'])
tensorboard = TensorBoard(log_dir=f'./logs/{datetime.datetime.now().strftime(model.name + "_%Y%m%d_%H%M%S")}')
batch_size = 32
model.fit(x=X_train, y=Y_train, epochs=2, batch_size=batch_size, verbose=1, callbacks=[tensorboard])
# preds = model.evaluate(x=X_train, y=Y_train)
# preds = model.evaluate(x=X_test, y=Y_test)
preds = model.evaluate(x=X_test, y=Y_test, batch_size=batch_size)
# preds = model.evaluate(x=X_test, y=Y_test, batch_size=X_test.shape[0])
print("Loss = " + str(preds[0]))
print("Test Accuracy = " + str(preds[1]))
# preds = np.abs(model.predict(x=X_test, batch_size=batch_size) - Y_test)
preds = model.predict(x=X_test, batch_size=batch_size)
print(preds)
for ip, pred in enumerate(preds):
print(pred[0])
# if pred[0] < 0.5:
im = Image.fromarray((X_test[ip, :, :, 0] * 255).astype('uint8'))
im.show()
input()
# print(model.summary())
model.save(f'./{datetime.datetime.now().strftime(model.name + "_%Y%m%d_%H%M%S")}.h5')
# plot_model(model, to_file=f'{model.name}.png')
# SVG(model_to_dot(model).create(prog='dot', format='svg'))
if __name__ == '__main__':
main()
|
the-stack_106_20877
|
from __future__ import absolute_import
from unittest import TestCase
from lintreview.review import Comment, Problems
from lintreview.tools.xo import Xo
from nose.tools import eq_
from tests import root_dir, requires_image
FILE_WITH_NO_ERRORS = 'tests/samples/xo/no_errors.js'
FILE_WITH_ERRORS = 'tests/samples/xo/has_errors.js'
class TestXo(TestCase):
def setUp(self):
self.problems = Problems()
options = {
'ignore': ''
}
self.tool = Xo(self.problems, options, root_dir)
def test_match_file(self):
self.assertFalse(self.tool.match_file('test.php'))
self.assertFalse(self.tool.match_file('dir/name/test.py'))
self.assertFalse(self.tool.match_file('test.py'))
self.assertTrue(self.tool.match_file('test.js'))
self.assertTrue(self.tool.match_file('test.jsx'))
self.assertTrue(self.tool.match_file('dir/name/test.js'))
@requires_image('nodejs')
def test_check_dependencies(self):
self.assertTrue(self.tool.check_dependencies())
@requires_image('nodejs')
def test_process_files_pass(self):
        self.tool.process_files([FILE_WITH_NO_ERRORS])
eq_([], self.problems.all(FILE_WITH_NO_ERRORS))
@requires_image('nodejs')
def test_process_files_fail(self):
self.tool.process_files([FILE_WITH_ERRORS])
problems = self.problems.all(FILE_WITH_ERRORS)
eq_(2, len(problems))
msg = ("Filename is not in kebab case. Rename it to `has-errors.js`."
" (unicorn/filename-case)\n"
"Unexpected var, use let or const instead. (no-var)\n"
"'foo' is assigned a value but never used. (no-unused-vars)\n"
"'bar' is not defined. (no-undef)\n"
"Missing semicolon. (semi)")
expected = Comment(FILE_WITH_ERRORS, 2, 2, msg)
eq_(expected, problems[0])
msg = ("Unexpected alert. (no-alert)\n"
"'alert' is not defined. (no-undef)\n"
"Strings must use singlequote. (quotes)")
expected = Comment(FILE_WITH_ERRORS, 4, 4, msg)
eq_(expected, problems[1])
|
the-stack_106_20878
|
# -*- coding: utf-8 -*-
#
# WeatherPlugin E2
#
# Coded by Dr.Best (c) 2012-2013
# Support: www.dreambox-tools.info
# E-Mail: [email protected]
#
# This plugin is open source but it is NOT free software.
#
# This plugin may only be distributed to and executed on hardware which
# is licensed by Dream Multimedia GmbH.
# In other words:
# It's NOT allowed to distribute any parts of this plugin or its source code in ANY way
# to hardware which is NOT licensed by Dream Multimedia GmbH.
# It's NOT allowed to execute this plugin and its source code or even parts of it in ANY way
# on hardware which is NOT licensed by Dream Multimedia GmbH.
#
# If you want to use or modify the code or parts of it,
# you have to keep MY license and inform me about the modifications by mail.
#
from Components.Renderer.Renderer import Renderer
from enigma import ePixmap
from Components.AVSwitch import AVSwitch
from enigma import eEnv, ePicLoad, eRect, eSize, gPixmapPtr
class MSNWeatherPixmap(Renderer):
def __init__(self):
Renderer.__init__(self)
self.picload = ePicLoad()
self.picload.PictureData.get().append(self.paintIconPixmapCB)
self.iconFileName = ""
GUI_WIDGET = ePixmap
def postWidgetCreate(self, instance):
for (attrib, value) in self.skinAttributes:
if attrib == "size":
x, y = value.split(',')
self._scaleSize = eSize(int(x), int(y))
break
sc = AVSwitch().getFramebufferScale()
self._aspectRatio = eSize(sc[0], sc[1])
self.picload.setPara((self._scaleSize.width(), self._scaleSize.height(), sc[0], sc[1], True, 2, '#ff000000'))
def disconnectAll(self):
self.picload.PictureData.get().remove(self.paintIconPixmapCB)
self.picload = None
Renderer.disconnectAll(self)
def paintIconPixmapCB(self, picInfo=None):
ptr = self.picload.getData()
if ptr is not None:
pic_scale_size = eSize()
# To be added in the future:
if 'scale' in eSize.__dict__ and self._scaleSize.isValid() and self._aspectRatio.isValid():
pic_scale_size = ptr.size().scale(self._scaleSize, self._aspectRatio)
# To be removed in the future:
elif 'scaleSize' in gPixmapPtr.__dict__:
pic_scale_size = ptr.scaleSize()
if pic_scale_size.isValid():
pic_scale_width = pic_scale_size.width()
pic_scale_height = pic_scale_size.height()
dest_rect = eRect(0, 0, pic_scale_width, pic_scale_height)
self.instance.setScale(1)
self.instance.setScaleDest(dest_rect)
else:
self.instance.setScale(0)
self.instance.setPixmap(ptr)
else:
self.instance.setPixmap(None)
def doSuspend(self, suspended):
if suspended:
self.changed((self.CHANGED_CLEAR,))
else:
self.changed((self.CHANGED_DEFAULT,))
def updateIcon(self, filename):
new_IconFileName = filename
if (self.iconFileName != new_IconFileName):
self.iconFileName = new_IconFileName
self.picload.startDecode(self.iconFileName)
def changed(self, what):
if what[0] != self.CHANGED_CLEAR:
if self.instance:
self.updateIcon(self.source.iconfilename)
else:
self.picload.startDecode("")
|
the-stack_106_20880
|
black=(0,0,0)
blue=(0,0,255)
sky_blue=(200,200,255)
red=(255,0,0)
green=(0,255,0)
yellow=(255,255,0)
screen_width = 1000
screen_height = 600
#Spacing between water height locations
spacing = 10 #pixels
sea_level = 200
friction = 0.02
surface_tension = 0.3
#Number of water spacings beyond the right side of the screen that
#the water extends
water_extension = 5
#For scaling down the image
scaling = 0.2
compass_scaling = 0.1
#Buffer of empty pixels around boat image to make the boat
#sink down in the water more.
buffer = 7
#Upward pressure from being under water. Is a function of depth
bouyancy = 0.2
#Constant downward acceleration
gravity = 0.6
air_friction = 0.01
water_friction = 0.1
#Use to oscillate the right-most water_heights value
amplitude = 150
period = 25
#Controls for the player's boat
jump_power = 22 #instantaneous change to dy when jumping
speed = 1.5 #shift left or right in pixels per frame
rum_scroll_speed_min = 2
rum_scroll_speed_max = 5
rum_respawn_min = 30
rum_respawn_max = 90
rum_collision_distance = 40
sword_respawn_min = 90
sword_respawn_max = 150
#How far above sea level to spawn swords and rum
spawn_above_sea_level = 100
#Speed of cannon ball shot
cannon_ball_speed=20
cannon_ball_arc=3.1415/3
#Depth at which the boat causes a splash in the water.
splash_depth = 45
splash_delay = 15 #delay in frames before the boat can cause another splash.
splash_adjust = 15 #adjustment to reduce magnitude of splash. Unit is pixels
#This determines how long to dwell on each frame of the cannon shot smoke.
#Dwell of zero skips the image
dwell_sequence = []
for _ in range(27):
dwell_sequence.append(0)
for _ in range(6):
dwell_sequence.append(5)
for _ in range(3):
dwell_sequence.append(1)
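#The resulting smoke dwell sequence holds 36 entries: 27 skipped frames (dwell 0),
#then 6 frames shown for 5 ticks each, then 3 frames shown for 1 tick each.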
#Drift for the smoke from cannon shots
smoke_dx = -3
#This determines how long to dwell on each frame of the cannon shot explosion.
blast_dwell_sequence = []
for _ in range(24):
blast_dwell_sequence.append(1)
for _ in range(12):
blast_dwell_sequence.append(0)
#Enemy boat attributes
enemy_health = 3
enemy_volley_count = 3 #Shots per volley
enemy_volley_delay = 180 #delay between volleys
arc_adder = 3.1415/8 #Adder to the enemy boat's cannon ball arc
random_arc_adder = 3.1415/4
#Delay before the next enemy boat spawn
enemy_spawn_delay = 60*20 #20 seconds
enemy_stop_point = screen_width/2
|
the-stack_106_20884
|
# qubit number=2
# total number=69
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()
def make_circuit()-> Program:
prog = Program() # circuit begin
prog += H(0) # number=1
prog += RX(-0.09738937226128368,2) # number=2
prog += H(1) # number=33
prog += Y(2) # number=56
prog += CZ(2,1) # number=34
prog += H(1) # number=35
prog += H(1) # number=3
prog += H(0) # number=45
prog += H(1) # number=66
prog += CZ(2,1) # number=67
prog += H(1) # number=68
prog += CZ(1,0) # number=46
prog += H(0) # number=47
prog += Y(1) # number=15
prog += CNOT(1,0) # number=10
prog += H(1) # number=19
prog += CZ(0,1) # number=20
prog += RX(-0.6000441968356504,1) # number=28
prog += H(1) # number=21
prog += H(1) # number=30
prog += CZ(0,1) # number=31
prog += H(1) # number=32
prog += H(1) # number=57
prog += CZ(0,1) # number=58
prog += H(1) # number=59
prog += CNOT(0,1) # number=51
prog += X(1) # number=52
prog += CNOT(0,1) # number=53
prog += CNOT(0,1) # number=50
prog += H(2) # number=29
prog += H(1) # number=36
prog += X(1) # number=64
prog += CZ(0,1) # number=37
prog += Y(2) # number=44
prog += H(1) # number=38
prog += Z(1) # number=55
prog += H(1) # number=61
prog += CZ(0,1) # number=62
prog += Z(2) # number=65
prog += H(1) # number=63
prog += Z(1) # number=11
prog += RX(-1.1780972450961724,2) # number=54
prog += H(1) # number=42
prog += H(0) # number=39
prog += CZ(1,0) # number=40
prog += H(0) # number=41
prog += CNOT(2,1) # number=26
prog += Y(1) # number=14
prog += CNOT(1,0) # number=5
prog += X(1) # number=6
prog += Z(1) # number=8
prog += X(1) # number=7
prog += H(2) # number=43
prog += RX(-2.42845112122491,1) # number=25
# circuit end
return prog
def summarise_results(bitstrings) -> dict:
d = {}
for l in bitstrings:
if d.get(l) is None:
d[l] = 1
else:
d[l] = d[l] + 1
return d
if __name__ == '__main__':
prog = make_circuit()
qvm = get_qc('1q-qvm')
results = qvm.run_and_measure(prog,1024)
bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
bitstrings = [''.join(map(str, l)) for l in bitstrings]
writefile = open("../data/startPyquil387.csv","w")
    print(summarise_results(bitstrings), file=writefile)
writefile.close()
|
the-stack_106_20886
|
import atmPy.atmos.gas_props as gp
import atmPy.atmos.water as water
from atmPy.atmos.constants import R, Na
from numpy import sqrt, pi
class Air(gp.Gas):
def __init__(self, t=20.0, p=1013.25, **kwargs):
super(Air, self).__init__(t, p)
self._Rd = 287.05
self._Rv = 461.495
self._dc = 3.7e-10
self.e = 0
if "ecal_meth" in kwargs:
            self._wvObj = kwargs['ecal_meth']
else:
self._wvObj = water.MurphyKoop()
if "e" in kwargs:
            self.e = kwargs['e']
elif "rh" in kwargs:
self.e = self._wvObj.ew(self.t) * kwargs['rh'] / 100
else:
self.e = 0
def cal_e(self, rh):
self.e = self._wvObj.ew(self.t) * rh / 100
def mu(self):
"""
        Dynamic viscosity of air as a function of temperature (Sutherland's formula), in Pa-s.
        Parameters
        ----------
        T: temperature in degrees Celsius
        Returns
        -------
        Viscosity in Pa-s
"""
# Make sure that the temperature is a float
t = self.t + 273.15
c = 110.4 # 120.0 # Sutherland's constant
mu0 = 1.716e-5# 18.27e-6 # Reference viscocity
t0 = 273.15 # 291.15 # Reference temperature
return (c + t0) / (c + t) * (t / t0) ** 1.5 * mu0
def l(self):
"""
Determine the mean free path of air.
Returns
-------
        Mean free path of air in meters.
"""
# Convert pressure to atmospheres
# patm = float(self.p) / 1013.25
# l0 = 0.066 # Reference mean free path at 1 atm
tk = self.t + 273.15
# return (R * tk) / (sqrt(2) * pi * self._dc ** 2 * Na * self.p * 100)
mfp = 6.73e-8*(tk/296.15*1013.25/self.p)*((110.4/296.15+1)/(110.4/tk+1))
return mfp
# return l0 / patm
def rho(self):
tk = self.t + 273.15
return self.p * 100 / (self._Rd * tk) + self.e * 100 / (self._Rv * tk)
|
the-stack_106_20887
|
import logging
from typing import Iterable, Optional, Sequence
from google.protobuf.struct_pb2 import ListValue as ProtobufList
from google.protobuf.struct_pb2 import Struct as ProtobufStruct
from lookout.core.analyzer import Analyzer, AnalyzerModel, DummyAnalyzerModel, ReferencePointer
from lookout.core.api.event_pb2 import PushEvent, ReviewEvent
from lookout.core.api.service_analyzer_pb2 import EventResponse
from lookout.core.data_requests import DataService
from lookout.core.event_listener import EventHandlers
from lookout.core.metrics import record_event
from lookout.core.model_repository import ModelRepository
from lookout.core.ports import Type
class AnalyzerManager(EventHandlers):
"""
Manages several `Analyzer`-s: runs them and trains the models.
Relies on a `ModelRepository` to retrieve and update the models. Also requires the address
of the data (UAST, contents) gRPC service, typically running in the same Lookout server.
"""
_log = logging.getLogger("AnalyzerManager")
def __init__(self, analyzers: Iterable[Type[Analyzer]], model_repository: ModelRepository,
data_service: DataService):
"""
Initialize a new instance of the AnalyzerManager class.
:param analyzers: Analyzer types to manage (not instances!).
:param model_repository: Injected implementor of the `ModelRepository` interface.
:param data_service: gRPC data retrieval service to fetch UASTs and files.
"""
self._model_repository = model_repository
analyzers = [(a.__name__, a) for a in analyzers]
analyzers.sort()
self._analyzers = [a[1] for a in analyzers]
self._data_service = data_service
def __str__(self) -> str:
"""Summarize AnalyzerManager as a string."""
return "AnalyzerManager(%s)" % self.version
@property
def version(self) -> str:
"""
Return the version string that depends on all the managed analyzers.
"""
return " ".join(self._model_id(a) for a in self._analyzers)
def process_review_event(self, request: ReviewEvent) -> EventResponse: # noqa: D401
"""
Callback for review events invoked by EventListener.
"""
base_ptr = ReferencePointer.from_pb(request.commit_revision.base)
head_ptr = ReferencePointer.from_pb(request.commit_revision.head)
response = EventResponse()
response.analyzer_version = self.version
comments = []
for analyzer in self._analyzers:
try:
mycfg = self._protobuf_struct_to_dict(request.configuration[analyzer.name])
self._log.info("%s config: %s", analyzer.name, mycfg)
except (KeyError, ValueError):
mycfg = {}
self._log.debug("no config was provided for %s", analyzer.name)
if analyzer.model_type != DummyAnalyzerModel:
model = self._get_model(analyzer, base_ptr.url)
if model is None:
self._log.info("training: %s", analyzer.name)
record_event("%s.train" % analyzer.name, 1)
model = analyzer.train(base_ptr, mycfg, self._data_service)
self._model_repository.set(self._model_id(analyzer), base_ptr.url, model)
else:
model = DummyAnalyzerModel()
self._log.debug("running %s", analyzer.name)
record_event("%s.analyze" % analyzer.name, 1)
results = analyzer(model, head_ptr.url, mycfg).analyze(
base_ptr, head_ptr, self._data_service)
self._log.info("%s: %d comments", analyzer.name, len(results))
record_event("%s.comments" % analyzer.name, len(results))
comments.extend(results)
response.comments.extend(comments)
return response
def process_push_event(self, request: PushEvent) -> EventResponse: # noqa: D401
"""
Callback for push events invoked by EventListener.
"""
ptr = ReferencePointer.from_pb(request.commit_revision.head)
data_service = self._data_service
for analyzer in self._analyzers:
if analyzer.model_type == DummyAnalyzerModel:
continue
try:
mycfg = self._protobuf_struct_to_dict(request.configuration[analyzer.name])
except (KeyError, ValueError):
mycfg = {}
model = self._get_model(analyzer, ptr.url)
if model is not None:
must_train = analyzer.check_training_required(model, ptr, mycfg, data_service)
if not must_train:
self._log.info("skipped training %s", analyzer.name)
continue
self._log.debug("training %s", analyzer.name)
record_event("%s.train" % analyzer.name, 1)
model = analyzer.train(ptr, mycfg, data_service)
self._model_repository.set(self._model_id(analyzer), ptr.url, model)
response = EventResponse()
response.analyzer_version = self.version
return response
def warmup(self, urls: Sequence[str]):
"""
Warm up the model cache (which supposedly exists in the injected `ModelRepository`). \
We get the models corresponding to the managed analyzers and the specified list of \
repositories.
:param urls: The list of Git repositories for which to fetch the models.
"""
self._log.info("warming up on %d urls", len(urls))
for url in urls:
for analyzer in self._analyzers:
self._model_repository.get(self._model_id(analyzer), analyzer.model_type, url)
@staticmethod
def _model_id(analyzer: Type[Analyzer]) -> str:
return "%s/%s" % (analyzer.name, analyzer.version)
@staticmethod
def _protobuf_struct_to_dict(configuration: ProtobufStruct) -> dict:
mycfg = dict(configuration)
stack = [mycfg]
while stack:
d = stack.pop()
if isinstance(d, dict):
keyiter = iter(d)
elif isinstance(d, list):
keyiter = range(len(d))
else:
keyiter = []
for key in keyiter:
if isinstance(d[key], ProtobufStruct):
d[key] = dict(d[key])
stack.append(d[key])
elif isinstance(d[key], ProtobufList):
d[key] = list(d[key])
stack.append(d[key])
else:
if isinstance(d[key], float) and d[key].is_integer():
d[key] = int(d[key])
return mycfg
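    # Illustrative sketch (hypothetical values): converting a protobuf Struct into a
    # plain dict, with whole-number floats coerced back to int.
    #
    #     from google.protobuf.struct_pb2 import Struct
    #     cfg = Struct()
    #     cfg.update({"threshold": 2.0, "rules": [{"weight": 1.0}]})
    #     AnalyzerManager._protobuf_struct_to_dict(cfg)
    #     # -> {"threshold": 2, "rules": [{"weight": 1}]}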
def _get_model(self, analyzer: Type[Analyzer], url: str) -> Optional[AnalyzerModel]:
model, cache_miss = self._model_repository.get(
self._model_id(analyzer), analyzer.model_type, url)
if cache_miss:
self._log.info("cache miss: %s", analyzer.name)
return model
|
the-stack_106_20888
|
#!/usr/bin/python
#
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: ordnance_facts
author: "Alexander Turner (@alexanderturner) <[email protected]>"
short_description: Collect facts from Ordnance Virtual Routers over SSH
description:
- Collects a base set of device facts from an Ordnance Virtual
router over SSH. This module prepends all of the
base network fact keys with C(ansible_net_<fact>). The facts
module will always collect a base set of facts from the device
and can enable or disable collection of additional facts.
options:
gather_subset:
description:
- When supplied, this argument will restrict the facts collected
to a given subset. Possible values for this argument include
all, hardware, config, and interfaces. Can specify a list of
values to include a larger subset. Values can also be used
with an initial C(!) to specify that a specific subset should
not be collected.
required: false
default: '!config'
'''
EXAMPLES = """
---
# Note: examples below use the following provider dict to handle
# transport and authentication to the node.
vars:
cli:
host: "{{ inventory_hostname }}"
username: RouterName
password: ordnance
transport: cli
---
- name: Collect all facts from the device
community.network.ordnance_facts:
gather_subset: all
provider: "{{ cli }}"
- name: Collect only the config and default facts
community.network.ordnance_facts:
gather_subset:
- config
provider: "{{ cli }}"
- name: Do not collect hardware facts
community.network.ordnance_facts:
gather_subset:
- "!hardware"
provider: "{{ cli }}"
"""
RETURN = """
ansible_net_gather_subset:
description: The list of fact subsets collected from the virtual router
returned: always
type: list
# config
ansible_net_config:
description: The current active config from the virtual router
returned: when config is configured
type: str
# interfaces
ansible_net_all_ipv4_addresses:
description: All IPv4 addresses configured on the virtual router
returned: when interfaces is configured
type: list
ansible_net_all_ipv6_addresses:
description: All IPv6 addresses configured on the virtual router
returned: when interfaces is configured
type: list
ansible_net_interfaces:
description: A hash of all interfaces running on the virtual router
returned: when interfaces is configured
type: dict
"""
import re
import traceback
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.network import NetworkModule
from ansible.module_utils.six import iteritems
from ansible.module_utils.six.moves import zip
from ansible.module_utils._text import to_native
class FactsBase(object):
def __init__(self, module):
self.module = module
self.facts = dict()
self.failed_commands = list()
def run(self, cmd):
try:
return self.module.cli(cmd)[0]
except Exception:
self.failed_commands.append(cmd)
class Config(FactsBase):
def populate(self):
data = self.run('show running-config')
if data:
self.facts['config'] = data
class Interfaces(FactsBase):
def populate(self):
self.facts['all_ipv4_addresses'] = list()
self.facts['all_ipv6_addresses'] = list()
data = self.run('show interfaces')
if data:
interfaces = self.parse_interfaces(data)
self.facts['interfaces'] = self.populate_interfaces(interfaces)
data = self.run('show ipv6 interface')
if data:
data = self.parse_interfaces(data)
self.populate_ipv6_interfaces(data)
def populate_interfaces(self, interfaces):
facts = dict()
for key, value in iteritems(interfaces):
intf = dict()
intf['description'] = self.parse_description(value)
intf['macaddress'] = self.parse_macaddress(value)
ipv4 = self.parse_ipv4(value)
intf['ipv4'] = self.parse_ipv4(value)
if ipv4:
self.add_ip_address(ipv4['address'], 'ipv4')
intf['duplex'] = self.parse_duplex(value)
intf['operstatus'] = self.parse_operstatus(value)
intf['type'] = self.parse_type(value)
facts[key] = intf
return facts
def populate_ipv6_interfaces(self, data):
for key, value in iteritems(data):
self.facts['interfaces'][key]['ipv6'] = list()
addresses = re.findall(r'\s+(.+), subnet', value, re.M)
subnets = re.findall(r', subnet is (.+)$', value, re.M)
for addr, subnet in zip(addresses, subnets):
ipv6 = dict(address=addr.strip(), subnet=subnet.strip())
self.add_ip_address(addr.strip(), 'ipv6')
self.facts['interfaces'][key]['ipv6'].append(ipv6)
def add_ip_address(self, address, family):
if family == 'ipv4':
self.facts['all_ipv4_addresses'].append(address)
else:
self.facts['all_ipv6_addresses'].append(address)
def parse_interfaces(self, data):
parsed = dict()
key = ''
for line in data.split('\n'):
if len(line) == 0:
continue
elif line[0] == ' ':
parsed[key] += '\n%s' % line
else:
match = re.match(r'^(\S+)', line)
if match:
key = match.group(1)
parsed[key] = line
return parsed
def parse_description(self, data):
match = re.search(r'Description: (.+)$', data, re.M)
if match:
return match.group(1)
def parse_macaddress(self, data):
match = re.search(r'address is (\S+)', data)
if match:
return match.group(1)
def parse_ipv4(self, data):
match = re.search(r'Internet address is (\S+)', data)
if match:
addr, masklen = match.group(1).split('/')
return dict(address=addr, masklen=int(masklen))
def parse_duplex(self, data):
match = re.search(r'(\w+) Duplex', data, re.M)
if match:
return match.group(1)
def parse_operstatus(self, data):
match = re.search(r'^(?:.+) is (.+),', data, re.M)
if match:
return match.group(1)
FACT_SUBSETS = dict(
interfaces=Interfaces,
config=Config,
)
VALID_SUBSETS = frozenset(FACT_SUBSETS.keys())
def main():
spec = dict(
gather_subset=dict(default=['!config'], type='list')
)
module = NetworkModule(argument_spec=spec, supports_check_mode=True)
gather_subset = module.params['gather_subset']
runable_subsets = set()
exclude_subsets = set()
for subset in gather_subset:
if subset == 'all':
runable_subsets.update(VALID_SUBSETS)
continue
if subset.startswith('!'):
subset = subset[1:]
if subset == 'all':
exclude_subsets.update(VALID_SUBSETS)
continue
exclude = True
else:
exclude = False
if subset not in VALID_SUBSETS:
module.fail_json(msg='Bad subset')
if exclude:
exclude_subsets.add(subset)
else:
runable_subsets.add(subset)
if not runable_subsets:
runable_subsets.update(VALID_SUBSETS)
runable_subsets.difference_update(exclude_subsets)
runable_subsets.add('default')
facts = dict()
facts['gather_subset'] = list(runable_subsets)
instances = list()
for key in runable_subsets:
instances.append(FACT_SUBSETS[key](module))
failed_commands = list()
try:
for inst in instances:
inst.populate()
failed_commands.extend(inst.failed_commands)
facts.update(inst.facts)
except Exception as exc:
module.fail_json(msg=to_native(exc), exception=traceback.format_exc())
ansible_facts = dict()
for key, value in iteritems(facts):
key = 'ansible_net_%s' % key
ansible_facts[key] = value
module.exit_json(ansible_facts=ansible_facts, failed_commands=failed_commands)
if __name__ == '__main__':
main()
|
the-stack_106_20892
|
import unittest
from pbxproj.XcodeProject import *
class PBXGenericTargetTest(unittest.TestCase):
def testGetBuildPhase(self):
project = XcodeProject({
"objects": {
"1": {"isa": "PBXGenericTarget", "buildPhases": ["2"]},
"2": {"isa": "PBXGenericBuildPhase"}
}
})
build_phases = project.objects['1'].get_or_create_build_phase("PBXGenericBuildPhase")
self.assertListEqual(build_phases, [project.objects["2"]])
def testRemoveBuildPhaseFailed(self):
project = XcodeProject({
"objects": {
"1": {"isa": "PBXGenericTarget", "buildPhases": ["2"]},
"2": {"isa": "PBXGenericBuildPhase"}
}
})
result = project.objects['1'].remove_build_phase(None)
self.assertFalse(result)
|
the-stack_106_20893
|
#!/usr/bin/env python3
# Copyright (c) 2019 The OPALCOIN developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import sync_blocks, sync_mempools, connect_nodes_bi, \
p2p_port, assert_equal, assert_raises_rpc_error
import urllib.parse
class ReorgStakeTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
self.extra_args = [["-minrelaytxfee=0.00001"],[]]
def generateBatchBlocks(self, nodeid, limit, batch_size = 5):
i = 0
while i < limit:
i += batch_size
if i <= limit:
self.nodes[nodeid].generate(batch_size)
else:
self.nodes[nodeid].generate(batch_size-i+limit)
def findUtxoInList(self, txid, vout, utxo_list):
for x in utxo_list:
if x["txid"] == txid and x["vout"] == vout:
return True, x
return False, None
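    # Illustrative sketch (hypothetical values): look up a specific outpoint in a
    # listunspent() result.
    #
    #     found, utxo = self.findUtxoInList(txid, 0, self.nodes[0].listunspent())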
def run_test(self):
# NLAST_POW_BLOCK = 250 - so mine 125 blocks each node (25 consecutive blocks for 5 times)
NMATURITY = 100
self.log.info("Mining 250 blocks (125 with node 0 and 125 with node 1)...")
for i in range(5):
self.generateBatchBlocks(0, 25)
sync_blocks(self.nodes)
self.generateBatchBlocks(1, 25)
sync_blocks(self.nodes)
sync_mempools(self.nodes)
# Check balances
balance0 = 250.0 * (125 - 50)
balance1 = 250.0 * (125 - 50)
# Last two 25-blocks bursts (for each node) are not mature: NMATURITY = 2 * (2 * 25)
immature_balance0 = 250.0 * 50
immature_balance1 = 250.0 * 50
w_info = self.nodes[0].getwalletinfo()
assert_equal(w_info["balance"], balance0)
assert_equal(w_info["immature_balance"], immature_balance0)
self.log.info("Balance for node 0 checks out: %f [%f]" % (balance0, immature_balance0))
w_info = self.nodes[1].getwalletinfo()
assert_equal(w_info["balance"], balance1)
assert_equal(w_info["immature_balance"], immature_balance1)
self.log.info("Balance for node 1 checks out: %f [%f]" % (balance1, immature_balance1))
initial_balance = balance0
initial_immature_balance = immature_balance0
initial_unspent = self.nodes[0].listunspent()
# PoS start reached (block 250) - disconnect nodes
self.nodes[0].disconnectnode(urllib.parse.urlparse(self.nodes[1].url).hostname + ":" + str(p2p_port(1)))
self.nodes[1].disconnectnode(urllib.parse.urlparse(self.nodes[0].url).hostname + ":" + str(p2p_port(0)))
self.log.info("Nodes disconnected")
# Stake one block with node-0 and save the stake input
self.log.info("Staking 1 block with node 0...")
self.nodes[0].generate(1)
last_block = self.nodes[0].getblock(self.nodes[0].getbestblockhash())
assert(len(last_block["tx"]) > 1) # a PoS block has at least two txes
coinstake_txid = last_block["tx"][1]
coinstake_tx = self.nodes[0].getrawtransaction(coinstake_txid, True)
assert(coinstake_tx["vout"][0]["scriptPubKey"]["hex"] == "") # first output of coinstake is empty
stakeinput = coinstake_tx["vin"][0]
# The stake input was unspent 1 block ago, now it's not
res, utxo = self.findUtxoInList(stakeinput["txid"], stakeinput["vout"], initial_unspent)
assert (res and utxo["spendable"])
res, utxo = self.findUtxoInList(stakeinput["txid"], stakeinput["vout"], self.nodes[0].listunspent())
assert (not res or not utxo["spendable"])
self.log.info("Coinstake input %s...%s-%d is no longer spendable." % (
stakeinput["txid"][:9], stakeinput["txid"][-4:], stakeinput["vout"]))
# Stake 10 more blocks with node-0 and check balances
self.log.info("Staking 10 more blocks with node 0...")
self.generateBatchBlocks(0, 10)
balance0 = initial_balance + 0 # mined blocks matured (250*11) - staked blocks inputs (250*11)
immature_balance0 += 250 * 11 # -mined blocks matured (250*11) + staked blocks (500*11)
w_info = self.nodes[0].getwalletinfo()
assert_equal(w_info["balance"], balance0)
assert_equal(w_info["immature_balance"], immature_balance0)
self.log.info("Balance for node 0 checks out: %f [%f]" % (balance0, immature_balance0))
# verify that the stakeinput can't be spent
rawtx_unsigned = self.nodes[0].createrawtransaction(
[{"txid": str(stakeinput["txid"]), "vout": int(stakeinput["vout"])}],
{"xxncEuJK27ygNh7imNfaX8JV6ZQUnoBqzN": 249.99})
rawtx = self.nodes[0].signrawtransaction(rawtx_unsigned)
assert(rawtx["complete"])
assert_raises_rpc_error(-25, "Missing inputs",self.nodes[0].sendrawtransaction, rawtx["hex"])
# Stake 12 blocks with node-1
self.log.info("Staking 12 blocks with node 1...")
self.generateBatchBlocks(1, 12)
balance1 -= 250 * 12 # 0 - staked blocks inputs (250*12)
immature_balance1 += 500 * 12 # + staked blocks (500 * 12)
w_info = self.nodes[1].getwalletinfo()
assert_equal(w_info["balance"], balance1)
assert_equal(w_info["immature_balance"], immature_balance1)
self.log.info("Balance for node 1 checks out: %f [%f]" % (balance1, immature_balance1))
new_best_hash = self.nodes[1].getbestblockhash()
# re-connect and sync nodes and check that node-0 gets on the other chain
self.log.info("Connecting and syncing nodes...")
connect_nodes_bi(self.nodes, 0, 1)
sync_blocks(self.nodes)
assert_equal(self.nodes[0].getbestblockhash(), new_best_hash)
# check balance of node-0
balance0 = initial_balance + 250 * 12 # + mined blocks matured (250*12)
immature_balance0 = initial_immature_balance - 250 * 12 # - mined blocks matured (250*12)
w_info = self.nodes[0].getwalletinfo()
assert_equal(w_info["balance"], balance0) # <--- !!! THIS FAILS before PR #1043
assert_equal(w_info["immature_balance"], immature_balance0)
self.log.info("Balance for node 0 checks out: %f [%f]" % (balance0, immature_balance0))
# check that NOW the original stakeinput is present and spendable
res, utxo = self.findUtxoInList(stakeinput["txid"], stakeinput["vout"], self.nodes[0].listunspent())
assert (res and utxo["spendable"]) # <--- !!! THIS FAILS before PR #1043
self.log.info("Coinstake input %s...%s-%d is spendable again." % (
stakeinput["txid"][:9], stakeinput["txid"][-4:], stakeinput["vout"]))
self.nodes[0].sendrawtransaction(rawtx["hex"])
self.nodes[1].generate(1)
sync_blocks(self.nodes)
res, utxo = self.findUtxoInList(stakeinput["txid"], stakeinput["vout"], self.nodes[0].listunspent())
assert (not res or not utxo["spendable"])
if __name__ == '__main__':
ReorgStakeTest().main()
|
the-stack_106_20894
|
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
import os
print(os.listdir("../input"))
# Any results you write to the current directory are saved as output.
from contextlib import contextmanager
import time
import gc
@contextmanager
def timer(title):
t0 = time.time()
yield
print("{} - done in {:.0f}s".format(title, time.time() - t0))
def one_hot_encoder(df, nan_as_category = True):
original_columns = list(df.columns)
categorical_columns = [col for col in df.columns if df[col].dtype == 'object']
df = pd.get_dummies(df, columns= categorical_columns, dummy_na= nan_as_category)
new_columns = [c for c in df.columns if c not in original_columns]
return df, new_columns
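# Illustrative example on hypothetical data: one_hot_encoder dummy-encodes every object
# column (optionally with a NaN category) and reports the names of the added columns.
#
#     toy = pd.DataFrame({'amount': [1, 2], 'type': ['cash', 'card']})
#     toy, new_cols = one_hot_encoder(toy, nan_as_category=True)
#     # new_cols -> ['type_card', 'type_cash', 'type_nan']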
# Preprocess application_train.csv and application_test.csv
def application_train_test(num_rows = None, nan_as_category = False):
# Read data and merge
df = pd.read_csv('../input/application_train.csv', nrows= num_rows)
test_df = pd.read_csv('../input/application_test.csv', nrows= num_rows)
print("Train samples: {}, test samples: {}".format(len(df), len(test_df)))
df = df.append(test_df).reset_index().drop("index",axis=1)
# Optional: Remove 4 applications with XNA CODE_GENDER (train set)
df = df[df['CODE_GENDER'] != 'XNA']
df = df[df['NAME_INCOME_TYPE'] != 'Maternity leave']
df = df[df['NAME_FAMILY_STATUS'] != 'Unknown']
df.loc[df.FLAG_OWN_CAR=="N","OWN_CAR_AGE"] = 0
    # Incomplete information
# df['incomplete'] = 1
# df.loc[df.isnull().sum(axis=1)<35, 'incomplete'] = 0
df['num_missing'] = df.isnull().sum(axis = 1).values
# Add social class
df["HIGH_CLASS"] = 0
df.loc[(df["NAME_EDUCATION_TYPE"].isin(["Academic degree","Higher education"]))
& (df["OCCUPATION_TYPE"].isin(["Accountants","Core staff","HR staff","High skill tech staff","IT staff","Managers","Medicine staff","Private service staff"])),"HIGH_CLASS"] = 1
df["LOW_CLASS"] = 0
df.loc[(df["NAME_EDUCATION_TYPE"].isin(["Lower secondary","Secondary / secondary special"])
& df["OCCUPATION_TYPE"].isin(["Cleaning staff","Cooking staff","Drivers","Laborers","Low-skill Laborers","Security staff","Waiters/barmen staff"])),"LOW_CLASS"] = 1
    # NaN values for DAYS_EMPLOYED: 365243 -> nan
df['DAYS_EMPLOYED'].replace(365243, np.nan, inplace=True)
# Estimate invalid data in feature "DAYS_EMPLOYED"
# from sklearn.linear_model import LinearRegression
# valid = df[df["DAYS_EMPLOYED"]<0]
# invalid = df[df["DAYS_EMPLOYED"]>=0]
# lr = LinearRegression()
# model = lr.fit(pd.DataFrame(valid["DAYS_BIRTH"]),valid["DAYS_EMPLOYED"])
# invalid["DAYS_EMPLOYED"] = model.predict(pd.DataFrame(invalid["DAYS_BIRTH"]))
# df["DAYS_EMPLOYED"] = pd.concat([valid,invalid]).sort_index()["DAYS_EMPLOYED"]
# Remove outliers
df.loc[df['OWN_CAR_AGE'] > 80, 'OWN_CAR_AGE'] = np.nan
df.loc[df['REGION_RATING_CLIENT_W_CITY'] < 0, 'REGION_RATING_CLIENT_W_CITY'] = np.nan
df.loc[df['AMT_INCOME_TOTAL'] > 1e8, 'AMT_INCOME_TOTAL'] = np.nan
df.loc[df['AMT_REQ_CREDIT_BUREAU_QRT'] > 10, 'AMT_REQ_CREDIT_BUREAU_QRT'] = np.nan
df.loc[df['OBS_30_CNT_SOCIAL_CIRCLE'] > 40, 'OBS_30_CNT_SOCIAL_CIRCLE'] = np.nan
# Some simple new features (percentages)
inc_by_org = df[['AMT_INCOME_TOTAL', 'ORGANIZATION_TYPE']].groupby('ORGANIZATION_TYPE').median()['AMT_INCOME_TOTAL']
df['NEW_INC_BY_ORG'] = df['ORGANIZATION_TYPE'].map(inc_by_org)
df['EMPLOYED_TO_BIRTH_RATIO'] = df['DAYS_EMPLOYED'] / df['DAYS_BIRTH']
df['INCOME_PER_FAM_MEMBERS'] = df['AMT_INCOME_TOTAL'] / df['CNT_FAM_MEMBERS']
df['INC_PER_CHILD'] = df['AMT_INCOME_TOTAL'] / (1+df['CNT_CHILDREN'])
df['CREDIT_TO_INCOME_RATIO'] = df['AMT_CREDIT'] / df['AMT_INCOME_TOTAL']
df['ANNUITY_TO_INCOME_RATIO'] = df['AMT_ANNUITY'] / df['AMT_INCOME_TOTAL']
df['CREDIT_TO_ANNUITY_RATIO'] = df['AMT_CREDIT'] / df['AMT_ANNUITY']
df['CREDIT_TO_GOODS_RATIO'] = df['AMT_CREDIT'] / df['AMT_GOODS_PRICE']
docs = [_f for _f in df.columns if 'FLAG_DOC' in _f]
live = [_f for _f in df.columns if ('FLAG_' in _f) & ('FLAG_DOC' not in _f) & ('_FLAG_' not in _f)]
df['NEW_DOC_IND_AVG'] = df[docs].mean(axis=1)
df['NEW_DOC_IND_KURT'] = df[docs].kurtosis(axis=1)
df['NEW_LIVE_IND_SUM'] = df[live].sum(axis=1)
df['NEW_LIVE_IND_KURT'] = df[live].kurtosis(axis=1)
# df['NEW_EXT_SOURCES_MEDIAN'] = df[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']].median(axis=1, skipna=True)
df['NEW_EXT_SOURCES_MEAN'] = df[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']].mean(axis=1, skipna=True)
df['NEW_EXT_SOURCES_PROD'] = df[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']].prod(axis=1, skipna=True, min_count=1)
df['NEW_EXT_SOURCES_MAX'] = df[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']].max(axis=1, skipna=True)
df['NEW_EXT_SOURCES_MIN'] = df[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']].min(axis=1, skipna=True)
df['NEW_EXT_SOURCES_STD'] = df[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']].std(axis=1, skipna=True)
df['NEW_EXT_SOURCES_STD'] = df['NEW_EXT_SOURCES_STD'].fillna(df['NEW_EXT_SOURCES_STD'].mean())
# df['NEW_EXT_SOURCES_MAD'] = df[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']].mad(axis=1, skipna=True)
df['NEW_CAR_TO_BIRTH_RATIO'] = df['OWN_CAR_AGE'] / df['DAYS_BIRTH']
df['NEW_CAR_TO_EMPLOY_RATIO'] = df['OWN_CAR_AGE'] / df['DAYS_EMPLOYED']
df['NEW_PHONE_TO_BIRTH_RATIO'] = df['DAYS_LAST_PHONE_CHANGE'] / df['DAYS_BIRTH']
df['NEW_PHONE_TO_EMPLOY_RATIO'] = df['DAYS_LAST_PHONE_CHANGE'] / df['DAYS_EMPLOYED']
# Categorical features with Binary encode (0 or 1; two categories)
for bin_feature in ['CODE_GENDER', 'FLAG_OWN_CAR', 'FLAG_OWN_REALTY']:
df[bin_feature], uniques = pd.factorize(df[bin_feature])
# Categorical features with One-Hot encode
df, _ = one_hot_encoder(df, nan_as_category)
cols = ['FLAG_DOCUMENT_'+str(i) for i in [2,4,5,7,9,10,12,13,14,15,17,19,20,21]]
df = df.drop(cols,axis=1)
del test_df
gc.collect()
return df
# Preprocess bureau.csv and bureau_balance.csv
def bureau_and_balance(num_rows = None, nan_as_category = False):
bureau = pd.read_csv('../input/bureau.csv', nrows = num_rows)
bb = pd.read_csv('../input/bureau_balance.csv', nrows = num_rows)
# bureau_balance.csv
temp = pd.DataFrame(bb[['SK_ID_BUREAU', 'STATUS']].groupby('SK_ID_BUREAU').first())
temp.columns = ["Latest_Status"]
temp["Closed_Balance"] = bb.loc[bb['STATUS']=='C',['SK_ID_BUREAU', 'MONTHS_BALANCE']].groupby('SK_ID_BUREAU').last()
bb_aggregations = {'MONTHS_BALANCE': ['min', 'max', 'size','last']}
bb, bb_cat1 = one_hot_encoder(bb, False)
for col in bb_cat1:
bb_aggregations[col] = ['sum','mean']
bb_agg = bb.groupby('SK_ID_BUREAU').agg(bb_aggregations)
bb_agg.columns = pd.Index([e[0] + "_" + e[1].upper() for e in bb_agg.columns.tolist()])
bb_agg = bb_agg.join(temp,how = 'left', on = 'SK_ID_BUREAU')
del temp
gc.collect()
bb_agg['Month_closed_to_end'] = bb_agg['MONTHS_BALANCE_LAST'] - bb_agg['Closed_Balance']
bb_agg['Non_zero_DPD_cnt'] = bb_agg[['STATUS_1_SUM', 'STATUS_2_SUM', 'STATUS_3_SUM', 'STATUS_4_SUM', 'STATUS_5_SUM']].sum(axis = 1)
bb_agg['Non_zero_DPD_ratio'] = bb_agg[['STATUS_1_MEAN', 'STATUS_2_MEAN', 'STATUS_3_MEAN', 'STATUS_4_MEAN', 'STATUS_5_MEAN']].mean(axis = 1)
bb_agg, bb_cat2 = one_hot_encoder(bb_agg, False)
bb_cat = bb_cat1+bb_cat2
# bureau.csv
# Replace\remove some outliers in bureau set
bureau.loc[bureau['AMT_ANNUITY'] > .8e8, 'AMT_ANNUITY'] = np.nan
bureau.loc[bureau['AMT_CREDIT_SUM'] > 3e8, 'AMT_CREDIT_SUM'] = np.nan
bureau.loc[bureau['AMT_CREDIT_SUM_DEBT'] > 1e8, 'AMT_CREDIT_SUM_DEBT'] = np.nan
bureau.loc[bureau['AMT_CREDIT_MAX_OVERDUE'] > .8e8, 'AMT_CREDIT_MAX_OVERDUE'] = np.nan
bureau.loc[bureau['DAYS_ENDDATE_FACT'] < -10000, 'DAYS_ENDDATE_FACT'] = np.nan
bureau.loc[(bureau['DAYS_CREDIT_UPDATE'] > 0) | (bureau['DAYS_CREDIT_UPDATE'] < -40000), 'DAYS_CREDIT_UPDATE'] = np.nan
bureau.loc[bureau['DAYS_CREDIT_ENDDATE'] < -10000, 'DAYS_CREDIT_ENDDATE'] = np.nan
bureau.drop(bureau[bureau['DAYS_ENDDATE_FACT'] < bureau['DAYS_CREDIT']].index, inplace = True)
# Some new features in bureau set
bureau['bureau1'] = bureau['AMT_CREDIT_SUM'] - bureau['AMT_CREDIT_SUM_DEBT']
bureau['bureau2'] = bureau['AMT_CREDIT_SUM'] - bureau['AMT_CREDIT_SUM_LIMIT']
bureau['bureau3'] = bureau['AMT_CREDIT_SUM'] - bureau['AMT_CREDIT_SUM_OVERDUE']
bureau['bureau4'] = bureau['DAYS_CREDIT'] - bureau['CREDIT_DAY_OVERDUE']
bureau['bureau5'] = bureau['DAYS_CREDIT'] - bureau['DAYS_CREDIT_ENDDATE']
bureau['bureau6'] = bureau['DAYS_CREDIT'] - bureau['DAYS_ENDDATE_FACT']
bureau['bureau7'] = bureau['DAYS_CREDIT_ENDDATE'] - bureau['DAYS_ENDDATE_FACT']
bureau['bureau8'] = bureau['DAYS_CREDIT_UPDATE'] - bureau['DAYS_CREDIT_ENDDATE']
bureau.drop(['CREDIT_CURRENCY'], axis=1, inplace= True)
bureau, bureau_cat = one_hot_encoder(bureau, False)
bureau = bureau.join(bb_agg,how="left",on="SK_ID_BUREAU")
bureau.drop(['SK_ID_BUREAU'], axis=1, inplace= True)
del bb,bb_agg
gc.collect()
num_aggregations = {
'DAYS_CREDIT': ['min','max','mean', 'var'],
'DAYS_CREDIT_ENDDATE': ['min','max','mean'],
'DAYS_CREDIT_UPDATE': ['mean'],
'CREDIT_DAY_OVERDUE': ['max','mean'],
'AMT_CREDIT_MAX_OVERDUE': ['max','mean'],
'AMT_CREDIT_SUM': ['mean', 'sum'],
'AMT_CREDIT_SUM_DEBT': ['max','mean', 'sum'],
'AMT_CREDIT_SUM_OVERDUE': ['max','mean'],
'AMT_CREDIT_SUM_LIMIT': ['mean', 'sum'],
'AMT_ANNUITY': ['max', 'mean'],
'CNT_CREDIT_PROLONG': ['sum'],
'MONTHS_BALANCE_MIN': ['min'],
'MONTHS_BALANCE_MAX': ['max'],
'MONTHS_BALANCE_SIZE': ['mean', 'sum'],
'MONTHS_BALANCE_LAST': ['min','max','mean'],
'Closed_Balance': ['min','max','mean'],
'Month_closed_to_end': ['min','max','mean'],
'Non_zero_DPD_cnt': ['sum'],
'Non_zero_DPD_ratio': ['mean']
}
for num in ['bureau'+str(i+1) for i in range(8)]:
num_aggregations[num] = ['max','mean']
cat_aggregations = {}
for cat in bureau_cat: cat_aggregations[cat] = ['mean']
for cat in bb_cat1:
cat_aggregations[cat + "_MEAN"] = ['mean']
cat_aggregations[cat + "_SUM"] = ['sum']
for cat in bb_cat2:
cat_aggregations[cat] = ['mean']
bureau_agg = bureau.groupby('SK_ID_CURR').agg({**num_aggregations, **cat_aggregations})
bureau_agg.columns = pd.Index(['BURO_' + e[0] + "_" + e[1].upper() for e in bureau_agg.columns.tolist()])
# Bureau: Active credits - using only numerical aggregations
active = bureau[bureau['CREDIT_ACTIVE_Active'] == 1]
active_agg = active.groupby('SK_ID_CURR').agg(num_aggregations)
active_agg.columns = pd.Index(['ACTIVE_' + e[0] + "_" + e[1].upper() for e in active_agg.columns.tolist()])
bureau_agg = bureau_agg.join(active_agg, how='left', on='SK_ID_CURR')
del active, active_agg
gc.collect()
# Bureau: Closed credits - using only numerical aggregations
closed = bureau[bureau['CREDIT_ACTIVE_Closed'] == 1]
closed_agg = closed.groupby('SK_ID_CURR').agg(num_aggregations)
closed_agg.columns = pd.Index(['CLOSED_' + e[0] + "_" + e[1].upper() for e in closed_agg.columns.tolist()])
bureau_agg = bureau_agg.join(closed_agg, how='left', on='SK_ID_CURR')
del closed, closed_agg, bureau
gc.collect()
return bureau_agg
# Preprocess previous_applications.csv
def previous_applications(num_rows = None, nan_as_category = True):
prev = pd.read_csv('../input/previous_application.csv', nrows = num_rows)
prev, cat_cols = one_hot_encoder(prev, nan_as_category= True)
    # Days 365243 values -> nan
prev['DAYS_FIRST_DRAWING'].replace(365243, np.nan, inplace= True)
prev['DAYS_FIRST_DUE'].replace(365243, np.nan, inplace= True)
prev['DAYS_LAST_DUE_1ST_VERSION'].replace(365243, np.nan, inplace= True)
prev['DAYS_LAST_DUE'].replace(365243, np.nan, inplace= True)
prev['DAYS_TERMINATION'].replace(365243, np.nan, inplace= True)
# Replace some outliers
prev.loc[prev['AMT_CREDIT'] > 6000000, 'AMT_CREDIT'] = np.nan
prev.loc[prev['SELLERPLACE_AREA'] > 3500000, 'SELLERPLACE_AREA'] = np.nan
prev[['DAYS_FIRST_DRAWING', 'DAYS_FIRST_DUE', 'DAYS_LAST_DUE_1ST_VERSION',
'DAYS_LAST_DUE', 'DAYS_TERMINATION']].replace(365243, np.nan, inplace = True)
# Some new features
prev['APP_CREDIT_PERC'] = prev['AMT_APPLICATION'] / prev['AMT_CREDIT']
prev['prev1'] = prev.isnull().sum(axis = 1).values
prev['prev2'] = prev['AMT_APPLICATION'] - prev['AMT_CREDIT']
prev['prev3'] = prev['AMT_APPLICATION'] - prev['AMT_GOODS_PRICE']
prev['prev4'] = prev['AMT_GOODS_PRICE'] - prev['AMT_CREDIT']
prev['prev5'] = prev['DAYS_FIRST_DRAWING'] - prev['DAYS_FIRST_DUE']
prev['prev6'] = (prev['DAYS_TERMINATION'] < -500).astype(int)
# Previous applications numeric features
num_aggregations = {
'AMT_ANNUITY': ['max', 'mean'],
'AMT_APPLICATION': ['max', 'mean'],
'AMT_CREDIT': ['max', 'mean'],
'APP_CREDIT_PERC': ['min', 'max', 'mean', 'var'],
'AMT_DOWN_PAYMENT': ['max', 'mean'],
'AMT_GOODS_PRICE': ['max', 'mean'],
'HOUR_APPR_PROCESS_START': ['max', 'mean'],
'RATE_DOWN_PAYMENT': ['max', 'mean'],
'DAYS_DECISION': ['max', 'mean'],
'CNT_PAYMENT': ['mean', 'sum'],
}
for num in ['prev'+str(i+1) for i in range(6)]:
num_aggregations[num] = ['max','mean']
# Previous applications categorical features
cat_aggregations = {}
for cat in cat_cols:
cat_aggregations[cat] = ['mean']
prev_agg = prev.groupby('SK_ID_CURR').agg({**num_aggregations, **cat_aggregations})
prev_agg.columns = pd.Index(['PREV_' + e[0] + "_" + e[1].upper() for e in prev_agg.columns.tolist()])
# Previous Applications: Approved Applications - only numerical features
approved = prev[prev['NAME_CONTRACT_STATUS_Approved'] == 1]
approved_agg = approved.groupby('SK_ID_CURR').agg(num_aggregations)
approved_agg.columns = pd.Index(['APPROVED_' + e[0] + "_" + e[1].upper() for e in approved_agg.columns.tolist()])
prev_agg = prev_agg.join(approved_agg, how='left', on='SK_ID_CURR')
# Previous Applications: Refused Applications - only numerical features
refused = prev[prev['NAME_CONTRACT_STATUS_Refused'] == 1]
refused_agg = refused.groupby('SK_ID_CURR').agg(num_aggregations)
refused_agg.columns = pd.Index(['REFUSED_' + e[0] + "_" + e[1].upper() for e in refused_agg.columns.tolist()])
prev_agg = prev_agg.join(refused_agg, how='left', on='SK_ID_CURR')
del refused, refused_agg, approved, approved_agg, prev
gc.collect()
return prev_agg
# Preprocess POS_CASH_balance.csv
def pos_cash(num_rows = None, nan_as_category = True):
pos = pd.read_csv('../input/POS_CASH_balance.csv', nrows = num_rows)
pos, cat_cols = one_hot_encoder(pos, nan_as_category= True)
# Replace some outliers
pos.loc[pos['CNT_INSTALMENT_FUTURE'] > 60, 'CNT_INSTALMENT_FUTURE'] = np.nan
# Some new features
pos['pos CNT_INSTALMENT more CNT_INSTALMENT_FUTURE'] = \
(pos['CNT_INSTALMENT'] > pos['CNT_INSTALMENT_FUTURE']).astype(int)
# Features
aggregations = {
'MONTHS_BALANCE': ['max', 'mean', 'size'],
'SK_DPD': ['max', 'mean'],
'SK_DPD_DEF': ['max', 'mean'],
'pos CNT_INSTALMENT more CNT_INSTALMENT_FUTURE': ['max','mean'],
}
for cat in cat_cols:
aggregations[cat] = ['mean']
pos_agg = pos.groupby('SK_ID_CURR').agg(aggregations)
pos_agg.columns = pd.Index(['POS_' + e[0] + "_" + e[1].upper() for e in pos_agg.columns.tolist()])
# Count pos cash accounts
pos_agg['POS_COUNT'] = pos.groupby('SK_ID_CURR').size()
del pos
gc.collect()
return pos_agg
# Preprocess installments_payments.csv
def installments_payments(num_rows = None, nan_as_category = True):
ins = pd.read_csv('../input/installments_payments.csv', nrows = num_rows)
ins, cat_cols = one_hot_encoder(ins, nan_as_category= True)
# Replace some outliers
ins.loc[ins['NUM_INSTALMENT_VERSION'] > 70, 'NUM_INSTALMENT_VERSION'] = np.nan
ins.loc[ins['DAYS_ENTRY_PAYMENT'] < -4000, 'DAYS_ENTRY_PAYMENT'] = np.nan
# Some new features
ins['PAYMENT_PERC'] = ins['AMT_PAYMENT'] / ins['AMT_INSTALMENT']
ins['PAYMENT_DIFF'] = ins['AMT_INSTALMENT'] - ins['AMT_PAYMENT']
ins['ins1'] = ins['DAYS_ENTRY_PAYMENT'] - ins['DAYS_INSTALMENT']
ins['ins2'] = (ins['NUM_INSTALMENT_NUMBER'] == 100).astype(int)
ins['ins3'] = (ins['DAYS_INSTALMENT'] > ins['NUM_INSTALMENT_NUMBER'] * 50 / 3 - 11500 / 3).astype(int)
# Days past due and days before due (no negative values)
ins['DPD'] = ins['DAYS_ENTRY_PAYMENT'] - ins['DAYS_INSTALMENT']
ins['DBD'] = ins['DAYS_INSTALMENT'] - ins['DAYS_ENTRY_PAYMENT']
ins['DPD'] = ins['DPD'].apply(lambda x: x if x > 0 else 0)
ins['DBD'] = ins['DBD'].apply(lambda x: x if x > 0 else 0)
# Features: Perform aggregations
aggregations = {
'NUM_INSTALMENT_VERSION': ['nunique'],
'DPD': ['max', 'mean', 'sum'],
'DBD': ['max', 'mean', 'sum'],
'PAYMENT_PERC': ['mean','var'],
'PAYMENT_DIFF': ['mean','var'],
'AMT_INSTALMENT': ['max', 'mean', 'sum'],
'AMT_PAYMENT': ['min', 'max', 'mean', 'sum'],
'DAYS_ENTRY_PAYMENT': ['max', 'mean', 'sum'],
}
for num in ['ins'+str(i+1) for i in range(3)]:
aggregations[num] = ['max','mean']
for cat in cat_cols:
aggregations[cat] = ['mean']
ins_agg = ins.groupby('SK_ID_CURR').agg(aggregations)
ins_agg.columns = pd.Index(['INSTAL_' + e[0] + "_" + e[1].upper() for e in ins_agg.columns.tolist()])
# Count installments accounts
ins_agg['INSTAL_COUNT'] = ins.groupby('SK_ID_CURR').size()
del ins
gc.collect()
return ins_agg
# Preprocess credit_card_balance.csv
def credit_card_balance(num_rows = None, nan_as_category = True):
cc = pd.read_csv('../input/credit_card_balance.csv', nrows = num_rows)
cc, cat_cols = one_hot_encoder(cc, nan_as_category= True)
# Replace some outliers
cc.loc[cc['AMT_PAYMENT_CURRENT'] > 4000000, 'AMT_PAYMENT_CURRENT'] = np.nan
cc.loc[cc['AMT_CREDIT_LIMIT_ACTUAL'] > 1000000, 'AMT_CREDIT_LIMIT_ACTUAL'] = np.nan
# Some new features
cc['cc1'] = cc.isnull().sum(axis = 1).values
cc['cc2'] = cc['SK_DPD'] - cc['MONTHS_BALANCE']
cc['cc3'] = cc['SK_DPD_DEF'] - cc['MONTHS_BALANCE']
cc['cc4'] = cc['SK_DPD'] - cc['SK_DPD_DEF']
cc['cc5'] = cc['AMT_TOTAL_RECEIVABLE'] - cc['AMT_RECIVABLE']
cc['cc6'] = cc['AMT_TOTAL_RECEIVABLE'] - cc['AMT_RECEIVABLE_PRINCIPAL']
cc['cc7'] = cc['AMT_RECIVABLE'] - cc['AMT_RECEIVABLE_PRINCIPAL']
cc['cc8'] = cc['AMT_BALANCE'] - cc['AMT_RECIVABLE']
cc['cc9'] = cc['AMT_BALANCE'] - cc['AMT_RECEIVABLE_PRINCIPAL']
cc['cc10'] = cc['AMT_BALANCE'] - cc['AMT_TOTAL_RECEIVABLE']
cc['cc11'] = cc['AMT_DRAWINGS_CURRENT'] - cc['AMT_DRAWINGS_ATM_CURRENT']
cc['cc12'] = cc['AMT_DRAWINGS_CURRENT'] - cc['AMT_DRAWINGS_OTHER_CURRENT']
cc['cc13'] = cc['AMT_DRAWINGS_CURRENT'] - cc['AMT_DRAWINGS_POS_CURRENT']
# General aggregations
cc.drop(['SK_ID_PREV'], axis= 1, inplace = True)
cc_agg = cc.groupby('SK_ID_CURR').agg(['max', 'mean', 'sum', 'var'])
cc_agg.columns = pd.Index(['CC_' + e[0] + "_" + e[1].upper() for e in cc_agg.columns.tolist()])
# Count credit card lines
cc_agg['CC_COUNT'] = cc.groupby('SK_ID_CURR').size()
del cc
gc.collect()
return cc_agg
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import roc_auc_score, roc_curve
from sklearn.model_selection import KFold, StratifiedKFold
import matplotlib.pyplot as plt
import seaborn as sns
def write_to_csv(test_df,sub_preds,file_name="submission.csv"):
test_df['TARGET'] = sub_preds
test_df[['SK_ID_CURR', 'TARGET']].to_csv(file_name, index= False)
def kfold_rf(df, num_folds, stratified = False, debug= False):
# Divide in training/validation and test data
train_df = df[df['TARGET'].notnull()]
test_df = df[df['TARGET'].isnull()]
print("Starting RF. Train shape: {}, test shape: {}".format(train_df.shape, test_df.shape))
del df
gc.collect()
train_df = train_df.replace([np.inf,-np.inf], 0)
test_df = test_df.replace([np.inf,-np.inf], 0)
train_df = train_df.fillna(0)
test_df = test_df.fillna(0)
# Cross validation model
if stratified:
folds = StratifiedKFold(n_splits= num_folds, shuffle=True, random_state=1001)
else:
folds = KFold(n_splits= num_folds, shuffle=True, random_state=1001)
# Create arrays and dataframes to store results
oof_preds = np.zeros(train_df.shape[0])
sub_preds = np.zeros(test_df.shape[0])
feature_importance_df = pd.DataFrame()
feats = [f for f in train_df.columns if f not in ['TARGET','SK_ID_CURR','SK_ID_BUREAU','SK_ID_PREV','index']]
for n_fold, (train_idx, valid_idx) in enumerate(folds.split(train_df[feats], train_df['TARGET'])):
train_x, train_y = train_df[feats].iloc[train_idx], train_df['TARGET'].iloc[train_idx]
valid_x, valid_y = train_df[feats].iloc[valid_idx], train_df['TARGET'].iloc[valid_idx]
rf_params={
'n_jobs': 16,
'n_estimators': 500,
'max_features': 0.2,
'max_depth': 8,
'min_samples_split': 10,
# 'min_samples_leaf': 10,
'verbose': 1
}
clf = RandomForestClassifier(**rf_params)
clf.fit(train_x, train_y)
oof_preds[valid_idx] = clf.predict_proba(valid_x)[:, 1]
sub_pred = clf.predict_proba(test_df[feats])[:, 1]
sub_preds += sub_pred / folds.n_splits
auc_score = roc_auc_score(valid_y, oof_preds[valid_idx])
fold_importance_df = pd.DataFrame()
fold_importance_df["feature"] = feats
fold_importance_df["importance"] = clf.feature_importances_
fold_importance_df["fold"] = n_fold + 1
feature_importance_df = pd.concat([feature_importance_df, fold_importance_df], axis=0)
print('Fold %2d AUC : %.6f' % (n_fold + 1, auc_score))
del clf, train_x, train_y, valid_x, valid_y
gc.collect()
print('Full AUC score %.6f' % roc_auc_score(train_df['TARGET'], oof_preds))
write_to_csv(train_df,oof_preds,"oof_rf.csv")
write_to_csv(test_df,sub_preds,"submission_rf.csv")
# Write submission file and plot feature importance
# if not debug:
# test_df['TARGET'] = sub_preds
# test_df[['SK_ID_CURR', 'TARGET']].to_csv(submission_file_name, index= False)
temp = feature_importance_df[["feature", "importance"]].groupby("feature").mean().sort_values(by="importance", ascending=False)
print("no. of contributing features: %d" % (len(temp[temp["importance"]>0])))
display_importances(feature_importance_df)
feature_importance_df.groupby("feature").mean().sort_values("importance",ascending=False)["importance"].to_csv("feature_importance.csv")
# Display/plot feature importance
def display_importances(feature_importance_df_):
cols = feature_importance_df_[["feature", "importance"]].groupby("feature").mean().sort_values(by="importance", ascending=False).index
best_features = feature_importance_df_.loc[feature_importance_df_.feature.isin(cols)]
plt.figure(figsize=(15, 100))
sns.barplot(x="importance", y="feature", data=best_features.sort_values(by="importance", ascending=False))
    plt.title('Random Forest Features (avg over folds)')
plt.tight_layout()
plt.savefig('lgbm_importances01.png')
def main(debug = False):
num_rows = 50000 if debug else None
with timer("Process application train and test"):
df = application_train_test(num_rows)
print("Application df shape:", df.shape)
with timer("Process bureau and bureau_balance"):
bureau = bureau_and_balance(num_rows)
print("Bureau df shape:", bureau.shape)
df = df.join(bureau, how='left', on='SK_ID_CURR')
del bureau
gc.collect()
with timer("Process previous_applications"):
prev = previous_applications(num_rows)
print("Previous applications df shape:", prev.shape)
df = df.join(prev, how='left', on='SK_ID_CURR')
del prev
gc.collect()
with timer("Process POS-CASH balance"):
pos = pos_cash(num_rows)
print("Pos-cash balance df shape:", pos.shape)
df = df.join(pos, how='left', on='SK_ID_CURR')
del pos
gc.collect()
with timer("Process installments payments"):
ins = installments_payments(num_rows)
print("Installments payments df shape:", ins.shape)
df = df.join(ins, how='left', on='SK_ID_CURR')
del ins
gc.collect()
with timer("Process credit card balance"):
cc = credit_card_balance(num_rows)
print("Credit card balance df shape:", cc.shape)
df = df.join(cc, how='left', on='SK_ID_CURR')
del cc
gc.collect()
with timer("Run RF"):
kfold_rf(df, num_folds=5, stratified=False, debug=debug)
if __name__ == "__main__":
# submission_file_name = "submission.csv"
with timer("Full model run"):
main()
|
the-stack_106_20896
|
import os
from django import forms
from django.forms import widgets
from django.utils import six
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from django.template.defaultfilters import filesizeformat
from avatar.conf import settings
from avatar.models import Avatar
def avatar_img(avatar, size):
if not avatar.thumbnail_exists(size):
avatar.create_thumbnail(size)
return mark_safe('<img src="%s" alt="%s" width="%s" height="%s" />' %
(avatar.avatar_url(size), six.text_type(avatar),
size, size))
class UploadAvatarForm(forms.Form):
avatar = forms.ImageField(label=_("avatar"))
def __init__(self, *args, **kwargs):
self.user = kwargs.pop('user')
super(UploadAvatarForm, self).__init__(*args, **kwargs)
def clean_avatar(self):
data = self.cleaned_data['avatar']
if settings.AVATAR_ALLOWED_FILE_EXTS:
root, ext = os.path.splitext(data.name.lower())
if ext not in settings.AVATAR_ALLOWED_FILE_EXTS:
valid_exts = ", ".join(settings.AVATAR_ALLOWED_FILE_EXTS)
error = _("%(ext)s is an invalid file extension. "
"Authorized extensions are : %(valid_exts_list)s")
raise forms.ValidationError(error %
{'ext': ext,
'valid_exts_list': valid_exts})
if data.size > settings.AVATAR_MAX_SIZE:
error = _("Your file is too big (%(size)s), "
"the maximum allowed size is %(max_valid_size)s")
raise forms.ValidationError(error % {
'size': filesizeformat(data.size),
'max_valid_size': filesizeformat(settings.AVATAR_MAX_SIZE)
})
count = Avatar.objects.filter(user=self.user).count()
if 1 < settings.AVATAR_MAX_AVATARS_PER_USER <= count:
error = _("You already have %(nb_avatars)d avatars, "
"and the maximum allowed is %(nb_max_avatars)d.")
raise forms.ValidationError(error % {
'nb_avatars': count,
'nb_max_avatars': settings.AVATAR_MAX_AVATARS_PER_USER,
})
        return data
class PrimaryAvatarForm(forms.Form):
def __init__(self, *args, **kwargs):
kwargs.pop('user')
size = kwargs.pop('size', settings.AVATAR_DEFAULT_SIZE)
avatars = kwargs.pop('avatars')
super(PrimaryAvatarForm, self).__init__(*args, **kwargs)
choices = [(avatar.id, avatar_img(avatar, size)) for avatar in avatars]
self.fields['choice'] = forms.ChoiceField(label=_("Choices"),
choices=choices,
widget=widgets.RadioSelect)
class DeleteAvatarForm(forms.Form):
def __init__(self, *args, **kwargs):
kwargs.pop('user')
size = kwargs.pop('size', settings.AVATAR_DEFAULT_SIZE)
avatars = kwargs.pop('avatars')
super(DeleteAvatarForm, self).__init__(*args, **kwargs)
choices = [(avatar.id, avatar_img(avatar, size)) for avatar in avatars]
self.fields['choices'] = forms.MultipleChoiceField(label=_("Choices"),
choices=choices,
widget=widgets.CheckboxSelectMultiple)
|
the-stack_106_20897
|
#!/usr/bin/env python
#
# minos documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 9 13:47:02 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
from minos import template
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Minos Microservice Template'
copyright = "2021, Clariteia"
author = "Clariteia Devs"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = template.__version__
# The full version, including alpha/beta/rc tags.
release = template.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ---------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'minosdoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto, manual, or own class]).
latex_documents = [
(master_doc, 'minos.tex',
'Minos Microservice Template Documentation',
'Clariteia Devs', 'manual'),
]
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'minos',
'Minos Microservice Template Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'minos',
'Minos Microservice Template Documentation',
author,
'minos',
'One line description of project.',
'Miscellaneous'),
]
|
the-stack_106_20898
|
"""Reads all ships and some of their basic attributes from the CCP SDE dump, and writes them into a CSV file."""
import csv
import io
import sqlite3
import shutil
import constants as const
# Terminology
#
# Skinned ship:
# This term refers to a ship that differs from its "unskinned" sibling only in its looks, but not in its attributes.
# Blacklist
SKINNED_SHIPS = frozenset([
'Police Pursuit Comet', # variant of Federation Navy Comet
"Goru's Shuttle", # variant of the Caldari Shuttle
])
# Whitelist for ships that would be incorrectly flagged as skinned ships.
NOT_SKINNED_SHIPS = frozenset([
'Miasmos Quafe Ultra Edition'
])
ATTRIBUTE_METALEVEL = 633
ATTRIBUTE_META_GROUP_ID = 1692
ATTRIBUTE_TECH_LEVEL = 422
META_GROUP_TECH_1 = 'Tech I'
META_GROUP_TECH_2 = 'Tech II'
META_GROUP_TECH_3 = 'Tech III'
META_GROUP_FACTION = 'Faction'
class Ship:
type_name = None
type_id = None
group_name = None
race = None
meta_level = 0
tech_level = 0
meta_group = None
base_type = None
market_group = None
parent_market_group = None
class ShipsRetriever:
races_by_id = {}
shipsById = {}
meta_types_by_id = {} # contains dicts with keys 'parentTypeID' and 'metaGroupName'
all_ships_by_id = {}
def retrieve(self, db_file):
self.con = sqlite3.connect(db_file)
with self.con:
self.con.row_factory = sqlite3.Row
self.fetch_races()
self.fetch_meta_types()
self.fetch_ships()
self.con = None
return self.all_ships_by_id.values()
def fetch_races(self):
for row in self.con.execute('SELECT raceId, raceName from chrRaces'):
self.races_by_id[row['raceId']] = row['raceName']
def fetch_meta_types(self):
for row in self.con.execute(
'''
SELECT typeID, parentTypeID, metaGroupName
FROM invMetaTypes
JOIN invMetaGroups ON invMetaTypes.metaGroupID = invMetaGroups.metaGroupID
'''):
self.meta_types_by_id[row['typeID']] = { 'parentTypeID': row['parentTypeID'], 'metaGroupName': row['metaGroupName'] }
def fetch_ships(self):
query = '''
SELECT invGroups.groupName, invTypes.typeName, invTypes.typeID, invTypes.raceID
FROM invGroups
JOIN invTypes ON invGroups.groupID = invTypes.groupID
WHERE invGroups.categoryID = 6 -- 6 = ships
AND invTypes.published = 1
'''
ships = {}
for row in self.con.execute(query):
ship = Ship()
ship.type_name = row['typeName']
ship.type_id = row['typeID']
ship.group_name = row['groupName']
ship.race = self.races_by_id[row['raceID']]
ships[row['typeID']] = ship
# Add further information
for ship in ships.values():
# Meta Level
            meta_level = self.get_attribute(ship.type_id, ATTRIBUTE_METALEVEL)
ship.meta_level = int(meta_level)
# Tech Level
tech_level = self.get_attribute(ship.type_id, ATTRIBUTE_TECH_LEVEL)
ship.tech_level = int(tech_level)
# Meta Group
try:
meta_type = self.meta_types_by_id[ship.type_id]
ship.meta_group = meta_type['metaGroupName']
ship.base_type = ships[meta_type['parentTypeID']].type_name
except KeyError:
pass
if ship.meta_group is None:
# Ships that are not based on another hull don't have an entry in 'invMetaTypes', so we use the tech level.
if tech_level == 1:
if meta_level == 0:
ship.meta_group = META_GROUP_TECH_1
else:
ship.meta_group = META_GROUP_FACTION
elif tech_level == 2:
ship.meta_group = META_GROUP_TECH_2
elif tech_level == 3:
ship.meta_group = META_GROUP_TECH_3
else:
# Unknown tech level or incorrect data
ship.meta_group = 'UNKNOWN'
# Market Group
market_groups = self.con.execute(
'''
SELECT g1.marketGroupName, g2.marketGroupName
FROM invTypes
JOIN invMarketGroups AS g1 ON invTypes.marketGroupID = g1.marketGroupID
JOIN invMarketGroups AS g2 ON g1.parentGroupID = g2.marketGroupID
WHERE invTypes.typeID=:typeID
''',
{ 'typeID': ship.type_id }
).fetchone()
if market_groups is not None:
ship.market_group = market_groups[0]
ship.parent_market_group = market_groups[1]
self.all_ships_by_id = ships
def get_attribute(self, type_id, attribute_id):
value = self.con.execute(
'SELECT COALESCE(valueFloat, valueInt) AS value FROM dgmTypeAttributes WHERE typeID=:typeID AND attributeID=:attributeID',
{ 'typeID': type_id, 'attributeID': attribute_id }
).fetchone()[0]
return value
class ShipTraits:
@staticmethod
def is_skinned_ship(ship):
if ship.type_name in NOT_SKINNED_SHIPS:
return False
# Normal faction ships have a meta level > 0
if ship.meta_group == 'Faction' and ship.meta_level == 0:
return True
name = ship.type_name
if name.endswith('Edition'):
return True
if name in SKINNED_SHIPS:
return True
return False
@staticmethod
def is_unreleased_ship(ship):
if ship.type_name.startswith('?'):
return True
return False
def write_csv(ships, filename):
sorted_ships = sorted(ships, key=lambda ship: ship.type_name)
with open(filename, 'w') as file:
c = csv.writer(file, delimiter='\t', lineterminator='\n')
c.writerow([const.CSV_COL_SHIP, const.CSV_COL_SHIP_CLASS, const.CSV_COL_META_GROUP, const.CSV_COL_TECH_LEVEL, const.CSV_COL_META_LEVEL,
const.CSV_COL_HULL, const.CSV_COL_RACE, const.CSV_COL_MARKET_GROUP, const.CSV_COL_TYPE_ID, const.CSV_COL_IGNORE])
for ship in sorted_ships:
filter_ship = ShipTraits.is_unreleased_ship(ship) or ShipTraits.is_skinned_ship(ship)
c.writerow([
ship.type_name,
ship.group_name,
ship.meta_group,
ship.tech_level,
ship.meta_level,
ship.base_type,
ship.race,
ship.market_group,
ship.type_id,
'TRUE' if filter_ship else 'FALSE'
])
if __name__ == '__main__':
r = ShipsRetriever()
ships = r.retrieve(str(const.DATA_PATH / 'eve.sqlite'))
if not const.OUTPUT_PATH.exists(): const.OUTPUT_PATH.mkdir()
write_csv(ships, str(const.OUTPUT_PATH / 'ships.csv'))
|
the-stack_106_20899
|
# Test means dtype
import sys
import numpy as np
from frovedis.exrpc.server import FrovedisServer
from frovedis.matrix.dense import FrovedisRowmajorMatrix
from frovedis.mllib.gmm import GaussianMixture
# initializing the Frovedis server
argvs = sys.argv
argc = len(argvs)
if (argc < 2):
print ('Please give frovedis_server calling command as the first argument \n(e.g. "mpirun -np 2 /opt/nec/frovedis/ve/bin/frovedis_server")')
quit()
FrovedisServer.initialize(argvs[1])
train_mat = np.loadtxt("./input/gmm_data.txt")
# creating gaussian mixture object
n_components = 2
try:
gmm_model = GaussianMixture(n_components=n_components)
# fitting the training matrix on gaussian mixture object
gmm_model.fit(train_mat)
means = gmm_model.means_
except Exception as e:
print ("status=Exception: " + str(e))
sys.exit(1)
if(means.dtype == np.float64):
print("status=Passed")
else:
print("status=Failed")
|
the-stack_106_20900
|
# Copyright 2020-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""math"""
import numpy as np
from mindspore.ops import operations as P
from mindspore.common.tensor import Tensor
from mindspore.common._decorator import deprecated
from mindspore.ops.primitive import constexpr
from mindspore.ops import functional as F
from ..cell import Cell
from ...common import dtype as mstype
from ..._checkparam import Validator as validator
__all__ = ['ReduceLogSumExp',
'Range',
'LGamma',
'DiGamma',
'IGamma',
'LBeta',
'MatMul',
'Moments',
'MatInverse',
'MatDet',
]
@constexpr
def _check_input_dtype(param_name, input_dtype, allow_dtypes, cls_name):
validator.check_type_name(param_name, input_dtype, allow_dtypes, cls_name)
class ReduceLogSumExp(Cell):
r"""
    Reduces a dimension of a tensor by calculating the exponential of all elements in the dimension,
    then calculating the logarithm of the sum.
The dtype of the tensor to be reduced is number.
.. math::
ReduceLogSumExp(x) = \log(\sum(e^x))
Args:
        axis (Union[int, tuple(int), list(int)]): The dimensions to reduce. Default: (), reduce all dimensions.
Only constant value is allowed.
keep_dims (bool): If True, keep these reduced dimensions and the length is 1.
If False, don't keep these dimensions.
Default : False.
Inputs:
- **x** (Tensor) - The input tensor. With float16 or float32 data type.
Outputs:
Tensor, has the same dtype as the `x`.
- If axis is (), and keep_dims is False,
the output is a 0-D tensor representing the sum of all elements in the input tensor.
- If axis is int, set as 2, and keep_dims is False,
the shape of output is :math:`(x_1, x_3, ..., x_R)`.
- If axis is tuple(int), set as (2, 3), and keep_dims is False,
the shape of output is :math:`(x_1, x_4, ..., x_R)`.
Raises:
TypeError: If `axis` is not one of int, list, tuple.
TypeError: If `keep_dims` is not bool.
TypeError: If dtype of `x` is neither float16 nor float32.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> input_x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
>>> op = nn.ReduceLogSumExp(1, keep_dims=True)
>>> output = op(input_x)
>>> print(output.shape)
(3, 1, 5, 6)
"""
def __init__(self, axis, keep_dims=False):
super(ReduceLogSumExp, self).__init__()
validator.check_value_type('axis', axis, [int, list, tuple], self.cls_name)
validator.check_value_type('keep_dims', keep_dims, [bool], self.cls_name)
self.axis = axis
self.exp = P.Exp()
self.sum = P.ReduceSum(keep_dims)
self.log = P.Log()
def construct(self, x):
exp = self.exp(x)
sumexp = self.sum(exp, self.axis)
logsumexp = self.log(sumexp)
return logsumexp
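# Illustrative aside (not part of MindSpore): the reduction above is the plain
# log(sum(exp(x))) identity from the docstring. A hypothetical NumPy reference
# such as the sketch below (using the module-level `np` import) can be used to
# eyeball results on small arrays; it applies the usual max-shift for numerical
# stability, which the naive exp/sum/log sequence above does not.
def _np_reduce_logsumexp_reference(x, axis=None, keep_dims=False):
    """Hypothetical NumPy cross-check for ReduceLogSumExp (sketch only)."""
    x = np.asarray(x)
    if isinstance(axis, list):
        axis = tuple(axis)
    x_max = np.max(x, axis=axis, keepdims=True)
    out = np.log(np.sum(np.exp(x - x_max), axis=axis, keepdims=True)) + x_max
    return out if keep_dims else np.squeeze(out, axis=axis)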
class Range(Cell):
r"""
Creates a sequence of numbers in range [start, limit) with step size delta.
    The size of output is :math:`\left \lceil \frac{limit-start}{delta} \right \rceil` and `delta` is the gap
    between two values in the tensor.
.. math::
out_{i+1} = out_{i} +delta
Args:
start (Union[int, float]): If `limit` is `None`, the value acts as limit in the range and first entry
defaults to `0`. Otherwise, it acts as first entry in the range.
limit (Union[int, float]): Acts as upper limit of sequence. If `None`, defaults to the value of `start`
while set the first entry of the range to `0`. It can not be equal to `start`.
delta (Union[int, float]): Increment of the range. It can not be equal to zero. Default: 1.
Outputs:
Tensor, the dtype is int if the dtype of `start`, `limit` and `delta` all are int. Otherwise, dtype is float.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> net = nn.Range(1, 8, 2)
>>> output = net()
>>> print(output)
[1 3 5 7]
"""
def __init__(self, start, limit=None, delta=1):
super(Range, self).__init__()
if delta == 0:
raise ValueError("The input of `delta` can not be equal to zero.")
data = np.arange(start, limit, delta)
        if np.issubdtype(data.dtype, np.floating):
self.ms_dtype = mstype.float32
else:
self.ms_dtype = mstype.int32
self.result_tensor = Tensor(data, dtype=self.ms_dtype)
def construct(self):
return self.result_tensor
class LGamma(Cell):
r"""
Calculates LGamma using Lanczos' approximation referring to "A Precision Approximation of the Gamma Function".
The algorithm is:
.. math::
\begin{array}{ll} \\
            lgamma(z + 1) = \frac{(\log(2) + \log(pi))}{2} + (z + 1/2) * log(t(z)) - t(z) + \log(A(z)) \\
t(z) = z + kLanczosGamma + 1/2 \\
A(z) = kBaseLanczosCoeff + \sum_{k=1}^n \frac{kLanczosCoefficients[i]}{z + k}
\end{array}
However, if the input is less than 0.5 use Euler's reflection formula:
.. math::
lgamma(x) = \log(pi) - lgamma(1-x) - \log(abs(sin(pi * x)))
And please note that
.. math::
lgamma(+/-inf) = +inf
Thus, the behaviour of LGamma follows:
when x > 0.5, return log(Gamma(x))
when x < 0.5 and is not an integer, return the real part of Log(Gamma(x)) where Log is the complex logarithm
when x is an integer less or equal to 0, return +inf
when x = +/- inf, return +inf
Inputs:
- **x** (Tensor) - The input tensor. Only float16, float32 are supported.
Outputs:
Tensor, has the same shape and dtype as the `x`.
Raises:
TypeError: If dtype of `x` is neither float16 nor float32.
Supported Platforms:
``Ascend`` ``GPU``
Examples:
>>> input_x = Tensor(np.array([2, 3, 4]).astype(np.float32))
>>> op = nn.LGamma()
>>> output = op(input_x)
>>> print(output)
[3.5762787e-07 6.9314754e-01 1.7917603e+00]
"""
def __init__(self):
super(LGamma, self).__init__()
# const numbers
self.k_lanczos_gamma = 7
self.k_base_lanczos_coeff = 0.99999999999980993227684700473478
self.k_lanczos_coefficients = [676.520368121885098567009190444019,
-1259.13921672240287047156078755283,
771.3234287776530788486528258894,
-176.61502916214059906584551354,
12.507343278686904814458936853,
-0.13857109526572011689554707,
9.984369578019570859563e-6,
1.50563273514931155834e-7]
self.one_half = 0.5
self.one = 1
self.two = 2
self.inf = np.inf
self.pi = np.pi
self.log_2 = np.log(self.two)
self.log_pi = np.log(np.pi)
self.log_sqrt_two_pi = (self.log_2 + self.log_pi) / self.two
self.lanczos_gamma_plus_one_half = self.k_lanczos_gamma + 0.5
self.log_lanczos_gamma_plus_one_half = np.log(self.lanczos_gamma_plus_one_half)
# operations
self.log = P.Log()
self.log1p = P.Log1p()
self.abs = P.Abs()
self.shape = P.Shape()
self.dtype = P.DType()
self.fill = P.Fill()
self.floor = P.Floor()
self.equal = P.Equal()
self.greater = P.Greater()
self.less = P.Less()
self.lessequal = P.LessEqual()
self.select = P.Select()
self.sin = P.Sin()
self.isfinite = P.IsFinite()
def construct(self, x):
input_dtype = self.dtype(x)
_check_input_dtype("x", input_dtype, [mstype.float16, mstype.float32], self.cls_name)
infinity = self.fill(input_dtype, self.shape(x), self.inf)
need_to_reflect = self.less(x, 0.5)
neg_input = -x
z = self.select(need_to_reflect, neg_input, x - 1)
@constexpr
def _calculate_reflected_x(z, k_base_lanczos_coeff, k_lanczos_coefficients):
reflex_x = k_base_lanczos_coeff
for i in range(8):
product_ = k_lanczos_coefficients[i] / (z + i + 1)
reflex_x = product_ + reflex_x
return reflex_x
reflex_x = _calculate_reflected_x(z, self.k_base_lanczos_coeff, self.k_lanczos_coefficients)
t = z + self.lanczos_gamma_plus_one_half
log_t = self.log1p(z / self.lanczos_gamma_plus_one_half) + self.log_lanczos_gamma_plus_one_half
log_y = self.log(reflex_x) + (z + self.one_half - t / log_t) * log_t + self.log_sqrt_two_pi
abs_input = self.abs(x)
abs_frac_input = abs_input - self.floor(abs_input)
x = self.select(self.lessequal(x, 0.0), self.select(self.equal(abs_frac_input, 0.0), infinity, x), x)
reduced_frac_input = self.select(self.greater(abs_frac_input, 0.5),
1 - abs_frac_input, abs_frac_input)
reflection_denom = self.log(self.sin(self.pi * reduced_frac_input))
reflection = self.select(self.isfinite(reflection_denom),
-reflection_denom - log_y + self.log_pi,
-reflection_denom)
result = self.select(need_to_reflect, reflection, log_y)
return self.select(self.isfinite(x), result, infinity)
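# Illustrative aside (not part of MindSpore): a hypothetical pure-Python restatement
# of the same g=7, n=8 Lanczos approximation for a positive scalar x > 0.5 (no
# reflection branch), kept only as a readability aid for the tensor code above.
def _lanczos_lgamma_scalar_sketch(x):
    """Hypothetical scalar log-Gamma via the Lanczos series used by LGamma."""
    import math
    g = 7
    base = 0.99999999999980993227684700473478
    coeffs = [676.520368121885098567009190444019,
              -1259.13921672240287047156078755283,
              771.3234287776530788486528258894,
              -176.61502916214059906584551354,
              12.507343278686904814458936853,
              -0.13857109526572011689554707,
              9.984369578019570859563e-6,
              1.50563273514931155834e-7]
    z = x - 1
    a = base + sum(c / (z + i + 1) for i, c in enumerate(coeffs))
    t = z + g + 0.5
    # lgamma(z + 1) = 0.5*log(2*pi) + (z + 0.5)*log(t) - t + log(A(z))
    return 0.5 * math.log(2 * math.pi) + (z + 0.5) * math.log(t) - t + math.log(a)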
class DiGamma(Cell):
r"""
Calculates Digamma using Lanczos' approximation referring to "A Precision Approximation of the Gamma Function".
The algorithm is:
.. math::
\begin{array}{ll} \\
digamma(z + 1) = log(t(z)) + A'(z) / A(z) - kLanczosGamma / t(z) \\
t(z) = z + kLanczosGamma + 1/2 \\
A(z) = kBaseLanczosCoeff + \sum_{k=1}^n \frac{kLanczosCoefficients[i]}{z + k} \\
A'(z) = \sum_{k=1}^n \frac{kLanczosCoefficients[i]}{{z + k}^2}
\end{array}
However, if the input is less than 0.5 use Euler's reflection formula:
.. math::
digamma(x) = digamma(1 - x) - pi * cot(pi * x)
Inputs:
- **x** (Tensor[Number]) - The input tensor. Only float16, float32 are supported.
Outputs:
Tensor, has the same shape and dtype as the `x`.
Raises:
TypeError: If dtype of `x` is neither float16 nor float32.
Supported Platforms:
``Ascend`` ``GPU``
Examples:
>>> input_x = Tensor(np.array([2, 3, 4]).astype(np.float32))
>>> op = nn.DiGamma()
>>> output = op(input_x)
>>> print(output)
[0.42278463 0.92278427 1.2561178]
"""
def __init__(self):
super(DiGamma, self).__init__()
# const numbers
self.k_lanczos_gamma = 7
self.k_base_lanczos_coeff = 0.99999999999980993227684700473478
self.k_lanczos_coefficients = [676.520368121885098567009190444019,
-1259.13921672240287047156078755283,
771.3234287776530788486528258894,
-176.61502916214059906584551354,
12.507343278686904814458936853,
-0.13857109526572011689554707,
9.984369578019570859563e-6,
1.50563273514931155834e-7]
self.nan = np.nan
self.pi = np.pi
self.lanczos_gamma_plus_one_half = self.k_lanczos_gamma + 0.5
self.log_lanczos_gamma_plus_one_half = np.log(self.lanczos_gamma_plus_one_half)
# operations
self.log1p = P.Log1p()
self.abs = P.Abs()
self.shape = P.Shape()
self.dtype = P.DType()
self.fill = P.Fill()
self.floor = P.Floor()
self.equal = P.Equal()
self.less = P.Less()
self.select = P.Select()
self.sin = P.Sin()
self.cos = P.Cos()
self.logicaland = P.LogicalAnd()
def construct(self, x):
input_dtype = self.dtype(x)
_check_input_dtype("x", input_dtype, [mstype.float16, mstype.float32], self.cls_name)
need_to_reflect = self.less(x, 0.5)
neg_input = -x
z = self.select(need_to_reflect, neg_input, x - 1)
@constexpr
def _calculate_num_denom(z, k_base_lanczos_coeff, k_lanczos_coefficients):
num = 0
denom = k_base_lanczos_coeff
for i in range(8):
num = num - k_lanczos_coefficients[i] / ((z + i + 1) * (z + i + 1))
denom = denom + k_lanczos_coefficients[i] / (z + i + 1)
return num, denom
num, denom = _calculate_num_denom(z, self.k_base_lanczos_coeff, self.k_lanczos_coefficients)
t = z + self.lanczos_gamma_plus_one_half
log_t = self.log1p(z / self.lanczos_gamma_plus_one_half) + self.log_lanczos_gamma_plus_one_half
y = log_t + num / denom - self.k_lanczos_gamma / t
reduced_input = x + self.abs(self.floor(x + 0.5))
reflection = y - self.pi * self.cos(self.pi * reduced_input) / self.sin(self.pi * reduced_input)
real_result = self.select(need_to_reflect, reflection, y)
nan = self.fill(self.dtype(x), self.shape(x), np.nan)
return self.select(self.logicaland(self.less(x, 0), self.equal(x, self.floor(x))),
nan, real_result)
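# Illustrative aside (not part of MindSpore): digamma(x) is d/dx log(Gamma(x)), so a
# rough scalar sanity check can be obtained by numerically differentiating the
# standard library's lgamma. Hypothetical sketch, valid for positive non-integer x.
def _digamma_scalar_reference(x, eps=1e-5):
    """Hypothetical central-difference digamma for cross-checking DiGamma."""
    import math
    return (math.lgamma(x + eps) - math.lgamma(x - eps)) / (2.0 * eps)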
eps_fp32 = Tensor(np.finfo(np.float32).eps, mstype.float32)
def _while_helper_func(cond, body, vals):
while cond(vals).any():
vals = body(vals)
return vals
def _IgammaSeries(ax, x, a, enabled):
"""Helper function for computing Igamma using a power series."""
logicaland = P.LogicalAnd()
greater = P.Greater()
fill = P.Fill()
shape = P.Shape()
dtype = P.DType()
select = P.Select()
# If more data types are supported, this epsilon need to be selected.
epsilon = eps_fp32
def cond(vals):
enabled = vals[0]
return enabled
def body(vals):
enabled = vals[0]
r = vals[1]
c = vals[2]
ans = vals[3]
x = vals[4]
dc_da = vals[5]
dans_da = vals[6]
r = r + 1
dc_da = dc_da * (x / r) + (-1 * c * x) / (r * r)
dans_da = dans_da + dc_da
c = c * (x / r)
ans = ans + c
conditional = logicaland(enabled, greater(c / ans, epsilon))
return (conditional, select(enabled, r, vals[1]),
select(enabled, c, vals[2]), select(enabled, ans, vals[3]),
select(enabled, x, vals[4]), select(enabled, dc_da, vals[5]),
select(enabled, dans_da, vals[6]))
ones = fill(dtype(a), shape(a), 1)
zeros = fill(dtype(a), shape(a), 0)
vals = (enabled, a, ones, ones, x, zeros, zeros)
vals = _while_helper_func(cond, body, vals)
ans = vals[3]
return (ans * ax) / a
def _IgammacContinuedFraction(ax, x, a, enabled):
"""Helper function for computing Igammac using a continued fraction."""
abs_x = P.Abs()
logicaland = P.LogicalAnd()
greater = P.Greater()
less = P.Less()
notequal = P.NotEqual()
fill = P.Fill()
shape = P.Shape()
dtype = P.DType()
select = P.Select()
# If more data types are supported, this epsilon need to be selected.
epsilon = eps_fp32
def cond(vals):
enabled = vals[0]
c = vals[5]
return logicaland(less(c, 2000), enabled)
def body(vals):
enabled = vals[0]
ans = vals[1]
t = vals[2]
y = vals[3]
z = vals[4]
c = vals[5]
pkm1 = vals[6]
qkm1 = vals[7]
pkm2 = vals[8]
qkm2 = vals[9]
dpkm2_da = vals[10]
dqkm2_da = vals[11]
dpkm1_da = vals[12]
dqkm1_da = vals[13]
dans_da = vals[14]
c = c + 1
y = y + 1
z = z + 2
yc = y * c
pk = pkm1 * z - pkm2 * yc
qk = qkm1 * z - qkm2 * yc
qk_is_nonzero = notequal(qk, 0)
r = pk / qk
t = select(qk_is_nonzero, abs_x((ans - r) / r), fill(dtype(t), shape(t), 1))
ans = select(qk_is_nonzero, r, ans)
dpk_da = dpkm1_da * z - pkm1 - dpkm2_da * yc + pkm2 * c
dqk_da = dqkm1_da * z - qkm1 - dqkm2_da * yc + qkm2 * c
dans_da_new = select(qk_is_nonzero, (dpk_da - ans * dqk_da) / qk, dans_da)
grad_conditional = select(qk_is_nonzero,
abs_x(dans_da_new - dans_da),
fill(dtype(dans_da), shape(dans_da), 1))
pkm2 = pkm1
pkm1 = pk
qkm2 = qkm1
qkm1 = qk
dpkm2_da = dpkm1_da
dqkm2_da = dqkm1_da
dpkm1_da = dpk_da
dqkm1_da = dqk_da
rescale = greater(abs_x(pk), 1 / epsilon)
pkm2 = select(rescale, pkm2 * epsilon, pkm2)
pkm1 = select(rescale, pkm1 * epsilon, pkm1)
qkm2 = select(rescale, qkm2 * epsilon, qkm2)
qkm1 = select(rescale, qkm1 * epsilon, qkm1)
dpkm2_da = select(rescale, dpkm2_da * epsilon, dpkm2_da)
dqkm2_da = select(rescale, dqkm2_da * epsilon, dqkm2_da)
dpkm1_da = select(rescale, dpkm1_da * epsilon, dpkm1_da)
dqkm1_da = select(rescale, dqkm1_da * epsilon, dqkm1_da)
conditional = logicaland(enabled, greater(grad_conditional, epsilon))
return (conditional, select(enabled, ans, vals[1]), select(enabled, t, vals[2]),
select(enabled, y, vals[3]), select(enabled, z, vals[4]),
c, select(enabled, pkm1, vals[6]),
select(enabled, qkm1, vals[7]), select(enabled, pkm2, vals[8]),
select(enabled, qkm2, vals[9]), select(enabled, dpkm2_da, vals[10]),
select(enabled, dqkm2_da, vals[11]), select(enabled, dpkm1_da, vals[12]),
select(enabled, dqkm1_da, vals[13]), select(enabled, dans_da_new, vals[14]))
y = 1 - a
z = x + y + 1
c = fill(dtype(x), shape(x), 0)
pkm2 = fill(dtype(x), shape(x), 1)
qkm2 = x
pkm1 = x + 1
qkm1 = z * x
ans = pkm1 / qkm1
t = fill(dtype(x), shape(x), 1)
dpkm2_da = fill(dtype(x), shape(x), 0)
dqkm2_da = fill(dtype(x), shape(x), 0)
dpkm1_da = fill(dtype(x), shape(x), 0)
dqkm1_da = -x
dans_da = (dpkm1_da - ans * dqkm1_da) / qkm1
vals = (enabled, ans, t, y, z, c, pkm1, qkm1, pkm2, qkm2, dpkm2_da, dqkm2_da, dpkm1_da, dqkm1_da, dans_da)
vals = _while_helper_func(cond, body, vals)
ans = vals[1]
return ans * ax
class IGamma(Cell):
r"""
Calculates lower regularized incomplete Gamma function.
The lower regularized incomplete Gamma function is defined as:
.. math::
P(a, x) = gamma(a, x) / Gamma(a) = 1 - Q(a, x)
where
.. math::
gamma(a, x) = \int_0^x t^{a-1} \exp^{-t} dt
is the lower incomplete Gamma function.
Above :math:`Q(a, x)` is the upper regularized complete Gamma function.
Inputs:
- **a** (Tensor) - The input tensor. With float32 data type. `a` should have
the same dtype with `x`.
- **x** (Tensor) - The input tensor. With float32 data type. `x` should have
the same dtype with `a`.
Outputs:
Tensor, has the same dtype as `a` and `x`.
Raises:
        TypeError: If dtype of input x and a is not float32,
            or if x has different dtype with a.
Supported Platforms:
``Ascend`` ``GPU``
Examples:
>>> input_a = Tensor(np.array([2.0, 4.0, 6.0, 8.0]).astype(np.float32))
>>> input_x = Tensor(np.array([2.0, 3.0, 4.0, 5.0]).astype(np.float32))
>>> igamma = nn.IGamma()
>>> output = igamma(input_a, input_x)
>>> print (output)
[0.593994 0.35276785 0.21486944 0.13337152]
"""
def __init__(self):
super(IGamma, self).__init__()
# const numbers
# If more data types are supported, this float max value need to be selected.
self.log_maxfloat32 = Tensor(np.log(np.finfo(np.float32).max), mstype.float32)
# operations
self.logicaland = P.LogicalAnd()
self.logicalor = P.LogicalOr()
self.logicalnot = P.LogicalNot()
self.equal = P.Equal()
self.greater = P.Greater()
self.less = P.Less()
self.neg = P.Neg()
self.log = P.Log()
self.exp = P.Exp()
self.select = P.Select()
self.zeroslike = P.ZerosLike()
self.fill = P.Fill()
self.shape = P.Shape()
self.dtype = P.DType()
self.lgamma = LGamma()
self.const = P.ScalarToArray()
self.cast = P.Cast()
def construct(self, a, x):
a_dtype = self.dtype(a)
x_dtype = self.dtype(x)
_check_input_dtype("a", a_dtype, [mstype.float32], self.cls_name)
_check_input_dtype("x", x_dtype, a_dtype, self.cls_name)
domain_error = self.logicalor(self.less(x, 0), self.less(a, 0))
use_igammac = self.logicaland(self.greater(x, 1), self.greater(x, a))
ax = a * self.log(x) - x - self.lgamma(a)
para_shape = self.shape(ax)
if para_shape != ():
broadcastto = P.BroadcastTo(para_shape)
x = broadcastto(x)
a = broadcastto(a)
x_is_zero = self.equal(x, 0)
log_maxfloat = self.log_maxfloat32
underflow = self.less(ax, self.neg(log_maxfloat))
ax = self.exp(ax)
enabled = self.logicalnot(self.logicalor(self.logicalor(x_is_zero, domain_error), underflow))
output = self.select(use_igammac,
1 - _IgammacContinuedFraction(ax, x, a, self.logicaland(enabled, use_igammac)),
_IgammaSeries(ax, x, a, self.logicaland(enabled, self.logicalnot(use_igammac))))
output = self.select(x_is_zero, self.zeroslike(output), output)
output = self.select(domain_error, self.fill(self.dtype(a), self.shape(a), np.nan), output)
return output
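# Illustrative aside (not part of MindSpore): a hypothetical scalar reference for the
# lower regularized incomplete Gamma function, using the same power series as
# _IgammaSeries above: gamma(a, x) = x^a * e^(-x) * sum_n x^n / (a*(a+1)*...*(a+n)).
# It assumes a > 0 and x > 0 and converges best when x is not much larger than a
# (for large x the continued-fraction form used above is preferable).
def _igamma_scalar_reference(a, x, max_terms=500):
    """Hypothetical scalar P(a, x) for cross-checking IGamma on small inputs."""
    import math
    term = 1.0 / a
    total = term
    for n in range(1, max_terms):
        term *= x / (a + n)
        total += term
        if term < total * 1e-12:
            break
    return total * math.exp(a * math.log(x) - x - math.lgamma(a))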
class LBeta(Cell):
r"""
This is semantically equal to
.. math::
P(x, y) = lgamma(x) + lgamma(y) - lgamma(x + y).
The method is more accurate for arguments above 8. The reason for accuracy loss in the naive computation
is catastrophic cancellation between the lgammas. This method avoids the numeric cancellation by explicitly
decomposing lgamma into the Stirling approximation and an explicit log_gamma_correction, and cancelling
    the large terms from the Stirling approximation analytically.
Inputs:
- **x** (Tensor) - The input tensor. With float16 or float32 data type. `x` should have
the same dtype with `y`.
- **y** (Tensor) - The input tensor. With float16 or float32 data type. `y` should have
the same dtype with `x`.
Outputs:
Tensor, has the same dtype as `x` and `y`.
Raises:
TypeError: If dtype of `x` or `y` is neither float16 nor float32,
or if `x` has different dtype with `y`.
Supported Platforms:
``Ascend`` ``GPU``
Examples:
>>> input_x = Tensor(np.array([2.0, 4.0, 6.0, 8.0]).astype(np.float32))
>>> input_y = Tensor(np.array([2.0, 3.0, 14.0, 15.0]).astype(np.float32))
>>> lbeta = nn.LBeta()
>>> output = lbeta(input_y, input_x)
>>> print(output)
[-1.7917596 -4.094345 -12.000229 -14.754799]
"""
def __init__(self):
super(LBeta, self).__init__()
# const numbers
self.log_2pi = np.log(2 * np.pi)
self.minimax_coeff = [-0.165322962780713e-02,
0.837308034031215e-03,
-0.595202931351870e-03,
0.793650666825390e-03,
-0.277777777760991e-02,
0.833333333333333e-01]
# operations
self.log = P.Log()
self.log1p = P.Log1p()
self.less = P.Less()
self.select = P.Select()
self.shape = P.Shape()
self.dtype = P.DType()
self.lgamma = LGamma()
self.const = P.ScalarToTensor()
def construct(self, x, y):
x_dtype = self.dtype(x)
y_dtype = self.dtype(y)
_check_input_dtype("x", x_dtype, [mstype.float16, mstype.float32], self.cls_name)
_check_input_dtype("y", y_dtype, x_dtype, self.cls_name)
x_plus_y = x + y
para_shape = self.shape(x_plus_y)
if para_shape != ():
broadcastto = P.BroadcastTo(para_shape)
x = broadcastto(x)
y = broadcastto(y)
comp_less = self.less(x, y)
x_min = self.select(comp_less, x, y)
y_max = self.select(comp_less, y, x)
@constexpr
def _log_gamma_correction(x, minimax_coeff):
inverse_x = 1. / x
inverse_x_squared = inverse_x * inverse_x
accum = minimax_coeff[0]
for i in range(1, 6):
accum = accum * inverse_x_squared + minimax_coeff[i]
return accum * inverse_x
log_gamma_correction_x = _log_gamma_correction(x_min, self.minimax_coeff)
log_gamma_correction_y = _log_gamma_correction(y_max, self.minimax_coeff)
log_gamma_correction_x_y = _log_gamma_correction(x_plus_y, self.minimax_coeff)
# Two large arguments case: y >= x >= 8.
log_beta_two_large = self.const(0.5 * self.log_2pi, x_dtype) - 0.5 * self.log(y_max) \
+ log_gamma_correction_x + log_gamma_correction_y - log_gamma_correction_x_y \
+ (x_min - 0.5) * self.log(x_min / (x_min + y_max)) - y_max * self.log1p(x_min / y_max)
cancelled_stirling = -1 * (x_min + y_max - 0.5) * self.log1p(x_min / y_max) - x_min * self.log(y_max) + x_min
correction = log_gamma_correction_y - log_gamma_correction_x_y
log_gamma_difference_big_y = correction + cancelled_stirling
# One large argument case: x < 8, y >= 8.
log_beta_one_large = self.lgamma(x_min) + log_gamma_difference_big_y
# Small arguments case: x <= y < 8.
log_beta_small = self.lgamma(x_min) + self.lgamma(y_max) - self.lgamma(x_min + y_max)
comp_xless8 = self.less(x_min, 8)
comp_yless8 = self.less(y_max, 8)
temp = self.select(comp_yless8, log_beta_small, log_beta_one_large)
return self.select(comp_xless8, temp, log_beta_two_large)
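# Illustrative aside (not part of MindSpore): for scalars the quantity above is just
# lgamma(x) + lgamma(y) - lgamma(x + y); the class exists to evaluate it accurately
# for large arguments where that naive form cancels catastrophically. Hypothetical
# reference, adequate only for modest x and y:
def _lbeta_scalar_reference(x, y):
    """Hypothetical naive scalar log-Beta for cross-checking LBeta on small inputs."""
    import math
    return math.lgamma(x) + math.lgamma(y) - math.lgamma(x + y)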
@constexpr
def get_broadcast_matmul_shape(x_shape, y_shape):
"""get broadcast_matmul shape"""
if (len(x_shape) < 2) or (len(y_shape) < 2):
raise ValueError('For matmul, rank of x1 and x2 should be equal to or greater than 2, '
+ f'but got {x_shape} and {y_shape}.')
x_shape_batch = x_shape[:-2]
y_shape_batch = y_shape[:-2]
if x_shape_batch == y_shape_batch:
return x_shape, y_shape
x_len = len(x_shape)
y_len = len(y_shape)
length = x_len if x_len < y_len else y_len
broadcast_shape_back = []
for i in range(-length, -2):
if x_shape[i] == 1:
broadcast_shape_back.append(y_shape[i])
elif y_shape[i] == 1:
broadcast_shape_back.append(x_shape[i])
elif x_shape[i] == y_shape[i]:
broadcast_shape_back.append(x_shape[i])
else:
raise ValueError(f"For MatMul, the x1_shape {x_shape} and x2_shape {y_shape} can not broadcast.")
broadcast_shape_front = y_shape[0: y_len - length] if length == x_len else x_shape[0: x_len - length]
x_broadcast_shape = broadcast_shape_front + tuple(broadcast_shape_back) + x_shape[-2:]
y_broadcast_shape = broadcast_shape_front + tuple(broadcast_shape_back) + y_shape[-2:]
return x_broadcast_shape, y_broadcast_shape
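# Worked example (for illustration only):
#   get_broadcast_matmul_shape((3, 1, 4, 5), (2, 5, 6)) -> ((3, 2, 4, 5), (3, 2, 5, 6))
# The batch dimensions (3, 1) and (2,) broadcast to (3, 2), while the trailing two
# (matrix) dimensions of each input are kept unchanged.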
@constexpr
def check_col_row_equal(x1_shape, x2_shape, transpose_x1, transpose_x2):
"""check col and row equal"""
if len(x1_shape) == 1:
transpose_x1 = False
x1_shape = (1,) + x1_shape
if len(x2_shape) == 1:
transpose_x2 = False
x2_shape = x2_shape + (1,)
x1_last = x1_shape[-2:]
x2_last = x2_shape[-2:]
x1_col = x1_last[not transpose_x1] # x1_col = x1_last[1] if (not transpose_a) else x1_last[0]
x2_row = x2_last[transpose_x2] # x2_row = x2_last[0] if (not transpose_b) else x2_last[1]
if x1_col != x2_row:
raise ValueError('The column of matrix dimensions of x1 should be equal to '
+ f'the row of matrix dimensions of x2, but got {x1_col} and {x2_row}.')
def matmul_op_select(x1_shape, x2_shape, transpose_x1, transpose_x2):
"""select matmul op"""
x1_dim, x2_dim = len(x1_shape), len(x2_shape)
if x1_dim == 1 and x2_dim == 1:
matmul_op = P.Mul()
elif x1_dim <= 2 and x2_dim <= 2:
transpose_x1 = False if x1_dim == 1 else transpose_x1
transpose_x2 = False if x2_dim == 1 else transpose_x2
matmul_op = P.MatMul(transpose_x1, transpose_x2)
elif x1_dim == 1 and x2_dim > 2:
matmul_op = P.BatchMatMul(False, transpose_x2)
elif x1_dim > 2 and x2_dim == 1:
matmul_op = P.BatchMatMul(transpose_x1, False)
else:
matmul_op = P.BatchMatMul(transpose_x1, transpose_x2)
return matmul_op
class MatMul(Cell):
r"""
Multiplies matrix `x1` by matrix `x2`.
nn.MatMul will be deprecated in future versions. Please use ops.matmul instead.
- If both x1 and x2 are 1-dimensional, the dot product is returned.
- If the dimensions of x1 and x2 are all not greater than 2, the matrix-matrix product will be returned. Note if
one of 'x1' and 'x2' is 1-dimensional, the argument will first be expanded to 2 dimension. After the matrix
multiply, the expanded dimension will be removed.
    - If at least one of x1 and x2 is N-dimensional (N>2), the non-matrix dimensions (batch) of inputs will be
      broadcasted and must be broadcastable. Note if one of 'x1' and 'x2' is 1-dimensional, the argument will first be
      expanded to 2 dimensions and then the non-matrix dimensions will be broadcasted. After the matrix multiply, the
expanded dimension will be removed. For example, if `x1` is a :math:`(j \times 1 \times n \times m)` tensor and
`x2` is a :math:`(k \times m \times p)` tensor, the output will be a :math:`(j \times k \times n \times p)`
tensor.
Args:
transpose_x1 (bool): If true, `a` is transposed before multiplication. Default: False.
transpose_x2 (bool): If true, `b` is transposed before multiplication. Default: False.
Inputs:
- **input_x1** (Tensor) - The first tensor to be multiplied.
- **input_x2** (Tensor) - The second tensor to be multiplied.
Outputs:
Tensor, the shape of the output tensor depends on the dimension of input tensors.
Raises:
TypeError: If `transpose_x1` or `transpose_x2` is not a bool.
ValueError: If the column of matrix dimensions of `input_x1` is not equal to
the row of matrix dimensions of `input_x2`.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> net = nn.MatMul()
>>> input_x1 = Tensor(np.ones(shape=[3, 2, 3]), mindspore.float32)
>>> input_x2 = Tensor(np.ones(shape=[3, 4]), mindspore.float32)
>>> output = net(input_x1, input_x2)
>>> print(output.shape)
(3, 2, 4)
"""
@deprecated('1.2', 'ops.matmul', False)
def __init__(self, transpose_x1=False, transpose_x2=False):
super(MatMul, self).__init__()
validator.check_value_type('transpose_x1', transpose_x1, [bool], self.cls_name)
validator.check_value_type('transpose_x2', transpose_x2, [bool], self.cls_name)
self.transpose_x1 = transpose_x1
self.transpose_x2 = transpose_x2
self.shape_op = P.Shape()
self.expand_op = P.ExpandDims()
self.squeeze_left_op = P.Squeeze(-2)
self.squeeze_right_op = P.Squeeze(-1)
self.reduce_sum_op = P.ReduceSum(keep_dims=False)
def construct(self, x1, x2):
x1_shape = self.shape_op(x1)
x2_shape = self.shape_op(x2)
check_col_row_equal(x1_shape, x2_shape, self.transpose_x1, self.transpose_x2)
matmul_op = matmul_op_select(x1_shape, x2_shape, self.transpose_x1, self.transpose_x2)
x1_dim, x2_dim = len(x1_shape), len(x2_shape)
if x1_dim == x2_dim and x2_dim == 1:
return self.reduce_sum_op(matmul_op(x1, x2), -1)
if x1_dim == 1:
x1 = self.expand_op(x1, 0)
x1_shape = self.shape_op(x1)
if x2_dim == 1:
x2 = self.expand_op(x2, 1)
x2_shape = self.shape_op(x2)
x1_broadcast_shape, x2_broadcast_shape = get_broadcast_matmul_shape(x1_shape, x2_shape)
x1_broadcast_to = P.BroadcastTo(x1_broadcast_shape)
x2_broadcast_to = P.BroadcastTo(x2_broadcast_shape)
if x1_broadcast_shape != x1_shape:
x1 = x1_broadcast_to(x1)
if x2_broadcast_shape != x2_shape:
x2 = x2_broadcast_to(x2)
matmul_broadcast = matmul_op(x1, x2)
if x1_dim == 1:
matmul_broadcast = self.squeeze_left_op(matmul_broadcast)
if x2_dim == 1:
matmul_broadcast = self.squeeze_right_op(matmul_broadcast)
return matmul_broadcast
class Moments(Cell):
"""
Calculates the mean and variance of `x`.
Args:
axis (Union[int, tuple(int)]): Calculates the mean and variance along the specified axis. Default: ().
        keep_dims (bool): If true, the dimensions of mean and variance are identical with the input's.
            If false, don't keep these dimensions. Default: False.
Inputs:
- **input_x** (Tensor) - The tensor to be calculated. Only float16 and float32 are supported.
Outputs:
        - **mean** (Tensor) - The mean of input x, with the same data type as input x.
        - **variance** (Tensor) - The variance of input x, with the same data type as input x.
Raises:
TypeError: If `axis` is not one of int, tuple, None.
TypeError: If `keep_dims` is neither bool nor None.
TypeError: If dtype of `input_x` is neither float16 nor float32.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> net = nn.Moments(axis=3, keep_dims=True)
>>> input_x = Tensor(np.array([[[[1, 2, 3, 4], [3, 4, 5, 6]]]]), mindspore.float32)
>>> output = net(input_x)
>>> print(output)
(Tensor(shape=[1, 1, 2, 1], dtype=Float32, value=
[[[[ 2.50000000e+00],
[ 4.50000000e+00]]]]), Tensor(shape=[1, 1, 2, 1], dtype=Float32, value=
[[[[ 1.25000000e+00],
[ 1.25000000e+00]]]]))
"""
def __init__(self, axis=None, keep_dims=None):
super(Moments, self).__init__()
if axis is None:
axis = ()
if isinstance(axis, tuple):
for idx, item in enumerate(axis):
validator.check_value_type("axis[%d]" % idx, item, [int], self.cls_name)
self.axis = validator.check_value_type('axis', axis, [int, tuple], self.cls_name)
if keep_dims is None:
keep_dims = False
self.keep_dims = validator.check_value_type('keep_dims', keep_dims, [bool], self.cls_name)
self.cast = P.Cast()
self.reduce_mean = P.ReduceMean(keep_dims=True)
self.square_diff = P.SquaredDifference()
self.squeeze = P.Squeeze(self.axis)
def construct(self, x):
tensor_dtype = F.dtype(x)
_check_input_dtype("input x", tensor_dtype, [mstype.float16, mstype.float32], self.cls_name)
if tensor_dtype == mstype.float16:
x = self.cast(x, mstype.float32)
mean = self.reduce_mean(x, self.axis)
variance = self.reduce_mean(self.square_diff(x, F.stop_gradient(mean)), self.axis)
if not self.keep_dims:
mean = self.squeeze(mean)
variance = self.squeeze(variance)
if tensor_dtype == mstype.float16:
mean = self.cast(mean, mstype.float16)
variance = self.cast(variance, mstype.float16)
return mean, variance
return mean, variance
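# Illustrative aside (not part of MindSpore): a hypothetical NumPy cross-check of
# Moments for small arrays. np.var is the population variance, matching the mean of
# squared differences computed above; axis=() is mapped to None because ReduceMean
# treats an empty axis as "reduce everything".
def _np_moments_reference(x, axis=None, keep_dims=False):
    """Hypothetical NumPy sketch returning (mean, variance) like Moments."""
    x = np.asarray(x, dtype=np.float32)
    axis = None if axis in (None, ()) else axis
    mean = np.mean(x, axis=axis, keepdims=keep_dims)
    variance = np.var(x, axis=axis, keepdims=keep_dims)
    return mean, variance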
class MatInverse(Cell):
"""
Calculates the inverse of Positive-Definite Hermitian matrix using Cholesky decomposition.
Inputs:
- **a** (Tensor[Number]) - The input tensor. It must be a positive-definite matrix.
With float16 or float32 data type.
Outputs:
Tensor, has the same dtype as the `a`.
Raises:
TypeError: If dtype of `a` is neither float16 nor float32.
Supported Platforms:
``GPU``
Examples:
>>> input_a = Tensor(np.array([[4, 12, -16], [12, 37, -43], [-16, -43, 98]]).astype(np.float32))
>>> op = nn.MatInverse()
>>> output = op(input_a)
>>> print(output)
[[49.36112 -13.555558 2.1111116]
[-13.555558 3.7777784 -0.5555557]
[2.1111116 -0.5555557 0.11111111]]
"""
def __init__(self):
super(MatInverse, self).__init__()
self.dtype = P.DType()
self.choleskytrsm = P.CholeskyTrsm()
self.matmul = MatMul(transpose_x1=True)
def construct(self, a):
input_dtype = self.dtype(a)
_check_input_dtype("input_a", input_dtype, [mstype.float16, mstype.float32], self.cls_name)
l_inverse = self.choleskytrsm(a)
a_inverse = self.matmul(l_inverse, l_inverse)
return a_inverse
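# Illustrative aside (not part of MindSpore): the same inverse can be cross-checked in
# NumPy via the Cholesky factor, since A = L @ L.T implies
# inv(A) = inv(L).T @ inv(L). Hypothetical sketch for small symmetric
# positive-definite matrices only.
def _np_cholesky_inverse_reference(a):
    """Hypothetical NumPy cross-check for MatInverse."""
    l = np.linalg.cholesky(np.asarray(a, dtype=np.float64))
    l_inv = np.linalg.inv(l)
    return l_inv.T @ l_inv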
class MatDet(Cell):
"""
Calculates the determinant of Positive-Definite Hermitian matrix using Cholesky decomposition.
Inputs:
- **a** (Tensor[Number]) - The input tensor. It must be a positive-definite matrix.
With float16 or float32 data type.
Outputs:
Tensor, has the same dtype as the `a`.
Raises:
TypeError: If dtype of `a` is neither float16 nor float32.
Supported Platforms:
``GPU``
Examples:
>>> input_a = Tensor(np.array([[4, 12, -16], [12, 37, -43], [-16, -43, 98]]).astype(np.float32))
>>> op = nn.MatDet()
>>> output = op(input_a)
>>> print(output)
35.999996
"""
def __init__(self):
super(MatDet, self).__init__()
self.dtype = P.DType()
self.cholesky = P.Cholesky()
self.det_triangle = P.DetTriangle()
self.square = P.Square()
def construct(self, a):
input_dtype = self.dtype(a)
_check_input_dtype("input_a", input_dtype, [mstype.float16, mstype.float32], self.cls_name)
l = self.cholesky(a)
l_det = self.det_triangle(l)
a_det = self.square(l_det)
return a_det
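# Illustrative aside (not part of MindSpore): the determinant above can be
# cross-checked in NumPy via the Cholesky factor, since det(A) = det(L)**2 and the
# determinant of a triangular matrix is the product of its diagonal entries.
def _np_cholesky_det_reference(a):
    """Hypothetical NumPy cross-check for MatDet on small positive-definite inputs."""
    l = np.linalg.cholesky(np.asarray(a, dtype=np.float64))
    return float(np.prod(np.diag(l)) ** 2)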
|
the-stack_106_20901
|
from django.core.exceptions import PermissionDenied
from django.contrib import messages
from django.shortcuts import get_object_or_404, redirect
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
from trix.trix_core.models import Course, User
from trix.trix_course.views import base
class AddCourseAdminListView(base.TrixCourseBaseView):
model = Course
template_name = "trix_course/add_course_admin.django.html"
paginate_by = 20
def get(self, request, **kwargs):
course_id = kwargs['course_id']
course = get_object_or_404(Course, id=course_id)
if request.user.is_course_owner(course):
return super(AddCourseAdminListView, self).get(request, **kwargs)
else:
raise PermissionDenied
def get_queryset(self):
search = self.request.GET.get('q')
course = Course.objects.get(id=self.kwargs['course_id'])
users = User.objects.filter(is_active=True).exclude(owner=course)
if not self.request.user.is_superuser:
users = users.exclude(admin=course)
if search:
users = users.filter(email__icontains=search)
return users
def get_context_data(self, **kwargs):
context = super(AddCourseAdminListView, self).get_context_data(**kwargs)
context['course'] = Course.objects.get(id=self.kwargs['course_id'])
return context
class UpdateCourseAdminView(base.TrixCourseBaseView):
model = Course
def post(self, request, *args, **kwargs):
'''
Adds a user as a course admin or owner.
'''
course = Course.objects.get(id=kwargs['course_id'])
if not request.user.is_course_owner(course):
raise PermissionDenied
# Add admin or owner based on type of post.
if 'admin' in request.POST:
self._add_admins(request, course, [kwargs['user_id']])
elif 'owner' in request.POST:
self._add_owners(request, course, [kwargs['user_id']])
elif 'admin_list' in request.POST:
self._add_admins(request, course, request.POST.getlist('selected_students'))
elif 'owner_list' in request.POST:
self._add_owners(request, course, request.POST.getlist('selected_students'))
return redirect(reverse('trix_add_admin', kwargs={'course_id': kwargs['course_id']}))
def _add_admins(self, request, course, id_list):
for user_id in id_list:
user = User.objects.get(id=user_id)
course.admins.add(user)
messages.success(request, _(f"{user.displayname} added as a course admin."))
def _add_owners(self, request, course, id_list):
for user_id in id_list:
user = User.objects.get(id=user_id)
course.admins.add(user)
course.owner.add(user)
messages.success(request, _(f"{user.displayname} added as course owner."))
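# Hypothetical URL wiring sketch (not part of this module; the real project's
# urls.py may differ). The only name taken from the code above is 'trix_add_admin',
# which UpdateCourseAdminView redirects to; the module path, the second route name
# and the URL patterns are assumptions for illustration:
#
#     from django.urls import path
#     from trix.trix_course.views import course_admin
#
#     urlpatterns = [
#         path('course/<int:course_id>/admins/add/',
#              course_admin.AddCourseAdminListView.as_view(), name='trix_add_admin'),
#         path('course/<int:course_id>/admins/<int:user_id>/update/',
#              course_admin.UpdateCourseAdminView.as_view(), name='trix_update_admin'),
#     ]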
|
the-stack_106_20903
|
"""
Planar Point Pattern Class
"""
import numpy as np
import sys
from pysal.lib.cg import KDTree
from .centrography import hull
from .window import as_window, poly_from_bbox
from .util import cached_property
import pandas as pd
from matplotlib import pyplot as plt
from matplotlib.collections import PatchCollection
from matplotlib.patches import Polygon
__author__ = "Serge Rey [email protected]"
__all__ = ['PointPattern']
if sys.version_info[0] > 2:
xrange = range
class PointPattern(object):
"""
Planar Point Pattern Class 2-D.
Parameters
----------
points: array
(n,p), n points with p >= 2 attributes on each
point. Two attributes must comprise the spatial
coordinate pair. Default is that the first two
attributes are the x and y spatial coordinates.
window: :class:`.Window`
Bounding geometric object for the point pattern.
If not specified, window will be set to the minimum
bounding rectangle of the point pattern.
names: list
The names of the attributes.
coord_names: list
The names of the attributes defining the two spatial
coordinates.
Examples
--------
>>> from pointpats import PointPattern
>>> points = [[66.22, 32.54], [22.52, 22.39], [31.01, 81.21],
... [9.47, 31.02], [30.78, 60.10], [75.21, 58.93],
... [79.26, 7.68], [8.23, 39.93], [98.73, 77.17],
... [89.78, 42.53], [65.19, 92.08], [54.46, 8.48]]
>>> pp = PointPattern(points)
>>> pp.n
12
>>> pp.mean_nnd
21.612139802089246
>>> pp.lambda_mbb
0.0015710507711240867
>>> pp.lambda_hull
0.0022667153468973137
>>> pp.hull_area
5294.00395
>>> pp.mbb_area
7638.200000000001
"""
def __init__(self, points, window=None, names=None, coord_names=None):
# first two series in df are x, y unless coor_names and names are
# specified
self.df = pd.DataFrame(points)
n, p = self.df.shape
self._n_marks = p - 2
if coord_names is None:
if names is not None:
coord_names = names[:2]
else:
coord_names = ['x', 'y']
if names is None:
col_names = coord_names
if p > 2:
for m in range(2, p):
col_names.append("mark_{}".format(m-2))
coord_names = coord_names[:2]
else:
col_names = names
self.coord_names = coord_names
self._x, self._y = coord_names
self.df.columns = col_names
self.points = self.df.loc[:, [self._x, self._y]]
self._n, self._p = self.points.shape
if window is None:
self.set_window(as_window(poly_from_bbox(self.mbb)))
else:
self.set_window(window)
self._facade()
def __len__(self):
"""Return the number of points. Use the expression 'len(pp)'.
Returns
-------
length : int
The number of points in the point pattern.
Examples
--------
>>> from pointpats import PointPattern
>>> points = [[1, 3], [4, 5], [0,0]]
>>> pp = PointPattern(points)
>>> len(pp)
3
"""
return len(self.df)
def __contains__(self, n):
"""Return True if n is a point (a tuple of coordinates), False otherwise.
Use the expression 'n in pp'.
Examples
--------
>>> from pointpats import PointPattern
>>> points = [[1, 3], [4, 5], [0,0]]
>>> pp = PointPattern(points)
>>> [1, 3] in pp
True
"""
name = self.df.columns.values.tolist()
return ((self.df[name[0]] == n[0]) & (self.df[name[1]] == n[1])).any()
def set_window(self, window):
try:
self._window = window
except:
print("not a valid Window object")
def get_window(self):
"""
Bounding geometry for the point pattern
:class:`.window.Window`
"""
if not hasattr(self, '_window') or self._window is None:
# use bbox as window
self.set_window(as_window(poly_from_bbox(self.mbb)))
return self._window
window = property(get_window, set_window)
def summary(self):
'''
Description of the point pattern.
'''
print('Point Pattern')
print("{} points".format(self.n))
print("Bounding rectangle [({},{}), ({},{})]".format(*self.mbb))
print("Area of window: {}".format(self.window.area))
print("Intensity estimate for window: {}".format(self.lambda_window))
print(self.head())
def add_marks(self, marks, mark_names=None):
if mark_names is None:
nm = range(len(marks))
mark_names = ["mark_{}".format(self._n_marks+1+j) for j in nm]
for name, mark in zip(mark_names, marks):
self.df[name] = mark
self._n_marks += 1
def plot(self, window=False, title="Point Pattern", hull=False,
get_ax=False):
"""
Plot function for a point pattern.
Parameters
----------
window : boolean
If window is True, plot window of the point
pattern. If not, don't plot window.
title : string
Name of the figure.
hull : boolean
If hull is True, plot convex hull of the point
pattern. If not, don't plot convex hull.
get_ax : boolean
If get_ax is True, return the current plot ax.
Returns
-------
ax : matplotlib.axes._subplots.AxesSubplot
Current plot ax. Only return it when get_ax is True.
"""
fig, ax = plt.subplots()
plt.plot(self.df[self._x], self.df[self._y], '.')
# plt.scatter(self.df[self._x], self.df[self._y])
plt.title(title)
if window:
patches = []
for part in self.window.parts:
p = Polygon(np.asarray(part))
patches.append(p)
ax.add_collection(PatchCollection(patches, facecolor='w',
edgecolor='k', alpha=0.3))
if hull:
patches = []
p = Polygon(self.hull)
patches.append(p)
ax.add_collection(PatchCollection(patches, facecolor='w',
edgecolor='k', alpha=0.3))
# plt.plot(x, y, '.')
if get_ax:
return ax
def _mbb(self):
"""
Minimum bounding box
"""
mins = self.points.min(axis=0)
maxs = self.points.max(axis=0)
return np.hstack((mins, maxs))
mbb = cached_property(_mbb)
def _mbb_area(self):
"""
Area of minimum bounding box
"""
return np.product(self.mbb[[2, 3]]-self.mbb[[0, 1]])
mbb_area = cached_property(_mbb_area)
def _n(self):
"""
Number of points
"""
return self.points.shape[0]
n = cached_property(_n)
def _lambda_mbb(self):
"""
Intensity based on minimum bounding box
"""
return self.n * 1. / self.mbb_area
lambda_mbb = cached_property(_lambda_mbb)
def _hull(self):
"""
Points defining convex hull in counterclockwise order
"""
return hull(self.points)
hull = cached_property(_hull)
def _lambda_window(self):
"""
Intensity estimate based on area of window
The intensity of a point process at point :math:`s_j` can be defined
as:
.. math::
\\lambda(s_j) = \\lim \\limits_{|\\mathbf{A}s_j|
           \\to 0} \\left \\{ \\frac{E(Y(\\mathbf{A}s_j))}{|\\mathbf{A}s_j|}
\\right \\}
where :math:`\\mathbf{A}s_j` is a small region surrounding location
:math:`s_j` with area :math:`|\\mathbf{A}s_j|`, and
:math:`E(Y(\\mathbf{A}s_j))` is the expected number of event points in
:math:`\\mathbf{A}s_j`.
The intensity is the mean number of event points per unit of area at
point :math:`s_j`.
"""
return self.n / self.window.area
lambda_window = cached_property(_lambda_window)
def _hull_area(self):
"""
Area of convex hull
"""
h = self.hull
if not np.alltrue(h[0] == h[-1]):
# not in closed cartographic form
h = np.vstack((h, h[0]))
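        # Shoelace formula: with the hull vertices in counterclockwise order, the
        # signed area 0.5 * sum(x_i * y_{i+1} - x_{i+1} * y_i) is positive.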
s = h[:-1, 0] * h[1:, 1] - h[1:, 0] * h[:-1, 1]
return s.sum() / 2.
hull_area = cached_property(_hull_area)
def _lambda_hull(self):
"""
Intensity based on convex hull
"""
return self.n * 1. / self.hull_area
lambda_hull = cached_property(_lambda_hull)
def _build_tree(self):
return KDTree(self.points)
tree = cached_property(_build_tree)
def knn(self, k=1):
"""
Find k nearest neighbors for each point in the pattern
Parameters
----------
k: int
number of nearest neighbors to find
Returns
-------
nn: array (n x k)
row i column j contains the id for i's jth nearest neighbor
nnd: array(n x k)
row i column j contains the distance between i and its jth
nearest neighbor
"""
if k < 1:
raise ValueError('k must be at least 1')
nn = self.tree.query(self.tree.data, k=k+1)
return nn[1][:, 1:], nn[0][:, 1:]
def _nn_sum(self):
"""
Nearest neighbor distances
"""
ids, nnd = self.knn(1)
return nnd
nnd = cached_property(_nn_sum) # nearest neighbor distances
def _min_nnd(self):
"""
Min nearest neighbor distance
"""
return self.nnd.min()
min_nnd = cached_property(_min_nnd)
def _max_nnd(self):
"""
Max nearest neighbor distance
"""
return self.nnd.max()
max_nnd = cached_property(_max_nnd)
def _mean_nnd(self):
"""
Mean nearest neighbor distance
"""
return self.nnd.mean()
mean_nnd = cached_property(_mean_nnd)
def find_pairs(self, r):
"""
Find all pairs of points in the pattern that are within r units of each
other
Parameters
----------
r: float
diameter of pair circle
Returns
-------
s: set
pairs of points within r units of each other
"""
return self.tree.query_pairs(r)
def knn_other(self, other, k=1):
"""
Find k nearest neighbors in the pattern for each point in other
Parameters
----------
other: :class:`PointPattern`
k: int
number of nearest neighbors to find
Returns
-------
nn: array (n x k)
row i column j contains the id for i's jth nearest neighbor
nnd: array(n x k)
row i column j contains the distance between i and its jth
nearest neighbor
"""
if k < 1:
raise ValueError('k must be at least 1')
try:
nn = self.tree.query(other.points, k=k)
except:
nn = self.tree.query(other, k=k)
return nn[1], nn[0]
def explode(self, mark):
"""
Explode a marked point pattern into a sequence of individual point
patterns. If the mark has k unique values, then the sequence will be of
length k.
Parameters
----------
mark: string
The label of the mark to use for the subsetting
Returns
-------
pps: list
sequence of :class:`PointPattern` instances
"""
uv = np.unique(self.df[mark])
pps = [self.df[self.df[mark] == v] for v in uv]
names = self.df.columns.values.tolist()
cnames = self.coord_names
return[PointPattern(pp, names=names, coord_names=cnames) for pp in pps]
def unique(self):
""" Remove duplicate points in the point pattern.
Two points in a point pattern are deemed to be identical if their
coordinates are the same, and their marks are the same (if any)
Returns
-------
pp: list
A deduplicated :class:`PointPattern` instance
Examples
--------
>>> from pointpats import PointPattern
>>> points = [[1.2, 2.1], [1.2, 2.1], [0, 1], [1, 2]]
>>> pp = PointPattern(points)
>>> pp.unique().df
x y
0 1.2 2.1
2 0.0 1.0
3 1.0 2.0
"""
names = self.df.columns.values.tolist()
coord_names = self.coord_names
        window = self.window
unique_df = self.df.drop_duplicates()
return PointPattern(unique_df, names=names, coord_names=coord_names,
window=window)
def superimpose(self, point_pattern):
"""Returns a superimposed point pattern.
Parameters
----------
point_pattern:
:class:`PointPattern` instance
Returns
-------
superimposed :
:class:`PointPattern` instance
Examples
--------
>>> from pointpats import PointPattern
>>> points1 = [[1, 3], [4, 5], [0, 0]]
>>> points2 = [[5, 6], [1, 4], [0, 0]]
>>> pp1 = PointPattern(points1)
>>> pp2 = PointPattern(points2)
>>> pp1.superimpose(pp2).points
x y
0 1 3
1 4 5
2 0 0
0 5 6
1 1 4
"""
names_pp1 = self.df.columns.values.tolist()
cnames_pp1 = self.coord_names
names_pp2 = point_pattern.df.columns.values.tolist()
cnames_pp2 = point_pattern.coord_names
if names_pp1 != names_pp2 or cnames_pp1 != cnames_pp2:
            raise TypeError('Both point patterns should have similar '
                            'attributes and spatial coordinates')
pp = pd.concat((self.df, point_pattern.df))
pp = pp.drop_duplicates()
return PointPattern(pp, names=names_pp1, coord_names=cnames_pp1)
def flip_coordinates(self):
""" Flips the coordinates of a point pattern.
Doesn't change the structure of data frame. This function swaps
`_x` and `_y` variables, which are used to represent coordinates.
"""
self._x, self._y = self._y, self._x
# Pandas facade
def _facade(self):
self.head = self.df.head
self.tail = self.df.tail
|
the-stack_106_20905
|
#!/usr/bin/env python3
import os
import sys
import tempfile
import unittest
from subvol_utils import Subvol
from .temp_subvolumes import TempSubvolumes
class SubvolTestCase(unittest.TestCase):
'''
NB: The test here is partially redundant with demo_sendstreams, but
    coverage is easier to manage when there's a clean, separate unit test.
'''
def setUp(self):
self.temp_subvols = TempSubvolumes(sys.argv[0])
# This is not a great pattern because the temporary directory or
# temporary subvolumes will not get exception information in
# __exit__. However, this avoids breaking the abstraction barriers
# that e.g. overloading `TestCase.run` would violate.
self.temp_subvols.__enter__()
self.addCleanup(self.temp_subvols.__exit__, None, None, None)
def test_create_and_snapshot_and_already_exists(self):
p = self.temp_subvols.create('parent')
p2 = Subvol(p.path(), already_exists=True)
        self.assertEqual(p.path(), p2.path())
c = self.temp_subvols.snapshot(p2, 'child')
def test_does_not_exist(self):
with tempfile.TemporaryDirectory() as td:
with self.assertRaisesRegex(AssertionError, 'No btrfs subvol'):
Subvol(td, already_exists=True)
sv = Subvol(td)
with self.assertRaisesRegex(AssertionError, 'exists is False'):
sv.run_as_root(['true'])
def test_path(self):
# We are only going to do path manipulations in this test.
sv = Subvol('/subvol/need/not/exist')
for bad_path in ['..', 'a/../../b/c/d', '../c/d/e']:
with self.assertRaisesRegex(AssertionError, 'outside the subvol'):
sv.path(bad_path)
self.assertEqual(sv.path('a/b'), sv.path('/a/b/'))
self.assertEqual(b'a/b', os.path.relpath(sv.path('a/b'), sv.path()))
self.assertTrue(not sv.path('.').endswith(b'/.'))
def test_mark_readonly_and_get_sendstream(self):
sv = self.temp_subvols.create('subvol')
sv.run_as_root(['touch', sv.path('abracadabra')])
self.assertIn(b'abracadabra', sv.mark_readonly_and_get_sendstream())
|
the-stack_106_20906
|
# model settings
model = dict(
type='CascadeRCNN',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5),
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
scales=[8],
ratios=[0.5, 1.0, 2.0],
strides=[4, 8, 16, 32, 64]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
roi_head=dict(
type='CascadeRoIHead',
num_stages=3,
stage_loss_weights=[1, 0.5, 0.25],
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=[
dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=6,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0)),
dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=6,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.05, 0.05, 0.1, 0.1]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0)),
dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=6,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.033, 0.033, 0.067, 0.067]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
]),
# model training and testing settings
train_cfg=dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
match_low_quality=True,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_pre=2000,
max_per_img=2000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=[
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
match_low_quality=False,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False),
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.6,
neg_iou_thr=0.6,
min_pos_iou=0.6,
match_low_quality=False,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False),
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.7,
min_pos_iou=0.7,
match_low_quality=False,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False)
]),
test_cfg=dict(
rpn=dict(
nms_pre=1000,
max_per_img=1000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=dict(
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100)))
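# Usage sketch (assumed standard MMDetection workflow; exact file paths may differ):
#   python tools/train.py <path/to/this_config>.py
#   python tools/test.py <path/to/this_config>.py work_dirs/<run>/latest.pth --eval bbox
# The three rcnn stages above re-sample proposals with progressively stricter IoU
# thresholds (0.5 -> 0.6 -> 0.7), which is the core idea of Cascade R-CNN.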
|
the-stack_106_20907
|
CHANNEL = "#bugbyte-ita"
BOTNAME = "CovidBot"
IRC_SERVER_ADDRESS = "irc.freenode.net"
HTTP_REQUEST_TIMEOUT = 20 # timeout in seconds
# https://console.cloud.google.com/apis/credentials
YOUTUBE_KEY = ''
# https://programmablesearchengine.google.com/
SEARCH_ENGINE = ''
# https://newsapi.org/register
NEWSAPI_KEY = ''
# https://openweathermap.org/api
OPENWEATHER_KEY = ''
WOLFRAM_KEY = ''
TELEGRAM_TOKEN = ''
USE_LOCAL_CHATBOT = False
CLEVERBOT_KEY = ''
CHATBOT_KEY = ''
AUTO_SPEAK = False
AUTO_SPEAK_PROBABILITY = 0.1
ENABLE_MINIFLUX = False
MINIFLUX_URL = ''
MINIFLUX_USER = ''
MINIFLUX_PSW = ''
CHESSENGINE_PATH = ''
TWITTER_CONSUMER_KEY=''
TWITTER_CONSUMER_SECRET=''
TWITTER_ACCESS_TOKEN_KEY=''
TWITTER_ACCESS_TOKEN_SECRET=''
|
the-stack_106_20908
|
#! /usr/bin/python
import ck.kernel as ck
import copy
import re
import argparse
# Batch size iteration parameters.
bs={
'start':1,
'stop':1,
'step':1,
'default':1
}
# Number of statistical repetitions.
num_repetitions=3
def do(i, arg):
# Detect basic platform info.
ii={'action':'detect',
'module_uoa':'platform',
'out':'out'}
r=ck.access(ii)
if r['return']>0: return r
# Host and target OS params.
hos=r['host_os_uoa']
hosd=r['host_os_dict']
tos=r['os_uoa']
tosd=r['os_dict']
tdid=r['device_id']
# Program and command.
program='caffe-time'
cmd_key='default'
tp='opencl'
if tp=='opencl' or tp=='cuda':
program=program+'-'+tp
# Load Caffe program meta and desc to check deps.
ii={'action':'load',
'module_uoa':'program',
'data_uoa':program}
rx=ck.access(ii)
if rx['return']>0: return rx
mm=rx['dict']
# Get compile-time and run-time deps.
cdeps=mm.get('compile_deps',{})
rdeps=mm.get('run_deps',{})
# Merge rdeps with cdeps for setting up the pipeline (which uses
# common deps), but tag them as "for_run_time".
for k in rdeps:
cdeps[k]=rdeps[k]
cdeps[k]['for_run_time']='yes'
# Caffe libs.
depl=copy.deepcopy(cdeps['lib-caffe'])
if (arg.tos is not None) and (arg.did is not None):
tos=arg.tos
tdid=arg.did
ii={'action':'resolve',
'module_uoa':'env',
'host_os':hos,
'target_os':tos,
'device_id':tdid,
'out':'con',
'deps':{'lib-caffe':copy.deepcopy(depl)}
}
r=ck.access(ii)
if r['return']>0: return r
udepl=r['deps']['lib-caffe'].get('choices',[]) # All UOAs of env for Caffe libs.
if len(udepl)==0:
return {'return':1, 'error':'no installed Caffe libs'}
# Caffe models.
depm=copy.deepcopy(cdeps['caffemodel'])
ii={'action':'resolve',
'module_uoa':'env',
'host_os':hos,
'target_os':tos,
'device_id':tdid,
'out':'con',
'deps':{'caffemodel':copy.deepcopy(depm)}
}
r=ck.access(ii)
if r['return']>0: return r
udepm=r['deps']['caffemodel'].get('choices',[]) # All UOAs of env for Caffe models.
if len(udepm)==0:
return {'return':1, 'error':'no installed Caffe models'}
# Prepare pipeline.
cdeps['lib-caffe']['uoa']=udepl[0]
cdeps['caffemodel']['uoa']=udepm[0]
ii={'action':'pipeline',
'prepare':'yes',
'dependencies':cdeps,
'module_uoa':'program',
'data_uoa':program,
'cmd_key':cmd_key,
'target_os':tos,
'device_id':tdid,
'dvdt_prof':'yes',
'env':{
'CK_CAFFE_SKIP_BACKWARD':1
},
'no_state_check':'yes',
'no_compiler_description':'yes',
'skip_calibration':'yes',
'cpu_freq':'max',
'gpu_freq':'max',
'flags':'-O3',
'speed':'no',
'energy':'no',
'skip_print_timers':'yes',
'skip_file_print':'yes',
'out':'con'
}
r=ck.access(ii)
if r['return']>0: return r
fail=r.get('fail','')
if fail=='yes':
return {'return':10, 'error':'pipeline failed ('+r.get('fail_reason','')+')'}
ready=r.get('ready','')
if ready!='yes':
return {'return':11, 'error':'pipeline not ready'}
state=r['state']
tmp_dir=state['tmp_dir']
# Remember resolved deps for this benchmarking session.
xcdeps=r.get('dependencies',{})
# Clean pipeline.
if 'ready' in r: del(r['ready'])
if 'fail' in r: del(r['fail'])
if 'return' in r: del(r['return'])
pipeline=copy.deepcopy(r)
lib_tags_to_skip = [
"caffe",
"32bits",
"bvlc",
"host-os-linux-32",
"lib",
"target-os-linux-32",
"v0", "v0.0","v0.0.0","v0.0.0.0","vopencl"
]
# For each Caffe lib.*******************************************************
for lib_uoa in udepl:
# Load Caffe lib.
ii={'action':'load',
'module_uoa':'env',
'data_uoa':lib_uoa}
r=ck.access(ii)
if r['return']>0: return r
# Get the tags from e.g. 'BVLC Caffe framework (libdnn,viennacl)'
lib_tags = ""
for t in r['dict']['tags']:
if t not in lib_tags_to_skip:
lib_tags += t + '-'
lib_tags = lib_tags[:-1]
skip_compile='no'
# For each Caffe model.*************************************************
for model_uoa in udepm:
# Load Caffe model.
ii={'action':'load',
'module_uoa':'env',
'data_uoa':model_uoa}
r=ck.access(ii)
if r['return']>0: return r
# Get the tags from e.g. 'Caffe model (net and weights) (deepscale, squeezenet, 1.1)'
model_name=r['data_name']
model_tags = re.match('Caffe model \(net and weights\) \((?P<tags>.*)\)', model_name)
model_tags = model_tags.group('tags').replace(' ', '').replace(',', '-')
# Skip some models with "in [..]" or "not in [..]".
if model_tags not in [ 'bvlc-alexnet', 'bvlc-googlenet', 'deepscale-squeezenet-1.1', 'deepscale-squeezenet-1.0' ]: continue
record_repo='local'
record_uoa='dvdt-prof-'+model_tags+'-'+lib_tags
# Prepare pipeline.
ck.out('---------------------------------------------------------------------------------------')
ck.out('%s - %s' % (lib_tags, lib_uoa))
ck.out('%s - %s' % (model_name, model_uoa))
ck.out('Experiment - %s:%s' % (record_repo, record_uoa))
# Prepare autotuning input.
cpipeline=copy.deepcopy(pipeline)
# Reset deps and change UOA.
new_deps={'lib-caffe':copy.deepcopy(depl),
'caffemodel':copy.deepcopy(depm)}
new_deps['lib-caffe']['uoa']=lib_uoa
new_deps['caffemodel']['uoa']=model_uoa
jj={'action':'resolve',
'module_uoa':'env',
'host_os':hos,
'target_os':tos,
'device_id':tdid,
'deps':new_deps}
r=ck.access(jj)
if r['return']>0: return r
cpipeline['dependencies'].update(new_deps)
cpipeline['no_clean']=skip_compile
cpipeline['no_compile']=skip_compile
cpipeline['cmd_key']=cmd_key
ii={'action':'autotune',
'module_uoa':'pipeline',
'data_uoa':'program',
'choices_order':[
[
'##choices#env#CK_CAFFE_BATCH_SIZE'
]
],
'choices_selection':[
{'type':'loop', 'start':bs['start'], 'stop':bs['stop'], 'step':bs['step'], 'default':bs['default']}
],
'features_keys_to_process':['##choices#*'],
'process_multi_keys':['##choices#env#CK_CAFFE_BATCH_SIZE'],
'iterations':-1,
'repetitions':num_repetitions,
'record':'yes',
'record_failed':'yes',
'record_params':{
'search_point_by_features':'yes'
},
'record_repo':record_repo,
'record_uoa':record_uoa,
'tags':[ 'dvdt-prof', program, model_tags, lib_tags ],
'pipeline':cpipeline,
'out':'con'}
r=ck.access(ii)
if r['return']>0: return r
fail=r.get('fail','')
if fail=='yes':
return {'return':10, 'error':'pipeline failed ('+r.get('fail_reason','')+')'}
skip_compile='yes'
return {'return':0}
parser = argparse.ArgumentParser(description='Pipeline')
parser.add_argument("--target_os", action="store", dest="tos")
parser.add_argument("--device_id", action="store", dest="did")
myarg=parser.parse_args()
r=do({}, myarg)
if r['return']>0: ck.err(r)
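# Usage sketch: the flags below match the argparse definitions at the end of
# this script; the example --target_os / --device_id values are placeholders
# and depend on which CK environments are installed locally.
#   python <this_script>.py --target_os=android21-arm64 --device_id=0123456789ABCDEF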
|
the-stack_106_20909
|
import warnings
warnings.filterwarnings("ignore", category=FutureWarning)
import os
import argparse
import xml.etree.ElementTree as ET
import pandas as pd
import numpy as np
import csv
import time
import datetime
# Useful if you want to perform stemming.
import nltk
nltk.download("punkt")
tokenizer = nltk.RegexpTokenizer(r"\w+")
porter_stemmer = nltk.stem.PorterStemmer()
snowball_stemmer = nltk.stem.snowball.SnowballStemmer("english")
DEBUG = False
categories_file_name = r'/workspace/datasets/product_data/categories/categories_0001_abcat0010000_to_pcmcat99300050000.xml'
queries_file_name = r'/workspace/datasets/train.csv'
output_file_name = r'/workspace/datasets/labeled_query_data.txt'
parser = argparse.ArgumentParser(description='Process arguments.')
general = parser.add_argument_group("general")
general.add_argument("--min_queries", default=1, help="The minimum number of queries per category label (default is 1)")
general.add_argument("--query_normalize", default="std_tokenizer_porter_stemmer", help="Query normalizer (std_tokenizer_porter_stemmer, daniel_tokenizer_snowball_stemmer")
general.add_argument("--output", default=output_file_name, help="the file to output to")
args = parser.parse_args()
output_file_name = args.output
min_queries = int(args.min_queries)
query_normalize = args.query_normalize
print(f"min_queries={min_queries}, query_normalize={query_normalize}, output_file_name={output_file_name}")
# The root category, named Best Buy with id cat00000, doesn't have a parent.
root_category_id = 'cat00000'
tree = ET.parse(categories_file_name)
root = tree.getroot()
# Parse the category XML file to map each category id to its parent category id in a dataframe.
time_start = time.time()
categories = []
parents = []
for child in root:
id = child.find('id').text
cat_path = child.find('path')
cat_path_ids = [cat.find('id').text for cat in cat_path]
leaf_id = cat_path_ids[-1]
if leaf_id != root_category_id:
categories.append(leaf_id)
parents.append(cat_path_ids[-2])
print(f"Processed {len(categories)} categories from {categories_file_name} in {datetime.timedelta(seconds=time.time() - time_start)}")
parents_df = pd.DataFrame(list(zip(categories, parents)), columns =['category', 'parent'])
# IMPLEMENTED: Convert queries to lowercase, and optionally implement other normalization, like stemming.
def normalize_query(query_text, analyzer = "std_tokenizer_porter_stemmer"):
if analyzer == 'std_tokenizer_porter_stemmer':
normalized_query = " ".join([porter_stemmer.stem(token.lower()) for token in tokenizer.tokenize(query_text)])
elif analyzer == 'daniel_tokenizer_snowball_stemmer':
# Daniel's transform from week #3
ret = query_text.lower()
ret = ''.join(c for c in ret if c.isalpha() or c.isnumeric() or c=='-' or c==' ' or c =='.')
normalized_query = ' '.join(map(snowball_stemmer.stem, ret.split(' ')))
else:
raise Exception(f"Unknown analyzer \"{analyzer}\"")
if DEBUG: print(f"Query normalization: {query_text} --> {normalized_query}")
return normalized_query
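# Illustrative example (exact stems depend on the installed NLTK version):
#   normalize_query("Beats By Dr. Dre headphones")
#   -> "beat by dr dre headphon"         # std_tokenizer_porter_stemmer
#   normalize_query("Beats By Dr. Dre headphones", analyzer="daniel_tokenizer_snowball_stemmer")
#   -> "beat by dr. dre headphon"        # keeps '.', splits on single spaces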
# IMPLEMENTED: Roll up categories to ancestors to satisfy the minimum number of queries per category.
MAX_LOOP_COUNT = 10
QUERY_COUNT_THRESHOLD = 100
def prune_categories(queries_file_name, categories, parents_df, max_loop_count = -1, query_count_threshold = QUERY_COUNT_THRESHOLD):
print(f"Pruning {queries_file_name} with max_loop_count={max_loop_count}, query_count_threshold={query_count_threshold}")
time_start = time.time()
# Load the training query/category data
print(f"Loading the train data from {queries_file_name}...")
df = pd.read_csv(queries_file_name)[['category', 'query']]
# Keep queries with known categories only
df = df[df['category'].isin(categories)]
# Initialize the LABEL to the category as read from train.csv
df['label'] = df['category']
# Rollup audit column
df['audit'] = df['label']
loop_count = 0
early_exit = True
# Use max_loop_count = -1 to prevent early exit
if max_loop_count < 0:
print(f"WARN: No safety max loop count!")
while max_loop_count < 0 or loop_count < max_loop_count:
loop_count += 1
print(f"Loop {loop_count}:")
# Get the label's parent (first iteration) or update it (subsequent iterations)
df = df.merge(parents_df, 'left', left_on = 'label', right_on = 'category', suffixes = (None, "_merged_right"), indicator = True)
assert df[(df['_merge'] != 'both') & (df['label'] != root_category_id)].shape[0] == 0
df.drop(columns=['_merge', 'category_merged_right'], inplace = True)
# Group by label to count the number of queries per label so far
grouped_by_label_df = df[['label', 'query']].groupby(['label'], as_index = False).count()
grouped_by_label_df.columns = ['label', 'query_count']
df = df.merge(grouped_by_label_df, 'left', 'label', indicator = True)
assert df[df['_merge'] != 'both'].shape[0] == 0
df.drop(columns=['_merge'], inplace = True)
print(f"\t{grouped_by_label_df.shape[0]} unique categories")
# Identify the labels whose query count < threshold
labels_under_threshold_df = grouped_by_label_df[grouped_by_label_df['query_count'] < query_count_threshold]
# Break if all labels' query counts are >= threshold
if labels_under_threshold_df.shape[0] == 0:
print(f"\tNo label left < {query_count_threshold}")
print(f"\tEnded with {grouped_by_label_df.shape[0]} categories")
early_exit = False
break
else:
print(f"\t{labels_under_threshold_df.shape[0]} labels' query counts are < {query_count_threshold}")
df = df.merge(labels_under_threshold_df, 'left', 'label', suffixes = (None, "_under_threshold"), indicator = True)
# Roll up: Set the label to the parent when the category's query count < threshold
print(f"\t\tRolling up...")
df['label'] = df.apply(lambda row: row['parent'] if (row['_merge'] == 'both' and not pd.isnull(row['parent'])) else row['label'], axis = 1)
# 'audit' is an audit column to show the successive rollup(s)
print(f"\t\tAuditing...")
df['audit'] = df.apply(lambda row: row['parent'] + " > " + row['audit'] if (row['_merge'] == 'both' and not pd.isnull(row['parent'])) else row['audit'], axis = 1)
# Reset
df.drop(columns=['parent', '_merge', 'query_count', 'query_count_under_threshold'], inplace = True)
if early_exit:
print(f"WARN: Early exist after {loop_count} loops!")
print(f"Processed {df.shape[0]} queries in {datetime.timedelta(seconds=time.time() - time_start)}")
return df
pruned_df = prune_categories(queries_file_name, categories, parents_df, query_count_threshold = min_queries)
# [END] IMPLEMENTING: Roll up categories to ancestors to satisfy the minimum number of queries per category.
# Create labels in fastText format.
pruned_df['fasttext_label'] = '__label__' + pruned_df['label']
# Normalize the queries
print(f"Normalizing {pruned_df.shape[0]} queries...")
time_start = time.time()
pruned_df['normalized_query'] = pruned_df.apply(lambda row: normalize_query(row['query'], analyzer = query_normalize), axis = 1)
print(f"... in {datetime.timedelta(seconds=time.time() - time_start)}")
# Output labeled query data as a space-separated file, making sure that every category is in the taxonomy.
# df = df[df['category'].isin(categories)]
pruned_df['output'] = pruned_df['fasttext_label'] + ' ' + pruned_df['normalized_query']
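# Each output row is a fastText-style training line, e.g. (hypothetical category id):
#   __label__abcat0101001 led tv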
print(f"Writing train data to {output_file_name}...")
pruned_df[['output']].to_csv(output_file_name, header=False, sep='|', escapechar='\\', quoting=csv.QUOTE_NONE, index=False)
print(f"... in {datetime.timedelta(seconds=time.time() - time_start)}")
|
the-stack_106_20910
|
# Copyright 2012 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import db
from nova import exception
from nova import quota
authorize = extensions.extension_authorizer('compute', 'quota_classes')
class QuotaClassTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('quota_class_set',
selector='quota_class_set')
root.set('id')
for resource in quota.quota_resources:
elem = xmlutil.SubTemplateElement(root, resource)
elem.text = resource
return xmlutil.MasterTemplate(root, 1)
class QuotaClassSetsController(object):
def _format_quota_set(self, quota_class, quota_set):
"""Convert the quota object to a result dict"""
result = dict(id=str(quota_class))
for resource in quota.quota_resources:
result[resource] = quota_set[resource]
return dict(quota_class_set=result)
@wsgi.serializers(xml=QuotaClassTemplate)
def show(self, req, id):
context = req.environ['nova.context']
authorize(context)
try:
db.sqlalchemy.api.authorize_quota_class_context(context, id)
return self._format_quota_set(id,
quota.get_class_quotas(context, id))
except exception.NotAuthorized:
raise webob.exc.HTTPForbidden()
@wsgi.serializers(xml=QuotaClassTemplate)
def update(self, req, id, body):
context = req.environ['nova.context']
authorize(context)
quota_class = id
for key in body['quota_class_set'].keys():
if key in quota.quota_resources:
value = int(body['quota_class_set'][key])
try:
db.quota_class_update(context, quota_class, key, value)
except exception.QuotaClassNotFound:
db.quota_class_create(context, quota_class, key, value)
except exception.AdminRequired:
raise webob.exc.HTTPForbidden()
return {'quota_class_set': quota.get_class_quotas(context,
quota_class)}
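# Illustrative request body for update() (hypothetical values; valid keys are
# whatever quota.quota_resources contains in this Nova version):
#   {"quota_class_set": {"instances": 50, "cores": 100, "ram": 512000}}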
class Quota_classes(extensions.ExtensionDescriptor):
"""Quota classes management support"""
name = "QuotaClasses"
alias = "os-quota-class-sets"
namespace = ("http://docs.openstack.org/compute/ext/"
"quota-classes-sets/api/v1.1")
updated = "2012-03-12T00:00:00+00:00"
def get_resources(self):
resources = []
res = extensions.ResourceExtension('os-quota-class-sets',
QuotaClassSetsController())
resources.append(res)
return resources
|
the-stack_106_20911
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .catalog_item import CatalogItem
class USqlDatabase(CatalogItem):
"""A Data Lake Analytics catalog U-SQL database item.
:param compute_account_name: the name of the Data Lake Analytics account.
:type compute_account_name: str
:param version: the version of the catalog item.
:type version: str
:param name: the name of the database.
:type name: str
"""
_attribute_map = {
'compute_account_name': {'key': 'computeAccountName', 'type': 'str'},
'version': {'key': 'version', 'type': 'str'},
'name': {'key': 'databaseName', 'type': 'str'},
}
def __init__(self, compute_account_name=None, version=None, name=None):
super(USqlDatabase, self).__init__(compute_account_name=compute_account_name, version=version)
self.name = name
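# Minimal usage sketch (hypothetical values):
#   db = USqlDatabase(compute_account_name='myadlaaccount', version='1.0', name='master')
#   assert db.name == 'master'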
|
the-stack_106_20912
|
from collections import namedtuple
import numpy as np
from jesse.helpers import get_candle_source
from jesse.helpers import slice_candles
from jesse.indicators.ma import ma
MACDEXT = namedtuple('MACDEXT', ['macd', 'signal', 'hist'])
def macdext(candles: np.ndarray, fast_period: int = 12, fast_matype: int = 0, slow_period: int = 26,
slow_matype: int = 0, signal_period: int = 9, signal_matype: int = 0, source_type: str = "close",
sequential: bool = False) -> MACDEXT:
"""
MACDEXT - MACD with controllable MA type
:param candles: np.ndarray
:param fast_period: int - default: 12
:param fast_matype: int - default: 0
:param slow_period: int - default: 26
:param slow_matype: int - default: 0
:param signal_period: int - default: 9
:param signal_matype: int - default: 0
:param source_type: str - default: "close"
:param sequential: bool - default: False
:return: MACDEXT(macd, signal, hist)
"""
candles = slice_candles(candles, sequential)
source = get_candle_source(candles, source_type=source_type)
macd = ma(source, period=fast_period, matype=fast_matype, sequential=True) - ma(source, period=slow_period, matype=slow_matype, sequential=True)
macdsignal = ma(macd, period=signal_period, matype=signal_matype, sequential=True)
macdhist = macd - macdsignal
if sequential:
return MACDEXT(macd, macdsignal, macdhist)
else:
return MACDEXT(macd[-1], macdsignal[-1], macdhist[-1])
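# Usage sketch inside a Jesse strategy (assumes the matype codes follow the
# TA-Lib convention used by jesse's `ma` helper, e.g. 0 = SMA, 1 = EMA):
#   macd_line, signal_line, hist = macdext(self.candles, fast_matype=1, slow_matype=1, signal_matype=1)
#   if hist > 0:
#       ...  # bullish momentum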
|
the-stack_106_20917
|
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
description="transparent support for multiple templating languages in Django"
long_description="""\
Smorgasbord makes it possible to use multiple template languages in Django,
even for 3rd party applications that don't use your choice of template
language natively.
Currently supported languages are:
* mako
* jinja2
* cheetah
* STML
"""
VERSION='0.4'
setup(author="Jacob Smullyan",
author_email='[email protected]',
description=description,
long_description=long_description,
license="BSD",
platforms='OS Independent',
name="django-smorgasbord",
url="http://code.google.com/p/smorgasbord/",
classifiers=["Development Status :: 3 - Alpha",
"Environment :: Web Environment",
"Framework :: Django",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python",
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Text Processing :: Markup :: HTML',
],
version=VERSION,
keywords="django jinja2 skunkweb STML cheetah mako templating satimol",
packages=("smorgasbord", "smorgasbord.languages"),
package_dir={'' : '.'}
)
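# Standard setuptools usage for this script, e.g. to build a source distribution:
#   python setup.py sdist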
|
the-stack_106_20920
|
import serial
import random
import time
import sys
import signal
"""
BEFORE RUNNING THIS SCRIPT
Install pyserial. In the Anaconda console (Anaconda prompt) run:
conda install pyserial
RUNNING THIS SCRIPT
In the Anaconda console (Anaconda prompt) run:
cd path/to/script
python data_gen_serial.py
-------------------------------------------------------------------------------
pyserial INFO: http://pyserial.readthedocs.io/en/latest/shortintro.html
Possible timeout values:
1. None: wait forever, blocking call
2. 0: non-blocking mode, return immediately
3. x, where x is bigger than 0, float allowed, timeout blocking call
Configure the serial connection (the parameters differ depending on the device you are
connecting to):
ser = serial.Serial(
port = "/dev/ttyUSB0",
baudrate = 57600,
parity = serial.PARITY_ODD,
stopbits = serial.STOPBITS_TWO,
bytesize = serial.SEVENBITS,
timeout = 1
)
"""
# Global variables with initial values ----------------------------------------
global EAN_12
global EAN_13
global EAN_13_tagged
global ser
ser = serial.Serial()
def signal_handler(sig, frame):
print('Se pulsó Ctrl+C!, saliendo del programa!')
try:
ser.close()
except:
pass
else:
print('Se cerró el puerto OK.')
sys.exit(0)
# Functions --------------------------------------------------------------------
def generar_EAN12():
global EAN_12
EAN_12 = ""
for i in range(12):
EAN_12 = EAN_12 + str(random.randint(0,9))
#print(str(len(EAN_12))+'\r\n')
return
def agregar_checksum(valido = False):
global EAN_12
global EAN_13
if valido == False:
EAN_13 = EAN_12 + 'F'
return
else:
checksum = 0
for i, digit in enumerate(EAN_12):
checksum += int(digit) * 3 if (i % 2 == 0) else int(digit)
agregar = str((10 - (checksum % 10)) % 10)
#print(':'+agregar+':'+'\r\n')
EAN_13 = EAN_12 + agregar
return
def agregar_tags(tag_inicial,tag_final):
global EAN_13
global EAN_13_tagged
EAN_13_tagged = tag_inicial+EAN_13+tag_final
return
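# Illustrative framing (values are hypothetical): with EAN_13 == "123456789012F",
# agregar_tags('(', ')') sets EAN_13_tagged to "(123456789012F)".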
def sendData( tag_inicial,tag_final,valido = False ):
#print( "Dato enviado --------------------------------------------\n" )
generar_EAN12()
agregar_checksum(valido)
agregar_tags(tag_inicial,tag_final)
print ("Enviando: ", EAN_13_tagged)
ser.write(str(EAN_13_tagged).encode())
time.sleep(0.1)
try:
#print ("Leyendo ...")
readOut = ser.read(ser.in_waiting).decode('ascii')
#time.sleep(1)
print ("Recibido: ", readOut)
except:
pass
#print( "Dato enviado: " + EAN_13_tagged + '' )
ser.flushInput() # flush input buffer, discarding all its contents
ser.flushOutput() # flush output buffer, aborting current output
# and discard all that is in buffer
if (readOut[0:-1] == EAN_13_tagged):
estado = "Ok"
else:
estado = "Falla"
print ("Estatus: "+estado)
return
# help command: print the list of commands
def cmd_h():
print( "Comandos disponibles -----------------------------------------------" )
print( " 'h' (help) imprime esta lista de comandos." )
print( " 'q' (quit) Salir del programa." )
print( " '1' Enviar EAN-13 tag inicial erroneo." )
print( " '2' Enviar EAN-13 tag final erroneo." )
print( " '3' Enviar EAN-13 con checksum mal." )
print( " '4' Enviar EAN-13 correcto." )
print( " 'r' Enviar N EAN-13 validos." )
print( "--------------------------------------------------------------------\n" )
return
# command 1: wrong start tag
def cmd_1():
sendData( '<',')',True )
return
# command 2: wrong end tag
def cmd_2():
sendData( '(','>',True )
return
# command 3: wrong checksum
def cmd_3():
sendData( '(',')',False )
return
# command 4: everything OK
def cmd_4():
sendData( '(',')',True )
return
# command r: send 10 valid random EAN-13 codes.
def cmd_r():
for i in range(10):
sendData( '(',')',True )
return
# Initialize and open the serial port ------------------------------------------
def uart_main():
print( "\nconexión al puerto serie ----------------------------------------\n" )
print("Ingrese el puerto serie, ejemplos: /dev/ttyUSB0 , COM1")
print("O bien ingrese 'l' para /dev/ttyUSB0, o 'w' para COM9")
receive = input()
if receive == 'l':
ser.port = "/dev/ttyUSB0" # Puerto por defecto para Linux
else:
if receive == 'w':
ser.port = "COM9" # Puerto por defecto para Windows
else:
ser.port = receive
ser.baudrate = 115200
ser.bytesize = serial.EIGHTBITS # number of bits per bytes # SEVENBITS
ser.parity = serial.PARITY_NONE # set parity check: no parity # PARITY_ODD
ser.stopbits = serial.STOPBITS_ONE # number of stop bits # STOPBITS_TWO
#ser.timeout = None # block read
ser.timeout = 1 # non-block read
#ser.timeout = 2 # timeout block read
ser.xonxoff = False # disable software flow control
ser.rtscts = False # disable hardware (RTS/CTS) flow control
ser.dsrdtr = False # disable hardware (DSR/DTR) flow control
ser.writeTimeout = 2 # timeout for write
try:
ser.open()
except Exception as e:
print("Error abriendo puerto serie.\n" + str(e) + '\nFin de programa.')
exit()
# If the port was opened successfully -------------------------------------------
if ser.isOpen():
print(ser.name + ' abierto.\n')
try:
ser.flushInput() # flush input buffer, discarding all its contents
ser.flushOutput() # flush output buffer, aborting current output
# and discard all that is in buffer
cmd_h() # Print the list of commands
# Infinite loop until the exit command (q) -------------------------------
while True:
command = ""
# get keyboard input
# input = raw_input(">> ") # for Python 2
command = input(">> ") # for Python 3
if command == 'q':
print("Puerto cerrado. Se cierra el programa.")
ser.close()
exit()
elif command == 'h':
cmd_h()
elif command == '1':
cmd_1()
elif command == '2':
cmd_2()
elif command == '3':
cmd_3()
elif command == '4':
cmd_4()
elif command == 'r':
cmd_r()
else:
print("Comando no conocido.")
except Exception as e1:
print("error de comunicación." + str(e1))
else:
print("No se puede abrir el puerto serie.")
exit()
#%%
signal.signal(signal.SIGINT, signal_handler)
def main():
uart_main()
if __name__ == "__main__":
main()
|
the-stack_106_20921
|
"""Script to check the configuration file."""
import argparse
import logging
import os
from collections import OrderedDict, namedtuple
from glob import glob
from typing import Dict, List, Sequence
from unittest.mock import patch
import attr
import voluptuous as vol
from homeassistant import bootstrap, core, loader
from homeassistant.config import (
get_default_config_dir, CONF_CORE, CORE_CONFIG_SCHEMA,
CONF_PACKAGES, merge_packages_config, _format_config_error,
find_config_file, load_yaml_config_file,
extract_domain_configs, config_per_platform)
from homeassistant.util import yaml
from homeassistant.exceptions import HomeAssistantError
REQUIREMENTS = ('colorlog==4.0.2',)
_LOGGER = logging.getLogger(__name__)
# pylint: disable=protected-access
MOCKS = {
'load': ("homeassistant.util.yaml.load_yaml", yaml.load_yaml),
'load*': ("homeassistant.config.load_yaml", yaml.load_yaml),
'secrets': ("homeassistant.util.yaml.secret_yaml", yaml.secret_yaml),
}
SILENCE = (
'homeassistant.scripts.check_config.yaml.clear_secret_cache',
)
PATCHES = {}
C_HEAD = 'bold'
ERROR_STR = 'General Errors'
def color(the_color, *args, reset=None):
"""Color helper."""
from colorlog.escape_codes import escape_codes, parse_colors
try:
if not args:
assert reset is None, "You cannot reset if nothing being printed"
return parse_colors(the_color)
return parse_colors(the_color) + ' '.join(args) + \
escape_codes[reset or 'reset']
except KeyError as k:
raise ValueError("Invalid color {} in {}".format(str(k), the_color))
def run(script_args: List) -> int:
"""Handle ensure config commandline script."""
parser = argparse.ArgumentParser(
description="Check Home Assistant configuration.")
parser.add_argument(
'--script', choices=['check_config'])
parser.add_argument(
'-c', '--config',
default=get_default_config_dir(),
help="Directory that contains the Home Assistant configuration")
parser.add_argument(
'-i', '--info', nargs='?',
default=None, const='all',
help="Show a portion of the config")
parser.add_argument(
'-f', '--files',
action='store_true',
help="Show used configuration files")
parser.add_argument(
'-s', '--secrets',
action='store_true',
help="Show secret information")
args, unknown = parser.parse_known_args()
if unknown:
print(color('red', "Unknown arguments:", ', '.join(unknown)))
config_dir = os.path.join(os.getcwd(), args.config)
print(color('bold', "Testing configuration at", config_dir))
res = check(config_dir, args.secrets)
domain_info = []
if args.info:
domain_info = args.info.split(',')
if args.files:
print(color(C_HEAD, 'yaml files'), '(used /',
color('red', 'not used') + ')')
deps = os.path.join(config_dir, 'deps')
yaml_files = [f for f in glob(os.path.join(config_dir, '**/*.yaml'),
recursive=True)
if not f.startswith(deps)]
for yfn in sorted(yaml_files):
the_color = '' if yfn in res['yaml_files'] else 'red'
print(color(the_color, '-', yfn))
if res['except']:
print(color('bold_white', 'Failed config'))
for domain, config in res['except'].items():
domain_info.append(domain)
print(' ', color('bold_red', domain + ':'),
color('red', '', reset='red'))
dump_dict(config, reset='red')
print(color('reset'))
if domain_info:
if 'all' in domain_info:
print(color('bold_white', 'Successful config (all)'))
for domain, config in res['components'].items():
print(' ', color(C_HEAD, domain + ':'))
dump_dict(config)
else:
print(color('bold_white', 'Successful config (partial)'))
for domain in domain_info:
if domain == ERROR_STR:
continue
print(' ', color(C_HEAD, domain + ':'))
dump_dict(res['components'].get(domain, None))
if args.secrets:
flatsecret = {}
for sfn, sdict in res['secret_cache'].items():
sss = []
for skey in sdict:
if skey in flatsecret:
_LOGGER.error('Duplicated secrets in files %s and %s',
flatsecret[skey], sfn)
flatsecret[skey] = sfn
sss.append(color('green', skey) if skey in res['secrets']
else skey)
print(color(C_HEAD, 'Secrets from', sfn + ':'), ', '.join(sss))
print(color(C_HEAD, 'Used Secrets:'))
for skey, sval in res['secrets'].items():
if sval is None:
print(' -', skey + ':', color('red', "not found"))
continue
print(' -', skey + ':', sval, color('cyan', '[from:', flatsecret
.get(skey, 'keyring') + ']'))
return len(res['except'])
def check(config_dir, secrets=False):
"""Perform a check by mocking hass load functions."""
logging.getLogger('homeassistant.loader').setLevel(logging.CRITICAL)
res = {
'yaml_files': OrderedDict(), # yaml_files loaded
'secrets': OrderedDict(), # secret cache and secrets loaded
'except': OrderedDict(), # exceptions raised (with config)
'components': None, # successful components
'secret_cache': None,
}
# pylint: disable=possibly-unused-variable
def mock_load(filename):
"""Mock hass.util.load_yaml to save config file names."""
res['yaml_files'][filename] = True
return MOCKS['load'][1](filename)
# pylint: disable=possibly-unused-variable
def mock_secrets(ldr, node):
"""Mock _get_secrets."""
try:
val = MOCKS['secrets'][1](ldr, node)
except HomeAssistantError:
val = None
res['secrets'][node.value] = val
return val
# Patches to skip functions
for sil in SILENCE:
PATCHES[sil] = patch(sil)
# Patches with local mock functions
for key, val in MOCKS.items():
if not secrets and key == 'secrets':
continue
# The * in the key is removed to find the mock_function (side_effect)
# This allows us to use one side_effect to patch multiple locations
mock_function = locals()['mock_' + key.replace('*', '')]
PATCHES[key] = patch(val[0], side_effect=mock_function)
# Start all patches
for pat in PATCHES.values():
pat.start()
if secrets:
# Ensure !secrets point to the patched function
yaml.yaml.SafeLoader.add_constructor('!secret', yaml.secret_yaml)
try:
hass = core.HomeAssistant()
hass.config.config_dir = config_dir
res['components'] = check_ha_config_file(hass)
res['secret_cache'] = OrderedDict(yaml.__SECRET_CACHE)
for err in res['components'].errors:
domain = err.domain or ERROR_STR
res['except'].setdefault(domain, []).append(err.message)
if err.config:
res['except'].setdefault(domain, []).append(err.config)
except Exception as err: # pylint: disable=broad-except
_LOGGER.exception("BURB")
print(color('red', 'Fatal error while loading config:'), str(err))
res['except'].setdefault(ERROR_STR, []).append(str(err))
finally:
# Stop all patches
for pat in PATCHES.values():
pat.stop()
if secrets:
# Ensure !secrets point to the original function
yaml.yaml.SafeLoader.add_constructor('!secret', yaml.secret_yaml)
bootstrap.clear_secret_cache()
return res
def line_info(obj, **kwargs):
"""Display line config source."""
if hasattr(obj, '__config_file__'):
return color('cyan', "[source {}:{}]"
.format(obj.__config_file__, obj.__line__ or '?'),
**kwargs)
return '?'
def dump_dict(layer, indent_count=3, listi=False, **kwargs):
"""Display a dict.
A friendly version of print yaml.yaml.dump(config).
"""
def sort_dict_key(val):
"""Return the dict key for sorting."""
key = str(val[0]).lower()
return '0' if key == 'platform' else key
indent_str = indent_count * ' '
if listi or isinstance(layer, list):
indent_str = indent_str[:-1] + '-'
if isinstance(layer, Dict):
for key, value in sorted(layer.items(), key=sort_dict_key):
if isinstance(value, (dict, list)):
print(indent_str, str(key) + ':', line_info(value, **kwargs))
dump_dict(value, indent_count + 2)
else:
print(indent_str, str(key) + ':', value)
indent_str = indent_count * ' '
if isinstance(layer, Sequence):
for i in layer:
if isinstance(i, dict):
dump_dict(i, indent_count + 2, True)
else:
print(' ', indent_str, i)
CheckConfigError = namedtuple(
'CheckConfigError', "message domain config")
@attr.s
class HomeAssistantConfig(OrderedDict):
"""Configuration result with errors attribute."""
errors = attr.ib(default=attr.Factory(list))
def add_error(self, message, domain=None, config=None):
"""Add a single error."""
self.errors.append(CheckConfigError(str(message), domain, config))
return self
def check_ha_config_file(hass):
"""Check if Home Assistant configuration file is valid."""
config_dir = hass.config.config_dir
result = HomeAssistantConfig()
def _pack_error(package, component, config, message):
"""Handle errors from packages: _log_pkg_error."""
message = "Package {} setup failed. Component {} {}".format(
package, component, message)
domain = 'homeassistant.packages.{}.{}'.format(package, component)
pack_config = core_config[CONF_PACKAGES].get(package, config)
result.add_error(message, domain, pack_config)
def _comp_error(ex, domain, config):
"""Handle errors from components: async_log_exception."""
result.add_error(
_format_config_error(ex, domain, config), domain, config)
# Load configuration.yaml
try:
config_path = find_config_file(config_dir)
if not config_path:
return result.add_error("File configuration.yaml not found.")
config = load_yaml_config_file(config_path)
except HomeAssistantError as err:
return result.add_error(
"Error loading {}: {}".format(config_path, err))
finally:
yaml.clear_secret_cache()
# Extract and validate core [homeassistant] config
try:
core_config = config.pop(CONF_CORE, {})
core_config = CORE_CONFIG_SCHEMA(core_config)
result[CONF_CORE] = core_config
except vol.Invalid as err:
result.add_error(err, CONF_CORE, core_config)
core_config = {}
# Merge packages
hass.loop.run_until_complete(merge_packages_config(
hass, config, core_config.get(CONF_PACKAGES, {}), _pack_error))
core_config.pop(CONF_PACKAGES, None)
# Filter out repeating config sections
components = set(key.split(' ')[0] for key in config.keys())
# Process and validate config
for domain in components:
try:
integration = hass.loop.run_until_complete(
loader.async_get_integration(hass, domain))
except loader.IntegrationNotFound:
result.add_error("Integration not found: {}".format(domain))
continue
try:
component = integration.get_component()
except ImportError:
result.add_error("Component not found: {}".format(domain))
continue
if hasattr(component, 'CONFIG_SCHEMA'):
try:
config = component.CONFIG_SCHEMA(config)
result[domain] = config[domain]
except vol.Invalid as ex:
_comp_error(ex, domain, config)
continue
if (not hasattr(component, 'PLATFORM_SCHEMA') and
not hasattr(component, 'PLATFORM_SCHEMA_BASE')):
continue
platforms = []
for p_name, p_config in config_per_platform(config, domain):
# Validate component specific platform schema
try:
if hasattr(component, 'PLATFORM_SCHEMA_BASE'):
p_validated = \
component.PLATFORM_SCHEMA_BASE( # type: ignore
p_config)
else:
p_validated = component.PLATFORM_SCHEMA( # type: ignore
p_config)
except vol.Invalid as ex:
_comp_error(ex, domain, config)
continue
# Not all platform components follow same pattern for platforms
# So if p_name is None we are not going to validate platform
# (the automation component is one of them)
if p_name is None:
platforms.append(p_validated)
continue
try:
p_integration = hass.loop.run_until_complete(
loader.async_get_integration(hass, p_name))
except loader.IntegrationNotFound:
result.add_error(
"Integration {} not found when trying to verify its {} "
"platform.".format(p_name, domain))
continue
try:
platform = p_integration.get_platform(domain)
except ImportError:
result.add_error(
"Platform not found: {}.{}".format(domain, p_name))
continue
# Validate platform specific schema
if hasattr(platform, 'PLATFORM_SCHEMA'):
try:
p_validated = platform.PLATFORM_SCHEMA(p_validated)
except vol.Invalid as ex:
_comp_error(
ex, '{}.{}'.format(domain, p_name), p_validated)
continue
platforms.append(p_validated)
# Remove config for current component and add validated config back in.
for filter_comp in extract_domain_configs(config, domain):
del config[filter_comp]
result[domain] = platforms
return result
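# Typical invocation (via Home Assistant's script entry point; the config path
# and domain list below are only examples):
#   hass --script check_config --config /home/homeassistant/.homeassistant --info light,switch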
|
the-stack_106_20922
|
import six
from django.conf import settings
from django.shortcuts import redirect, reverse
from django.contrib import messages
from social_django.middleware import SocialAuthExceptionMiddleware
from social_core.exceptions import NotAllowedToDisconnect, SocialAuthBaseException
class SocialLoginExceptionMiddleware(SocialAuthExceptionMiddleware):
"""
Overrides the default exception middleware so we can redirect the user to the correct page
when an error happens and display the correct message.
"""
def process_exception(self, request, exception):
strategy = getattr(request, 'social_strategy', None)
if strategy is None or self.raise_exception(request, exception):
return
if isinstance(exception, SocialAuthBaseException):
backend = getattr(request, 'backend', None)
backend_name = getattr(backend, 'name', 'unknown-backend')
message = six.text_type(exception)
if isinstance(exception, NotAllowedToDisconnect):
message = 'Tidak bisa di disconnect karena {} satu-satunya metode login saat ini.'.format(
backend_name)
messages.info(request, message, extra_tags='danger')
if request.user.is_authenticated:
return redirect(reverse('account:settings'))
return redirect(settings.LOGIN_ERROR_URL)
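# To activate this middleware it would typically be listed in settings.MIDDLEWARE
# after Django's session/auth/message middleware; the dotted path below is an
# assumption about where this module lives in the project:
#   MIDDLEWARE = [
#       # ...
#       'accounts.middleware.SocialLoginExceptionMiddleware',
#   ]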
|
the-stack_106_20924
|
#!/usr/bin/env python3
# Copyright (c) 2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.script import *
from test_framework.blocktools import create_block, create_coinbase, add_witness_commitment, WITNESS_COMMITMENT_HEADER
from test_framework.key import CECKey, CPubKey
import time
import random
from binascii import hexlify
# The versionbit bit used to signal activation of SegWit
VB_WITNESS_BIT = 1
VB_PERIOD = 144
VB_ACTIVATION_THRESHOLD = 108
VB_TOP_BITS = 0x20000000
MAX_SIGOP_COST = 80000
'''
SegWit p2p test.
'''
# Calculate the virtual size of a witness block:
# (base + witness/4)
def get_virtual_size(witness_block):
base_size = len(witness_block.serialize())
total_size = len(witness_block.serialize(with_witness=True))
# the "+3" is so we round up
vsize = int((3*base_size + total_size + 3)/4)
return vsize
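# Worked example: with base_size = 1000 bytes and total_size = 1800 bytes
# (i.e. 800 bytes of witness data), vsize = int((3*1000 + 1800 + 3)/4) = 1200,
# matching ceil(base + witness/4).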
# Note: we can reduce code by using SingleNodeConnCB (in master, not 0.12)
class TestNode(NodeConnCB):
def __init__(self):
NodeConnCB.__init__(self)
self.connection = None
self.ping_counter = 1
self.last_pong = msg_pong(0)
self.sleep_time = 0.05
self.getdataset = set()
self.last_reject = None
def add_connection(self, conn):
self.connection = conn
# Wrapper for the NodeConn's send_message function
def send_message(self, message):
self.connection.send_message(message)
def on_inv(self, conn, message):
self.last_inv = message
def on_block(self, conn, message):
self.last_block = message.block
self.last_block.calc_sha256()
def on_getdata(self, conn, message):
for inv in message.inv:
self.getdataset.add(inv.hash)
self.last_getdata = message
def on_getheaders(self, conn, message):
self.last_getheaders = message
def on_pong(self, conn, message):
self.last_pong = message
def on_reject(self, conn, message):
self.last_reject = message
#print (message)
# Syncing helpers
def sync(self, test_function, timeout=60):
while timeout > 0:
with mininode_lock:
if test_function():
return
time.sleep(self.sleep_time)
timeout -= self.sleep_time
raise AssertionError("Sync failed to complete")
def sync_with_ping(self, timeout=60):
self.send_message(msg_ping(nonce=self.ping_counter))
test_function = lambda: self.last_pong.nonce == self.ping_counter
self.sync(test_function, timeout)
self.ping_counter += 1
return
def wait_for_block(self, blockhash, timeout=60):
test_function = lambda: self.last_block != None and self.last_block.sha256 == blockhash
self.sync(test_function, timeout)
return
def wait_for_getdata(self, timeout=60):
test_function = lambda: self.last_getdata != None
self.sync(test_function, timeout)
def wait_for_getheaders(self, timeout=60):
test_function = lambda: self.last_getheaders != None
self.sync(test_function, timeout)
def wait_for_inv(self, expected_inv, timeout=60):
test_function = lambda: self.last_inv != expected_inv
self.sync(test_function, timeout)
def announce_tx_and_wait_for_getdata(self, tx, timeout=60):
with mininode_lock:
self.last_getdata = None
self.send_message(msg_inv(inv=[CInv(1, tx.sha256)]))
self.wait_for_getdata(timeout)
return
def announce_block_and_wait_for_getdata(self, block, use_header, timeout=60):
with mininode_lock:
self.last_getdata = None
self.last_getheaders = None
msg = msg_headers()
msg.headers = [ CBlockHeader(block) ]
if use_header:
self.send_message(msg)
else:
self.send_message(msg_inv(inv=[CInv(2, block.sha256)]))
self.wait_for_getheaders()
self.send_message(msg)
self.wait_for_getdata()
return
def announce_block(self, block, use_header):
with mininode_lock:
self.last_getdata = None
if use_header:
msg = msg_headers()
msg.headers = [ CBlockHeader(block) ]
self.send_message(msg)
else:
self.send_message(msg_inv(inv=[CInv(2, block.sha256)]))
def request_block(self, blockhash, inv_type, timeout=60):
with mininode_lock:
self.last_block = None
self.send_message(msg_getdata(inv=[CInv(inv_type, blockhash)]))
self.wait_for_block(blockhash, timeout)
return self.last_block
def test_transaction_acceptance(self, tx, with_witness, accepted, reason=None):
tx_message = msg_tx(tx)
if with_witness:
tx_message = msg_witness_tx(tx)
self.send_message(tx_message)
self.sync_with_ping()
assert_equal(tx.hash in self.connection.rpc.getrawmempool(), accepted)
if (reason != None and not accepted):
# Check the rejection reason as well.
with mininode_lock:
assert_equal(self.last_reject.reason, reason)
# Test whether a witness block had the correct effect on the tip
def test_witness_block(self, block, accepted, with_witness=True):
if with_witness:
self.send_message(msg_witness_block(block))
else:
self.send_message(msg_block(block))
self.sync_with_ping()
assert_equal(self.connection.rpc.getbestblockhash() == block.hash, accepted)
# Used to keep track of anyone-can-spend outputs that we can use in the tests
class UTXO(object):
def __init__(self, sha256, n, nValue):
self.sha256 = sha256
self.n = n
self.nValue = nValue
# Helper for getting the script associated with a P2PKH
def GetP2PKHScript(pubkeyhash):
return CScript([CScriptOp(OP_DUP), CScriptOp(OP_HASH160), pubkeyhash, CScriptOp(OP_EQUALVERIFY), CScriptOp(OP_CHECKSIG)])
# Add signature for a P2PK witness program.
def sign_P2PK_witness_input(script, txTo, inIdx, hashtype, value, key):
tx_hash = SegwitVersion1SignatureHash(script, txTo, inIdx, hashtype, value)
signature = key.sign(tx_hash) + chr(hashtype).encode('latin-1')
txTo.wit.vtxinwit[inIdx].scriptWitness.stack = [signature, script]
txTo.rehash()
class SegWitTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 3
def setup_network(self):
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug", "-logtimemicros=1", "-whitelist=127.0.0.1"]))
# Start a node for testing IsStandard rules.
self.nodes.append(start_node(1, self.options.tmpdir, ["-debug", "-logtimemicros=1", "-whitelist=127.0.0.1", "-acceptnonstdtxn=0"]))
connect_nodes(self.nodes[0], 1)
# Disable segwit's bip9 parameter to simulate upgrading after activation.
self.nodes.append(start_node(2, self.options.tmpdir, ["-debug", "-whitelist=127.0.0.1", "-bip9params=segwit:0:0"]))
connect_nodes(self.nodes[0], 2)
''' Helpers '''
# Build a block on top of node0's tip.
def build_next_block(self, nVersion=VB_TOP_BITS):
tip = self.nodes[0].getbestblockhash()
height = self.nodes[0].getblockcount() + 1
block_time = self.nodes[0].getblockheader(tip)["mediantime"] + 1
block = create_block(int(tip, 16), create_coinbase(height), block_time)
block.nVersion = nVersion
block.rehash()
return block
# Adds list of transactions to block, adds witness commitment, then solves.
def update_witness_block_with_transactions(self, block, tx_list, nonce=0):
block.vtx.extend(tx_list)
add_witness_commitment(block, nonce)
block.solve()
return
''' Individual tests '''
def test_witness_services(self):
print("\tVerifying NODE_WITNESS service bit")
assert((self.test_node.connection.nServices & NODE_WITNESS) != 0)
# See if sending a regular transaction works, and create a utxo
# to use in later tests.
def test_non_witness_transaction(self):
# Mine a block with an anyone-can-spend coinbase,
# let it mature, then try to spend it.
print("\tTesting non-witness transaction")
block = self.build_next_block(nVersion=1)
block.solve()
self.test_node.send_message(msg_block(block))
self.test_node.sync_with_ping() # make sure the block was processed
txid = block.vtx[0].sha256
self.nodes[0].generate(99) # let the block mature
# Create a transaction that spends the coinbase
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(txid, 0), b""))
tx.vout.append(CTxOut(49*100000000, CScript([OP_TRUE])))
tx.calc_sha256()
# Check that serializing it with or without witness is the same
# This is a sanity check of our testing framework.
assert_equal(msg_tx(tx).serialize(), msg_witness_tx(tx).serialize())
self.test_node.send_message(msg_witness_tx(tx))
self.test_node.sync_with_ping() # make sure the tx was processed
assert(tx.hash in self.nodes[0].getrawmempool())
# Save this transaction for later
self.utxo.append(UTXO(tx.sha256, 0, 49*100000000))
self.nodes[0].generate(1)
# Verify that blocks with witnesses are rejected before activation.
def test_unnecessary_witness_before_segwit_activation(self):
print("\tTesting behavior of unnecessary witnesses")
# For now, rely on earlier tests to have created at least one utxo for
# us to use
assert(len(self.utxo) > 0)
assert(get_bip9_status(self.nodes[0], 'segwit')['status'] != 'active')
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-1000, CScript([OP_TRUE])))
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)])]
# Verify the hash with witness differs from the txid
# (otherwise our testing framework must be broken!)
tx.rehash()
assert(tx.sha256 != tx.calc_sha256(with_witness=True))
# Construct a segwit-signaling block that includes the transaction.
block = self.build_next_block(nVersion=(VB_TOP_BITS|(1 << VB_WITNESS_BIT)))
self.update_witness_block_with_transactions(block, [tx])
# Sending witness data before activation is not allowed (anti-spam
# rule).
self.test_node.test_witness_block(block, accepted=False)
# TODO: fix synchronization so we can test reject reason
# Right now, bitcoind delays sending reject messages for blocks
# until the future, making synchronization here difficult.
#assert_equal(self.test_node.last_reject.reason, "unexpected-witness")
# But it should not be permanently marked bad...
# Resend without witness information.
self.test_node.send_message(msg_block(block))
self.test_node.sync_with_ping()
assert_equal(self.nodes[0].getbestblockhash(), block.hash)
sync_blocks(self.nodes)
# Create a p2sh output -- this is so we can pass the standardness
# rules (an anyone-can-spend OP_TRUE would be rejected, if not wrapped
# in P2SH).
p2sh_program = CScript([OP_TRUE])
p2sh_pubkey = hash160(p2sh_program)
scriptPubKey = CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])
# Now check that unnecessary witnesses can't be used to blind a node
# to a transaction, eg by violating standardness checks.
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue-100000, scriptPubKey))
tx2.rehash()
self.test_node.test_transaction_acceptance(tx2, False, True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
# We'll add an unnecessary witness to this transaction that would cause
# it to be non-standard, to test that violating policy with a witness before
# segwit activation doesn't blind a node to a transaction. Transactions
# rejected for having a witness before segwit activation shouldn't be added
# to the rejection cache.
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), CScript([p2sh_program])))
tx3.vout.append(CTxOut(tx2.vout[0].nValue-100000, scriptPubKey))
tx3.wit.vtxinwit.append(CTxInWitness())
tx3.wit.vtxinwit[0].scriptWitness.stack = [b'a'*400000]
tx3.rehash()
# Note that this should be rejected for the premature witness reason,
# rather than a policy check, since segwit hasn't activated yet.
self.std_node.test_transaction_acceptance(tx3, True, False, b'no-witness-yet')
# If we send without witness, it should be accepted.
self.std_node.test_transaction_acceptance(tx3, False, True)
# Now create a new anyone-can-spend utxo for the next test.
tx4 = CTransaction()
tx4.vin.append(CTxIn(COutPoint(tx3.sha256, 0), CScript([p2sh_program])))
tx4.vout.append(CTxOut(tx3.vout[0].nValue-100000, CScript([OP_TRUE])))
tx4.rehash()
self.test_node.test_transaction_acceptance(tx3, False, True)
self.test_node.test_transaction_acceptance(tx4, False, True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
# Update our utxo list; we spent the first entry.
self.utxo.pop(0)
self.utxo.append(UTXO(tx4.sha256, 0, tx4.vout[0].nValue))
# Mine enough blocks for segwit's vb state to be 'started'.
def advance_to_segwit_started(self):
height = self.nodes[0].getblockcount()
# Will need to rewrite the tests here if we are past the first period
assert(height < VB_PERIOD - 1)
# Genesis block is 'defined'.
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'defined')
# Advance to end of period, status should now be 'started'
self.nodes[0].generate(VB_PERIOD-height-1)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'started')
# Mine enough blocks to lock in segwit, but don't activate.
# TODO: we could verify that lockin only happens at the right threshold of
# signalling blocks, rather than just at the right period boundary.
def advance_to_segwit_lockin(self):
height = self.nodes[0].getblockcount()
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'started')
# Advance to end of period, and verify lock-in happens at the end
self.nodes[0].generate(VB_PERIOD-1)
height = self.nodes[0].getblockcount()
assert((height % VB_PERIOD) == VB_PERIOD - 2)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'started')
self.nodes[0].generate(1)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'locked_in')
# Mine enough blocks to activate segwit.
# TODO: we could verify that activation only happens at the right threshold
# of signalling blocks, rather than just at the right period boundary.
def advance_to_segwit_active(self):
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'locked_in')
height = self.nodes[0].getblockcount()
self.nodes[0].generate(VB_PERIOD - (height%VB_PERIOD) - 2)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'locked_in')
self.nodes[0].generate(1)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'active')
# This test can only be run after segwit has activated
def test_witness_commitments(self):
print("\tTesting witness commitments")
# First try a correct witness commitment.
block = self.build_next_block()
add_witness_commitment(block)
block.solve()
# Test the test -- witness serialization should be different
assert(msg_witness_block(block).serialize() != msg_block(block).serialize())
# This empty block should be valid.
self.test_node.test_witness_block(block, accepted=True)
# Try to tweak the nonce
block_2 = self.build_next_block()
add_witness_commitment(block_2, nonce=28)
block_2.solve()
# The commitment should have changed!
assert(block_2.vtx[0].vout[-1] != block.vtx[0].vout[-1])
# This should also be valid.
self.test_node.test_witness_block(block_2, accepted=True)
# Now test commitments with actual transactions
assert (len(self.utxo) > 0)
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
# Let's construct a witness program
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
tx.vout.append(CTxOut(self.utxo[0].nValue-1000, scriptPubKey))
tx.rehash()
# tx2 will spend tx1, and send back to a regular anyone-can-spend address
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue-1000, witness_program))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
tx2.rehash()
block_3 = self.build_next_block()
self.update_witness_block_with_transactions(block_3, [tx, tx2], nonce=1)
# Add an extra OP_RETURN output that matches the witness commitment template,
# even though it has extra data after the incorrect commitment.
# This block should fail.
block_3.vtx[0].vout.append(CTxOut(0, CScript([OP_RETURN, WITNESS_COMMITMENT_HEADER + ser_uint256(2), 10])))
block_3.vtx[0].rehash()
block_3.hashMerkleRoot = block_3.calc_merkle_root()
block_3.rehash()
block_3.solve()
self.test_node.test_witness_block(block_3, accepted=False)
# Add a different commitment with different nonce, but in the
# right location, and with some funds burned(!).
# This should succeed (nValue shouldn't affect finding the
# witness commitment).
add_witness_commitment(block_3, nonce=0)
block_3.vtx[0].vout[0].nValue -= 1
block_3.vtx[0].vout[-1].nValue += 1
block_3.vtx[0].rehash()
block_3.hashMerkleRoot = block_3.calc_merkle_root()
block_3.rehash()
assert(len(block_3.vtx[0].vout) == 4) # 3 OP_returns
block_3.solve()
self.test_node.test_witness_block(block_3, accepted=True)
# Finally test that a block with no witness transactions can
# omit the commitment.
block_4 = self.build_next_block()
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
tx3.vout.append(CTxOut(tx.vout[0].nValue-1000, witness_program))
tx3.rehash()
block_4.vtx.append(tx3)
block_4.hashMerkleRoot = block_4.calc_merkle_root()
block_4.solve()
self.test_node.test_witness_block(block_4, with_witness=False, accepted=True)
# Update available utxo's for use in later test.
self.utxo.pop(0)
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
def test_block_malleability(self):
print("\tTesting witness block malleability")
# Make sure that a block that has too big a virtual size
# because of a too-large coinbase witness is not permanently
# marked bad.
block = self.build_next_block()
add_witness_commitment(block)
block.solve()
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack.append(b'a'*5000000)
assert(get_virtual_size(block) > MAX_BLOCK_BASE_SIZE)
# We can't send over the p2p network, because this is too big to relay
# TODO: repeat this test with a block that can be relayed
self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
assert(self.nodes[0].getbestblockhash() != block.hash)
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack.pop()
assert(get_virtual_size(block) < MAX_BLOCK_BASE_SIZE)
self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
assert(self.nodes[0].getbestblockhash() == block.hash)
# Now make sure that malleating the witness nonce doesn't
# result in a block permanently marked bad.
block = self.build_next_block()
add_witness_commitment(block)
block.solve()
# Change the nonce -- should not cause the block to be permanently
# failed
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ ser_uint256(1) ]
self.test_node.test_witness_block(block, accepted=False)
# Changing the witness nonce doesn't change the block hash
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ ser_uint256(0) ]
self.test_node.test_witness_block(block, accepted=True)
def test_witness_block_size(self):
print("\tTesting witness block size limit")
# TODO: Test that non-witness carrying blocks can't exceed 1MB
# Skipping this test for now; this is covered in p2p-fullblocktest.py
# Test that witness-bearing blocks are limited at ceil(base + wit/4) <= 1MB.
block = self.build_next_block()
assert(len(self.utxo) > 0)
# Create a P2WSH transaction.
# The witness program will be a bunch of OP_2DROP's, followed by OP_TRUE.
# This should give us plenty of room to tweak the spending tx's
# virtual size.
NUM_DROPS = 200 # 201 max ops per script!
NUM_OUTPUTS = 50
witness_program = CScript([OP_2DROP]*NUM_DROPS + [OP_TRUE])
witness_hash = uint256_from_str(sha256(witness_program))
scriptPubKey = CScript([OP_0, ser_uint256(witness_hash)])
prevout = COutPoint(self.utxo[0].sha256, self.utxo[0].n)
value = self.utxo[0].nValue
parent_tx = CTransaction()
parent_tx.vin.append(CTxIn(prevout, b""))
child_value = int(value/NUM_OUTPUTS)
for i in range(NUM_OUTPUTS):
parent_tx.vout.append(CTxOut(child_value, scriptPubKey))
parent_tx.vout[0].nValue -= 50000
assert(parent_tx.vout[0].nValue > 0)
parent_tx.rehash()
child_tx = CTransaction()
for i in range(NUM_OUTPUTS):
child_tx.vin.append(CTxIn(COutPoint(parent_tx.sha256, i), b""))
child_tx.vout = [CTxOut(value - 100000, CScript([OP_TRUE]))]
for i in range(NUM_OUTPUTS):
child_tx.wit.vtxinwit.append(CTxInWitness())
child_tx.wit.vtxinwit[-1].scriptWitness.stack = [b'a'*195]*(2*NUM_DROPS) + [witness_program]
child_tx.rehash()
self.update_witness_block_with_transactions(block, [parent_tx, child_tx])
vsize = get_virtual_size(block)
additional_bytes = (MAX_BLOCK_BASE_SIZE - vsize)*4
i = 0
while additional_bytes > 0:
# Add some more bytes to each input until we hit MAX_BLOCK_BASE_SIZE+1
extra_bytes = min(additional_bytes+1, 55)
block.vtx[-1].wit.vtxinwit[int(i/(2*NUM_DROPS))].scriptWitness.stack[i%(2*NUM_DROPS)] = b'a'*(195+extra_bytes)
additional_bytes -= extra_bytes
i += 1
block.vtx[0].vout.pop() # Remove old commitment
add_witness_commitment(block)
block.solve()
vsize = get_virtual_size(block)
assert_equal(vsize, MAX_BLOCK_BASE_SIZE + 1)
# Make sure that our test case would exceed the old max-network-message
# limit
assert(len(block.serialize(True)) > 2*1024*1024)
self.test_node.test_witness_block(block, accepted=False)
# Now resize the second transaction to make the block fit.
cur_length = len(block.vtx[-1].wit.vtxinwit[0].scriptWitness.stack[0])
block.vtx[-1].wit.vtxinwit[0].scriptWitness.stack[0] = b'a'*(cur_length-1)
block.vtx[0].vout.pop()
add_witness_commitment(block)
block.solve()
assert(get_virtual_size(block) == MAX_BLOCK_BASE_SIZE)
self.test_node.test_witness_block(block, accepted=True)
# Update available utxo's
self.utxo.pop(0)
self.utxo.append(UTXO(block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue))
# submitblock will try to add the nonce automatically, so that mining
# software doesn't need to worry about doing so itself.
def test_submit_block(self):
block = self.build_next_block()
# Try using a custom nonce and then don't supply it.
# This should not work.
add_witness_commitment(block, nonce=1)
block.vtx[0].wit = CTxWitness() # drop the nonce
block.solve()
self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
assert(self.nodes[0].getbestblockhash() != block.hash)
# Now redo commitment with the standard nonce, but let bitcoind fill it in.
add_witness_commitment(block, nonce=0)
block.vtx[0].wit = CTxWitness()
block.solve()
self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
assert_equal(self.nodes[0].getbestblockhash(), block.hash)
# This time, add a tx with non-empty witness, but don't supply
# the commitment.
block_2 = self.build_next_block()
add_witness_commitment(block_2)
block_2.solve()
# Drop commitment and nonce -- submitblock should not fill in.
block_2.vtx[0].vout.pop()
block_2.vtx[0].wit = CTxWitness()
self.nodes[0].submitblock(bytes_to_hex_str(block_2.serialize(True)))
# Tip should not advance!
assert(self.nodes[0].getbestblockhash() != block_2.hash)
# Consensus tests of extra witness data in a transaction.
def test_extra_witness_data(self):
print("\tTesting extra witness data in tx")
assert(len(self.utxo) > 0)
block = self.build_next_block()
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
# First try extra witness data on a tx that doesn't require a witness
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-200000, scriptPubKey))
tx.vout.append(CTxOut(1000, CScript([OP_TRUE]))) # non-witness output
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([])]
tx.rehash()
self.update_witness_block_with_transactions(block, [tx])
# Extra witness data should not be allowed.
self.test_node.test_witness_block(block, accepted=False)
# Try extra signature data. Ok if we're not spending a witness output.
block.vtx[1].wit.vtxinwit = []
block.vtx[1].vin[0].scriptSig = CScript([OP_0])
block.vtx[1].rehash()
add_witness_commitment(block)
block.solve()
self.test_node.test_witness_block(block, accepted=True)
# Now try extra witness/signature data on an input that DOES require a
# witness
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b"")) # witness output
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 1), b"")) # non-witness
tx2.vout.append(CTxOut(tx.vout[0].nValue, CScript([OP_TRUE])))
tx2.wit.vtxinwit.extend([CTxInWitness(), CTxInWitness()])
tx2.wit.vtxinwit[0].scriptWitness.stack = [ CScript([CScriptNum(1)]), CScript([CScriptNum(1)]), witness_program ]
tx2.wit.vtxinwit[1].scriptWitness.stack = [ CScript([OP_TRUE]) ]
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2])
# This has extra witness data, so it should fail.
self.test_node.test_witness_block(block, accepted=False)
# Now get rid of the extra witness, but add extra scriptSig data
tx2.vin[0].scriptSig = CScript([OP_TRUE])
tx2.vin[1].scriptSig = CScript([OP_TRUE])
tx2.wit.vtxinwit[0].scriptWitness.stack.pop(0)
tx2.wit.vtxinwit[1].scriptWitness.stack = []
tx2.rehash()
add_witness_commitment(block)
block.solve()
# This has extra signature data for a witness input, so it should fail.
self.test_node.test_witness_block(block, accepted=False)
# Now get rid of the extra scriptsig on the witness input, and verify
# success (even with extra scriptsig data in the non-witness input)
tx2.vin[0].scriptSig = b""
tx2.rehash()
add_witness_commitment(block)
block.solve()
self.test_node.test_witness_block(block, accepted=True)
# Update utxo for later tests
self.utxo.pop(0)
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
def test_max_witness_push_length(self):
''' Should only allow up to 520 byte pushes in witness stack '''
print("\tTesting maximum witness push size")
MAX_SCRIPT_ELEMENT_SIZE = 520
assert(len(self.utxo))
block = self.build_next_block()
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-100000, scriptPubKey))
tx.rehash()
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue-100000, CScript([OP_TRUE])))
tx2.wit.vtxinwit.append(CTxInWitness())
# First try a 521-byte stack element
tx2.wit.vtxinwit[0].scriptWitness.stack = [ b'a'*(MAX_SCRIPT_ELEMENT_SIZE+1), witness_program ]
tx2.rehash()
self.update_witness_block_with_transactions(block, [tx, tx2])
self.test_node.test_witness_block(block, accepted=False)
# Now reduce the length of the stack element
tx2.wit.vtxinwit[0].scriptWitness.stack[0] = b'a'*(MAX_SCRIPT_ELEMENT_SIZE)
add_witness_commitment(block)
block.solve()
self.test_node.test_witness_block(block, accepted=True)
# Update the utxo for later tests
self.utxo.pop()
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
def test_max_witness_program_length(self):
# Can create witness outputs that are long, but can't be greater than
# 10k bytes to successfully spend
print("\tTesting maximum witness program length")
assert(len(self.utxo))
MAX_PROGRAM_LENGTH = 10000
# This program is 19 max pushes (9937 bytes), then 64 more opcode-bytes.
long_witness_program = CScript([b'a'*520]*19 + [OP_DROP]*63 + [OP_TRUE])
assert(len(long_witness_program) == MAX_PROGRAM_LENGTH+1)
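        # The length works out because each 520-byte push serializes as
        # OP_PUSHDATA2 + 2 length bytes + 520 data bytes = 523 bytes, so 19
        # pushes take 9937 bytes; the 63 OP_DROPs and the OP_TRUE add 64
        # opcode bytes, giving 10001 bytes, one past MAX_PROGRAM_LENGTH.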
long_witness_hash = sha256(long_witness_program)
long_scriptPubKey = CScript([OP_0, long_witness_hash])
block = self.build_next_block()
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-100000, long_scriptPubKey))
tx.rehash()
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue-100000, CScript([OP_TRUE])))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a']*44 + [long_witness_program]
tx2.rehash()
self.update_witness_block_with_transactions(block, [tx, tx2])
self.test_node.test_witness_block(block, accepted=False)
# Try again with one less byte in the witness program
witness_program = CScript([b'a'*520]*19 + [OP_DROP]*62 + [OP_TRUE])
assert(len(witness_program) == MAX_PROGRAM_LENGTH)
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
tx.vout[0] = CTxOut(tx.vout[0].nValue, scriptPubKey)
tx.rehash()
tx2.vin[0].prevout.hash = tx.sha256
tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a']*43 + [witness_program]
tx2.rehash()
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx, tx2])
self.test_node.test_witness_block(block, accepted=True)
self.utxo.pop()
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
def test_witness_input_length(self):
''' Ensure that vin length must match vtxinwit length '''
print("\tTesting witness input length")
assert(len(self.utxo))
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
# Create a transaction that splits our utxo into many outputs
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
nValue = self.utxo[0].nValue
for i in range(10):
tx.vout.append(CTxOut(int(nValue/10), scriptPubKey))
tx.vout[0].nValue -= 1000
assert(tx.vout[0].nValue >= 0)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=True)
# Try various ways to spend tx that should all break.
# This "broken" transaction serializer will not normalize
# the length of vtxinwit.
class BrokenCTransaction(CTransaction):
def serialize_with_witness(self):
flags = 0
if not self.wit.is_null():
flags |= 1
r = b""
r += struct.pack("<i", self.nVersion)
if flags:
dummy = []
r += ser_vector(dummy)
r += struct.pack("<B", flags)
r += ser_vector(self.vin)
r += ser_vector(self.vout)
if flags & 1:
r += self.wit.serialize()
r += struct.pack("<I", self.nLockTime)
return r
tx2 = BrokenCTransaction()
for i in range(10):
tx2.vin.append(CTxIn(COutPoint(tx.sha256, i), b""))
tx2.vout.append(CTxOut(nValue-3000, CScript([OP_TRUE])))
# First try using a too long vtxinwit
for i in range(11):
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[i].scriptWitness.stack = [b'a', witness_program]
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2])
self.test_node.test_witness_block(block, accepted=False)
# Now try using a too short vtxinwit
tx2.wit.vtxinwit.pop()
tx2.wit.vtxinwit.pop()
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx2])
self.test_node.test_witness_block(block, accepted=False)
# Now make one of the intermediate witnesses be incorrect
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[-1].scriptWitness.stack = [b'a', witness_program]
tx2.wit.vtxinwit[5].scriptWitness.stack = [ witness_program ]
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx2])
self.test_node.test_witness_block(block, accepted=False)
# Fix the broken witness and the block should be accepted.
tx2.wit.vtxinwit[5].scriptWitness.stack = [b'a', witness_program]
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx2])
self.test_node.test_witness_block(block, accepted=True)
self.utxo.pop()
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
def test_witness_tx_relay_before_segwit_activation(self):
print("\tTesting relay of witness transactions")
# Generate a transaction that doesn't require a witness, but send it
# with a witness. Should be rejected for premature-witness, but should
# not be added to recently rejected list.
assert(len(self.utxo))
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-100000, CScript([OP_TRUE])))
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [ b'a' ]
tx.rehash()
tx_hash = tx.sha256
tx_value = tx.vout[0].nValue
# Verify that if a peer doesn't set nServices to include NODE_WITNESS,
# the getdata is just for the non-witness portion.
self.old_node.announce_tx_and_wait_for_getdata(tx)
assert(self.old_node.last_getdata.inv[0].type == 1)
# Since we haven't delivered the tx yet, inv'ing the same tx from
# a witness transaction ought not result in a getdata.
try:
self.test_node.announce_tx_and_wait_for_getdata(tx, timeout=2)
print("Error: duplicate tx getdata!")
assert(False)
except AssertionError as e:
pass
# Delivering this transaction with witness should fail (no matter who
        # it's from)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
assert_equal(len(self.nodes[1].getrawmempool()), 0)
self.old_node.test_transaction_acceptance(tx, with_witness=True, accepted=False)
self.test_node.test_transaction_acceptance(tx, with_witness=True, accepted=False)
# But eliminating the witness should fix it
self.test_node.test_transaction_acceptance(tx, with_witness=False, accepted=True)
# Cleanup: mine the first transaction and update utxo
self.nodes[0].generate(1)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
self.utxo.pop(0)
self.utxo.append(UTXO(tx_hash, 0, tx_value))
# After segwit activates, verify that mempool:
# - rejects transactions with unnecessary/extra witnesses
# - accepts transactions with valid witnesses
# and that witness transactions are relayed to non-upgraded peers.
def test_tx_relay_after_segwit_activation(self):
print("\tTesting relay of witness transactions")
# Generate a transaction that doesn't require a witness, but send it
# with a witness. Should be rejected because we can't use a witness
# when spending a non-witness output.
assert(len(self.utxo))
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-100000, CScript([OP_TRUE])))
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [ b'a' ]
tx.rehash()
tx_hash = tx.sha256
# Verify that unnecessary witnesses are rejected.
self.test_node.announce_tx_and_wait_for_getdata(tx)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
self.test_node.test_transaction_acceptance(tx, with_witness=True, accepted=False)
# Verify that removing the witness succeeds.
self.test_node.announce_tx_and_wait_for_getdata(tx)
self.test_node.test_transaction_acceptance(tx, with_witness=False, accepted=True)
# Now try to add extra witness data to a valid witness tx.
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx_hash, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue-100000, scriptPubKey))
tx2.rehash()
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
tx3.wit.vtxinwit.append(CTxInWitness())
# Add too-large for IsStandard witness and check that it does not enter reject filter
p2sh_program = CScript([OP_TRUE])
p2sh_pubkey = hash160(p2sh_program)
witness_program2 = CScript([b'a'*400000])
tx3.vout.append(CTxOut(tx2.vout[0].nValue-100000, CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])))
tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program2]
tx3.rehash()
# Node will not be blinded to the transaction
self.std_node.announce_tx_and_wait_for_getdata(tx3)
self.std_node.test_transaction_acceptance(tx3, True, False, b'tx-size')
self.std_node.announce_tx_and_wait_for_getdata(tx3)
self.std_node.test_transaction_acceptance(tx3, True, False, b'tx-size')
# Remove witness stuffing, instead add extra witness push on stack
tx3.vout[0] = CTxOut(tx2.vout[0].nValue-100000, CScript([OP_TRUE]))
tx3.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)]), witness_program ]
tx3.rehash()
self.test_node.test_transaction_acceptance(tx2, with_witness=True, accepted=True)
self.test_node.test_transaction_acceptance(tx3, with_witness=True, accepted=False)
# Get rid of the extra witness, and verify acceptance.
tx3.wit.vtxinwit[0].scriptWitness.stack = [ witness_program ]
# Also check that old_node gets a tx announcement, even though this is
# a witness transaction.
self.old_node.wait_for_inv(CInv(1, tx2.sha256)) # wait until tx2 was inv'ed
self.test_node.test_transaction_acceptance(tx3, with_witness=True, accepted=True)
self.old_node.wait_for_inv(CInv(1, tx3.sha256))
# Test that getrawtransaction returns correct witness information
# hash, size, vsize
raw_tx = self.nodes[0].getrawtransaction(tx3.hash, 1)
assert_equal(int(raw_tx["hash"], 16), tx3.calc_sha256(True))
assert_equal(raw_tx["size"], len(tx3.serialize_with_witness()))
vsize = (len(tx3.serialize_with_witness()) + 3*len(tx3.serialize_without_witness()) + 3) / 4
assert_equal(raw_tx["vsize"], vsize)
assert_equal(len(raw_tx["vin"][0]["txinwitness"]), 1)
assert_equal(raw_tx["vin"][0]["txinwitness"][0], hexlify(witness_program).decode('ascii'))
assert(vsize != raw_tx["size"])
# Cleanup: mine the transactions and update utxo for next test
self.nodes[0].generate(1)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
self.utxo.pop(0)
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
# Test that block requests to NODE_WITNESS peer are with MSG_WITNESS_FLAG
# This is true regardless of segwit activation.
# Also test that we don't ask for blocks from unupgraded peers
def test_block_relay(self, segwit_activated):
print("\tTesting block relay")
blocktype = 2|MSG_WITNESS_FLAG
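        # MSG_WITNESS_FLAG is a high inv-type bit (1 << 30); OR'ed with
        # MSG_BLOCK (2) it gives MSG_WITNESS_BLOCK, the getdata type a
        # witness-aware peer is expected to use when requesting blocks.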
# test_node has set NODE_WITNESS, so all getdata requests should be for
# witness blocks.
# Test announcing a block via inv results in a getdata, and that
# announcing a version 4 or random VB block with a header results in a getdata
block1 = self.build_next_block()
block1.solve()
self.test_node.announce_block_and_wait_for_getdata(block1, use_header=False)
assert(self.test_node.last_getdata.inv[0].type == blocktype)
self.test_node.test_witness_block(block1, True)
# Rubixzcoin: Blocks with nVersion < VB_TOP_BITS are rejected
# block2 = self.build_next_block(nVersion=4)
# block2.solve()
# self.test_node.announce_block_and_wait_for_getdata(block2, use_header=True)
# assert(self.test_node.last_getdata.inv[0].type == blocktype)
# self.test_node.test_witness_block(block2, True)
block3 = self.build_next_block(nVersion=(VB_TOP_BITS | (1<<15)))
block3.solve()
self.test_node.announce_block_and_wait_for_getdata(block3, use_header=True)
assert(self.test_node.last_getdata.inv[0].type == blocktype)
self.test_node.test_witness_block(block3, True)
# Check that we can getdata for witness blocks or regular blocks,
# and the right thing happens.
        if not segwit_activated:
# Before activation, we should be able to request old blocks with
# or without witness, and they should be the same.
chain_height = self.nodes[0].getblockcount()
# Pick 10 random blocks on main chain, and verify that getdata's
# for MSG_BLOCK, MSG_WITNESS_BLOCK, and rpc getblock() are equal.
all_heights = list(range(chain_height+1))
random.shuffle(all_heights)
all_heights = all_heights[0:10]
for height in all_heights:
block_hash = self.nodes[0].getblockhash(height)
rpc_block = self.nodes[0].getblock(block_hash, False)
block_hash = int(block_hash, 16)
block = self.test_node.request_block(block_hash, 2)
wit_block = self.test_node.request_block(block_hash, 2|MSG_WITNESS_FLAG)
assert_equal(block.serialize(True), wit_block.serialize(True))
assert_equal(block.serialize(), hex_str_to_bytes(rpc_block))
else:
# After activation, witness blocks and non-witness blocks should
# be different. Verify rpc getblock() returns witness blocks, while
# getdata respects the requested type.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [])
# This gives us a witness commitment.
assert(len(block.vtx[0].wit.vtxinwit) == 1)
assert(len(block.vtx[0].wit.vtxinwit[0].scriptWitness.stack) == 1)
self.test_node.test_witness_block(block, accepted=True)
# Now try to retrieve it...
rpc_block = self.nodes[0].getblock(block.hash, False)
non_wit_block = self.test_node.request_block(block.sha256, 2)
wit_block = self.test_node.request_block(block.sha256, 2|MSG_WITNESS_FLAG)
assert_equal(wit_block.serialize(True), hex_str_to_bytes(rpc_block))
assert_equal(wit_block.serialize(False), non_wit_block.serialize())
assert_equal(wit_block.serialize(True), block.serialize(True))
# Test size, vsize, weight
rpc_details = self.nodes[0].getblock(block.hash, True)
assert_equal(rpc_details["size"], len(block.serialize(True)))
assert_equal(rpc_details["strippedsize"], len(block.serialize(False)))
weight = 3*len(block.serialize(False)) + len(block.serialize(True))
assert_equal(rpc_details["weight"], weight)
# Upgraded node should not ask for blocks from unupgraded
# Rubixzcoin: Blocks with nVersion < VB_TOP_BITS are rejected
block4 = self.build_next_block(nVersion=(VB_TOP_BITS | (1<<15)))
block4.solve()
self.old_node.getdataset = set()
# Blocks can be requested via direct-fetch (immediately upon processing the announcement)
# or via parallel download (with an indeterminate delay from processing the announcement)
# so to test that a block is NOT requested, we could guess a time period to sleep for,
# and then check. We can avoid the sleep() by taking advantage of transaction getdata's
# being processed after block getdata's, and announce a transaction as well,
# and then check to see if that particular getdata has been received.
self.old_node.announce_block(block4, use_header=False)
self.old_node.announce_tx_and_wait_for_getdata(block4.vtx[0])
assert(block4.sha256 not in self.old_node.getdataset)
# V0 segwit outputs should be standard after activation, but not before.
def test_standardness_v0(self, segwit_activated):
print("\tTesting standardness of v0 outputs (%s activation)" % ("after" if segwit_activated else "before"))
assert(len(self.utxo))
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
p2sh_pubkey = hash160(witness_program)
p2sh_scriptPubKey = CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])
# First prepare a p2sh output (so that spending it will pass standardness)
p2sh_tx = CTransaction()
p2sh_tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")]
p2sh_tx.vout = [CTxOut(self.utxo[0].nValue-100000, p2sh_scriptPubKey)]
p2sh_tx.rehash()
# Mine it on test_node to create the confirmed output.
self.test_node.test_transaction_acceptance(p2sh_tx, with_witness=True, accepted=True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
# Now test standardness of v0 P2WSH outputs.
# Start by creating a transaction with two outputs.
tx = CTransaction()
tx.vin = [CTxIn(COutPoint(p2sh_tx.sha256, 0), CScript([witness_program]))]
tx.vout = [CTxOut(p2sh_tx.vout[0].nValue-1000000, scriptPubKey)]
tx.vout.append(CTxOut(800000, scriptPubKey)) # Might burn this later
tx.rehash()
self.std_node.test_transaction_acceptance(tx, with_witness=True, accepted=segwit_activated)
        # Now create something that looks like a P2WPKH output (OP_0 plus a
        # 20-byte hash160). This won't be spendable.
scriptPubKey = CScript([OP_0, hash160(witness_hash)])
tx2 = CTransaction()
if segwit_activated:
# if tx was accepted, then we spend the second output.
tx2.vin = [CTxIn(COutPoint(tx.sha256, 1), b"")]
tx2.vout = [CTxOut(700000, scriptPubKey)]
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
else:
# if tx wasn't accepted, we just re-spend the p2sh output we started with.
tx2.vin = [CTxIn(COutPoint(p2sh_tx.sha256, 0), CScript([witness_program]))]
tx2.vout = [CTxOut(p2sh_tx.vout[0].nValue-100000, scriptPubKey)]
tx2.rehash()
self.std_node.test_transaction_acceptance(tx2, with_witness=True, accepted=segwit_activated)
# Now update self.utxo for later tests.
tx3 = CTransaction()
if segwit_activated:
# tx and tx2 were both accepted. Don't bother trying to reclaim the
# P2PKH output; just send tx's first output back to an anyone-can-spend.
sync_mempools([self.nodes[0], self.nodes[1]])
tx3.vin = [CTxIn(COutPoint(tx.sha256, 0), b"")]
tx3.vout = [CTxOut(tx.vout[0].nValue-100000, CScript([OP_TRUE]))]
tx3.wit.vtxinwit.append(CTxInWitness())
tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
tx3.rehash()
self.test_node.test_transaction_acceptance(tx3, with_witness=True, accepted=True)
else:
# tx and tx2 didn't go anywhere; just clean up the p2sh_tx output.
tx3.vin = [CTxIn(COutPoint(p2sh_tx.sha256, 0), CScript([witness_program]))]
tx3.vout = [CTxOut(p2sh_tx.vout[0].nValue-100000, witness_program)]
tx3.rehash()
self.test_node.test_transaction_acceptance(tx3, with_witness=True, accepted=True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
self.utxo.pop(0)
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
assert_equal(len(self.nodes[1].getrawmempool()), 0)
# Verify that future segwit upgraded transactions are non-standard,
# but valid in blocks. Can run this before and after segwit activation.
def test_segwit_versions(self):
print("\tTesting standardness/consensus for segwit versions (0-16)")
assert(len(self.utxo))
        NUM_TESTS = 17 # will test OP_0, OP_1, ..., OP_16
if (len(self.utxo) < NUM_TESTS):
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
split_value = (self.utxo[0].nValue - 400000) // NUM_TESTS
for i in range(NUM_TESTS):
tx.vout.append(CTxOut(split_value, CScript([OP_TRUE])))
tx.rehash()
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=True)
self.utxo.pop(0)
for i in range(NUM_TESTS):
self.utxo.append(UTXO(tx.sha256, i, split_value))
sync_blocks(self.nodes)
temp_utxo = []
tx = CTransaction()
count = 0
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
assert_equal(len(self.nodes[1].getrawmempool()), 0)
for version in list(range(OP_1, OP_16+1)) + [OP_0]:
count += 1
# First try to spend to a future version segwit scriptPubKey.
scriptPubKey = CScript([CScriptOp(version), witness_hash])
tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")]
tx.vout = [CTxOut(self.utxo[0].nValue-100000, scriptPubKey)]
tx.rehash()
self.std_node.test_transaction_acceptance(tx, with_witness=True, accepted=False)
self.test_node.test_transaction_acceptance(tx, with_witness=True, accepted=True)
self.utxo.pop(0)
temp_utxo.append(UTXO(tx.sha256, 0, tx.vout[0].nValue))
self.nodes[0].generate(1) # Mine all the transactions
sync_blocks(self.nodes)
assert(len(self.nodes[0].getrawmempool()) == 0)
# Finally, verify that version 0 -> version 1 transactions
# are non-standard
scriptPubKey = CScript([CScriptOp(OP_1), witness_hash])
tx2 = CTransaction()
tx2.vin = [CTxIn(COutPoint(tx.sha256, 0), b"")]
tx2.vout = [CTxOut(tx.vout[0].nValue-100000, scriptPubKey)]
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [ witness_program ]
tx2.rehash()
# Gets accepted to test_node, because standardness of outputs isn't
# checked with fRequireStandard
self.test_node.test_transaction_acceptance(tx2, with_witness=True, accepted=True)
self.std_node.test_transaction_acceptance(tx2, with_witness=True, accepted=False)
temp_utxo.pop() # last entry in temp_utxo was the output we just spent
temp_utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
# Spend everything in temp_utxo back to an OP_TRUE output.
tx3 = CTransaction()
total_value = 0
for i in temp_utxo:
tx3.vin.append(CTxIn(COutPoint(i.sha256, i.n), b""))
tx3.wit.vtxinwit.append(CTxInWitness())
total_value += i.nValue
tx3.wit.vtxinwit[-1].scriptWitness.stack = [witness_program]
tx3.vout.append(CTxOut(total_value - 100000, CScript([OP_TRUE])))
tx3.rehash()
# Spending a higher version witness output is not allowed by policy,
# even with fRequireStandard=false.
self.test_node.test_transaction_acceptance(tx3, with_witness=True, accepted=False)
self.test_node.sync_with_ping()
with mininode_lock:
assert(b"reserved for soft-fork upgrades" in self.test_node.last_reject.reason)
# Building a block with the transaction must be valid, however.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2, tx3])
self.test_node.test_witness_block(block, accepted=True)
sync_blocks(self.nodes)
# Add utxo to our list
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
def test_premature_coinbase_witness_spend(self):
print("\tTesting premature coinbase witness spend")
block = self.build_next_block()
# Change the output of the block to be a witness output.
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
block.vtx[0].vout[0].scriptPubKey = scriptPubKey
# This next line will rehash the coinbase and update the merkle
# root, and solve.
self.update_witness_block_with_transactions(block, [])
self.test_node.test_witness_block(block, accepted=True)
spend_tx = CTransaction()
spend_tx.vin = [CTxIn(COutPoint(block.vtx[0].sha256, 0), b"")]
spend_tx.vout = [CTxOut(block.vtx[0].vout[0].nValue, witness_program)]
spend_tx.wit.vtxinwit.append(CTxInWitness())
spend_tx.wit.vtxinwit[0].scriptWitness.stack = [ witness_program ]
spend_tx.rehash()
# Now test a premature spend.
self.nodes[0].generate(98)
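        # Assuming this chain keeps Bitcoin's default COINBASE_MATURITY of 100,
        # the spending block must be at least 100 blocks after the block that
        # contains the coinbase. After generate(98) the next block is only 99
        # blocks later, so the spend below should be rejected; one more block
        # makes it valid.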
sync_blocks(self.nodes)
block2 = self.build_next_block()
self.update_witness_block_with_transactions(block2, [spend_tx])
self.test_node.test_witness_block(block2, accepted=False)
# Advancing one more block should allow the spend.
self.nodes[0].generate(1)
block2 = self.build_next_block()
self.update_witness_block_with_transactions(block2, [spend_tx])
self.test_node.test_witness_block(block2, accepted=True)
sync_blocks(self.nodes)
def test_signature_version_1(self):
print("\tTesting segwit signature hash version 1")
key = CECKey()
key.set_secretbytes(b"9")
pubkey = CPubKey(key.get_pubkey())
witness_program = CScript([pubkey, CScriptOp(OP_CHECKSIG)])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
# First create a witness output for use in the tests.
assert(len(self.utxo))
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-100000, scriptPubKey))
tx.rehash()
self.test_node.test_transaction_acceptance(tx, with_witness=True, accepted=True)
# Mine this transaction in preparation for following tests.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=True)
sync_blocks(self.nodes)
self.utxo.pop(0)
# Test each hashtype
prev_utxo = UTXO(tx.sha256, 0, tx.vout[0].nValue)
for sigflag in [ 0, SIGHASH_ANYONECANPAY ]:
for hashtype in [SIGHASH_ALL, SIGHASH_NONE, SIGHASH_SINGLE]:
hashtype |= sigflag
block = self.build_next_block()
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(prev_utxo.sha256, prev_utxo.n), b""))
tx.vout.append(CTxOut(prev_utxo.nValue - 100000, scriptPubKey))
tx.wit.vtxinwit.append(CTxInWitness())
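                # BIP143 signature hashes commit to the amount of the output
                # being spent, so signing with a value that is off by one in
                # either direction should make the signature, and hence the
                # block, invalid.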
# Too-large input value
sign_P2PK_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue+1, key)
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=False)
# Too-small input value
sign_P2PK_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue-1, key)
block.vtx.pop() # remove last tx
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=False)
# Now try correct value
sign_P2PK_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue, key)
block.vtx.pop()
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=True)
prev_utxo = UTXO(tx.sha256, 0, tx.vout[0].nValue)
# Test combinations of signature hashes.
# Split the utxo into a lot of outputs.
# Randomly choose up to 10 to spend, sign with different hashtypes, and
# output to a random number of outputs. Repeat NUM_TESTS times.
# Ensure that we've tested a situation where we use SIGHASH_SINGLE with
# an input index > number of outputs.
NUM_TESTS = 500
temp_utxos = []
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(prev_utxo.sha256, prev_utxo.n), b""))
split_value = prev_utxo.nValue // NUM_TESTS
for i in range(NUM_TESTS):
tx.vout.append(CTxOut(split_value, scriptPubKey))
tx.wit.vtxinwit.append(CTxInWitness())
sign_P2PK_witness_input(witness_program, tx, 0, SIGHASH_ALL, prev_utxo.nValue, key)
for i in range(NUM_TESTS):
temp_utxos.append(UTXO(tx.sha256, i, split_value))
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=True)
block = self.build_next_block()
used_sighash_single_out_of_bounds = False
for i in range(NUM_TESTS):
# Ping regularly to keep the connection alive
if (not i % 100):
self.test_node.sync_with_ping()
# Choose random number of inputs to use.
num_inputs = random.randint(1, 10)
# Create a slight bias for producing more utxos
num_outputs = random.randint(1, 11)
random.shuffle(temp_utxos)
assert(len(temp_utxos) > num_inputs)
tx = CTransaction()
total_value = 0
for i in range(num_inputs):
tx.vin.append(CTxIn(COutPoint(temp_utxos[i].sha256, temp_utxos[i].n), b""))
tx.wit.vtxinwit.append(CTxInWitness())
total_value += temp_utxos[i].nValue
split_value = total_value // num_outputs
for i in range(num_outputs):
tx.vout.append(CTxOut(split_value, scriptPubKey))
for i in range(num_inputs):
# Now try to sign each input, using a random hashtype.
anyonecanpay = 0
if random.randint(0, 1):
anyonecanpay = SIGHASH_ANYONECANPAY
hashtype = random.randint(1, 3) | anyonecanpay
sign_P2PK_witness_input(witness_program, tx, i, hashtype, temp_utxos[i].nValue, key)
if (hashtype == SIGHASH_SINGLE and i >= num_outputs):
used_sighash_single_out_of_bounds = True
tx.rehash()
for i in range(num_outputs):
temp_utxos.append(UTXO(tx.sha256, i, split_value))
temp_utxos = temp_utxos[num_inputs:]
block.vtx.append(tx)
# Test the block periodically, if we're close to maxblocksize
if (get_virtual_size(block) > MAX_BLOCK_BASE_SIZE - 1000):
self.update_witness_block_with_transactions(block, [])
self.test_node.test_witness_block(block, accepted=True)
block = self.build_next_block()
if (not used_sighash_single_out_of_bounds):
print("WARNING: this test run didn't attempt SIGHASH_SINGLE with out-of-bounds index value")
# Test the transactions we've added to the block
if (len(block.vtx) > 1):
self.update_witness_block_with_transactions(block, [])
self.test_node.test_witness_block(block, accepted=True)
# Now test witness version 0 P2PKH transactions
pubkeyhash = hash160(pubkey)
scriptPKH = CScript([OP_0, pubkeyhash])
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(temp_utxos[0].sha256, temp_utxos[0].n), b""))
tx.vout.append(CTxOut(temp_utxos[0].nValue, scriptPKH))
tx.wit.vtxinwit.append(CTxInWitness())
sign_P2PK_witness_input(witness_program, tx, 0, SIGHASH_ALL, temp_utxos[0].nValue, key)
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue, CScript([OP_TRUE])))
script = GetP2PKHScript(pubkeyhash)
sig_hash = SegwitVersion1SignatureHash(script, tx2, 0, SIGHASH_ALL, tx.vout[0].nValue)
signature = key.sign(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL
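        # For P2WPKH spends, BIP143 defines the scriptCode as the classic
        # P2PKH script for the same key hash, which is why GetP2PKHScript()
        # is used to build the signature hash here.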
# Check that we can't have a scriptSig
tx2.vin[0].scriptSig = CScript([signature, pubkey])
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx, tx2])
self.test_node.test_witness_block(block, accepted=False)
# Move the signature to the witness.
block.vtx.pop()
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [signature, pubkey]
tx2.vin[0].scriptSig = b""
tx2.rehash()
self.update_witness_block_with_transactions(block, [tx2])
self.test_node.test_witness_block(block, accepted=True)
temp_utxos.pop(0)
# Update self.utxos for later tests. Just spend everything in
# temp_utxos to a corresponding entry in self.utxos
tx = CTransaction()
index = 0
for i in temp_utxos:
# Just spend to our usual anyone-can-spend output
# Use SIGHASH_SINGLE|SIGHASH_ANYONECANPAY so we can build up
# the signatures as we go.
tx.vin.append(CTxIn(COutPoint(i.sha256, i.n), b""))
tx.vout.append(CTxOut(i.nValue, CScript([OP_TRUE])))
tx.wit.vtxinwit.append(CTxInWitness())
sign_P2PK_witness_input(witness_program, tx, index, SIGHASH_SINGLE|SIGHASH_ANYONECANPAY, i.nValue, key)
index += 1
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=True)
for i in range(len(tx.vout)):
self.utxo.append(UTXO(tx.sha256, i, tx.vout[i].nValue))
# Test P2SH wrapped witness programs.
def test_p2sh_witness(self, segwit_activated):
print("\tTesting P2SH witness transactions")
assert(len(self.utxo))
# Prepare the p2sh-wrapped witness output
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
p2wsh_pubkey = CScript([OP_0, witness_hash])
p2sh_witness_hash = hash160(p2wsh_pubkey)
scriptPubKey = CScript([OP_HASH160, p2sh_witness_hash, OP_EQUAL])
scriptSig = CScript([p2wsh_pubkey]) # a push of the redeem script
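        # In P2SH-P2WSH the scriptSig is exactly this one push of the
        # OP_0 <32-byte hash> witness program (the redeem script); the actual
        # unlocking data and the witnessScript itself go in the witness.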
# Fund the P2SH output
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-100000, scriptPubKey))
tx.rehash()
# Verify mempool acceptance and block validity
self.test_node.test_transaction_acceptance(tx, with_witness=False, accepted=True)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=True, with_witness=segwit_activated)
sync_blocks(self.nodes)
# Now test attempts to spend the output.
spend_tx = CTransaction()
spend_tx.vin.append(CTxIn(COutPoint(tx.sha256, 0), scriptSig))
spend_tx.vout.append(CTxOut(tx.vout[0].nValue-100000, CScript([OP_TRUE])))
spend_tx.rehash()
# This transaction should not be accepted into the mempool pre- or
# post-segwit. Mempool acceptance will use SCRIPT_VERIFY_WITNESS which
# will require a witness to spend a witness program regardless of
# segwit activation. Note that older bitcoind's that are not
# segwit-aware would also reject this for failing CLEANSTACK.
self.test_node.test_transaction_acceptance(spend_tx, with_witness=False, accepted=False)
# Try to put the witness script in the scriptSig, should also fail.
spend_tx.vin[0].scriptSig = CScript([p2wsh_pubkey, b'a'])
spend_tx.rehash()
self.test_node.test_transaction_acceptance(spend_tx, with_witness=False, accepted=False)
# Now put the witness script in the witness, should succeed after
# segwit activates.
spend_tx.vin[0].scriptSig = scriptSig
spend_tx.rehash()
spend_tx.wit.vtxinwit.append(CTxInWitness())
spend_tx.wit.vtxinwit[0].scriptWitness.stack = [ b'a', witness_program ]
# Verify mempool acceptance
self.test_node.test_transaction_acceptance(spend_tx, with_witness=True, accepted=segwit_activated)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [spend_tx])
# If we're before activation, then sending this without witnesses
# should be valid. If we're after activation, then sending this with
# witnesses should be valid.
if segwit_activated:
self.test_node.test_witness_block(block, accepted=True)
else:
self.test_node.test_witness_block(block, accepted=True, with_witness=False)
# Update self.utxo
self.utxo.pop(0)
self.utxo.append(UTXO(spend_tx.sha256, 0, spend_tx.vout[0].nValue))
# Test the behavior of starting up a segwit-aware node after the softfork
# has activated. As segwit requires different block data than pre-segwit
# nodes would have stored, this requires special handling.
# To enable this test, pass --oldbinary=<path-to-pre-segwit-bitcoind> to
# the test.
def test_upgrade_after_activation(self, node, node_id):
print("\tTesting software upgrade after softfork activation")
assert(node_id != 0) # node0 is assumed to be a segwit-active bitcoind
# Make sure the nodes are all up
sync_blocks(self.nodes)
# Restart with the new binary
stop_node(node, node_id)
self.nodes[node_id] = start_node(node_id, self.options.tmpdir, ["-debug"])
connect_nodes(self.nodes[0], node_id)
sync_blocks(self.nodes)
# Make sure that this peer thinks segwit has activated.
assert(get_bip9_status(node, 'segwit')['status'] == "active")
        # Make sure this peer's blocks match those of node0.
height = node.getblockcount()
while height >= 0:
block_hash = node.getblockhash(height)
assert_equal(block_hash, self.nodes[0].getblockhash(height))
assert_equal(self.nodes[0].getblock(block_hash), node.getblock(block_hash))
height -= 1
def test_witness_sigops(self):
'''Ensure sigop counting is correct inside witnesses.'''
print("\tTesting sigops limit")
assert(len(self.utxo))
# Keep this under MAX_OPS_PER_SCRIPT (201)
witness_program = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKMULTISIG]*5 + [OP_CHECKSIG]*193 + [OP_ENDIF])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
sigops_per_script = 20*5 + 193*1
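        # Witness sigops use "accurate" counting: an OP_CHECKMULTISIG that is
        # not preceded by an OP_1..OP_16 push costs MAX_PUBKEYS_PER_MULTISIG
        # (20) sigops and each OP_CHECKSIG costs 1, so spending one of these
        # outputs costs 5*20 + 193 = 293 sigops.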
# We'll produce 2 extra outputs, one with a program that would take us
# over max sig ops, and one with a program that would exactly reach max
# sig ops
outputs = (MAX_SIGOP_COST // sigops_per_script) + 2
extra_sigops_available = MAX_SIGOP_COST % sigops_per_script
# We chose the number of checkmultisigs/checksigs to make this work:
assert(extra_sigops_available < 100) # steer clear of MAX_OPS_PER_SCRIPT
# This script, when spent with the first
# N(=MAX_SIGOP_COST//sigops_per_script) outputs of our transaction,
# would push us just over the block sigop limit.
witness_program_toomany = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKSIG]*(extra_sigops_available + 1) + [OP_ENDIF])
witness_hash_toomany = sha256(witness_program_toomany)
scriptPubKey_toomany = CScript([OP_0, witness_hash_toomany])
# If we spend this script instead, we would exactly reach our sigop
# limit (for witness sigops).
witness_program_justright = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKSIG]*(extra_sigops_available) + [OP_ENDIF])
witness_hash_justright = sha256(witness_program_justright)
scriptPubKey_justright = CScript([OP_0, witness_hash_justright])
# First split our available utxo into a bunch of outputs
split_value = self.utxo[0].nValue // outputs
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
for i in range(outputs):
tx.vout.append(CTxOut(split_value, scriptPubKey))
tx.vout[-2].scriptPubKey = scriptPubKey_toomany
tx.vout[-1].scriptPubKey = scriptPubKey_justright
tx.rehash()
block_1 = self.build_next_block()
self.update_witness_block_with_transactions(block_1, [tx])
self.test_node.test_witness_block(block_1, accepted=True)
tx2 = CTransaction()
# If we try to spend the first n-1 outputs from tx, that should be
# too many sigops.
total_value = 0
for i in range(outputs-1):
tx2.vin.append(CTxIn(COutPoint(tx.sha256, i), b""))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[-1].scriptWitness.stack = [ witness_program ]
total_value += tx.vout[i].nValue
tx2.wit.vtxinwit[-1].scriptWitness.stack = [ witness_program_toomany ]
tx2.vout.append(CTxOut(total_value, CScript([OP_TRUE])))
tx2.rehash()
block_2 = self.build_next_block()
self.update_witness_block_with_transactions(block_2, [tx2])
self.test_node.test_witness_block(block_2, accepted=False)
# Try dropping the last input in tx2, and add an output that has
# too many sigops (contributing to legacy sigop count).
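        # Legacy (non-witness) sigops are scaled by the witness scale factor
        # of 4 in the total sigop cost, so extra_sigops_available // 4 + 1
        # bare CHECKSIGs in an output are enough to exceed MAX_SIGOP_COST.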
checksig_count = (extra_sigops_available // 4) + 1
scriptPubKey_checksigs = CScript([OP_CHECKSIG]*checksig_count)
tx2.vout.append(CTxOut(0, scriptPubKey_checksigs))
tx2.vin.pop()
tx2.wit.vtxinwit.pop()
tx2.vout[0].nValue -= tx.vout[-2].nValue
tx2.rehash()
block_3 = self.build_next_block()
self.update_witness_block_with_transactions(block_3, [tx2])
self.test_node.test_witness_block(block_3, accepted=False)
# If we drop the last checksig in this output, the tx should succeed.
block_4 = self.build_next_block()
tx2.vout[-1].scriptPubKey = CScript([OP_CHECKSIG]*(checksig_count-1))
tx2.rehash()
self.update_witness_block_with_transactions(block_4, [tx2])
self.test_node.test_witness_block(block_4, accepted=True)
# Reset the tip back down for the next test
sync_blocks(self.nodes)
for x in self.nodes:
x.invalidateblock(block_4.hash)
# Try replacing the last input of tx2 to be spending the last
# output of tx
block_5 = self.build_next_block()
tx2.vout.pop()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, outputs-1), b""))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[-1].scriptWitness.stack = [ witness_program_justright ]
tx2.rehash()
self.update_witness_block_with_transactions(block_5, [tx2])
self.test_node.test_witness_block(block_5, accepted=True)
# TODO: test p2sh sigop counting
def test_getblocktemplate_before_lockin(self):
print("\tTesting getblocktemplate setting of segwit versionbit (before lockin)")
# Node0 is segwit aware, node2 is not.
for node in [self.nodes[0], self.nodes[2]]:
gbt_results = node.getblocktemplate()
block_version = gbt_results['version']
            # Even though the segwit rule is not requested here, the
            # segwit-aware node (node0) should still be signalling for segwit
            # activation; the non-segwit node (node2) should not.
assert_equal((block_version & (1 << VB_WITNESS_BIT) != 0), node == self.nodes[0])
# If we don't specify the segwit rule, then we won't get a default
# commitment.
assert('default_witness_commitment' not in gbt_results)
# Workaround:
# Can either change the tip, or change the mempool and wait 5 seconds
# to trigger a recomputation of getblocktemplate.
txid = int(self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1), 16)
# Using mocktime lets us avoid sleep()
sync_mempools(self.nodes)
self.nodes[0].setmocktime(int(time.time())+10)
self.nodes[2].setmocktime(int(time.time())+10)
for node in [self.nodes[0], self.nodes[2]]:
gbt_results = node.getblocktemplate({"rules" : ["segwit"]})
block_version = gbt_results['version']
if node == self.nodes[2]:
# If this is a non-segwit node, we should still not get a witness
# commitment, nor a version bit signalling segwit.
assert_equal(block_version & (1 << VB_WITNESS_BIT), 0)
assert('default_witness_commitment' not in gbt_results)
else:
# For segwit-aware nodes, check the version bit and the witness
# commitment are correct.
assert(block_version & (1 << VB_WITNESS_BIT) != 0)
assert('default_witness_commitment' in gbt_results)
witness_commitment = gbt_results['default_witness_commitment']
# TODO: this duplicates some code from blocktools.py, would be nice
# to refactor.
# Check that default_witness_commitment is present.
block = CBlock()
witness_root = block.get_merkle_root([ser_uint256(0), ser_uint256(txid)])
check_commitment = uint256_from_str(hash256(ser_uint256(witness_root)+ser_uint256(0)))
from test_framework.blocktools import WITNESS_COMMITMENT_HEADER
output_data = WITNESS_COMMITMENT_HEADER + ser_uint256(check_commitment)
script = CScript([OP_RETURN, output_data])
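                # This mirrors BIP141: the coinbase wtxid is defined to be all
                # zeros (the ser_uint256(0) leaf), the commitment is
                # SHA256d(witness merkle root || 32-byte witness nonce, 0 here),
                # and it is placed in an OP_RETURN output whose data begins
                # with the 4-byte header 0xaa21a9ed (WITNESS_COMMITMENT_HEADER).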
assert_equal(witness_commitment, bytes_to_hex_str(script))
# undo mocktime
self.nodes[0].setmocktime(0)
self.nodes[2].setmocktime(0)
# Uncompressed pubkeys are no longer supported in default relay policy,
# but (for now) are still valid in blocks.
def test_uncompressed_pubkey(self):
print("\tTesting uncompressed pubkeys")
# Segwit transactions using uncompressed pubkeys are not accepted
# under default policy, but should still pass consensus.
key = CECKey()
key.set_secretbytes(b"9")
key.set_compressed(False)
pubkey = CPubKey(key.get_pubkey())
assert_equal(len(pubkey), 65) # This should be an uncompressed pubkey
assert(len(self.utxo) > 0)
utxo = self.utxo.pop(0)
# Test 1: P2WPKH
# First create a P2WPKH output that uses an uncompressed pubkey
pubkeyhash = hash160(pubkey)
scriptPKH = CScript([OP_0, pubkeyhash])
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(utxo.sha256, utxo.n), b""))
tx.vout.append(CTxOut(utxo.nValue-100000, scriptPKH))
tx.rehash()
# Confirm it in a block.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=True)
# Now try to spend it. Send it to a P2WSH output, which we'll
# use in the next test.
witness_program = CScript([pubkey, CScriptOp(OP_CHECKSIG)])
witness_hash = sha256(witness_program)
scriptWSH = CScript([OP_0, witness_hash])
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue-100000, scriptWSH))
script = GetP2PKHScript(pubkeyhash)
sig_hash = SegwitVersion1SignatureHash(script, tx2, 0, SIGHASH_ALL, tx.vout[0].nValue)
signature = key.sign(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [ signature, pubkey ]
tx2.rehash()
# Should fail policy test.
self.test_node.test_transaction_acceptance(tx2, True, False, b'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
# But passes consensus.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2])
self.test_node.test_witness_block(block, accepted=True)
# Test 2: P2WSH
# Try to spend the P2WSH output created in last test.
# Send it to a P2SH(P2WSH) output, which we'll use in the next test.
p2sh_witness_hash = hash160(scriptWSH)
scriptP2SH = CScript([OP_HASH160, p2sh_witness_hash, OP_EQUAL])
scriptSig = CScript([scriptWSH])
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
tx3.vout.append(CTxOut(tx2.vout[0].nValue-100000, scriptP2SH))
tx3.wit.vtxinwit.append(CTxInWitness())
sign_P2PK_witness_input(witness_program, tx3, 0, SIGHASH_ALL, tx2.vout[0].nValue, key)
# Should fail policy test.
self.test_node.test_transaction_acceptance(tx3, True, False, b'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
# But passes consensus.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx3])
self.test_node.test_witness_block(block, accepted=True)
# Test 3: P2SH(P2WSH)
# Try to spend the P2SH output created in the last test.
# Send it to a P2PKH output, which we'll use in the next test.
scriptPubKey = GetP2PKHScript(pubkeyhash)
tx4 = CTransaction()
tx4.vin.append(CTxIn(COutPoint(tx3.sha256, 0), scriptSig))
tx4.vout.append(CTxOut(tx3.vout[0].nValue-100000, scriptPubKey))
tx4.wit.vtxinwit.append(CTxInWitness())
sign_P2PK_witness_input(witness_program, tx4, 0, SIGHASH_ALL, tx3.vout[0].nValue, key)
# Should fail policy test.
self.test_node.test_transaction_acceptance(tx4, True, False, b'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx4])
self.test_node.test_witness_block(block, accepted=True)
# Test 4: Uncompressed pubkeys should still be valid in non-segwit
# transactions.
tx5 = CTransaction()
tx5.vin.append(CTxIn(COutPoint(tx4.sha256, 0), b""))
tx5.vout.append(CTxOut(tx4.vout[0].nValue-100000, CScript([OP_TRUE])))
(sig_hash, err) = SignatureHash(scriptPubKey, tx5, 0, SIGHASH_ALL)
signature = key.sign(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL
tx5.vin[0].scriptSig = CScript([signature, pubkey])
tx5.rehash()
# Should pass policy and consensus.
self.test_node.test_transaction_acceptance(tx5, True, True)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx5])
self.test_node.test_witness_block(block, accepted=True)
self.utxo.append(UTXO(tx5.sha256, 0, tx5.vout[0].nValue))
def test_non_standard_witness(self):
print("\tTesting detection of non-standard P2WSH witness")
pad = chr(1).encode('latin-1')
# Create scripts for tests
scripts = []
scripts.append(CScript([OP_DROP] * 100))
scripts.append(CScript([OP_DROP] * 99))
scripts.append(CScript([pad * 59] * 59 + [OP_DROP] * 60))
scripts.append(CScript([pad * 59] * 59 + [OP_DROP] * 61))
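        # The tests below probe the P2WSH standardness limits (assumed to
        # match Bitcoin Core's policy here): at most 100 witness stack items
        # excluding the witnessScript, at most 80 bytes per stack item, and a
        # witnessScript of at most 3600 bytes. Each 59-byte push serializes
        # to 60 bytes, so 59 pushes plus 60 OP_DROPs is exactly 3600 bytes,
        # and one more OP_DROP makes it 3601.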
p2wsh_scripts = []
assert(len(self.utxo))
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
# For each script, generate a pair of P2WSH and P2SH-P2WSH output.
outputvalue = (self.utxo[0].nValue - 100000) // (len(scripts) * 2)
for i in scripts:
p2wsh = CScript([OP_0, sha256(i)])
p2sh = hash160(p2wsh)
p2wsh_scripts.append(p2wsh)
tx.vout.append(CTxOut(outputvalue, p2wsh))
tx.vout.append(CTxOut(outputvalue, CScript([OP_HASH160, p2sh, OP_EQUAL])))
tx.rehash()
txid = tx.sha256
self.test_node.test_transaction_acceptance(tx, with_witness=False, accepted=True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
# Creating transactions for tests
p2wsh_txs = []
p2sh_txs = []
for i in range(len(scripts)):
p2wsh_tx = CTransaction()
p2wsh_tx.vin.append(CTxIn(COutPoint(txid,i*2)))
p2wsh_tx.vout.append(CTxOut(outputvalue - 500000, CScript([OP_0, hash160(hex_str_to_bytes(""))])))
p2wsh_tx.wit.vtxinwit.append(CTxInWitness())
p2wsh_tx.rehash()
p2wsh_txs.append(p2wsh_tx)
p2sh_tx = CTransaction()
p2sh_tx.vin.append(CTxIn(COutPoint(txid,i*2+1), CScript([p2wsh_scripts[i]])))
p2sh_tx.vout.append(CTxOut(outputvalue - 500000, CScript([OP_0, hash160(hex_str_to_bytes(""))])))
p2sh_tx.wit.vtxinwit.append(CTxInWitness())
p2sh_tx.rehash()
p2sh_txs.append(p2sh_tx)
# Testing native P2WSH
# Witness stack size, excluding witnessScript, over 100 is non-standard
p2wsh_txs[0].wit.vtxinwit[0].scriptWitness.stack = [pad] * 101 + [scripts[0]]
self.std_node.test_transaction_acceptance(p2wsh_txs[0], True, False, b'bad-witness-nonstandard')
# Non-standard nodes should accept
self.test_node.test_transaction_acceptance(p2wsh_txs[0], True, True)
# Stack element size over 80 bytes is non-standard
p2wsh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 81] * 100 + [scripts[1]]
self.std_node.test_transaction_acceptance(p2wsh_txs[1], True, False, b'bad-witness-nonstandard')
# Non-standard nodes should accept
self.test_node.test_transaction_acceptance(p2wsh_txs[1], True, True)
# Standard nodes should accept if element size is not over 80 bytes
p2wsh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 80] * 100 + [scripts[1]]
self.std_node.test_transaction_acceptance(p2wsh_txs[1], True, True)
# witnessScript size at 3600 bytes is standard
p2wsh_txs[2].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, scripts[2]]
self.test_node.test_transaction_acceptance(p2wsh_txs[2], True, True)
self.std_node.test_transaction_acceptance(p2wsh_txs[2], True, True)
# witnessScript size at 3601 bytes is non-standard
p2wsh_txs[3].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, pad, scripts[3]]
self.std_node.test_transaction_acceptance(p2wsh_txs[3], True, False, b'bad-witness-nonstandard')
# Non-standard nodes should accept
self.test_node.test_transaction_acceptance(p2wsh_txs[3], True, True)
# Repeating the same tests with P2SH-P2WSH
p2sh_txs[0].wit.vtxinwit[0].scriptWitness.stack = [pad] * 101 + [scripts[0]]
self.std_node.test_transaction_acceptance(p2sh_txs[0], True, False, b'bad-witness-nonstandard')
self.test_node.test_transaction_acceptance(p2sh_txs[0], True, True)
p2sh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 81] * 100 + [scripts[1]]
self.std_node.test_transaction_acceptance(p2sh_txs[1], True, False, b'bad-witness-nonstandard')
self.test_node.test_transaction_acceptance(p2sh_txs[1], True, True)
p2sh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 80] * 100 + [scripts[1]]
self.std_node.test_transaction_acceptance(p2sh_txs[1], True, True)
p2sh_txs[2].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, scripts[2]]
self.test_node.test_transaction_acceptance(p2sh_txs[2], True, True)
self.std_node.test_transaction_acceptance(p2sh_txs[2], True, True)
p2sh_txs[3].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, pad, scripts[3]]
self.std_node.test_transaction_acceptance(p2sh_txs[3], True, False, b'bad-witness-nonstandard')
self.test_node.test_transaction_acceptance(p2sh_txs[3], True, True)
self.nodes[0].generate(1) # Mine and clean up the mempool of non-standard node
# Valid but non-standard transactions in a block should be accepted by standard node
sync_blocks(self.nodes)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
assert_equal(len(self.nodes[1].getrawmempool()), 0)
self.utxo.pop(0)
def test_reject_blocks(self):
print ("\tTesting rejection of block.nVersion < BIP9_TOP_BITS blocks")
block = self.build_next_block(nVersion=4)
block.solve()
resp = self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
assert_equal(resp, 'bad-version(0x00000004)')
def run_test(self):
# Setup the p2p connections and start up the network thread.
self.test_node = TestNode() # sets NODE_WITNESS|NODE_NETWORK
self.old_node = TestNode() # only NODE_NETWORK
self.std_node = TestNode() # for testing node1 (fRequireStandard=true)
self.p2p_connections = [self.test_node, self.old_node]
self.connections = []
self.connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.test_node, services=NODE_NETWORK|NODE_WITNESS))
self.connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.old_node, services=NODE_NETWORK))
self.connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], self.std_node, services=NODE_NETWORK|NODE_WITNESS))
self.test_node.add_connection(self.connections[0])
self.old_node.add_connection(self.connections[1])
self.std_node.add_connection(self.connections[2])
NetworkThread().start() # Start up network handling in another thread
# Keep a place to store utxo's that can be used in later tests
self.utxo = []
# Test logic begins here
self.test_node.wait_for_verack()
print("\nStarting tests before segwit lock in:")
self.test_witness_services() # Verifies NODE_WITNESS
self.test_non_witness_transaction() # non-witness tx's are accepted
self.test_unnecessary_witness_before_segwit_activation()
self.test_block_relay(segwit_activated=False)
# Advance to segwit being 'started'
self.advance_to_segwit_started()
sync_blocks(self.nodes)
self.test_getblocktemplate_before_lockin()
sync_blocks(self.nodes)
# At lockin, nothing should change.
print("\nTesting behavior post lockin, pre-activation")
self.advance_to_segwit_lockin()
# Retest unnecessary witnesses
self.test_unnecessary_witness_before_segwit_activation()
self.test_witness_tx_relay_before_segwit_activation()
self.test_block_relay(segwit_activated=False)
self.test_p2sh_witness(segwit_activated=False)
self.test_standardness_v0(segwit_activated=False)
sync_blocks(self.nodes)
# Now activate segwit
print("\nTesting behavior after segwit activation")
self.advance_to_segwit_active()
sync_blocks(self.nodes)
# Test P2SH witness handling again
self.test_reject_blocks()
self.test_p2sh_witness(segwit_activated=True)
self.test_witness_commitments()
self.test_block_malleability()
self.test_witness_block_size()
self.test_submit_block()
self.test_extra_witness_data()
self.test_max_witness_push_length()
self.test_max_witness_program_length()
self.test_witness_input_length()
self.test_block_relay(segwit_activated=True)
self.test_tx_relay_after_segwit_activation()
self.test_standardness_v0(segwit_activated=True)
self.test_segwit_versions()
self.test_premature_coinbase_witness_spend()
self.test_uncompressed_pubkey()
self.test_signature_version_1()
self.test_non_standard_witness()
sync_blocks(self.nodes)
self.test_upgrade_after_activation(self.nodes[2], 2)
self.test_witness_sigops()
if __name__ == '__main__':
SegWitTest().main()
|
the-stack_106_20927
|
# coding: utf-8
import sys
from setuptools import setup, find_packages
NAME = "dgidb-transformer"
VERSION = "2.1.0"
# To install the library, run the following
#
# python setup.py install
#
# prerequisite: setuptools
# http://pypi.python.org/pypi/setuptools
REQUIRES = [
"connexion>=2.0.2",
"swagger-ui-bundle>=0.0.2",
"python_dateutil>=2.6.0"
]
setup(
name=NAME,
version=VERSION,
description="Transformer API for DGIdb",
author_email="[email protected]",
url="",
keywords=["OpenAPI", "Transformer API for DGIdb"],
install_requires=REQUIRES,
packages=find_packages(),
package_data={'': ['openapi/openapi.yaml']},
include_package_data=True,
entry_points={
'console_scripts': ['openapi_server=openapi_server.__main__:main']},
long_description="""\
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
"""
)
|
the-stack_106_20928
|
import pytest
from immudb import consistency
from immudb.schema import schema_pb2
from immudb.client import ImmudbClient
from immudb.consistency import verify
import grpc._channel
class TestConsistency:
def test_verify_path(self):
path = []
assert True == consistency.verify_path(path, 0, 0, bytes(), bytes())
assert True == consistency.verify_path(path, 1, 1, bytes(), bytes())
assert False == consistency.verify_path(path, 0, 0, bytes([1]), bytes([2]))
assert False == consistency.verify_path(path, 0, 1, bytes(), bytes())
assert False == consistency.verify_path(path, 1, 0, bytes(), bytes())
path = [bytes(), bytes(), bytes()]
assert False == consistency.verify_path(path, 2, 1, bytes(), bytes())
def test_verify_path2(self):
path = [b'\x16\x9f\x05\x81\x86\xcfp\x80\xdf\x89\xc1\x16_\xf2\xd1\xa5i\xbb\xb6\x9b\xfe\x0f\xd6:\x80\xcb\xbf\xb2\xa6\xc8"?', b'...f9\xe1\x04\xb7\xb9\xe0T', b'\x8f\xab\xdb\xd3t#L\x9ay\xdey\xb3\xdeZ\x93={Wt\xba\xf5\xda\xd2\xc1\xaf \x15\xf5n\x86\xa6d']
second = 230269
first = 230268
secondHash = b'yP\xf2\xbbh\x02.9\x87\x8e\x1b5\x16k\xe2Zk\xdc3\x82\x96\x0b\xde\x80WJ=\xda\xc9\x8b\x9d\xdc'
firstHash = b"A\xab\x8e,\xe0/\xbb\x13y\x84\x08\xe7\xff\xf5\xbfg\x98\x8d3\xea\xa9\x0fB\xc6\xaa%'\xa3*\xd2\x8e\x0e"
assert False == consistency.verify_path(path, second, first, secondHash, firstHash)
def test_consistency_verify(self):
a=ImmudbClient()
try:
a = ImmudbClient("localhost:3322")
a.login("immudb","immudb")
except grpc._channel._InactiveRpcError as e:
pytest.skip("Cannot reach immudb server")
a.safeSet(b'dummy',b'dummy')
root=a.currentRoot()
a.safeSet(b'dummy1',b'dummy1')
print(root)
cns=a.stub.Consistency(schema_pb2.Index(index=root.index))
print(cns)
ret=verify(cns,root)
assert ret == True
|
the-stack_106_20929
|
import colorama
import socket
from typing import List, Optional
import time
import logging
import paramiko.ssh_exception
import paramiko.client
from .worker_pool import WorkerPoolManager
logger = logging.getLogger("thorctl")
_COLORS = [
colorama.Fore.GREEN,
colorama.Fore.YELLOW,
colorama.Fore.CYAN,
colorama.Style.BRIGHT + colorama.Fore.BLUE,
colorama.Style.DIM + colorama.Fore.YELLOW,
colorama.Style.DIM + colorama.Fore.GREEN,
colorama.Fore.RED,
colorama.Style.BRIGHT + colorama.Fore.GREEN,
colorama.Fore.WHITE,
colorama.Fore.MAGENTA,
]
class WorkerPoolSSHConnection:
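    """Manages SSH connections to every worker instance in a pool and streams
    their thor-worker journalctl logs to the local console, one color per worker.
    """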
def __init__(self, manager: WorkerPoolManager):
self.manager = manager
self.connections = {} # name -> WorkerSSHConnection
# counts all connections *ever*, not just currently active ones.
self.connection_count = 0
def _update_connections(self, conn_timeout: int = 3) -> List[str]:
"""Updates the WorkerPoolSSHConnection's internal connection state.
First, we check if there are any new instances that we have not
connected to. New connections are established to these. If a connection
can't be established, a warning message is emitted.
Second, we remove any disconnected connections from self.connections.
Parameters
----------
conn_timeout : int
How long to wait, in seconds, to establish a connection to a single
host.
Returns
-------
List[str]
A list of the names of instances that were newly connected to.
"""
instances = self.manager.list_worker_instances()
# Gather names of new instances that don't have a connection.
to_add = []
for instance in instances:
if instance["name"] not in self.connections:
to_add.append(instance)
# Try to connect to all the new instances.
added = []
for instance in to_add:
success = self._try_to_connect(instance, conn_timeout)
if success:
added.append(instance["name"])
# Gather names of instances that we've disconnected from.
to_remove = []
for name, conn in self.connections.items():
if not conn.connected:
to_remove.append(name)
# Remove all the disconnected instances.
for name in to_remove:
self.connections.pop(name)
return added
def _try_to_connect(self, instance: dict, conn_timeout: int) -> bool:
"""Attempt to connect to an instance. Return true if success, false if failed.
Parameters
----------
instance : dict
A resource description of the instance.
conn_timeout : int
Time in seconds to wait when trying to connect.
Returns
-------
bool
Whether we connected successfully.
"""
color = self._choose_color()
name = instance["name"]
logger.debug("attempting to connect to %s", name)
ip = _get_external_ip(instance)
if ip is None:
logger.debug("instance %s does not have an IP yet", name)
return False
conn = WorkerSSHConnection(name, ip, color)
try:
conn.connect(timeout=conn_timeout)
self.connections[name] = conn
self.connection_count += 1
logger.debug("connected to %s (%s)", name, ip)
return True
except Exception as e:
logger.debug("failed to connect to %s (%s): %s", name, ip, e)
return False
def _choose_color(self):
return _COLORS[self.connection_count % len(_COLORS)]
def stream_logs(self):
last_client_update = 0
# Loop over the open connections, printing output as we get it.
last_loop = 0
while True:
# Every 5 seconds, update our connections, attaching to new workers.
if time.time() - last_client_update > 5:
last_client_update = time.time()
added = self._update_connections()
for instance in added:
conn = self.connections[instance]
conn.start_tailing_logs()
if len(self.connections) == 0:
logger.debug("not connected to any workers")
# Main loop here: print up to 64 lines from each connection.
for conn in self.connections.values():
for line in conn.iter_available_lines(max_lines=64):
conn.print(line)
# Poll at most every 0.01 seconds.
since_last_loop = time.time() - last_loop
if since_last_loop < 0.01:
time.sleep(0.01 - since_last_loop)
last_loop = time.time()
class WorkerSSHConnection:
"""
Represents a persistent SSH connection to a Worker instance.
The connection can be connected or disconnected.
"""
def __init__(self, instance_name: str, instance_ip: str, print_color: str):
self.instance_name = instance_name
self.instance_ip = instance_ip
self.print_color = print_color
self._client = paramiko.client.SSHClient()
self._client.set_missing_host_key_policy(_IgnoreMissingHostKeys)
self._session = None
self._session_stdout = None
self._read_buffer = bytes()
self.connected = False
self.command_running = False
self.exit_status = None
def print(self, message: str):
"""
Print a message, prefixed with the instance's hostname and using the
WorkerSSHConnection's color.
Parameters
----------
message : str
Message to print
"""
reset = colorama.Style.RESET_ALL
print(f"{self.print_color}{self.instance_name}{reset}: {message}")
def connect(self, timeout: int = 1):
"""Establish a connection to the instance.
Parameters
----------
timeout : int
Time, in seconds, to wait for the connection to be established.
"""
self._client.connect(hostname=self.instance_ip, timeout=timeout)
transport = self._client.get_transport()
        self._session = transport.open_session(timeout=timeout)
self.connected = True
def start_command(self, cmd: str):
"""Send a command over the connected SSH session.
Parameters
----------
cmd : str
Command to send over ssh.
Examples
--------
>>> conn.start_command("ls -l")
"""
assert self.connected
assert not self.command_running
self._session.get_pty()
self._session.exec_command(cmd)
self._session_stdout = self._session.makefile("r", 4096)
self.command_running = True
def disconnect(self):
"""End any running session and connection. """
if self.command_running:
self._session_stdout.close()
self.command_running = False
if self.connected:
self._session.close()
self._client.close()
self.connected = False
def start_tailing_logs(self):
"""Start a session which will stream logs from a THOR worker.
The log lines can be retrieved with iter_available_lines.
Examples
--------
>>> conn = WorkerSSHConnection("asgard", "192.168.1.1", colorama.Fore.GREEN)
>>> conn.connect()
>>> conn.start_tailing_logs()
>>> for line in conn.iter_available_lines(64):
... conn.print(line)
"""
self.start_command("journalctl -o cat -f -u thor-worker.service")
self._session.settimeout(0.05)
def iter_available_lines(self, max_lines: int):
"""
Iterate over buffered lines from stdout of any running command.
If not connected, or if no command is running, this returns without
yielding any lines.
If there is an incomplete line in the buffer, it is not returned.
When the running command has exited, this automatically disconnects.
Parameters
----------
max_lines : int
Maximum number of lines to yield.
Examples
--------
>>> conn = WorkerSSHConnection("asgard", "192.168.1.1", colorama.Fore.GREEN)
>>> conn.connect()
>>> conn.start_tailing_logs()
>>> while conn.connected:
... for line in conn.iter_available_lines(64):
... conn.print(line)
"""
if not self.connected or not self.command_running:
return
lines_read = 0
while True:
# If we have any lines already buffered, hand them out until we hit
# max_lines.
for line in self._iterate_buffered_lines():
yield line
lines_read += 1
if lines_read >= max_lines:
return
# If we can add to the buffer, do so, and then go back to yielding
# from the buffer.
if self._session.recv_ready():
data = self._session.recv(4096)
self._read_buffer = self._read_buffer + data
continue
# We couldn't add to the buffer. Maybe it's because the command exited?
if self._session.exit_status_ready():
# Yes, the command has completed.
#
# But there's a rare race possible: we could have gotten some
# output since we last checked, but before the command exited.
# If this is the case, we should return to the top of the loop,
# and read data until the recv buffer is exhausted.
#
# Once recv_ready returns False consistently *and* the exit
# status is ready, we can be sure we have read all output.
if self._session.recv_ready():
continue
self.exit_status = self._session.recv_exit_status()
self.command_running = False
yield f"command exited with status {self.exit_status}"
return
# Otherwise, there are just no available lines right now, so return.
return
def _iterate_buffered_lines(self):
next_linebreak = self._read_buffer.find(b"\n")
while next_linebreak > 0:
line = self._read_buffer[:next_linebreak]
self._read_buffer = self._read_buffer[(next_linebreak+1):]
yield line.decode()
next_linebreak = self._read_buffer.find(b"\n")
def _get_external_ip(instance_description: dict) -> Optional[str]:
networks = instance_description.get("networkInterfaces", [])
for net in networks:
access_configs = net.get("accessConfigs", [])
for ac in access_configs:
if ac.get("natIP", None) is not None:
return ac["natIP"]
return None
class _IgnoreMissingHostKeys(paramiko.client.MissingHostKeyPolicy):
def missing_host_key(self, client, hostname, key):
return
|
the-stack_106_20930
|
import numpy as np
import scipy.io as sio
import h5py
import random
from PIL import Image
import tensorflow as tf
import keras.backend as K
import os
import glob
data_path = 'data/'
def gpu_config(gpu_id=0, gpu_mem=0.8):
if gpu_mem:
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
gpu_options = tf.compat.v1.GPUOptions(per_process_gpu_memory_fraction=gpu_mem)
sess = tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(gpu_options=gpu_options))
print('############## gpu memory is fixed to', gpu_mem, 'on', str(gpu_id), '#################')
else:
config = tf.compat.v1.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.compat.v1.Session(config=config)
print('gpu memory is flexible.')
def load_data(config='ucm2mai', im_size=224, evaluate=True, tr_test=0.8):
if config=='ucm_si' or config=='ucm_mu' or config=='aid_si' or config=='aid_mu':
return SetData_single(config, im_size, tr_test)
X_trainval = []
y_trainval = []
X_test = []
y_test = []
# get trainval and test split
folder_path = data_path+'MAI_dataset/'
config_trainval = '{}configs/{}_trainval.txt'.format(folder_path, config)
config_test = '{}configs/{}_test.txt'.format(folder_path, config)
f = sio.loadmat('{}multilabel.mat'.format(folder_path))
labels = f['labels']
    # ############### label index re-mapping, kept to reproduce the reported results ############
if config=='aid2mai':
temp = labels
labels = np.zeros((len(temp), 20))
labels[:, 0] = temp[:, 0]
labels[:, 1] = temp[:, 1]
labels[:, 2] = temp[:, 2]
labels[:, 3] = temp[:, 16]
labels[:, 4] = temp[:, 3]
labels[:, 5] = temp[:, 4]
labels[:, 6] = temp[:, 5]
labels[:, 7] = temp[:, 17]
labels[:, 8] = temp[:, 6]
labels[:, 9] = temp[:, 18]
labels[:, 10] = temp[:, 7]
labels[:, 11] = temp[:, 8]
labels[:, 12] = temp[:, 9]
labels[:, 13] = temp[:, 19]
labels[:, 14] = temp[:, 20]
labels[:, 15] = temp[:, 21]
labels[:, 16] = temp[:, 10]
labels[:, 17] = temp[:, 22]
labels[:, 18] = temp[:, 23]
labels[:, 19] = temp[:, 11]
if config=='ucm2mai':
temp = labels
labels = np.zeros((len(temp), 16))
labels[:, 0] = temp[:, 0]
labels[:, 1] = temp[:, 1]
labels[:, 2] = temp[:, 2]
labels[:, 3] = temp[:, 3]
labels[:, 4] = temp[:, 4]
labels[:, 5] = temp[:, 5]
labels[:, 6] = temp[:, 12]
labels[:, 7] = temp[:, 6]
labels[:, 8] = temp[:, 7]
labels[:, 9] = temp[:, 8]
labels[:, 10] = temp[:, 9]
labels[:, 11] = temp[:, 13]
labels[:, 12] = temp[:, 14]
labels[:, 13] = temp[:, 10]
labels[:, 14] = temp[:, 15]
labels[:, 15] = temp[:, 11]
# ##########################################################################
# load all test images
with open(config_test) as f:
test_list = f.readlines()
for i in range(len(test_list)):
fn_id = int(test_list[i][:-1])
im = np.float32(Image.open('{}images/{}.jpg'.format(folder_path, str(fn_id))).resize((im_size, im_size)))
im = im[:, :, [2, 1, 0]] # PIL to cv2
im[:, :, 0] -= 103.939
im[:, :, 1] -= 116.779
im[:, :, 2] -= 123.68
label = labels[fn_id-1, :]
X_test.append(im)
y_test.append(label)
X_test = np.float32(X_test)
y_test = np.uint8(y_test)
if evaluate:
return X_trainval, y_trainval, X_test, y_test
# load all training images
with open(config_trainval) as f:
trainval_list = f.readlines()
for i in range(len(trainval_list)):
fn_id = int(trainval_list[i][:-1])
im = np.float32(Image.open('{}images/{}.jpg'.format(folder_path, str(fn_id))).resize((im_size, im_size)))
im = im[:, :, [2, 1, 0]] # PIL to cv2
#im = cv2.resize(cv2.imread('{}images/{}.jpg'.format(folder_path, str(fn_id))), (im_size, im_size)).astype(np.float32)
im[:, :, 0] -= 103.939
im[:, :, 1] -= 116.779
im[:, :, 2] -= 123.68
label = labels[fn_id-1, :]
X_trainval.append(im)
y_trainval.append(label)
X_trainval = np.float32(X_trainval)
y_trainval = np.uint8(y_trainval)
return X_trainval, y_trainval, X_test, y_test
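# Illustrative usage (the data folders under `data/` and the config files are
# assumed to be in place; argument names follow the signature above):
#   X_trainval, y_trainval, X_test, y_test = load_data('ucm2mai', im_size=224, evaluate=False)
#   X_tr, y_tr, X_te, y_te = load_data('ucm_si', im_size=224)  # single-label UCM split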
def SetData_single(dataset, im_size, tr_test=0.8):
X = []
y = []
num_cls = []
ind = 0
# prepare images
if dataset == 'ucm_mu' or dataset== 'ucm_si':
im_path = data_path+'UCM_dataset/'
classes = ['airplane', 'baseballdiamond', 'beach', 'buildings', 'agricultural','forest', 'golfcourse', 'parkinglot', 'harbor', 'denseresidential', 'river', 'runway', 'chaparral', 'storagetanks', 'tenniscourt', 'mediumresidential']
nb_classes_single = len(classes)
nb_classes_multi = 16
for cls_id in range(nb_classes_single):
print(cls_id, classes[cls_id], 'is loading...')
cls = classes[cls_id]
nb_samples = 100
num_cls.append(nb_samples)
for fn_id in range(100):
im = np.float32(Image.open(im_path+'{}/{}.tif'.format(cls, cls+str(fn_id).zfill(2))).resize((im_size, im_size)))
im = im[:, :, [2, 1, 0]]
im[:,:,0] -= 103.939
im[:,:,1] -= 116.779
im[:,:,2] -= 123.68
X.append(im)
if dataset == 'ucm_mu':
label = np.zeros((nb_classes_multi))
if cls == 'beach': # beach and sea share the same prototype
label[2] = 1
label[15] = 1
elif cls == 'mediumresidential': # residential prototype consists of features of medium and dense residential
label[9] = 1
else:
label[cls_id] = 1
elif dataset == 'ucm_si':
label = np.zeros((nb_classes_single))
label[cls_id] = 1
y.append(label)
ind += 1
elif dataset == 'aid_mu' or dataset == 'aid_si':
im_path = data_path+'AID_dataset/'
classes = ['Airport', 'BaseballField', 'Beach', 'Bridge', 'Commercial', 'Farmland', 'Forest', 'Pond', 'Parking', 'Park', 'Port', 'DenseResidential', 'River', 'Viaduct', 'Playground', 'Stadium', 'StorageTanks', 'RailwayStation', 'Industrial', 'MediumResidential']
nb_classes_single = len(classes)
nb_classes_multi = 20
for cls_id in range(nb_classes_single):
cls = classes[cls_id]
nb_samples = len(glob.glob(im_path+'{}/{}*'.format(cls, str(cls).lower())))
num_cls.append(nb_samples)
for fn_id in range(nb_samples):
im = np.float32(Image.open(im_path+'{}/{}.jpg'.format(cls, str(cls).lower() + '_' +str(fn_id+1))).resize((im_size, im_size)))
im = im[:, :, [2, 1, 0]]
im[:,:,0] -= 103.939
im[:,:,1] -= 116.779
im[:,:,2] -= 123.68
X.append(im)
if dataset == 'aid_mu':
label = np.zeros((nb_classes_multi))
if cls == 'Beach': # beach and sea share the same prototype
label[2] = 1
label[19] = 1
elif cls == 'MediumResidential': # residential prototype consists of features of medium and dense residential
label[11] = 1
else:
label[cls_id] = 1
elif dataset == 'aid_si':
label = np.zeros((nb_classes_single))
label[cls_id] = 1
y.append(label)
ind += 1
# format
X = np.float32(X)
y = np.uint8(y)
# random and find out trainval and test
num_samples = range(sum(num_cls))
train_ix, tr_ix = [], []
test_ix, te_ix = [], []
ind = 0
    # split each class into train/test samples according to the tr_test ratio
for cls in range(nb_classes_single):
samp = num_samples[ind:(ind+num_cls[cls])]
tr, test = np.split(samp, [int(num_cls[cls]*tr_test)])
tr_ix.append(tr)
te_ix.append(test)
ind += num_cls[cls]
for i in range(len(tr_ix)):
for j in range(len(tr_ix[i])):
train_ix.append(tr_ix[i][j])
for i in range(len(te_ix)):
for j in range(len(te_ix[i])):
test_ix.append(te_ix[i][j])
# random.seed(75814)
X_tr = [X[int(i)] for i in train_ix]
y_tr = [y[int(i)] for i in train_ix]
X_tr = np.float32(X_tr)
y_tr = np.uint8(y_tr)
X_test = [X[int(i)] for i in test_ix]
y_test = [y[int(i)] for i in test_ix]
X_test = np.float32(X_test)
y_test = np.uint8(y_test)
del X, y
return X_tr, y_tr, X_test, y_test
def PR_score(pred, gt):
pred = np.int8(np.round(pred))
gt = np.int8(np.round(gt))
pg_minus = pred-gt
pg_add = pred+gt
TP_mat = (pg_add == 2)
FP_mat = (pg_minus == 1)
FN_mat = (pg_minus == -1)
TN_mat = (pg_add == 0)
# calculate example-based
TP_e = np.float32(np.sum(TP_mat, 1))
FP_e = np.float32(np.sum(FP_mat, 1))
FN_e = np.float32(np.sum(FN_mat, 1))
TN_e = np.float32(np.sum(TN_mat, 1))
    # where TP_e is 0, temporarily assign a small value and later set precision/recall back to zero
ind_zero = np.where(TP_e==0)
TP_e[ind_zero] = 0.0000001
precision_e = TP_e/(TP_e+FP_e)
recall_e = TP_e/(TP_e+FN_e)
precision_e[ind_zero] = 0
recall_e[ind_zero] = 0
# calculate label-based
TP_l = np.float32(np.sum(TP_mat, 0))
FP_l = np.float32(np.sum(FP_mat, 0))
FN_l = np.float32(np.sum(FN_mat, 0))
TN_l = np.float32(np.sum(TN_mat, 0))
    # where TP_l is 0, temporarily assign a small value and later set precision/recall back to zero
ind_zero = np.where(TP_l==0)
TP_l[ind_zero] = 0.0000001
precision_l = TP_l/(TP_l+FP_l)
recall_l = TP_l/(TP_l+FN_l)
precision_l[ind_zero] = 0
recall_l[ind_zero] = 0
return precision_e, recall_e, precision_l, recall_l
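# Illustrative usage (pred holds multi-label scores, gt holds binary labels,
# both shaped (n_samples, n_labels); the numbers are made up):
#   pred = np.array([[0.9, 0.2, 0.7], [0.1, 0.8, 0.4]])
#   gt = np.array([[1, 0, 1], [0, 1, 1]])
#   precision_e, recall_e, precision_l, recall_l = PR_score(pred, gt)
#   f1_example_based = F_score(pred, gt, ex_base=True)  # defined below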
def F_score(pred, gt, ex_base=True, beta=1):
if ex_base:
precision, recall, _, _ = PR_score(pred, gt)
else:
_, _, precision, recall = PR_score(pred, gt)
print(np.shape(precision), np.shape(recall))
    if (precision < 0).any() or (recall < 0).any():
print('negative precision or recall!!!!')
F = -999999999
return F
pr = (beta*beta)*precision+recall
    # guard against precision and recall both being zero (would divide by zero)
ind_zero = np.where(pr==0)
pr[ind_zero] = 0.0000001
F = (1.0+beta*beta)*(precision*recall)/pr
F[ind_zero] = 0
return F
def F1_e(y_true, y_pred):
gt = y_true
pred = K.round(y_pred)
pg_minus = pred-gt
pg_add = pred+gt
# calculate example-based
TP_e = K.sum(K.cast(K.equal(pg_add, 2), K.floatx()), 1)
FP_e = K.sum(K.cast(K.equal(pg_minus, 1), K.floatx()), 1)
FN_e = K.sum(K.cast(K.equal(pg_minus, -1), K.floatx()), 1)
TN_e = K.sum(K.cast(K.equal(pg_add, 0), K.floatx()), 1)
# in case of 0
TP_e2 = TP_e * K.cast(~K.equal(TP_e, 0), K.floatx()) + K.cast(K.equal(TP_e, 0), K.floatx())
precision_e = TP_e2/(TP_e2+FP_e)
recall_e = TP_e2/(TP_e2+FN_e)
precision_e = precision_e * K.cast(~K.equal(TP_e, 0), K.floatx())
recall_e = recall_e * K.cast(~K.equal(TP_e, 0), K.floatx())
pr = precision_e+recall_e
pr2 = pr * K.cast(~K.equal(pr, 0), K.floatx()) + K.cast(K.equal(pr, 0), K.floatx())
F = 2*(precision_e*recall_e)/pr2
F = F * K.cast(~K.equal(pr, 0), K.floatx())
return K.mean(F)
def F2_e(y_true, y_pred):
gt = y_true
pred = K.round(y_pred)
pg_minus = pred-gt
pg_add = pred+gt
# calculate example-based
TP_e = K.sum(K.cast(K.equal(pg_add, 2), K.floatx()), 1)
FP_e = K.sum(K.cast(K.equal(pg_minus, 1), K.floatx()), 1)
FN_e = K.sum(K.cast(K.equal(pg_minus, -1), K.floatx()), 1)
TN_e = K.sum(K.cast(K.equal(pg_add, 0), K.floatx()), 1)
# in case of 0
    TP_e2 = TP_e * K.cast(~K.equal(TP_e, 0), K.floatx()) + K.cast(K.equal(TP_e, 0), K.floatx())  # 0 is replaced by 1 here and changed back to 0 later
precision_e = TP_e2/(TP_e2+FP_e)
recall_e = TP_e2/(TP_e2+FN_e)
precision_e = precision_e * K.cast(~K.equal(TP_e, 0), K.floatx())
recall_e = recall_e * K.cast(~K.equal(TP_e, 0), K.floatx())
pr = 4*precision_e+recall_e
pr2 = pr * K.cast(~K.equal(pr, 0), K.floatx()) + K.cast(K.equal(pr, 0), K.floatx())
F = 5*(precision_e*recall_e)/pr2
F = F * K.cast(~K.equal(pr, 0), K.floatx())
return K.mean(F)
|
the-stack_106_20931
|
# Copyright 2014 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
load(
"@bazel_skylib//lib:sets.bzl",
"sets",
)
load(
"@io_bazel_rules_go//go/private:context.bzl",
"go_context",
)
load(
"@io_bazel_rules_go//go/private:common.bzl",
"split_srcs",
)
load(
"@io_bazel_rules_go//go/private:mode.bzl",
"LINKMODES",
"LINKMODE_NORMAL",
"mode_string",
)
load(
"@io_bazel_rules_go//go/private:providers.bzl",
"GoArchive",
"GoArchiveData",
"GoAspectProviders",
"GoLibrary",
"GoSource",
)
load(
"@io_bazel_rules_go//go/platform:list.bzl",
"GOARCH",
"GOOS",
)
def _go_archive_aspect_impl(target, ctx):
go = go_context(ctx, ctx.rule.attr)
source = target[GoSource] if GoSource in target else None
archive = target[GoArchive] if GoArchive in target else None
if source and source.mode == go.mode:
# The base layer already built the right mode for us
return []
if not GoLibrary in target:
# Not a rule we can do anything with
return []
# We have a library and we need to compile it in a new mode
library = target[GoLibrary]
source = go.library_to_source(go, ctx.rule.attr, library, ctx.coverage_instrumented())
if archive:
archive = go.archive(go, source = source)
return [GoAspectProviders(
source = source,
archive = archive,
)]
go_archive_aspect = aspect(
_go_archive_aspect_impl,
attr_aspects = [
"deps",
"embed",
"compiler",
"compilers",
"_stdlib",
"_coverdata",
],
attrs = {
"pure": attr.string(values = [
"on",
"off",
"auto",
]),
"static": attr.string(values = [
"on",
"off",
"auto",
]),
"msan": attr.string(values = [
"on",
"off",
"auto",
]),
"race": attr.string(values = [
"on",
"off",
"auto",
]),
"goos": attr.string(
values = GOOS.keys() + ["auto"],
default = "auto",
),
"goarch": attr.string(
values = GOARCH.keys() + ["auto"],
default = "auto",
),
"linkmode": attr.string(values = LINKMODES, default = LINKMODE_NORMAL),
},
toolchains = ["@io_bazel_rules_go//go:toolchain"],
)
|
the-stack_106_20932
|
#!/usr/bin/env python
"""Pathspecs are methods of specifying the path on the client.
The GRR client has a number of drivers to virtualize access to different objects
to create a Virtual File System (VFS) abstraction. These are called 'VFS
Handlers' and they provide typical file-like operations (e.g. read, seek, tell
and stat). It is possible to recursively apply different drivers in the correct
order to arrive at a certain file like object. In order to specify how drivers
should be applied we use 'Path Specifications' or pathspec.
Each VFS handler is constructed from a previous handler and a pathspec. The
pathspec is just a collection of arguments which make sense to the specific VFS
handler. The type of the handler is carried by the pathtype parameter.
On the server the PathSpec is represented as a PathSpec object, and stored
as an attribute of the AFF4 object. This module defines this abstraction.
"""
import itertools
import posixpath
import re
from grr.lib import rdfvalue
from grr.lib import utils
from grr.lib.rdfvalues import standard as rdf_standard
from grr.lib.rdfvalues import structs as rdf_structs
from grr_response_proto import jobs_pb2
from grr.server.grr_response_server import artifact_utils
INTERPOLATED_REGEX = re.compile(r"%%([^%]+?)%%")
# Grouping pattern: e.g. {test.exe,foo.doc,bar.txt}
GROUPING_PATTERN = re.compile("{([^}]+,[^}]+)}")
class PathSpec(rdf_structs.RDFProtoStruct):
"""A path specification.
The pathspec protobuf is a recursive protobuf which contains components. This
class makes it easier to manipulate these structures by providing useful
helpers.
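  Example (values are illustrative):
    pathspec = PathSpec(path="/dev/sda1", pathtype=PathSpec.PathType.OS)
    pathspec.Append(path="/home/test/file.txt", pathtype=PathSpec.PathType.TSK)
    # pathspec now holds two nested components: pathspec.last is the TSK
    # component and pathspec.CollapsePath() joins both paths.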
"""
protobuf = jobs_pb2.PathSpec
rdf_deps = [
rdfvalue.ByteSize,
"PathSpec", # TODO(user): recursive definition.
]
def CopyConstructor(self, other):
# pylint: disable=protected-access
self.SetRawData(other._CopyRawData())
# pylint: enable=protected-access
self.age = other.age
def __len__(self):
"""Return the total number of path components."""
i = -1
for i, _ in enumerate(self):
pass
return i + 1
def __getitem__(self, item):
for i, element in enumerate(self):
if i == item:
return element
raise IndexError("Pathspec index (%s) out of range" % item)
def __iter__(self):
"""Only iterate over all components from the current pointer."""
element = self
while element.HasField("pathtype"):
yield element
if element.HasField("nested_path"):
element = element.nested_path
else:
break
def Insert(self, index, rdfpathspec=None, **kwarg):
"""Insert a single component at index."""
if rdfpathspec is None:
rdfpathspec = self.__class__(**kwarg)
if index == 0:
# Copy ourselves to a temp copy.
nested_proto = self.__class__()
nested_proto.SetRawData(self.GetRawData())
# Replace ourselves with the new object.
self.SetRawData(rdfpathspec.GetRawData())
# Append the temp copy to the end.
self.last.nested_path = nested_proto
else:
previous = self[index - 1]
rdfpathspec.last.nested_path = previous.nested_path
previous.nested_path = rdfpathspec
def Append(self, component=None, **kwarg):
"""Append a new pathspec component to this pathspec."""
if component is None:
component = self.__class__(**kwarg)
if self.HasField("pathtype"):
self.last.nested_path = component
else:
for k, v in kwarg.items():
setattr(self, k, v)
self.SetRawData(component.GetRawData())
return self
def CollapsePath(self):
return utils.JoinPath(*[x.path for x in self])
def Pop(self, index=0):
"""Removes and returns the pathspec at the specified index."""
if index < 0:
index += len(self)
if index == 0:
result = self.__class__()
result.SetRawData(self.GetRawData())
self.SetRawData(self.nested_path.GetRawData())
else:
# Get the raw protobufs for the previous member.
previous = self[index - 1]
result = previous.nested_path
# Manipulate the previous members protobuf to patch the next component in.
previous.nested_path = result.nested_path
result.nested_path = None
return result
@property
def first(self):
return self
@property
def last(self):
if self.HasField("pathtype") and self.pathtype != self.PathType.UNSET:
return list(self)[-1]
return self
def Dirname(self):
"""Get a new copied object with only the directory path."""
result = self.Copy()
while 1:
last_directory = posixpath.dirname(result.last.path)
if last_directory != "/" or len(result) <= 1:
result.last.path = last_directory
# Make sure to clear the inode information.
result.last.inode = None
break
result.Pop(-1)
return result
def Basename(self):
for component in reversed(self):
basename = posixpath.basename(component.path)
if basename:
return basename
return ""
def Validate(self):
if not self.HasField("pathtype") or self.pathtype == self.PathType.UNSET:
raise ValueError("No path type set in PathSpec.")
AFF4_PREFIXES = {
0: "/fs/os", # PathSpec.PathType.OS
1: "/fs/tsk", # PathSpec.PathType.TSK
2: "/registry", # PathSpec.PathType.REGISTRY
4: "/temp", # PathSpec.PathType.TMPFILE
}
def AFF4Path(self, client_urn):
"""Returns the AFF4 URN this pathspec will be stored under.
Args:
client_urn: A ClientURN.
Returns:
A urn that corresponds to this pathspec.
Raises:
ValueError: If pathspec is not of the correct type.
"""
# If the first level is OS and the second level is TSK its probably a mount
# point resolution. We map it into the tsk branch. For example if we get:
# path: \\\\.\\Volume{1234}\\
# pathtype: OS
# mount_point: /c:/
# nested_path {
# path: /windows/
# pathtype: TSK
# }
# We map this to aff4://client_id/fs/tsk/\\\\.\\Volume{1234}\\/windows/
if not self.HasField("pathtype"):
raise ValueError("Can't determine AFF4 path without a valid pathtype.")
first_component = self[0]
dev = first_component.path
if first_component.HasField("offset"):
# We divide here just to get prettier numbers in the GUI
dev += ":" + str(first_component.offset / 512)
if (len(self) > 1 and first_component.pathtype == PathSpec.PathType.OS and
self[1].pathtype == PathSpec.PathType.TSK):
result = [self.AFF4_PREFIXES[PathSpec.PathType.TSK], dev]
# Skip the top level pathspec.
start = 1
else:
# For now just map the top level prefix based on the first pathtype
result = [self.AFF4_PREFIXES[first_component.pathtype]]
start = 0
for p in self[start]:
component = p.path
# The following encode different pathspec properties into the AFF4 path in
# such a way that unique files on the client are mapped to unique URNs in
# the AFF4 space. Note that this transformation does not need to be
# reversible since we always use the PathSpec when accessing files on the
# client.
if p.HasField("offset"):
component += ":" + str(p.offset / 512)
# Support ADS names.
if p.HasField("stream_name"):
component += ":" + p.stream_name
result.append(component)
return client_urn.Add("/".join(result))
class GlobExpression(rdfvalue.RDFString):
"""A glob expression for a client path.
A glob expression represents a set of regular expressions which match files on
the client. The Glob expression supports the following expansions:
1) Client attribute expansions are surrounded with %% characters. They will be
expanded from the client AFF4 object.
2) Groupings are collections of alternates. e.g. {foo.exe,bar.sys}
3) Wild cards like * and ?
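  Example (illustrative):
    %%users.homedir%%/Downloads/{*.exe,*.dll}
  expands the users.homedir client attribute and the grouping, yielding
  patterns such as C:/Users/foo/Downloads/*.exe for each known user.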
"""
context_help_url = "investigating-with-grr/flows/specifying-file-paths.html"
RECURSION_REGEX = re.compile(r"\*\*(\d*)")
def Validate(self):
"""GlobExpression is valid."""
if len(self.RECURSION_REGEX.findall(self._value)) > 1:
raise ValueError("Only one ** is permitted per path: %s." % self._value)
def Interpolate(self, client=None):
kb = client.Get(client.Schema.KNOWLEDGE_BASE)
patterns = artifact_utils.InterpolateKbAttributes(self._value, kb)
for pattern in patterns:
# Normalize the component path (this allows us to resolve ../
# sequences).
pattern = utils.NormalizePath(pattern.replace("\\", "/"))
for pattern in self.InterpolateGrouping(pattern):
yield pattern
def InterpolateGrouping(self, pattern):
"""Interpolate inline globbing groups."""
components = []
offset = 0
for match in GROUPING_PATTERN.finditer(pattern):
components.append([pattern[offset:match.start()]])
# Expand the attribute into the set of possibilities:
alternatives = match.group(1).split(",")
components.append(set(alternatives))
offset = match.end()
components.append([pattern[offset:]])
# Now calculate the cartesian products of all these sets to form all
# strings.
for vector in itertools.product(*components):
yield u"".join(vector)
def _ReplaceRegExGrouping(self, grouping):
alternatives = grouping.group(1).split(",")
return "(" + "|".join(re.escape(s) for s in alternatives) + ")"
def _ReplaceRegExPart(self, part):
if part == "**/":
return "(?:.*\\/)?"
elif part == "*":
return "[^\\/]*"
elif part == "?":
return "[^\\/]"
elif GROUPING_PATTERN.match(part):
return GROUPING_PATTERN.sub(self._ReplaceRegExGrouping, part)
else:
return re.escape(part)
REGEX_SPLIT_PATTERN = re.compile(
"(" + "|".join(["{[^}]+,[^}]+}", "\\?", "\\*\\*\\/?", "\\*"]) + ")")
def AsRegEx(self):
"""Return the current glob as a simple regex.
Note: No interpolation is performed.
Returns:
A RegularExpression() object.
"""
parts = self.__class__.REGEX_SPLIT_PATTERN.split(self._value)
result = "".join(self._ReplaceRegExPart(p) for p in parts)
return rdf_standard.RegularExpression("(?i)\\A%s\\Z" % result)
|
the-stack_106_20933
|
# coding=UTF-8
# ex:ts=4:sw=4:et=on
# Copyright (c) 2013, Mathijs Dumon
# All rights reserved.
# Complete license can be found in the LICENSE file.
import numpy as np
from math import pi
import logging
logger = logging.getLogger(__name__)
def get_atomic_scattering_factor(angstrom_range, atom_type):
r"""
    Calculates the atomic scattering factor for
- angstrom_range: numpy array of (2*sin(θ) / λ)² values (= 1/A² units)
- atom_type: an :class:`~pyxrd.calculations.data_objects.AtomTypeData`
instance
The atomic scattering factor is calculated as follows:
.. math::
:nowrap:
\begin{flalign*}
& ASF = \left[ c + \sum_{i=1}^{5}{ \left( a_i \cdot e ^ { - b_i \cdot {\left(2 \cdot \frac{sin(\theta)}{\lambda}\right)}^2 } \right) } \right]
\cdot e ^ { B \cdot {\left(2 \cdot \frac{sin(\theta)}{\lambda}\right)}^2 }
\end{flalign*}
Where a_i, b_i and c are the scattering factors taken from `atom_type`
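    A minimal usage sketch (stl_range holds 2*sin(θ)/λ values in 1/nm; the unit
    conversion mirrors the one in get_structure_factor below)::
        angstrom_range = (stl_range * 0.05) ** 2
        asf = get_atomic_scattering_factor(angstrom_range, atom_type)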
"""
if atom_type is not None:
asf = np.sum(atom_type.par_a * np.exp(-atom_type.par_b * angstrom_range[..., np.newaxis]), axis=1) + atom_type.par_c
asf = asf * np.exp(-atom_type.debye * angstrom_range)
return asf
else:
logger.warning("get_atomic_scattering_factor reports: 'None found!'")
return np.zeros_like(angstrom_range)
def get_structure_factor(range_stl, atom):
r"""
Calculates the atom's structure factor for
- range_stl: numpy array of 2*sin(θ) / λ values (= 1/nm units)
- atom_type: an :class:`~pyxrd.calculations.data_objects.AtomData`
instance
The structure factor is calculated using the atomic scattering factor ASF
(calculated by :meth:`~pyxrd.calculations.atoms.get_atomic_scattering_factor`)
as follows:
.. math::
:nowrap:
\begin{flalign*}
& SF = ASF \cdot p \cdot e ^ { 2 \cdot \pi \cdot z \cdot i \cdot \frac{2 \cdot sin(\theta)}{\lambda} }
\end{flalign*}
"""
if atom is not None and atom.atom_type is not None:
angstrom_range = ((range_stl * 0.05) ** 2)
asf = get_atomic_scattering_factor(angstrom_range, atom.atom_type)
return asf * atom.pn * np.exp(2 * pi * atom.z * range_stl * 1j)
else:
logger.warning("get_structure_factor reports: 'None found!'")
return np.zeros_like(range_stl)
|
the-stack_106_20936
|
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
This file contains components with some default boilerplate logic user may need
in training / testing. They will not work for everyone, but many users may find them useful.
The behavior of functions/classes in this file is subject to change,
since they are meant to represent the "common default behavior" people need in their projects.
"""
import logging
from detectron2.data import build_detection_test_loader, build_detection_train_loader
from detectron2.engine.defaults import DefaultTrainer
# from detectron2.modeling import build_model
from centernet.centernet import build_model
from centernet.dataset_mapper import DatasetMapper
__all__ = ["DefaultTrainer2"]
class DefaultTrainer2(DefaultTrainer):
def __init__(self, cfg):
super().__init__(cfg)
@classmethod
def build_model(cls, cfg):
"""
Returns:
torch.nn.Module:
        It now calls :func:`centernet.centernet.build_model`.
Overwrite it if you'd like a different model.
"""
model = build_model(cfg)
logger = logging.getLogger(__name__)
logger.info("Model:\n{}".format(model))
return model
@classmethod
def build_test_loader(cls, cfg, dataset_name):
return build_detection_test_loader(cfg, dataset_name, mapper=DatasetMapper(cfg, False))
@classmethod
def build_train_loader(cls, cfg):
return build_detection_train_loader(cfg, mapper=DatasetMapper(cfg, True))
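# Typical usage sketch (cfg is a detectron2 CfgNode built elsewhere; the call
# pattern mirrors detectron2's DefaultTrainer):
#   trainer = DefaultTrainer2(cfg)
#   trainer.resume_or_load(resume=False)
#   trainer.train()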
|
the-stack_106_20937
|
from zen.removeDuplicates import removeDuplicates
from zen.removeAll import removeAll
from zen.iterable import iterable
def intersect(*args,**keywords):
for k in keywords:
if k in locals():
exec(k+'=keywords[k]')
elif k in shortNames:
exec(shortNames[k]+'=keywords[k]')
sel=[]
for a in args:
sel.append(list(iterable(a)))
intersected=[]
for s in sel[0]:
inAll=True
for ss in sel[1:]:
if s not in ss:
inAll=False
break
if inAll:
intersected.append(s)
return intersected
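# Example: intersect([1, 2, 3], [2, 3, 4], [3, 2, 5]) returns [2, 3], i.e. the
# items of the first argument that are present in every other argument.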
|
the-stack_106_20938
|
# Copyright (C) 2020 leafcloud b.v.
# Copyright (C) 2020 FUJITSU LIMITED
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for Backup s3 code."""
import bz2
import filecmp
import hashlib
import os
import shutil
import tempfile
import threading
from unittest import mock
import zlib
from eventlet import tpool
from moto import mock_s3
from oslo_utils import units
from cinder.backup.drivers import s3 as s3_dr
from cinder import context
from cinder import db
from cinder import exception
from cinder.i18n import _
from cinder import objects
from cinder.tests.unit.backup import fake_s3_client
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import test
class FakeMD5(object):
def __init__(self, *args, **kwargs):
pass
@classmethod
def digest(cls):
return 's3cindermd5'.encode('utf-8')
@classmethod
def hexdigest(cls):
return 's3cindermd5'
def s3_client(func):
@mock.patch.object(s3_dr.boto3, 'client',
fake_s3_client.FakeS3Boto3.Client)
@mock.patch.object(hashlib, 'md5', FakeMD5)
def func_wrapper(self, *args, **kwargs):
return func(self, *args, **kwargs)
return func_wrapper
def fake_backup_metadata(self, backup, object_meta):
raise exception.BackupDriverException(reason=_('fake'))
def fake_delete(self, backup):
raise exception.BackupOperationError()
def _fake_delete_object(self, bucket_name, object_name):
raise AssertionError('delete_object method should not be called.')
class BackupS3TestCase(test.TestCase):
"""Test Case for s3."""
_DEFAULT_VOLUME_ID = 'c7eb81f4-bec6-4730-a60f-8888885874df'
def _create_volume_db_entry(self, volume_id=_DEFAULT_VOLUME_ID):
vol = {'id': volume_id,
'size': 1,
'status': 'available',
'volume_type_id': self.vt['id']}
return db.volume_create(self.ctxt, vol)['id']
def _create_backup_db_entry(self,
volume_id=_DEFAULT_VOLUME_ID,
container=s3_dr.CONF.backup_s3_store_bucket,
parent_id=None,
status=None,
service_metadata=None):
try:
db.volume_get(self.ctxt, volume_id)
except exception.NotFound:
self._create_volume_db_entry(volume_id=volume_id)
kwargs = {'size': 1,
'container': container,
'volume_id': volume_id,
'parent_id': parent_id,
'user_id': fake.USER_ID,
'project_id': fake.PROJECT_ID,
'status': status,
'service_metadata': service_metadata,
}
backup = objects.Backup(context=self.ctxt, **kwargs)
backup.create()
return backup
def _write_effective_compression_file(self, data_size):
"""Ensure file contents can be effectively compressed."""
self.volume_file.seek(0)
self.volume_file.write(bytes([65] * data_size))
self.volume_file.seek(0)
def setUp(self):
super(BackupS3TestCase, self).setUp()
self.ctxt = context.get_admin_context()
self.volume_file = tempfile.NamedTemporaryFile()
self.temp_dir = tempfile.mkdtemp()
self.addCleanup(self.volume_file.close)
# Remove tempdir.
self.addCleanup(shutil.rmtree, self.temp_dir)
self.size_volume_file = 0
for _i in range(0, 64):
self.volume_file.write(os.urandom(units.Ki))
self.size_volume_file += 1024
notify_patcher = mock.patch(
'cinder.volume.volume_utils.notify_about_backup_usage')
notify_patcher.start()
self.addCleanup(notify_patcher.stop)
self.flags(backup_s3_endpoint_url=None)
self.flags(backup_s3_store_access_key='s3cinderaccesskey')
self.flags(backup_s3_store_secret_key='s3cindersecretkey')
self.flags(backup_s3_sse_customer_key='s3aeskey')
@mock_s3
def test_backup_correctly_configured(self):
self.service = s3_dr.S3BackupDriver(self.ctxt)
self.assertIsInstance(self.service, s3_dr.S3BackupDriver)
@mock_s3
def test_backup(self):
volume_id = 'b09b1ad4-5f0e-4d3f-8b9e-0000004f5ec2'
container_name = 'test-bucket'
backup = self._create_backup_db_entry(volume_id=volume_id,
container=container_name)
service = s3_dr.S3BackupDriver(self.ctxt)
self.volume_file.seek(0)
result = service.backup(backup, self.volume_file)
self.assertIsNone(result)
@mock_s3
def test_backup_uncompressed(self):
volume_id = '2b9f10a3-42b4-4fdf-b316-000000ceb039'
backup = self._create_backup_db_entry(volume_id=volume_id)
self.flags(backup_compression_algorithm='none')
service = s3_dr.S3BackupDriver(self.ctxt)
self.volume_file.seek(0)
service.backup(backup, self.volume_file)
@mock_s3
def test_backup_bz2(self):
volume_id = 'dc0fee35-b44e-4f13-80d6-000000e1b50c'
backup = self._create_backup_db_entry(volume_id=volume_id)
self.flags(backup_compression_algorithm='bz2')
service = s3_dr.S3BackupDriver(self.ctxt)
self._write_effective_compression_file(self.size_volume_file)
service.backup(backup, self.volume_file)
@mock_s3
def test_backup_zlib(self):
volume_id = '5cea0535-b6fb-4531-9a38-000000bea094'
backup = self._create_backup_db_entry(volume_id=volume_id)
self.flags(backup_compression_algorithm='zlib')
service = s3_dr.S3BackupDriver(self.ctxt)
self._write_effective_compression_file(self.size_volume_file)
service.backup(backup, self.volume_file)
@mock_s3
def test_backup_zstd(self):
volume_id = '471910a0-a197-4259-9c50-0fc3d6a07dbc'
backup = self._create_backup_db_entry(volume_id=volume_id)
self.flags(backup_compression_algorithm='zstd')
service = s3_dr.S3BackupDriver(self.ctxt)
self._write_effective_compression_file(self.size_volume_file)
service.backup(backup, self.volume_file)
@mock_s3
def test_backup_default_container(self):
volume_id = '9552017f-c8b9-4e4e-a876-00000053349c'
backup = self._create_backup_db_entry(volume_id=volume_id,
container=None)
service = s3_dr.S3BackupDriver(self.ctxt)
self.volume_file.seek(0)
service.backup(backup, self.volume_file)
self.assertEqual('volumebackups', backup.container)
@mock_s3
def test_backup_custom_container(self):
volume_id = '1da9859e-77e5-4731-bd58-000000ca119e'
container_name = 'fake99'
backup = self._create_backup_db_entry(volume_id=volume_id,
container=container_name)
service = s3_dr.S3BackupDriver(self.ctxt)
self.volume_file.seek(0)
service.backup(backup, self.volume_file)
self.assertEqual(container_name, backup.container)
@mock_s3
def test_backup_shafile(self):
volume_id = '6465dad4-22af-48f7-8a1a-000000218907'
backup = self._create_backup_db_entry(volume_id=volume_id)
service = s3_dr.S3BackupDriver(self.ctxt)
self.volume_file.seek(0)
service.backup(backup, self.volume_file)
# Verify sha contents
content1 = service._read_sha256file(backup)
self.assertEqual(64 * units.Ki / content1['chunk_size'],
len(content1['sha256s']))
@mock_s3
def test_backup_cmp_shafiles(self):
volume_id = '1a99ac67-c534-4fe3-b472-0000001785e2'
backup = self._create_backup_db_entry(volume_id=volume_id)
service1 = s3_dr.S3BackupDriver(self.ctxt)
self.volume_file.seek(0)
service1.backup(backup, self.volume_file)
# Create incremental backup with no change to contents
deltabackup = self._create_backup_db_entry(volume_id=volume_id,
container=None,
parent_id=backup.id)
service2 = s3_dr.S3BackupDriver(self.ctxt)
self.volume_file.seek(0)
service2.backup(deltabackup, self.volume_file)
# Compare shas from both files
content1 = service1._read_sha256file(backup)
content2 = service2._read_sha256file(deltabackup)
self.assertEqual(len(content1['sha256s']), len(content2['sha256s']))
self.assertEqual(set(content1['sha256s']), set(content2['sha256s']))
@mock_s3
def test_backup_delta_two_objects_change(self):
volume_id = '30dab288-265a-4583-9abe-000000d42c67'
self.flags(backup_s3_object_size=8 * units.Ki)
self.flags(backup_s3_block_size=units.Ki)
backup = self._create_backup_db_entry(volume_id=volume_id)
service1 = s3_dr.S3BackupDriver(self.ctxt)
self.volume_file.seek(0)
service1.backup(backup, self.volume_file)
# Create incremental backup with no change to contents
self.volume_file.seek(2 * 8 * units.Ki)
self.volume_file.write(os.urandom(units.Ki))
self.volume_file.seek(4 * 8 * units.Ki)
self.volume_file.write(os.urandom(units.Ki))
deltabackup = self._create_backup_db_entry(volume_id=volume_id,
container=None,
parent_id=backup.id)
service2 = s3_dr.S3BackupDriver(self.ctxt)
self.volume_file.seek(0)
service2.backup(deltabackup, self.volume_file)
content1 = service1._read_sha256file(backup)
content2 = service2._read_sha256file(deltabackup)
# Verify that two shas are changed at index 16 and 32
self.assertNotEqual(content1['sha256s'][16], content2['sha256s'][16])
self.assertNotEqual(content1['sha256s'][32], content2['sha256s'][32])
@mock_s3
def test_backup_delta_two_blocks_in_object_change(self):
volume_id = 'b943e84f-aa67-4331-9ab2-000000cf19ba'
self.flags(backup_s3_object_size=8 * units.Ki)
self.flags(backup_s3_block_size=units.Ki)
backup = self._create_backup_db_entry(volume_id=volume_id)
service1 = s3_dr.S3BackupDriver(self.ctxt)
self.volume_file.seek(0)
service1.backup(backup, self.volume_file)
# Create incremental backup with no change to contents
self.volume_file.seek(16 * units.Ki)
self.volume_file.write(os.urandom(units.Ki))
self.volume_file.seek(20 * units.Ki)
self.volume_file.write(os.urandom(units.Ki))
deltabackup = self._create_backup_db_entry(volume_id=volume_id,
container=None,
parent_id=backup.id)
service2 = s3_dr.S3BackupDriver(self.ctxt)
self.volume_file.seek(0)
service2.backup(deltabackup, self.volume_file)
# Verify that two shas are changed at index 16 and 20
content1 = service1._read_sha256file(backup)
content2 = service2._read_sha256file(deltabackup)
self.assertNotEqual(content1['sha256s'][16], content2['sha256s'][16])
self.assertNotEqual(content1['sha256s'][20], content2['sha256s'][20])
@mock_s3
@mock.patch('cinder.backup.drivers.s3.S3BackupDriver.'
'_send_progress_end')
@mock.patch('cinder.backup.drivers.s3.S3BackupDriver.'
'_send_progress_notification')
def test_backup_default_container_notify(self, _send_progress,
_send_progress_end):
volume_id = '87dd0eed-2598-4ebd-8ebb-000000ac578a'
backup = self._create_backup_db_entry(volume_id=volume_id,
container=None)
# If the backup_object_number_per_notification is set to 1,
# the _send_progress method will be called for sure.
s3_dr.CONF.set_override("backup_object_number_per_notification", 1)
s3_dr.CONF.set_override("backup_s3_enable_progress_timer", False)
service = s3_dr.S3BackupDriver(self.ctxt)
self.volume_file.seek(0)
service.backup(backup, self.volume_file)
self.assertTrue(_send_progress.called)
self.assertTrue(_send_progress_end.called)
# If the backup_object_number_per_notification is increased to
# another value, the _send_progress method will not be called.
_send_progress.reset_mock()
_send_progress_end.reset_mock()
s3_dr.CONF.set_override("backup_object_number_per_notification",
10)
service = s3_dr.S3BackupDriver(self.ctxt)
self.volume_file.seek(0)
service.backup(backup, self.volume_file)
self.assertFalse(_send_progress.called)
self.assertTrue(_send_progress_end.called)
# If the timer is enabled, the _send_progress will be called,
# since the timer can trigger the progress notification.
_send_progress.reset_mock()
_send_progress_end.reset_mock()
s3_dr.CONF.set_override("backup_object_number_per_notification",
10)
s3_dr.CONF.set_override("backup_s3_enable_progress_timer", True)
service = s3_dr.S3BackupDriver(self.ctxt)
self.volume_file.seek(0)
service.backup(backup, self.volume_file)
self.assertTrue(_send_progress.called)
self.assertTrue(_send_progress_end.called)
@mock_s3
@mock.patch.object(s3_dr.S3BackupDriver, '_backup_metadata',
fake_backup_metadata)
def test_backup_backup_metadata_fail(self):
"""Test of when an exception occurs in backup().
In backup(), after an exception occurs in
self._backup_metadata(), we want to check the process of an
exception handler.
"""
volume_id = '020d9142-339c-4876-a445-000000f1520c'
backup = self._create_backup_db_entry(volume_id=volume_id)
self.flags(backup_compression_algorithm='none')
service = s3_dr.S3BackupDriver(self.ctxt)
self.volume_file.seek(0)
# We expect that an exception be notified directly.
self.assertRaises(exception.BackupDriverException,
service.backup,
backup, self.volume_file)
@mock_s3
@mock.patch.object(s3_dr.S3BackupDriver, '_backup_metadata',
fake_backup_metadata)
@mock.patch.object(s3_dr.S3BackupDriver, 'delete_backup',
fake_delete)
def test_backup_backup_metadata_fail2(self):
"""Test of when an exception occurs in an exception handler.
In backup(), after an exception occurs in
self._backup_metadata(), we want to check the process when the
second exception occurs in self.delete_backup().
"""
volume_id = '2164421d-f181-4db7-b9bd-000000eeb628'
backup = self._create_backup_db_entry(volume_id=volume_id)
self.flags(backup_compression_algorithm='none')
service = s3_dr.S3BackupDriver(self.ctxt)
self.volume_file.seek(0)
# We expect that the second exception is notified.
self.assertRaises(exception.BackupOperationError,
service.backup,
backup, self.volume_file)
@mock_s3
def test_delete(self):
volume_id = '9ab256c8-3175-4ad8-baa1-0000007f9d31'
object_prefix = 'test_prefix'
backup = self._create_backup_db_entry(volume_id=volume_id,
service_metadata=object_prefix)
service = s3_dr.S3BackupDriver(self.ctxt)
service.delete_backup(backup)
@mock_s3
@mock.patch.object(s3_dr.S3BackupDriver, 'delete_object',
_fake_delete_object)
def test_delete_without_object_prefix(self):
volume_id = 'ee30d649-72a6-49a5-b78d-000000edb6b1'
backup = self._create_backup_db_entry(volume_id=volume_id)
service = s3_dr.S3BackupDriver(self.ctxt)
service.delete_backup(backup)
@mock_s3
def test_get_compressor(self):
service = s3_dr.S3BackupDriver(self.ctxt)
compressor = service._get_compressor('None')
self.assertIsNone(compressor)
compressor = service._get_compressor('zlib')
self.assertEqual(zlib, compressor)
self.assertIsInstance(compressor, tpool.Proxy)
compressor = service._get_compressor('bz2')
self.assertEqual(bz2, compressor)
self.assertIsInstance(compressor, tpool.Proxy)
self.assertRaises(ValueError, service._get_compressor, 'fake')
@mock_s3
def test_prepare_output_data_effective_compression(self):
"""Test compression works on a native thread."""
# Use dictionary to share data between threads
thread_dict = {}
original_compress = zlib.compress
def my_compress(data):
thread_dict['compress'] = threading.current_thread()
return original_compress(data)
self.mock_object(zlib, 'compress', side_effect=my_compress)
service = s3_dr.S3BackupDriver(self.ctxt)
# Set up buffer of 128 zeroed bytes
fake_data = b'\0' * 128
result = service._prepare_output_data(fake_data)
self.assertEqual('zlib', result[0])
self.assertGreater(len(fake_data), len(result[1]))
self.assertNotEqual(threading.current_thread(),
thread_dict['compress'])
@mock_s3
def test_prepare_output_data_no_compression(self):
self.flags(backup_compression_algorithm='none')
service = s3_dr.S3BackupDriver(self.ctxt)
# Set up buffer of 128 zeroed bytes
fake_data = b'\0' * 128
result = service._prepare_output_data(fake_data)
self.assertEqual('none', result[0])
self.assertEqual(fake_data, result[1])
@mock_s3
def test_prepare_output_data_ineffective_compression(self):
service = s3_dr.S3BackupDriver(self.ctxt)
# Set up buffer of 128 zeroed bytes
fake_data = b'\0' * 128
# Pre-compress so that compression in the driver will be ineffective.
already_compressed_data = service.compressor.compress(fake_data)
result = service._prepare_output_data(already_compressed_data)
self.assertEqual('none', result[0])
self.assertEqual(already_compressed_data, result[1])
@mock_s3
def test_no_config_option(self):
# With no config option to connect driver should raise exception.
self.flags(backup_s3_endpoint_url=None)
self.flags(backup_s3_store_access_key=None)
self.flags(backup_s3_store_secret_key=None)
self.assertRaises(exception.InvalidConfigurationValue,
s3_dr.S3BackupDriver.check_for_setup_error,
self)
@s3_client
def test_create_backup_fail(self):
volume_id = 'b09b1ad4-5f0e-4d3f-8b9e-0000004f5ec3'
container_name = 's3_api_failure'
backup = self._create_backup_db_entry(volume_id=volume_id,
container=container_name)
service = s3_dr.S3BackupDriver(self.ctxt)
self.volume_file.seek(0)
self.assertRaises(s3_dr.S3ClientError,
service.backup,
backup, self.volume_file)
@s3_client
def test_create_backup_faili2(self):
volume_id = '2a59c20e-0b79-4f57-aa63-5be208df48f6'
container_name = 's3_connection_error'
backup = self._create_backup_db_entry(volume_id=volume_id,
container=container_name)
service = s3_dr.S3BackupDriver(self.ctxt)
self.volume_file.seek(0)
self.assertRaises(s3_dr.S3ConnectionFailure,
service.backup,
backup, self.volume_file)
@mock_s3
def test_restore(self):
volume_id = 'c2a81f09-f480-4325-8424-00000071685b'
backup = self._create_backup_db_entry(
volume_id=volume_id,
status=objects.fields.BackupStatus.RESTORING)
service = s3_dr.S3BackupDriver(self.ctxt)
self.volume_file.seek(0)
service.backup(backup, self.volume_file)
with tempfile.NamedTemporaryFile() as volume_file:
service.restore(backup, volume_id, volume_file)
@mock_s3
def test_restore_delta(self):
volume_id = '04d83506-bcf7-4ff5-9c65-00000051bd2e'
self.flags(backup_s3_object_size=8 * units.Ki)
self.flags(backup_s3_block_size=units.Ki)
backup = self._create_backup_db_entry(volume_id=volume_id)
service1 = s3_dr.S3BackupDriver(self.ctxt)
self.volume_file.seek(0)
service1.backup(backup, self.volume_file)
# Create incremental backup with no change to contents
self.volume_file.seek(16 * units.Ki)
self.volume_file.write(os.urandom(units.Ki))
self.volume_file.seek(20 * units.Ki)
self.volume_file.write(os.urandom(units.Ki))
deltabackup = self._create_backup_db_entry(
volume_id=volume_id,
status=objects.fields.BackupStatus.RESTORING,
parent_id=backup.id)
self.volume_file.seek(0)
service2 = s3_dr.S3BackupDriver(self.ctxt)
service2.backup(deltabackup, self.volume_file, True)
with tempfile.NamedTemporaryFile() as restored_file:
service2.restore(deltabackup, volume_id,
restored_file)
self.assertTrue(filecmp.cmp(self.volume_file.name,
restored_file.name))
@s3_client
def test_restore_fail(self):
volume_id = '651496c7-0d8b-45f3-bfe8-9ef6ad30910f'
container_name = 's3_api_failure'
backup = self._create_backup_db_entry(volume_id=volume_id,
container=container_name)
service = s3_dr.S3BackupDriver(self.ctxt)
with tempfile.NamedTemporaryFile() as volume_file:
self.assertRaises(s3_dr.S3ClientError,
service.restore,
backup, volume_id, volume_file)
@s3_client
def test_restore_faili2(self):
volume_id = '87f3f2c2-1a79-48c1-9d98-47c4cab7bf00'
container_name = 's3_connection_error'
backup = self._create_backup_db_entry(volume_id=volume_id,
container=container_name)
service = s3_dr.S3BackupDriver(self.ctxt)
with tempfile.NamedTemporaryFile() as volume_file:
self.assertRaises(s3_dr.S3ConnectionFailure,
service.restore,
backup, volume_id, volume_file)
@mock_s3
def test_backup_md5_validation(self):
volume_id = 'c0a79eb2-ef56-4de2-b3b9-3861fcdf7fad'
self.flags(backup_s3_md5_validation=True)
backup = self._create_backup_db_entry(volume_id=volume_id)
service = s3_dr.S3BackupDriver(self.ctxt)
self.volume_file.seek(0)
service.backup(backup, self.volume_file)
@mock_s3
def test_backup_sse(self):
volume_id = 'c0a79eb2-ef56-4de2-b3b9-3861fcdf7fad'
self.flags(backup_s3_sse_customer_algorithm='AES256')
self.flags(backup_s3_sse_customer_key='sse_key')
backup = self._create_backup_db_entry(volume_id=volume_id)
service = s3_dr.S3BackupDriver(self.ctxt)
self.volume_file.seek(0)
service.backup(backup, self.volume_file)
@mock_s3
def test_restore_sse(self):
volume_id = 'c0a79eb2-ef56-4de2-b3b9-3861fcdf7fad'
self.flags(backup_s3_sse_customer_algorithm='AES256')
self.flags(backup_s3_sse_customer_key='sse_key')
backup = self._create_backup_db_entry(
volume_id=volume_id,
status=objects.fields.BackupStatus.RESTORING)
service = s3_dr.S3BackupDriver(self.ctxt)
self.volume_file.seek(0)
service.backup(backup, self.volume_file)
with tempfile.NamedTemporaryFile() as volume_file:
service.restore(backup, volume_id, volume_file)
|
the-stack_106_20939
|
"""Implements the BaseModule class used with the Measure and Source Modules of the M81."""
from lakeshore.xip_instrument import RegisterBase
class SSMSystemModuleQuestionableRegister(RegisterBase):
"""Class object representing the questionable status register of a module"""
bit_names = [
"read_error",
"unrecognized_pod_error",
"port_direction_error",
"factory_calibration_failure",
"self_calibration_failure"
]
def __init__(
self,
read_error=False,
unrecognized_pod_error=False,
port_direction_error=False,
factory_calibration_failure=False,
self_calibration_failure=False):
self.read_error = read_error
self.unrecognized_pod_error = unrecognized_pod_error
self.port_direction_error = port_direction_error
self.factory_calibration_failure = factory_calibration_failure
self.self_calibration_failure = self_calibration_failure
class BaseModule:
"""Class for interaction with a specific channel, not specific to source or measure"""
def __init__(self, module_number, device):
self.module_number = module_number
self.device = device
self.questionable_register = SSMSystemModuleQuestionableRegister
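# --- Illustrative sketch (not part of the upstream lakeshore driver) ---
# A minimal, hedged example of the questionable-status register defined above.
# The flag values are invented purely for illustration; on real hardware they
# come back from the instrument itself.
if __name__ == "__main__":
    example_register = SSMSystemModuleQuestionableRegister(
        read_error=False,
        unrecognized_pod_error=True)  # assumed value, for demonstration only
    # bit_names lists which attributes map onto bits of the status register.
    for bit_name in SSMSystemModuleQuestionableRegister.bit_names:
        print(bit_name, getattr(example_register, bit_name))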
|
the-stack_106_20941
|
"""Optimization related functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from absl import flags
import absl.logging as _logging
import tensorflow.compat.v1 as tf
import tpu_optimizer
##### Optimization related flags #####
# learning rate schedule
flags.DEFINE_float("learning_rate", default=1e-5, help="initial learning rate")
flags.DEFINE_integer("warmup_steps", default=0, help="number of warmup steps")
flags.DEFINE_string("decay_method", default="poly", help="poly or cos")
flags.DEFINE_float("min_lr_ratio", default=0.0,
help="min lr ratio for cos decay.")
# weight decay
flags.DEFINE_float("weight_decay", default=0.00, help="weight decay rate")
# gradient clip
flags.DEFINE_float("clip", default=1.0, help="Gradient clipping")
flags.DEFINE_bool("per_core_clip", True,
help="Perform gradient clip on each TPU core.")
flags.DEFINE_bool("skip_nan_grad", False,
help="Whether to use skip NaN or Inf gradient.")
# used during finetune
flags.DEFINE_float("lr_layer_decay_rate", 1.0,
"Top layer: lr[L] = FLAGS.learning_rate."
"Low layer: lr[l-1] = lr[l] * lr_layer_decay_rate.")
# adam specific hparams
flags.DEFINE_float("adam_beta1", default=0.9,
help="The exponential decay rate for the 1st moment.")
flags.DEFINE_float("adam_beta2", default=0.99,
help="The exponential decay rate for the 2nd moment.")
flags.DEFINE_bool("adam_correction", default=True,
help="Use the adam bias correction.")
flags.DEFINE_bool("use_wd_exclusion", default=False,
help="Exclude certain params from weight decay as in BERT.")
flags.DEFINE_float("adam_epsilon", default=1e-6, help="adam epsilon")
FLAGS = flags.FLAGS
def _get_variable_name(param_name):
"""Get the variable name from the tensor name."""
m = re.match("^(.*):\\d+$", param_name)
if m is not None:
param_name = m.group(1)
return param_name
def compute_gradients(total_loss):
"""Separate the function of gradient computation."""
monitor_dict = {}
print(FLAGS.weight_decay, "==weight_decay==")
print(FLAGS.lr_layer_decay_rate, "==lr_layer_decay_rate==")
print(FLAGS.use_wd_exclusion, "==use_wd_exclusion==")
print(FLAGS.adam_correction, "==adam_correction==")
##### Configure optimizer
global_step = tf.train.get_or_create_global_step()
# Warmup the learning rate linearly
if FLAGS.warmup_steps > 0:
progress = (tf.cast(global_step, tf.float32) /
tf.cast(FLAGS.warmup_steps, tf.float32))
else:
progress = 1.0
curr_ratio = progress + (1.0 - progress) * FLAGS.min_lr_ratio
warmup_lr = curr_ratio * FLAGS.learning_rate
# Decay the learning rate
if FLAGS.decay_method == "poly":
decay_lr = tf.train.polynomial_decay(
FLAGS.learning_rate,
global_step=global_step - FLAGS.warmup_steps,
decay_steps=FLAGS.train_steps - FLAGS.warmup_steps,
end_learning_rate=FLAGS.learning_rate * FLAGS.min_lr_ratio)
elif FLAGS.decay_method == "cos":
decay_lr = tf.train.cosine_decay(
FLAGS.learning_rate,
global_step=global_step - FLAGS.warmup_steps,
decay_steps=FLAGS.train_steps - FLAGS.warmup_steps,
alpha=FLAGS.min_lr_ratio)
else:
raise ValueError(FLAGS.decay_method)
learning_rate = tf.where(global_step < FLAGS.warmup_steps,
warmup_lr, decay_lr)
if (FLAGS.weight_decay > 0 and not FLAGS.use_tpu and
FLAGS.num_core_per_host > 1):
raise ValueError("Do not support `weight_decay > 0` with multi-gpu "
"training so far.")
if FLAGS.use_wd_exclusion:
exclude_from_weight_decay = ["LayerNorm", "layer_norm", "bias"]
else:
exclude_from_weight_decay = []
print(exclude_from_weight_decay, "==exclude_from_weight_decay==")
optimizer = AdamWeightDecayOptimizer(
learning_rate=learning_rate,
beta_1=FLAGS.adam_beta1,
beta_2=FLAGS.adam_beta2,
epsilon=FLAGS.adam_epsilon,
bias_correction=FLAGS.adam_correction,
exclude_from_weight_decay=exclude_from_weight_decay,
weight_decay_rate=FLAGS.weight_decay)
if FLAGS.use_tpu:
if FLAGS.per_core_clip:
optimizer = tpu_optimizer.CrossShardOptimizer(
optimizer, skip_nan_grad=FLAGS.skip_nan_grad)
else:
optimizer = tpu_optimizer.CrossShardOptimizer(
optimizer, skip_nan_grad=FLAGS.skip_nan_grad, clip=FLAGS.clip)
##### Compute gradient
variables = tf.trainable_variables()
gradients = tf.gradients(total_loss, variables)
if FLAGS.clip > 0 and FLAGS.per_core_clip:
tf.logging.info("Clip local gradient with norm %.3f.", FLAGS.clip)
clipped, local_gnorm = tf.clip_by_global_norm(gradients, FLAGS.clip)
else:
tf.logging.info("Do not clip local gradient.")
clipped = list(gradients)
local_gnorm = tf.linalg.global_norm(gradients)
# layer-wise learning rate decay
if FLAGS.lr_layer_decay_rate != 1.0:
def _get_layer_id(name):
if "model/input" in name:
return 0
m = re.search(r"model/(encoder|decoder)/layer_(\d+?)/", name)
if not m: return None
return int(m.group(2)) + 1
n_layer = 0
for i in range(len(clipped)):
layer_id = _get_layer_id(variables[i].name)
if layer_id is None: continue
n_layer = max(n_layer, layer_id + 1)
for i in range(len(clipped)):
layer_id = _get_layer_id(variables[i].name)
if layer_id is not None:
abs_rate = FLAGS.lr_layer_decay_rate ** (n_layer - 1 - layer_id)
tf.logging.info("Apply mult %.4f to the grad of %s",
abs_rate, variables[i].name)
if isinstance(clipped[i], tf.IndexedSlices):
clipped[i] = tf.IndexedSlices(clipped[i].values * abs_rate,
clipped[i].indices,
clipped[i].dense_shape)
else:
clipped[i] *= abs_rate
else:
tf.logging.info("Grad of %s is not decayed.", variables[i].name)
grad_and_vars = list(zip(clipped, variables))
monitor_dict["local_gnorm"] = local_gnorm
monitor_dict["learning_rate"] = learning_rate
return optimizer, grad_and_vars, global_step, monitor_dict
def get_train_op(total_loss):
"""Get the train op from training loss."""
##### Compute gradients
optimizer, grad_and_vars, global_step, monitor_dict = compute_gradients(
total_loss)
##### Construct train op
train_op = optimizer.apply_gradients(
grad_and_vars, global_step=global_step)
# Manually increment `global_step` for AdamW and LAMB
new_global_step = global_step + 1
train_op = tf.group(train_op, [global_step.assign(new_global_step)])
return train_op, monitor_dict
class AdamWeightDecayOptimizer(tf.train.Optimizer):
"""A basic Adam optimizer that includes "correct" L2 weight decay."""
def __init__(self,
learning_rate,
weight_decay_rate=0.0,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-6,
bias_correction=False,
exclude_from_weight_decay=None,
include_in_weight_decay=["r_s_bias", "r_r_bias", "r_w_bias"],
name="AdamWeightDecayOptimizer"):
"""Constructs a AdamWeightDecayOptimizer."""
super(AdamWeightDecayOptimizer, self).__init__(False, name)
self.learning_rate = learning_rate
self.weight_decay_rate = weight_decay_rate
self.beta_1 = beta_1
self.beta_2 = beta_2
self.epsilon = epsilon
self.bias_correction = bias_correction
self.exclude_from_weight_decay = exclude_from_weight_decay
self.include_in_weight_decay = include_in_weight_decay
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
"""See base class."""
assignments = []
for (grad, param) in grads_and_vars:
if grad is None or param is None:
continue
param_name = self._get_variable_name(param.name)
with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE):
m = tf.get_variable(
name=param_name + "/adam_m",
shape=param.shape.as_list(),
dtype=tf.float32,
trainable=False,
initializer=tf.zeros_initializer())
v = tf.get_variable(
name=param_name + "/adam_v",
shape=param.shape.as_list(),
dtype=tf.float32,
trainable=False,
initializer=tf.zeros_initializer())
# Standard Adam update.
next_m = (
tf.multiply(self.beta_1, m) + tf.multiply(1.0 - self.beta_1, grad))
next_v = (
tf.multiply(self.beta_2, v) + tf.multiply(1.0 - self.beta_2,
tf.square(grad)))
update = next_m / (tf.sqrt(next_v) + self.epsilon)
# Just adding the square of the weights to the loss function is *not*
# the correct way of using L2 regularization/weight decay with Adam,
# since that will interact with the m and v parameters in strange ways.
#
      # Instead we want to decay the weights in a manner that doesn't interact
# with the m/v parameters. This is equivalent to adding the square
# of the weights to the loss with plain (non-momentum) SGD.
if self._do_use_weight_decay(param_name) and self.weight_decay_rate > 0:
update += self.weight_decay_rate * param
# Adam bias correction
if self.bias_correction:
global_step_float = tf.cast(global_step, update.dtype)
bias_correction1 = 1.0 - self.beta_1 ** (global_step_float + 1)
bias_correction2 = 1.0 - self.beta_2 ** (global_step_float + 1)
learning_rate = (self.learning_rate * tf.sqrt(bias_correction2)
/ bias_correction1)
else:
learning_rate = self.learning_rate
print("==bias_correction==")
update_with_lr = learning_rate * update
next_param = param - update_with_lr
assignments.extend(
[param.assign(next_param),
m.assign(next_m),
v.assign(next_v)])
return tf.group(*assignments, name=name)
def _do_use_weight_decay(self, param_name):
"""Whether to use L2 weight decay for `param_name`."""
if not self.weight_decay_rate:
return False
for r in self.include_in_weight_decay:
if re.search(r, param_name) is not None:
tf.logging.info("Include %s in weight decay", param_name)
return True
if self.exclude_from_weight_decay:
for r in self.exclude_from_weight_decay:
if re.search(r, param_name) is not None:
tf.logging.info("Adam WD excludes %s", param_name)
return False
return True
def _get_variable_name(self, param_name):
"""Get the variable name from the tensor name."""
m = re.match("^(.*):\\d+$", param_name)
if m is not None:
param_name = m.group(1)
return param_name
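# --- Illustrative sketch (not part of the original training code) ---
# A hedged, NumPy-only walk-through of the update rule implemented in
# AdamWeightDecayOptimizer.apply_gradients above: the Adam moment updates,
# the optional bias correction, and the decoupled weight decay that is added
# to the update rather than to the loss.  Every number below is made up for
# demonstration only.
if __name__ == "__main__":
    import numpy as np

    beta1, beta2, eps, wd, lr = 0.9, 0.99, 1e-6, 0.01, 1e-3
    w = np.array([0.5, -0.3])   # parameters (illustrative)
    g = np.array([0.1, 0.2])    # gradient (illustrative, held fixed here)
    m = np.zeros_like(w)        # first moment
    v = np.zeros_like(w)        # second moment
    for step in range(3):
        m = beta1 * m + (1.0 - beta1) * g
        v = beta2 * v + (1.0 - beta2) * g ** 2
        update = m / (np.sqrt(v) + eps)
        update += wd * w                      # decoupled weight decay
        bias_correction1 = 1.0 - beta1 ** (step + 1)
        bias_correction2 = 1.0 - beta2 ** (step + 1)
        lr_t = lr * np.sqrt(bias_correction2) / bias_correction1
        w = w - lr_t * update
        print("step", step, "w =", w)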
|
the-stack_106_20944
|
import tensorflow as tf
import numpy as np
from tensorflow.contrib import rnn
from tensorflow.contrib import legacy_seq2seq
class RNNModel:
def __init__(self,
vocabulary_size,
batch_size,
sequence_length,
hidden_layer_size,
cells_size,
gradient_clip=5.,
training=True):
        cells = [rnn.LSTMCell(hidden_layer_size) for _ in range(cells_size)]
self.cell = rnn.MultiRNNCell(cells)
self.input_data = tf.placeholder(tf.int32, [batch_size, sequence_length])
self.targets = tf.placeholder(tf.int32, [batch_size, sequence_length])
self.initial_state = self.cell.zero_state(batch_size, tf.float32)
with tf.variable_scope("rnn", reuse=tf.AUTO_REUSE):
softmax_layer = tf.get_variable("softmax_layer", [hidden_layer_size, vocabulary_size])
softmax_bias = tf.get_variable("softmax_bias", [vocabulary_size])
with tf.variable_scope("embedding", reuse=tf.AUTO_REUSE):
embedding = tf.get_variable("embedding", [vocabulary_size, hidden_layer_size])
inputs = tf.nn.embedding_lookup(embedding, self.input_data)
inputs = tf.split(inputs, sequence_length, 1)
inputs = [tf.squeeze(input_, [1]) for input_ in inputs]
def loop(previous, _):
previous = tf.matmul(previous, softmax_layer) + softmax_bias
previous_symbol = tf.stop_gradient(tf.argmax(previous, 1))
return tf.nn.embedding_lookup(embedding, previous_symbol)
with tf.variable_scope("rnn", reuse=tf.AUTO_REUSE):
outputs, last_state = legacy_seq2seq.rnn_decoder(inputs, self.initial_state, self.cell, loop_function=loop if not training else None)
output = tf.reshape(tf.concat(outputs, 1), [-1, hidden_layer_size])
self.logits = tf.matmul(output, softmax_layer) + softmax_bias
self.probabilities = tf.nn.softmax(self.logits)
loss = legacy_seq2seq.sequence_loss_by_example([self.logits], [tf.reshape(self.targets, [-1])], [tf.ones([batch_size * sequence_length])])
with tf.name_scope("cost"):
self.cost = tf.reduce_sum(loss) / batch_size / sequence_length
self.final_state = last_state
self.learning_rate = tf.Variable(0.0, trainable=False)
trainable_vars = tf.trainable_variables()
grads, _ = tf.clip_by_global_norm(tf.gradients(self.cost, trainable_vars), gradient_clip)
with tf.variable_scope("optimizer", reuse=tf.AUTO_REUSE):
optimizer = tf.train.AdamOptimizer(self.learning_rate)
self.train_op = optimizer.apply_gradients(zip(grads, trainable_vars))
tf.summary.histogram("logits", self.logits)
tf.summary.histogram("probabilitiess", self.probabilities)
tf.summary.histogram("loss", loss)
tf.summary.scalar("cost", self.cost)
tf.summary.scalar("learning_rate", self.learning_rate)
def sample(self, sess, chars, vocabulary, length):
state = sess.run(self.cell.zero_state(1, tf.float32))
text = ""
char = chars[0]
for _ in range(length):
x = np.zeros((1, 1))
x[0, 0] = vocabulary[char]
feed = {self.input_data: x, self.initial_state: state}
[probabilities, state] = sess.run([self.probabilities, self.final_state], feed)
probability = probabilities[0]
total_sum = np.cumsum(probability)
            probability_sum = np.sum(probability)
            sample = int(np.searchsorted(total_sum, np.random.rand(1) * probability_sum))
predicted = chars[sample]
text += predicted
char = predicted
return text
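# --- Illustrative sketch (not part of the original model file) ---
# A hedged example of how RNNModel might be instantiated for training in a
# TF 1.x environment (the module relies on tf.contrib).  All hyper-parameter
# values are invented for illustration; the vocabulary and batching pipeline
# are assumed to exist elsewhere.
if __name__ == "__main__":
    model = RNNModel(vocabulary_size=65,      # e.g. a small character set (assumed)
                     batch_size=32,
                     sequence_length=50,
                     hidden_layer_size=128,
                     cells_size=2)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(tf.assign(model.learning_rate, 0.002))
        # Real training would repeatedly feed (input_data, targets, initial_state)
        # batches and run model.train_op here.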
|
the-stack_106_20945
|
# -*- coding: utf-8 -*-
import tensorflow as tf
from collections import namedtuple
from cnn import CNN
hps = namedtuple('hps', ['modelname',
'training_set_dir',
'test_set_dir',
'optimizer',
'loss_function',
'n_epoch',
'dropout_rate',
'steps_per_epoch',
'validation_steps',
'input_shape'])
def hps_set():
return hps(modelname = 'covid_pred',
training_set_dir = 'datasets/covid_splitted/train',
test_set_dir = 'datasets/covid_splitted/val',
optimizer = 'adam',
loss_function = 'binary_crossentropy',
n_epoch = 25,
dropout_rate = 0.2,
steps_per_epoch = 10800,
validation_steps = 2700,
input_shape = (64,64,3))
hps = hps_set()
predictor = CNN(hps)
predictor.build_model(hps)
predictor.train_model()
model = tf.keras.models.load_model("CNN_covid_pred.model")
#instance = predictor.reshape_instance(
# data_dir = 'datasets/covid_test/negative/nCT122.jpg')
#result = model.predict(instance)
#if result[0][0] == 1:
# prediction = 'positive'
#else:
# prediction = 'negative'
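# --- Illustrative sketch (not part of the original script) ---
# A hedged variant of the commented-out prediction block above that uses the
# stock Keras image utilities instead of the project-specific
# predictor.reshape_instance helper. The image path is taken from the comments
# above; whether class index 1 really means 'positive' depends on how the
# training generator assigned class indices.
# import numpy as np
# image = tf.keras.preprocessing.image.load_img(
#     'datasets/covid_test/negative/nCT122.jpg', target_size=(64, 64))
# instance = np.expand_dims(
#     tf.keras.preprocessing.image.img_to_array(image), axis=0)
# result = model.predict(instance)
# prediction = 'positive' if result[0][0] == 1 else 'negative'
# print(prediction)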
|
the-stack_106_20946
|
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2017, John McNamara, [email protected]
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.maxDiff = None
filename = 'format14.xlsx'
test_dir = 'xlsxwriter/test/comparison/'
self.got_filename = test_dir + '_test_' + filename
self.exp_filename = test_dir + 'xlsx_files/' + filename
self.ignore_files = []
self.ignore_elements = {}
def test_create_file(self):
"""Test the center across format."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
center = workbook.add_format()
center.set_center_across()
worksheet.write('A1', 'foo', center)
workbook.close()
self.assertExcelEqual()
def test_create_file_2(self):
"""Test the center across format."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
center = workbook.add_format({"center_across": True})
worksheet.write('A1', 'foo', center)
workbook.close()
self.assertExcelEqual()
|
the-stack_106_20949
|
# coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Struct Class
# this is a auto generated file generated by Cheetah
# Namespace: com.sun.star.script
# Libre Office Version: 7.3
from typing import TYPE_CHECKING
from ooo.oenv.env_const import UNO_ENVIRONMENT, UNO_RUNTIME, UNO_NONE
if (not TYPE_CHECKING) and UNO_RUNTIME and UNO_ENVIRONMENT:
import uno
def _get_class():
orig_init = None
ordered_keys = ('AllListener', 'Helper', 'ListenerType', 'AddListenerParam', 'EventMethod')
def init(self, *args, **kwargs):
if len(kwargs) == 0 and len(args) == 1 and getattr(args[0], "__class__", None) == self.__class__:
orig_init(self, args[0])
return
kargs = kwargs.copy()
for i, arg in enumerate(args):
kargs[ordered_keys[i]] = arg
orig_init(self, **kargs)
type_name = 'com.sun.star.script.EventListener'
struct = uno.getClass(type_name)
struct.__ooo_ns__ = 'com.sun.star.script'
        struct.__ooo_full_ns__ = type_name
struct.__ooo_type_name__ = 'struct'
orig_init = struct.__init__
struct.__init__ = init
return struct
EventListener = _get_class()
else:
from ...lo.script.event_listener import EventListener as EventListener
__all__ = ['EventListener']
|
the-stack_106_20951
|
"""Metrics to assess performance on classification task given class prediction
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Functions named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Arnaud Joly <[email protected]>
# Jochen Wersdorfer <[email protected]>
# Lars Buitinck
# Joel Nothman <[email protected]>
# Noel Dawe <[email protected]>
# Jatin Shah <[email protected]>
# Saurabh Jha <[email protected]>
# Bernardo Stein <[email protected]>
# Shangwu Yao <[email protected]>
# License: BSD 3 clause
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from scipy.sparse import csr_matrix
from ..preprocessing import LabelBinarizer
from ..preprocessing import LabelEncoder
from ..utils import assert_all_finite
from ..utils import check_array
from ..utils import check_consistent_length
from ..utils import column_or_1d
from ..utils.multiclass import unique_labels
from ..utils.multiclass import type_of_target
from ..utils.validation import _num_samples
from ..utils.sparsefuncs import count_nonzero
from ..exceptions import UndefinedMetricWarning
def _check_zero_division(zero_division):
if isinstance(zero_division, str) and zero_division == "warn":
return
elif isinstance(zero_division, (int, float)) and zero_division in [0, 1]:
return
raise ValueError('Got zero_division={0}.'
' Must be one of ["warn", 0, 1]'.format(zero_division))
def _check_targets(y_true, y_pred):
"""Check that y_true and y_pred belong to the same classification task
This converts multiclass or binary types to a common shape, and raises a
ValueError for a mix of multilabel and multiclass targets, a mix of
multilabel formats, for the presence of continuous-valued or multioutput
targets, or for targets of different lengths.
Column vectors are squeezed to 1d, while multilabel formats are returned
as CSR sparse label indicators.
Parameters
----------
y_true : array-like
y_pred : array-like
Returns
-------
type_true : one of {'multilabel-indicator', 'multiclass', 'binary'}
The type of the true target data, as output by
``utils.multiclass.type_of_target``
y_true : array or indicator matrix
y_pred : array or indicator matrix
"""
check_consistent_length(y_true, y_pred)
type_true = type_of_target(y_true)
type_pred = type_of_target(y_pred)
y_type = {type_true, type_pred}
if y_type == {"binary", "multiclass"}:
y_type = {"multiclass"}
if len(y_type) > 1:
raise ValueError("Classification metrics can't handle a mix of {0} "
"and {1} targets".format(type_true, type_pred))
    # We can't have more than one value on y_type => the set is no longer needed
y_type = y_type.pop()
# No metrics support "multiclass-multioutput" format
if (y_type not in ["binary", "multiclass", "multilabel-indicator"]):
raise ValueError("{0} is not supported".format(y_type))
if y_type in ["binary", "multiclass"]:
y_true = column_or_1d(y_true)
y_pred = column_or_1d(y_pred)
if y_type == "binary":
unique_values = np.union1d(y_true, y_pred)
if len(unique_values) > 2:
y_type = "multiclass"
if y_type.startswith('multilabel'):
y_true = csr_matrix(y_true)
y_pred = csr_matrix(y_pred)
y_type = 'multilabel-indicator'
return y_type, y_true, y_pred
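# --- Illustrative note (not part of the scikit-learn source) ---
# A hedged sketch of the coercion performed above: a binary ``y_true``
# combined with a multiclass ``y_pred`` is promoted to a single 'multiclass'
# problem (values below are purely illustrative):
#
#     >>> _check_targets([1, 0, 1], [2, 0, 1])
#     ('multiclass', array([1, 0, 1]), array([2, 0, 1]))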
def _weighted_sum(sample_score, sample_weight, normalize=False):
if normalize:
return np.average(sample_score, weights=sample_weight)
elif sample_weight is not None:
return np.dot(sample_score, sample_weight)
else:
return sample_score.sum()
def accuracy_score(y_true, y_pred, normalize=True, sample_weight=None):
"""Accuracy classification score.
In multilabel classification, this function computes subset accuracy:
the set of labels predicted for a sample must *exactly* match the
corresponding set of labels in y_true.
Read more in the :ref:`User Guide <accuracy_score>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the number of correctly classified samples.
Otherwise, return the fraction of correctly classified samples.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
score : float
If ``normalize == True``, return the fraction of correctly
classified samples (float), else returns the number of correctly
classified samples (int).
The best performance is 1 with ``normalize == True`` and the number
of samples with ``normalize == False``.
See also
--------
jaccard_score, hamming_loss, zero_one_loss
Notes
-----
In binary and multiclass classification, this function is equal
to the ``jaccard_score`` function.
Examples
--------
>>> from sklearn.metrics import accuracy_score
>>> y_pred = [0, 2, 1, 3]
>>> y_true = [0, 1, 2, 3]
>>> accuracy_score(y_true, y_pred)
0.5
>>> accuracy_score(y_true, y_pred, normalize=False)
2
In the multilabel case with binary label indicators:
>>> import numpy as np
>>> accuracy_score(np.array([[0, 1], [1, 1]]), np.ones((2, 2)))
0.5
"""
# Compute accuracy for each possible representation
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
check_consistent_length(y_true, y_pred, sample_weight)
if y_type.startswith('multilabel'):
differing_labels = count_nonzero(y_true - y_pred, axis=1)
score = differing_labels == 0
else:
score = y_true == y_pred
return _weighted_sum(score, sample_weight, normalize)
def confusion_matrix(y_true, y_pred, labels=None, sample_weight=None):
"""Compute confusion matrix to evaluate the accuracy of a classification
By definition a confusion matrix :math:`C` is such that :math:`C_{i, j}`
is equal to the number of observations known to be in group :math:`i` and
predicted to be in group :math:`j`.
Thus in binary classification, the count of true negatives is
:math:`C_{0,0}`, false negatives is :math:`C_{1,0}`, true positives is
:math:`C_{1,1}` and false positives is :math:`C_{0,1}`.
Read more in the :ref:`User Guide <confusion_matrix>`.
Parameters
----------
y_true : array, shape = [n_samples]
Ground truth (correct) target values.
y_pred : array, shape = [n_samples]
Estimated targets as returned by a classifier.
labels : array, shape = [n_classes], optional
List of labels to index the matrix. This may be used to reorder
or select a subset of labels.
If none is given, those that appear at least once
in ``y_true`` or ``y_pred`` are used in sorted order.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
C : ndarray of shape (n_classes, n_classes)
Confusion matrix
References
----------
.. [1] `Wikipedia entry for the Confusion matrix
<https://en.wikipedia.org/wiki/Confusion_matrix>`_
(Wikipedia and other references may use a different
convention for axes)
Examples
--------
>>> from sklearn.metrics import confusion_matrix
>>> y_true = [2, 0, 2, 2, 0, 1]
>>> y_pred = [0, 0, 2, 2, 0, 2]
>>> confusion_matrix(y_true, y_pred)
array([[2, 0, 0],
[0, 0, 1],
[1, 0, 2]])
>>> y_true = ["cat", "ant", "cat", "cat", "ant", "bird"]
>>> y_pred = ["ant", "ant", "cat", "cat", "ant", "cat"]
>>> confusion_matrix(y_true, y_pred, labels=["ant", "bird", "cat"])
array([[2, 0, 0],
[0, 0, 1],
[1, 0, 2]])
In the binary case, we can extract true positives, etc as follows:
>>> tn, fp, fn, tp = confusion_matrix([0, 1, 0, 1], [1, 1, 1, 0]).ravel()
>>> (tn, fp, fn, tp)
(0, 2, 1, 1)
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type not in ("binary", "multiclass"):
raise ValueError("%s is not supported" % y_type)
if labels is None:
labels = unique_labels(y_true, y_pred)
else:
labels = np.asarray(labels)
if np.all([l not in y_true for l in labels]):
raise ValueError("At least one label specified must be in y_true")
if sample_weight is None:
sample_weight = np.ones(y_true.shape[0], dtype=np.int64)
else:
sample_weight = np.asarray(sample_weight)
check_consistent_length(y_true, y_pred, sample_weight)
n_labels = labels.size
label_to_ind = {y: x for x, y in enumerate(labels)}
# convert yt, yp into index
y_pred = np.array([label_to_ind.get(x, n_labels + 1) for x in y_pred])
y_true = np.array([label_to_ind.get(x, n_labels + 1) for x in y_true])
# intersect y_pred, y_true with labels, eliminate items not in labels
ind = np.logical_and(y_pred < n_labels, y_true < n_labels)
y_pred = y_pred[ind]
y_true = y_true[ind]
# also eliminate weights of eliminated items
sample_weight = sample_weight[ind]
# Choose the accumulator dtype to always have high precision
if sample_weight.dtype.kind in {'i', 'u', 'b'}:
dtype = np.int64
else:
dtype = np.float64
CM = coo_matrix((sample_weight, (y_true, y_pred)),
shape=(n_labels, n_labels), dtype=dtype,
).toarray()
return CM
def multilabel_confusion_matrix(y_true, y_pred, sample_weight=None,
labels=None, samplewise=False):
"""Compute a confusion matrix for each class or sample
.. versionadded:: 0.21
Compute class-wise (default) or sample-wise (samplewise=True) multilabel
confusion matrix to evaluate the accuracy of a classification, and output
confusion matrices for each class or sample.
In multilabel confusion matrix :math:`MCM`, the count of true negatives
is :math:`MCM_{:,0,0}`, false negatives is :math:`MCM_{:,1,0}`,
true positives is :math:`MCM_{:,1,1}` and false positives is
:math:`MCM_{:,0,1}`.
Multiclass data will be treated as if binarized under a one-vs-rest
transformation. Returned confusion matrices will be in the order of
sorted unique labels in the union of (y_true, y_pred).
Read more in the :ref:`User Guide <multilabel_confusion_matrix>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
of shape (n_samples, n_outputs) or (n_samples,)
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
of shape (n_samples, n_outputs) or (n_samples,)
Estimated targets as returned by a classifier
sample_weight : array-like of shape (n_samples,), default=None
Sample weights
labels : array-like
A list of classes or column indices to select some (or to force
inclusion of classes absent from the data)
samplewise : bool, default=False
In the multilabel case, this calculates a confusion matrix per sample
Returns
-------
multi_confusion : array, shape (n_outputs, 2, 2)
A 2x2 confusion matrix corresponding to each output in the input.
When calculating class-wise multi_confusion (default), then
n_outputs = n_labels; when calculating sample-wise multi_confusion
(samplewise=True), n_outputs = n_samples. If ``labels`` is defined,
the results will be returned in the order specified in ``labels``,
otherwise the results will be returned in sorted order by default.
See also
--------
confusion_matrix
Notes
-----
The multilabel_confusion_matrix calculates class-wise or sample-wise
multilabel confusion matrices, and in multiclass tasks, labels are
binarized under a one-vs-rest way; while confusion_matrix calculates
one confusion matrix for confusion between every two classes.
Examples
--------
Multilabel-indicator case:
>>> import numpy as np
>>> from sklearn.metrics import multilabel_confusion_matrix
>>> y_true = np.array([[1, 0, 1],
... [0, 1, 0]])
>>> y_pred = np.array([[1, 0, 0],
... [0, 1, 1]])
>>> multilabel_confusion_matrix(y_true, y_pred)
array([[[1, 0],
[0, 1]],
<BLANKLINE>
[[1, 0],
[0, 1]],
<BLANKLINE>
[[0, 1],
[1, 0]]])
Multiclass case:
>>> y_true = ["cat", "ant", "cat", "cat", "ant", "bird"]
>>> y_pred = ["ant", "ant", "cat", "cat", "ant", "cat"]
>>> multilabel_confusion_matrix(y_true, y_pred,
... labels=["ant", "bird", "cat"])
array([[[3, 1],
[0, 2]],
<BLANKLINE>
[[5, 0],
[1, 0]],
<BLANKLINE>
[[2, 1],
[1, 2]]])
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if sample_weight is not None:
sample_weight = column_or_1d(sample_weight)
check_consistent_length(y_true, y_pred, sample_weight)
if y_type not in ("binary", "multiclass", "multilabel-indicator"):
raise ValueError("%s is not supported" % y_type)
present_labels = unique_labels(y_true, y_pred)
if labels is None:
labels = present_labels
n_labels = None
else:
n_labels = len(labels)
labels = np.hstack([labels, np.setdiff1d(present_labels, labels,
assume_unique=True)])
if y_true.ndim == 1:
if samplewise:
raise ValueError("Samplewise metrics are not available outside of "
"multilabel classification.")
le = LabelEncoder()
le.fit(labels)
y_true = le.transform(y_true)
y_pred = le.transform(y_pred)
sorted_labels = le.classes_
# labels are now from 0 to len(labels) - 1 -> use bincount
tp = y_true == y_pred
tp_bins = y_true[tp]
if sample_weight is not None:
tp_bins_weights = np.asarray(sample_weight)[tp]
else:
tp_bins_weights = None
if len(tp_bins):
tp_sum = np.bincount(tp_bins, weights=tp_bins_weights,
minlength=len(labels))
else:
# Pathological case
true_sum = pred_sum = tp_sum = np.zeros(len(labels))
if len(y_pred):
pred_sum = np.bincount(y_pred, weights=sample_weight,
minlength=len(labels))
if len(y_true):
true_sum = np.bincount(y_true, weights=sample_weight,
minlength=len(labels))
# Retain only selected labels
indices = np.searchsorted(sorted_labels, labels[:n_labels])
tp_sum = tp_sum[indices]
true_sum = true_sum[indices]
pred_sum = pred_sum[indices]
else:
sum_axis = 1 if samplewise else 0
# All labels are index integers for multilabel.
# Select labels:
if not np.array_equal(labels, present_labels):
if np.max(labels) > np.max(present_labels):
raise ValueError('All labels must be in [0, n labels) for '
'multilabel targets. '
'Got %d > %d' %
(np.max(labels), np.max(present_labels)))
if np.min(labels) < 0:
raise ValueError('All labels must be in [0, n labels) for '
'multilabel targets. '
'Got %d < 0' % np.min(labels))
if n_labels is not None:
y_true = y_true[:, labels[:n_labels]]
y_pred = y_pred[:, labels[:n_labels]]
# calculate weighted counts
true_and_pred = y_true.multiply(y_pred)
tp_sum = count_nonzero(true_and_pred, axis=sum_axis,
sample_weight=sample_weight)
pred_sum = count_nonzero(y_pred, axis=sum_axis,
sample_weight=sample_weight)
true_sum = count_nonzero(y_true, axis=sum_axis,
sample_weight=sample_weight)
fp = pred_sum - tp_sum
fn = true_sum - tp_sum
tp = tp_sum
if sample_weight is not None and samplewise:
sample_weight = np.array(sample_weight)
tp = np.array(tp)
fp = np.array(fp)
fn = np.array(fn)
tn = sample_weight * y_true.shape[1] - tp - fp - fn
elif sample_weight is not None:
tn = sum(sample_weight) - tp - fp - fn
elif samplewise:
tn = y_true.shape[1] - tp - fp - fn
else:
tn = y_true.shape[0] - tp - fp - fn
return np.array([tn, fp, fn, tp]).T.reshape(-1, 2, 2)
def cohen_kappa_score(y1, y2, labels=None, weights=None, sample_weight=None):
r"""Cohen's kappa: a statistic that measures inter-annotator agreement.
This function computes Cohen's kappa [1]_, a score that expresses the level
of agreement between two annotators on a classification problem. It is
defined as
.. math::
\kappa = (p_o - p_e) / (1 - p_e)
where :math:`p_o` is the empirical probability of agreement on the label
assigned to any sample (the observed agreement ratio), and :math:`p_e` is
the expected agreement when both annotators assign labels randomly.
:math:`p_e` is estimated using a per-annotator empirical prior over the
class labels [2]_.
Read more in the :ref:`User Guide <cohen_kappa>`.
Parameters
----------
y1 : array, shape = [n_samples]
Labels assigned by the first annotator.
y2 : array, shape = [n_samples]
Labels assigned by the second annotator. The kappa statistic is
symmetric, so swapping ``y1`` and ``y2`` doesn't change the value.
labels : array, shape = [n_classes], optional
List of labels to index the matrix. This may be used to select a
subset of labels. If None, all labels that appear at least once in
``y1`` or ``y2`` are used.
weights : str, optional
Weighting type to calculate the score. None means no weighted;
"linear" means linear weighted; "quadratic" means quadratic weighted.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
kappa : float
The kappa statistic, which is a number between -1 and 1. The maximum
value means complete agreement; zero or lower means chance agreement.
References
----------
.. [1] J. Cohen (1960). "A coefficient of agreement for nominal scales".
Educational and Psychological Measurement 20(1):37-46.
doi:10.1177/001316446002000104.
.. [2] `R. Artstein and M. Poesio (2008). "Inter-coder agreement for
computational linguistics". Computational Linguistics 34(4):555-596.
<https://www.mitpressjournals.org/doi/pdf/10.1162/coli.07-034-R2>`_
.. [3] `Wikipedia entry for the Cohen's kappa.
<https://en.wikipedia.org/wiki/Cohen%27s_kappa>`_
"""
confusion = confusion_matrix(y1, y2, labels=labels,
sample_weight=sample_weight)
n_classes = confusion.shape[0]
sum0 = np.sum(confusion, axis=0)
sum1 = np.sum(confusion, axis=1)
expected = np.outer(sum0, sum1) / np.sum(sum0)
if weights is None:
        w_mat = np.ones([n_classes, n_classes], dtype=int)
w_mat.flat[:: n_classes + 1] = 0
elif weights == "linear" or weights == "quadratic":
        w_mat = np.zeros([n_classes, n_classes], dtype=int)
w_mat += np.arange(n_classes)
if weights == "linear":
w_mat = np.abs(w_mat - w_mat.T)
else:
w_mat = (w_mat - w_mat.T) ** 2
else:
raise ValueError("Unknown kappa weighting type.")
k = np.sum(w_mat * confusion) / np.sum(w_mat * expected)
return 1 - k
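# --- Illustrative note (not part of the scikit-learn source) ---
# A hedged, hand-worked instance of the kappa formula above.  Suppose two
# annotators label five items as [0, 0, 1, 1, 1] and [0, 0, 1, 1, 0]:
# observed agreement p_o = 4/5 = 0.8, the marginals give expected agreement
# p_e = (2/5)*(3/5) + (3/5)*(2/5) = 0.48, and therefore
# kappa = (0.8 - 0.48) / (1 - 0.48) is approximately 0.615, which matches:
#
#     >>> cohen_kappa_score([0, 0, 1, 1, 1], [0, 0, 1, 1, 0])
#     0.615...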
def jaccard_similarity_score(y_true, y_pred, normalize=True,
sample_weight=None):
"""Jaccard similarity coefficient score
.. deprecated:: 0.21
This is deprecated to be removed in 0.23, since its handling of
binary and multiclass inputs was broken. `jaccard_score` has an API
that is consistent with precision_score, f_score, etc.
Read more in the :ref:`User Guide <jaccard_similarity_score>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the sum of the Jaccard similarity coefficient
over the sample set. Otherwise, return the average of Jaccard
similarity coefficient.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
score : float
If ``normalize == True``, return the average Jaccard similarity
coefficient, else it returns the sum of the Jaccard similarity
coefficient over the sample set.
The best performance is 1 with ``normalize == True`` and the number
of samples with ``normalize == False``.
See also
--------
accuracy_score, hamming_loss, zero_one_loss
Notes
-----
In binary and multiclass classification, this function is equivalent
to the ``accuracy_score``. It differs in the multilabel classification
problem.
References
----------
.. [1] `Wikipedia entry for the Jaccard index
<https://en.wikipedia.org/wiki/Jaccard_index>`_
"""
warnings.warn('jaccard_similarity_score has been deprecated and replaced '
'with jaccard_score. It will be removed in version 0.23. '
'This implementation has surprising behavior for binary '
'and multiclass classification tasks.',
FutureWarning)
# Compute accuracy for each possible representation
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
check_consistent_length(y_true, y_pred, sample_weight)
if y_type.startswith('multilabel'):
with np.errstate(divide='ignore', invalid='ignore'):
# oddly, we may get an "invalid" rather than a "divide" error here
pred_or_true = count_nonzero(y_true + y_pred, axis=1)
pred_and_true = count_nonzero(y_true.multiply(y_pred), axis=1)
score = pred_and_true / pred_or_true
score[pred_or_true == 0.0] = 1.0
else:
score = y_true == y_pred
return _weighted_sum(score, sample_weight, normalize)
def jaccard_score(y_true, y_pred, labels=None, pos_label=1,
average='binary', sample_weight=None):
"""Jaccard similarity coefficient score
The Jaccard index [1], or Jaccard similarity coefficient, defined as
the size of the intersection divided by the size of the union of two label
sets, is used to compare set of predicted labels for a sample to the
corresponding set of labels in ``y_true``.
Read more in the :ref:`User Guide <jaccard_similarity_score>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'`` and the data is binary.
If the data are multiclass or multilabel, this will be ignored;
setting ``labels=[pos_label]`` and ``average != 'binary'`` will report
scores for that label only.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification).
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
score : float (if average is not None) or array of floats, shape =\
[n_unique_labels]
See also
--------
accuracy_score, f_score, multilabel_confusion_matrix
Notes
-----
:func:`jaccard_score` may be a poor metric if there are no
positives for some samples or classes. Jaccard is undefined if there are
no true or predicted labels, and our implementation will return a score
of 0 with a warning.
References
----------
.. [1] `Wikipedia entry for the Jaccard index
<https://en.wikipedia.org/wiki/Jaccard_index>`_
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import jaccard_score
>>> y_true = np.array([[0, 1, 1],
... [1, 1, 0]])
>>> y_pred = np.array([[1, 1, 1],
... [1, 0, 0]])
In the binary case:
>>> jaccard_score(y_true[0], y_pred[0])
0.6666...
In the multilabel case:
>>> jaccard_score(y_true, y_pred, average='samples')
0.5833...
>>> jaccard_score(y_true, y_pred, average='macro')
0.6666...
>>> jaccard_score(y_true, y_pred, average=None)
array([0.5, 0.5, 1. ])
In the multiclass case:
>>> y_pred = [0, 2, 1, 2]
>>> y_true = [0, 1, 2, 2]
>>> jaccard_score(y_true, y_pred, average=None)
array([1. , 0. , 0.33...])
"""
labels = _check_set_wise_labels(y_true, y_pred, average, labels,
pos_label)
samplewise = average == 'samples'
MCM = multilabel_confusion_matrix(y_true, y_pred,
sample_weight=sample_weight,
labels=labels, samplewise=samplewise)
numerator = MCM[:, 1, 1]
denominator = MCM[:, 1, 1] + MCM[:, 0, 1] + MCM[:, 1, 0]
if average == 'micro':
numerator = np.array([numerator.sum()])
denominator = np.array([denominator.sum()])
jaccard = _prf_divide(numerator, denominator, 'jaccard',
'true or predicted', average, ('jaccard',))
if average is None:
return jaccard
if average == 'weighted':
weights = MCM[:, 1, 0] + MCM[:, 1, 1]
if not np.any(weights):
# numerator is 0, and warning should have already been issued
weights = None
elif average == 'samples' and sample_weight is not None:
weights = sample_weight
else:
weights = None
return np.average(jaccard, weights=weights)
def matthews_corrcoef(y_true, y_pred, sample_weight=None):
"""Compute the Matthews correlation coefficient (MCC)
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary and multiclass classifications. It takes
into account true and false positives and negatives and is generally
regarded as a balanced measure which can be used even if the classes are of
very different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
Binary and multiclass labels are supported. Only in the binary case does
this relate to information about true and false positives and negatives.
See references below.
Read more in the :ref:`User Guide <matthews_corrcoef>`.
Parameters
----------
y_true : array, shape = [n_samples]
Ground truth (correct) target values.
y_pred : array, shape = [n_samples]
Estimated targets as returned by a classifier.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
mcc : float
The Matthews correlation coefficient (+1 represents a perfect
        prediction, 0 an average random prediction and -1 an inverse
prediction).
References
----------
.. [1] `Baldi, Brunak, Chauvin, Andersen and Nielsen, (2000). Assessing the
accuracy of prediction algorithms for classification: an overview
<https://doi.org/10.1093/bioinformatics/16.5.412>`_
.. [2] `Wikipedia entry for the Matthews Correlation Coefficient
<https://en.wikipedia.org/wiki/Matthews_correlation_coefficient>`_
.. [3] `Gorodkin, (2004). Comparing two K-category assignments by a
K-category correlation coefficient
<https://www.sciencedirect.com/science/article/pii/S1476927104000799>`_
.. [4] `Jurman, Riccadonna, Furlanello, (2012). A Comparison of MCC and CEN
Error Measures in MultiClass Prediction
<https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0041882>`_
Examples
--------
>>> from sklearn.metrics import matthews_corrcoef
>>> y_true = [+1, +1, +1, -1]
>>> y_pred = [+1, -1, +1, +1]
>>> matthews_corrcoef(y_true, y_pred)
-0.33...
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
check_consistent_length(y_true, y_pred, sample_weight)
if y_type not in {"binary", "multiclass"}:
raise ValueError("%s is not supported" % y_type)
lb = LabelEncoder()
lb.fit(np.hstack([y_true, y_pred]))
y_true = lb.transform(y_true)
y_pred = lb.transform(y_pred)
C = confusion_matrix(y_true, y_pred, sample_weight=sample_weight)
t_sum = C.sum(axis=1, dtype=np.float64)
p_sum = C.sum(axis=0, dtype=np.float64)
n_correct = np.trace(C, dtype=np.float64)
n_samples = p_sum.sum()
cov_ytyp = n_correct * n_samples - np.dot(t_sum, p_sum)
cov_ypyp = n_samples ** 2 - np.dot(p_sum, p_sum)
cov_ytyt = n_samples ** 2 - np.dot(t_sum, t_sum)
mcc = cov_ytyp / np.sqrt(cov_ytyt * cov_ypyp)
if np.isnan(mcc):
return 0.
else:
return mcc
def zero_one_loss(y_true, y_pred, normalize=True, sample_weight=None):
"""Zero-one classification loss.
If normalize is ``True``, return the fraction of misclassifications
(float), else it returns the number of misclassifications (int). The best
performance is 0.
Read more in the :ref:`User Guide <zero_one_loss>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the number of misclassifications.
Otherwise, return the fraction of misclassifications.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
loss : float or int,
If ``normalize == True``, return the fraction of misclassifications
(float), else it returns the number of misclassifications (int).
Notes
-----
In multilabel classification, the zero_one_loss function corresponds to
the subset zero-one loss: for each sample, the entire set of labels must be
correctly predicted, otherwise the loss for that sample is equal to one.
See also
--------
accuracy_score, hamming_loss, jaccard_score
Examples
--------
>>> from sklearn.metrics import zero_one_loss
>>> y_pred = [1, 2, 3, 4]
>>> y_true = [2, 2, 3, 4]
>>> zero_one_loss(y_true, y_pred)
0.25
>>> zero_one_loss(y_true, y_pred, normalize=False)
1
In the multilabel case with binary label indicators:
>>> import numpy as np
>>> zero_one_loss(np.array([[0, 1], [1, 1]]), np.ones((2, 2)))
0.5
"""
score = accuracy_score(y_true, y_pred,
normalize=normalize,
sample_weight=sample_weight)
if normalize:
return 1 - score
else:
if sample_weight is not None:
n_samples = np.sum(sample_weight)
else:
n_samples = _num_samples(y_true)
return n_samples - score
def f1_score(y_true, y_pred, labels=None, pos_label=1, average='binary',
sample_weight=None, zero_division="warn"):
"""Compute the F1 score, also known as balanced F-score or F-measure
The F1 score can be interpreted as a weighted average of the precision and
recall, where an F1 score reaches its best value at 1 and worst score at 0.
The relative contribution of precision and recall to the F1 score are
equal. The formula for the F1 score is::
F1 = 2 * (precision * recall) / (precision + recall)
In the multi-class and multi-label case, this is the average of
the F1 score of each class with weighting depending on the ``average``
parameter.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
.. versionchanged:: 0.17
parameter *labels* improved for multiclass problem.
pos_label : str or int, 1 by default
The class to report if ``average='binary'`` and the data is binary.
If the data are multiclass or multilabel, this will be ignored;
setting ``labels=[pos_label]`` and ``average != 'binary'`` will report
scores for that label only.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
zero_division : "warn", 0 or 1, default="warn"
Sets the value to return when there is a zero division, i.e. when all
predictions and labels are negative. If set to "warn", this acts as 0,
but warnings are also raised.
Returns
-------
f1_score : float or array of float, shape = [n_unique_labels]
F1 score of the positive class in binary classification or weighted
average of the F1 scores of each class for the multiclass task.
See also
--------
fbeta_score, precision_recall_fscore_support, jaccard_score,
multilabel_confusion_matrix
References
----------
.. [1] `Wikipedia entry for the F1-score
<https://en.wikipedia.org/wiki/F1_score>`_
Examples
--------
>>> from sklearn.metrics import f1_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> f1_score(y_true, y_pred, average='macro')
0.26...
>>> f1_score(y_true, y_pred, average='micro')
0.33...
>>> f1_score(y_true, y_pred, average='weighted')
0.26...
>>> f1_score(y_true, y_pred, average=None)
array([0.8, 0. , 0. ])
>>> y_true = [0, 0, 0, 0, 0, 0]
>>> y_pred = [0, 0, 0, 0, 0, 0]
>>> f1_score(y_true, y_pred, zero_division=1)
1.0...
Notes
-----
When ``true positive + false positive == 0``, precision is undefined;
When ``true positive + false negative == 0``, recall is undefined.
In such cases, by default the metric will be set to 0, as will f-score,
and ``UndefinedMetricWarning`` will be raised. This behavior can be
modified with ``zero_division``.
"""
return fbeta_score(y_true, y_pred, 1, labels=labels,
pos_label=pos_label, average=average,
sample_weight=sample_weight,
zero_division=zero_division)
def fbeta_score(y_true, y_pred, beta, labels=None, pos_label=1,
average='binary', sample_weight=None, zero_division="warn"):
"""Compute the F-beta score
The F-beta score is the weighted harmonic mean of precision and recall,
reaching its optimal value at 1 and its worst value at 0.
The `beta` parameter determines the weight of recall in the combined
score. ``beta < 1`` lends more weight to precision, while ``beta > 1``
favors recall (``beta -> 0`` considers only precision, ``beta -> +inf``
only recall).
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
beta : float
Determines the weight of recall in the combined score.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
.. versionchanged:: 0.17
parameter *labels* improved for multiclass problem.
pos_label : str or int, 1 by default
The class to report if ``average='binary'`` and the data is binary.
If the data are multiclass or multilabel, this will be ignored;
setting ``labels=[pos_label]`` and ``average != 'binary'`` will report
scores for that label only.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
zero_division : "warn", 0 or 1, default="warn"
Sets the value to return when there is a zero division, i.e. when all
predictions and labels are negative. If set to "warn", this acts as 0,
but warnings are also raised.
Returns
-------
fbeta_score : float (if average is not None) or array of float, shape =\
[n_unique_labels]
F-beta score of the positive class in binary classification or weighted
average of the F-beta score of each class for the multiclass task.
See also
--------
precision_recall_fscore_support, multilabel_confusion_matrix
References
----------
.. [1] R. Baeza-Yates and B. Ribeiro-Neto (2011).
Modern Information Retrieval. Addison Wesley, pp. 327-328.
.. [2] `Wikipedia entry for the F1-score
<https://en.wikipedia.org/wiki/F1_score>`_
Examples
--------
>>> from sklearn.metrics import fbeta_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> fbeta_score(y_true, y_pred, average='macro', beta=0.5)
0.23...
>>> fbeta_score(y_true, y_pred, average='micro', beta=0.5)
0.33...
>>> fbeta_score(y_true, y_pred, average='weighted', beta=0.5)
0.23...
>>> fbeta_score(y_true, y_pred, average=None, beta=0.5)
array([0.71..., 0. , 0. ])
Notes
-----
When ``true positive + false positive == 0`` or
``true positive + false negative == 0``, f-score returns 0 and raises
``UndefinedMetricWarning``. This behavior can be
modified with ``zero_division``.
"""
_, _, f, _ = precision_recall_fscore_support(y_true, y_pred,
beta=beta,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('f-score',),
sample_weight=sample_weight,
zero_division=zero_division)
return f
def _prf_divide(numerator, denominator, metric,
modifier, average, warn_for, zero_division="warn"):
"""Performs division and handles divide-by-zero.
On zero-division, sets the corresponding result elements equal to
    0 or 1 (according to ``zero_division``). In addition, if
    ``zero_division == "warn"``, a warning is raised.
The metric, modifier and average arguments are used only for determining
an appropriate warning.
"""
mask = denominator == 0.0
denominator = denominator.copy()
denominator[mask] = 1 # avoid infs/nans
result = numerator / denominator
if not np.any(mask):
return result
# if ``zero_division=1``, set those with denominator == 0 equal to 1
result[mask] = 0.0 if zero_division in ["warn", 0] else 1.0
# the user will be removing warnings if zero_division is set to something
# different than its default value. If we are computing only f-score
# the warning will be raised only if precision and recall are ill-defined
if zero_division != "warn" or metric not in warn_for:
return result
# build appropriate warning
# E.g. "Precision and F-score are ill-defined and being set to 0.0 in
# labels with no predicted samples. Use ``zero_division`` parameter to
# control this behavior."
if metric in warn_for and 'f-score' in warn_for:
msg_start = '{0} and F-score are'.format(metric.title())
elif metric in warn_for:
msg_start = '{0} is'.format(metric.title())
elif 'f-score' in warn_for:
msg_start = 'F-score is'
else:
return result
_warn_prf(average, modifier, msg_start, len(result))
return result
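# Editorial sketch (not part of the original scikit-learn source): a tiny
# demonstration of how ``_prf_divide`` maps zero denominators to the
# ``zero_division`` value instead of producing NaN/inf. The helper name is
# hypothetical and exists only for illustration.
def _example_prf_divide_zero_division():
    import numpy as np
    out = _prf_divide(np.array([2.0, 0.0]), np.array([4.0, 0.0]),
                      'precision', 'predicted', average=None,
                      warn_for=(), zero_division=0)
    assert np.allclose(out, [0.5, 0.0])  # the 0/0 entry becomes 0.0, no warning
    return out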
def _warn_prf(average, modifier, msg_start, result_size):
axis0, axis1 = 'sample', 'label'
if average == 'samples':
axis0, axis1 = axis1, axis0
msg = ('{0} ill-defined and being set to 0.0 {{0}} '
'no {1} {2}s. Use `zero_division` parameter to control'
' this behavior.'.format(msg_start, modifier, axis0))
if result_size == 1:
msg = msg.format('due to')
else:
msg = msg.format('in {0}s with'.format(axis1))
warnings.warn(msg, UndefinedMetricWarning, stacklevel=2)
def _check_set_wise_labels(y_true, y_pred, average, labels, pos_label):
"""Validation associated with set-wise metrics
Returns identified labels
"""
average_options = (None, 'micro', 'macro', 'weighted', 'samples')
if average not in average_options and average != 'binary':
raise ValueError('average has to be one of ' +
str(average_options))
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
present_labels = unique_labels(y_true, y_pred)
if average == 'binary':
if y_type == 'binary':
if pos_label not in present_labels:
if len(present_labels) >= 2:
raise ValueError("pos_label=%r is not a valid label: "
"%r" % (pos_label, present_labels))
labels = [pos_label]
else:
average_options = list(average_options)
if y_type == 'multiclass':
average_options.remove('samples')
raise ValueError("Target is %s but average='binary'. Please "
"choose another average setting, one of %r."
% (y_type, average_options))
elif pos_label not in (None, 1):
warnings.warn("Note that pos_label (set to %r) is ignored when "
"average != 'binary' (got %r). You may use "
"labels=[pos_label] to specify a single positive class."
% (pos_label, average), UserWarning)
return labels
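# Editorial sketch (not part of the original scikit-learn source):
# ``average='binary'`` is rejected for multiclass targets, with the error
# message listing the remaining averaging options. Helper name is hypothetical.
def _example_binary_average_rejected():
    try:
        _check_set_wise_labels([0, 1, 2], [0, 2, 1], 'binary', None, 1)
    except ValueError as exc:
        return str(exc)  # "Target is multiclass but average='binary'. ..."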
def precision_recall_fscore_support(y_true, y_pred, beta=1.0, labels=None,
pos_label=1, average=None,
warn_for=('precision', 'recall',
'f-score'),
sample_weight=None,
zero_division="warn"):
"""Compute precision, recall, F-measure and support for each class
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The F-beta score can be interpreted as a weighted harmonic mean of
the precision and recall, where an F-beta score reaches its best
value at 1 and worst score at 0.
The F-beta score weights recall more than precision by a factor of
``beta``. ``beta == 1.0`` means recall and precision are equally important.
The support is the number of occurrences of each class in ``y_true``.
If ``pos_label is None`` and in binary classification, this function
returns the average precision, recall and F-measure if ``average``
is one of ``'micro'``, ``'macro'``, ``'weighted'`` or ``'samples'``.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
beta : float, 1.0 by default
The strength of recall versus precision in the F-score.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'`` and the data is binary.
If the data are multiclass or multilabel, this will be ignored;
setting ``labels=[pos_label]`` and ``average != 'binary'`` will report
scores for that label only.
average : string, [None (default), 'binary', 'micro', 'macro', 'samples', \
'weighted']
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
warn_for : tuple or set, for internal use
This determines which warnings will be made in the case that this
function is being used to return only one of its metrics.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
zero_division : "warn", 0 or 1, default="warn"
Sets the value to return when there is a zero division:
- recall: when there are no positive labels
- precision: when there are no positive predictions
- f-score: both
If set to "warn", this acts as 0, but warnings are also raised.
Returns
-------
precision : float (if average is not None) or array of float, shape =\
[n_unique_labels]
    recall : float (if average is not None) or array of float, shape =\
[n_unique_labels]
fbeta_score : float (if average is not None) or array of float, shape =\
[n_unique_labels]
support : int (if average is not None) or array of int, shape =\
[n_unique_labels]
The number of occurrences of each label in ``y_true``.
References
----------
.. [1] `Wikipedia entry for the Precision and recall
<https://en.wikipedia.org/wiki/Precision_and_recall>`_
.. [2] `Wikipedia entry for the F1-score
<https://en.wikipedia.org/wiki/F1_score>`_
.. [3] `Discriminative Methods for Multi-labeled Classification Advances
in Knowledge Discovery and Data Mining (2004), pp. 22-30 by Shantanu
Godbole, Sunita Sarawagi
<http://www.godbole.net/shantanu/pubs/multilabelsvm-pakdd04.pdf>`_
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import precision_recall_fscore_support
>>> y_true = np.array(['cat', 'dog', 'pig', 'cat', 'dog', 'pig'])
>>> y_pred = np.array(['cat', 'pig', 'dog', 'cat', 'cat', 'dog'])
>>> precision_recall_fscore_support(y_true, y_pred, average='macro')
(0.22..., 0.33..., 0.26..., None)
>>> precision_recall_fscore_support(y_true, y_pred, average='micro')
(0.33..., 0.33..., 0.33..., None)
>>> precision_recall_fscore_support(y_true, y_pred, average='weighted')
(0.22..., 0.33..., 0.26..., None)
It is possible to compute per-label precisions, recalls, F1-scores and
supports instead of averaging:
>>> precision_recall_fscore_support(y_true, y_pred, average=None,
... labels=['pig', 'dog', 'cat'])
(array([0. , 0. , 0.66...]),
array([0., 0., 1.]), array([0. , 0. , 0.8]),
array([2, 2, 2]))
Notes
-----
When ``true positive + false positive == 0``, precision is undefined;
When ``true positive + false negative == 0``, recall is undefined.
In such cases, by default the metric will be set to 0, as will f-score,
and ``UndefinedMetricWarning`` will be raised. This behavior can be
modified with ``zero_division``.
"""
_check_zero_division(zero_division)
if beta < 0:
raise ValueError("beta should be >=0 in the F-beta score")
labels = _check_set_wise_labels(y_true, y_pred, average, labels,
pos_label)
    # Calculate tp_sum, pred_sum, true_sum
samplewise = average == 'samples'
MCM = multilabel_confusion_matrix(y_true, y_pred,
sample_weight=sample_weight,
labels=labels, samplewise=samplewise)
tp_sum = MCM[:, 1, 1]
pred_sum = tp_sum + MCM[:, 0, 1]
true_sum = tp_sum + MCM[:, 1, 0]
if average == 'micro':
tp_sum = np.array([tp_sum.sum()])
pred_sum = np.array([pred_sum.sum()])
true_sum = np.array([true_sum.sum()])
# Finally, we have all our sufficient statistics. Divide! #
beta2 = beta ** 2
# Divide, and on zero-division, set scores and/or warn according to
# zero_division:
precision = _prf_divide(tp_sum, pred_sum, 'precision',
'predicted', average, warn_for, zero_division)
recall = _prf_divide(tp_sum, true_sum, 'recall',
'true', average, warn_for, zero_division)
# warn for f-score only if zero_division is warn, it is in warn_for
# and BOTH prec and rec are ill-defined
if zero_division == "warn" and ("f-score",) == warn_for:
if (pred_sum[true_sum == 0] == 0).any():
_warn_prf(
average, "true nor predicted", 'F-score is', len(true_sum)
)
# if tp == 0 F will be 1 only if all predictions are zero, all labels are
    # zero, and zero_division=1. In all other cases, 0.
if np.isposinf(beta):
f_score = recall
else:
denom = beta2 * precision + recall
denom[denom == 0.] = 1 # avoid division by 0
f_score = (1 + beta2) * precision * recall / denom
# Average the results
if average == 'weighted':
weights = true_sum
if weights.sum() == 0:
zero_division_value = 0.0 if zero_division in ["warn", 0] else 1.0
# precision is zero_division if there are no positive predictions
# recall is zero_division if there are no positive labels
# fscore is zero_division if all labels AND predictions are
# negative
return (zero_division_value if pred_sum.sum() == 0 else 0,
zero_division_value,
zero_division_value if pred_sum.sum() == 0 else 0,
None)
elif average == 'samples':
weights = sample_weight
else:
weights = None
if average is not None:
assert average != 'binary' or len(precision) == 1
precision = np.average(precision, weights=weights)
recall = np.average(recall, weights=weights)
f_score = np.average(f_score, weights=weights)
true_sum = None # return no support
return precision, recall, f_score, true_sum
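# Editorial sketch (not part of the original scikit-learn source): with
# ``average='micro'`` on a plain multiclass problem the pooled FP and FN
# counts are equal, so precision, recall and F-score all collapse to the
# accuracy. Helper name is hypothetical, for illustration only.
def _example_micro_average_identity():
    import numpy as np
    y_true = [0, 1, 2, 0, 1, 2]
    y_pred = [0, 2, 1, 0, 0, 1]
    p, r, f, _ = precision_recall_fscore_support(y_true, y_pred,
                                                 average='micro')
    assert np.isclose(p, r) and np.isclose(r, f)  # all 1/3 here
    return p, r, f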
def precision_score(y_true, y_pred, labels=None, pos_label=1,
average='binary', sample_weight=None,
zero_division="warn"):
"""Compute the precision
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The best value is 1 and the worst value is 0.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
.. versionchanged:: 0.17
parameter *labels* improved for multiclass problem.
pos_label : str or int, 1 by default
The class to report if ``average='binary'`` and the data is binary.
If the data are multiclass or multilabel, this will be ignored;
setting ``labels=[pos_label]`` and ``average != 'binary'`` will report
scores for that label only.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
zero_division : "warn", 0 or 1, default="warn"
Sets the value to return when there is a zero division. If set to
"warn", this acts as 0, but warnings are also raised.
Returns
-------
precision : float (if average is not None) or array of float, shape =\
[n_unique_labels]
Precision of the positive class in binary classification or weighted
average of the precision of each class for the multiclass task.
See also
--------
precision_recall_fscore_support, multilabel_confusion_matrix
Examples
--------
>>> from sklearn.metrics import precision_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> precision_score(y_true, y_pred, average='macro')
0.22...
>>> precision_score(y_true, y_pred, average='micro')
0.33...
>>> precision_score(y_true, y_pred, average='weighted')
0.22...
>>> precision_score(y_true, y_pred, average=None)
array([0.66..., 0. , 0. ])
>>> y_pred = [0, 0, 0, 0, 0, 0]
>>> precision_score(y_true, y_pred, average=None)
array([0.33..., 0. , 0. ])
>>> precision_score(y_true, y_pred, average=None, zero_division=1)
array([0.33..., 1. , 1. ])
Notes
-----
When ``true positive + false positive == 0``, precision returns 0 and
raises ``UndefinedMetricWarning``. This behavior can be
modified with ``zero_division``.
"""
p, _, _, _ = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('precision',),
sample_weight=sample_weight,
zero_division=zero_division)
return p
def recall_score(y_true, y_pred, labels=None, pos_label=1, average='binary',
sample_weight=None, zero_division="warn"):
"""Compute the recall
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The best value is 1 and the worst value is 0.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
.. versionchanged:: 0.17
parameter *labels* improved for multiclass problem.
pos_label : str or int, 1 by default
The class to report if ``average='binary'`` and the data is binary.
If the data are multiclass or multilabel, this will be ignored;
setting ``labels=[pos_label]`` and ``average != 'binary'`` will report
scores for that label only.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
zero_division : "warn", 0 or 1, default="warn"
Sets the value to return when there is a zero division. If set to
"warn", this acts as 0, but warnings are also raised.
Returns
-------
recall : float (if average is not None) or array of float, shape =\
[n_unique_labels]
Recall of the positive class in binary classification or weighted
average of the recall of each class for the multiclass task.
See also
--------
precision_recall_fscore_support, balanced_accuracy_score,
multilabel_confusion_matrix
Examples
--------
>>> from sklearn.metrics import recall_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> recall_score(y_true, y_pred, average='macro')
0.33...
>>> recall_score(y_true, y_pred, average='micro')
0.33...
>>> recall_score(y_true, y_pred, average='weighted')
0.33...
>>> recall_score(y_true, y_pred, average=None)
array([1., 0., 0.])
>>> y_true = [0, 0, 0, 0, 0, 0]
>>> recall_score(y_true, y_pred, average=None)
array([0.5, 0. , 0. ])
>>> recall_score(y_true, y_pred, average=None, zero_division=1)
array([0.5, 1. , 1. ])
Notes
-----
When ``true positive + false negative == 0``, recall returns 0 and raises
``UndefinedMetricWarning``. This behavior can be modified with
``zero_division``.
"""
_, r, _, _ = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('recall',),
sample_weight=sample_weight,
zero_division=zero_division)
return r
def balanced_accuracy_score(y_true, y_pred, sample_weight=None,
adjusted=False):
"""Compute the balanced accuracy
    The balanced accuracy in binary and multiclass classification problems is
    used to deal with imbalanced datasets. It is defined as the average of recall
obtained on each class.
The best value is 1 and the worst value is 0 when ``adjusted=False``.
Read more in the :ref:`User Guide <balanced_accuracy_score>`.
Parameters
----------
y_true : 1d array-like
Ground truth (correct) target values.
y_pred : 1d array-like
Estimated targets as returned by a classifier.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
adjusted : bool, default=False
When true, the result is adjusted for chance, so that random
performance would score 0, and perfect performance scores 1.
Returns
-------
balanced_accuracy : float
See also
--------
recall_score, roc_auc_score
Notes
-----
Some literature promotes alternative definitions of balanced accuracy. Our
definition is equivalent to :func:`accuracy_score` with class-balanced
sample weights, and shares desirable properties with the binary case.
See the :ref:`User Guide <balanced_accuracy_score>`.
References
----------
.. [1] Brodersen, K.H.; Ong, C.S.; Stephan, K.E.; Buhmann, J.M. (2010).
The balanced accuracy and its posterior distribution.
Proceedings of the 20th International Conference on Pattern
Recognition, 3121-24.
.. [2] John. D. Kelleher, Brian Mac Namee, Aoife D'Arcy, (2015).
`Fundamentals of Machine Learning for Predictive Data Analytics:
Algorithms, Worked Examples, and Case Studies
<https://mitpress.mit.edu/books/fundamentals-machine-learning-predictive-data-analytics>`_.
Examples
--------
>>> from sklearn.metrics import balanced_accuracy_score
>>> y_true = [0, 1, 0, 0, 1, 0]
>>> y_pred = [0, 1, 0, 0, 0, 1]
>>> balanced_accuracy_score(y_true, y_pred)
0.625
"""
C = confusion_matrix(y_true, y_pred, sample_weight=sample_weight)
with np.errstate(divide='ignore', invalid='ignore'):
per_class = np.diag(C) / C.sum(axis=1)
if np.any(np.isnan(per_class)):
warnings.warn('y_pred contains classes not in y_true')
per_class = per_class[~np.isnan(per_class)]
score = np.mean(per_class)
if adjusted:
n_classes = len(per_class)
chance = 1 / n_classes
score -= chance
score /= 1 - chance
return score
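# Editorial sketch (not part of the original scikit-learn source): for the
# docstring example above the per-class recalls are 0.75 and 0.5, giving the
# raw score 0.625; with ``adjusted=True`` the chance level of 0.5 is rescaled
# away, giving 0.25. Helper name is hypothetical, for illustration only.
def _example_balanced_accuracy_adjusted():
    import numpy as np
    y_true = [0, 1, 0, 0, 1, 0]
    y_pred = [0, 1, 0, 0, 0, 1]
    raw = balanced_accuracy_score(y_true, y_pred)
    adjusted = balanced_accuracy_score(y_true, y_pred, adjusted=True)
    assert np.isclose(raw, 0.625) and np.isclose(adjusted, 0.25)
    return raw, adjusted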
def classification_report(y_true, y_pred, labels=None, target_names=None,
sample_weight=None, digits=2, output_dict=False,
zero_division="warn"):
"""Build a text report showing the main classification metrics
Read more in the :ref:`User Guide <classification_report>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : array, shape = [n_labels]
Optional list of label indices to include in the report.
target_names : list of strings
Optional display names matching the labels (same order).
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
digits : int
Number of digits for formatting output floating point values.
When ``output_dict`` is ``True``, this will be ignored and the
returned values will not be rounded.
output_dict : bool (default = False)
If True, return output as dict
zero_division : "warn", 0 or 1, default="warn"
Sets the value to return when there is a zero division. If set to
"warn", this acts as 0, but warnings are also raised.
Returns
-------
report : string / dict
Text summary of the precision, recall, F1 score for each class.
Dictionary returned if output_dict is True. Dictionary has the
following structure::
{'label 1': {'precision':0.5,
'recall':1.0,
'f1-score':0.67,
'support':1},
'label 2': { ... },
...
}
The reported averages include macro average (averaging the unweighted
mean per label), weighted average (averaging the support-weighted mean
per label), sample average (only for multilabel classification) and
micro average (averaging the total true positives, false negatives and
        false positives); the micro average is only shown for multi-label or
        multi-class with a subset of classes, because it corresponds to
        accuracy otherwise.
See also :func:`precision_recall_fscore_support` for more details
on averages.
Note that in binary classification, recall of the positive class
is also known as "sensitivity"; recall of the negative class is
"specificity".
See also
--------
precision_recall_fscore_support, confusion_matrix,
multilabel_confusion_matrix
Examples
--------
>>> from sklearn.metrics import classification_report
>>> y_true = [0, 1, 2, 2, 2]
>>> y_pred = [0, 0, 2, 2, 1]
>>> target_names = ['class 0', 'class 1', 'class 2']
>>> print(classification_report(y_true, y_pred, target_names=target_names))
precision recall f1-score support
<BLANKLINE>
class 0 0.50 1.00 0.67 1
class 1 0.00 0.00 0.00 1
class 2 1.00 0.67 0.80 3
<BLANKLINE>
accuracy 0.60 5
macro avg 0.50 0.56 0.49 5
weighted avg 0.70 0.60 0.61 5
<BLANKLINE>
>>> y_pred = [1, 1, 0]
>>> y_true = [1, 1, 1]
>>> print(classification_report(y_true, y_pred, labels=[1, 2, 3]))
precision recall f1-score support
<BLANKLINE>
1 1.00 0.67 0.80 3
2 0.00 0.00 0.00 0
3 0.00 0.00 0.00 0
<BLANKLINE>
micro avg 1.00 0.67 0.80 3
macro avg 0.33 0.22 0.27 3
weighted avg 1.00 0.67 0.80 3
<BLANKLINE>
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
labels_given = True
if labels is None:
labels = unique_labels(y_true, y_pred)
labels_given = False
else:
labels = np.asarray(labels)
# labelled micro average
micro_is_accuracy = ((y_type == 'multiclass' or y_type == 'binary') and
(not labels_given or
(set(labels) == set(unique_labels(y_true, y_pred)))))
if target_names is not None and len(labels) != len(target_names):
if labels_given:
warnings.warn(
"labels size, {0}, does not match size of target_names, {1}"
.format(len(labels), len(target_names))
)
else:
raise ValueError(
"Number of classes, {0}, does not match size of "
"target_names, {1}. Try specifying the labels "
"parameter".format(len(labels), len(target_names))
)
if target_names is None:
        target_names = ['%s' % label for label in labels]
headers = ["precision", "recall", "f1-score", "support"]
# compute per-class results without averaging
p, r, f1, s = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
average=None,
sample_weight=sample_weight,
zero_division=zero_division)
rows = zip(target_names, p, r, f1, s)
if y_type.startswith('multilabel'):
average_options = ('micro', 'macro', 'weighted', 'samples')
else:
average_options = ('micro', 'macro', 'weighted')
if output_dict:
report_dict = {label[0]: label[1:] for label in rows}
for label, scores in report_dict.items():
report_dict[label] = dict(zip(headers,
[i.item() for i in scores]))
else:
longest_last_line_heading = 'weighted avg'
name_width = max(len(cn) for cn in target_names)
width = max(name_width, len(longest_last_line_heading), digits)
head_fmt = '{:>{width}s} ' + ' {:>9}' * len(headers)
report = head_fmt.format('', *headers, width=width)
report += '\n\n'
row_fmt = '{:>{width}s} ' + ' {:>9.{digits}f}' * 3 + ' {:>9}\n'
for row in rows:
report += row_fmt.format(*row, width=width, digits=digits)
report += '\n'
# compute all applicable averages
for average in average_options:
if average.startswith('micro') and micro_is_accuracy:
line_heading = 'accuracy'
else:
line_heading = average + ' avg'
# compute averages with specified averaging method
avg_p, avg_r, avg_f1, _ = precision_recall_fscore_support(
y_true, y_pred, labels=labels,
average=average, sample_weight=sample_weight)
avg = [avg_p, avg_r, avg_f1, np.sum(s)]
if output_dict:
report_dict[line_heading] = dict(
zip(headers, [i.item() for i in avg]))
else:
if line_heading == 'accuracy':
row_fmt_accuracy = '{:>{width}s} ' + \
' {:>9.{digits}}' * 2 + ' {:>9.{digits}f}' + \
' {:>9}\n'
report += row_fmt_accuracy.format(line_heading, '', '',
*avg[2:], width=width,
digits=digits)
else:
report += row_fmt.format(line_heading, *avg,
width=width, digits=digits)
if output_dict:
if 'accuracy' in report_dict.keys():
report_dict['accuracy'] = report_dict['accuracy']['precision']
return report_dict
else:
return report
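# Editorial sketch (not part of the original scikit-learn source): with
# ``output_dict=True`` the report becomes a nested dict mirroring the text
# report, which is convenient for programmatic checks. Helper name is
# hypothetical, for illustration only.
def _example_classification_report_dict():
    y_true = [0, 1, 2, 2, 2]
    y_pred = [0, 0, 2, 2, 1]
    report = classification_report(y_true, y_pred, output_dict=True)
    assert {'accuracy', 'macro avg', 'weighted avg'} <= set(report)
    return report['macro avg']['f1-score']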
def hamming_loss(y_true, y_pred, labels=None, sample_weight=None):
"""Compute the average Hamming loss.
The Hamming loss is the fraction of labels that are incorrectly predicted.
Read more in the :ref:`User Guide <hamming_loss>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
    labels : array, shape = [n_labels], optional (default=None)
Integer array of labels. If not provided, labels will be inferred
from y_true and y_pred.
.. versionadded:: 0.18
.. deprecated:: 0.21
This parameter ``labels`` is deprecated in version 0.21 and will
be removed in version 0.23. Hamming loss uses ``y_true.shape[1]``
for the number of labels when y_true is binary label indicators,
so it is unnecessary for the user to specify.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
.. versionadded:: 0.18
Returns
-------
    loss : float or int
        Return the average Hamming loss between the elements of ``y_true`` and
``y_pred``.
See Also
--------
accuracy_score, jaccard_score, zero_one_loss
Notes
-----
In multiclass classification, the Hamming loss corresponds to the Hamming
distance between ``y_true`` and ``y_pred`` which is equivalent to the
subset ``zero_one_loss`` function, when `normalize` parameter is set to
True.
In multilabel classification, the Hamming loss is different from the
subset zero-one loss. The zero-one loss considers the entire set of labels
for a given sample incorrect if it does not entirely match the true set of
labels. Hamming loss is more forgiving in that it penalizes only the
individual labels.
The Hamming loss is upperbounded by the subset zero-one loss, when
`normalize` parameter is set to True. It is always between 0 and 1,
lower being better.
References
----------
.. [1] Grigorios Tsoumakas, Ioannis Katakis. Multi-Label Classification:
An Overview. International Journal of Data Warehousing & Mining,
3(3), 1-13, July-September 2007.
.. [2] `Wikipedia entry on the Hamming distance
<https://en.wikipedia.org/wiki/Hamming_distance>`_
Examples
--------
>>> from sklearn.metrics import hamming_loss
>>> y_pred = [1, 2, 3, 4]
>>> y_true = [2, 2, 3, 4]
>>> hamming_loss(y_true, y_pred)
0.25
In the multilabel case with binary label indicators:
>>> import numpy as np
>>> hamming_loss(np.array([[0, 1], [1, 1]]), np.zeros((2, 2)))
0.75
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
check_consistent_length(y_true, y_pred, sample_weight)
if labels is not None:
warnings.warn("The labels parameter is unused. It was"
" deprecated in version 0.21 and"
" will be removed in version 0.23",
FutureWarning)
if sample_weight is None:
weight_average = 1.
else:
weight_average = np.mean(sample_weight)
if y_type.startswith('multilabel'):
n_differences = count_nonzero(y_true - y_pred,
sample_weight=sample_weight)
return (n_differences /
(y_true.shape[0] * y_true.shape[1] * weight_average))
elif y_type in ["binary", "multiclass"]:
return _weighted_sum(y_true != y_pred, sample_weight, normalize=True)
else:
raise ValueError("{0} is not supported".format(y_type))
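# Editorial sketch (not part of the original scikit-learn source): for
# unweighted binary or multiclass input the Hamming loss is simply the
# fraction of mismatched labels. Helper name is hypothetical.
def _example_hamming_loss_equals_mismatch_rate():
    import numpy as np
    y_true = np.array([2, 2, 3, 4])
    y_pred = np.array([1, 2, 3, 4])
    assert np.isclose(hamming_loss(y_true, y_pred),
                      np.mean(y_true != y_pred))  # both 0.25
    return hamming_loss(y_true, y_pred)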
def log_loss(y_true, y_pred, eps=1e-15, normalize=True, sample_weight=None,
labels=None):
"""Log loss, aka logistic loss or cross-entropy loss.
This is the loss function used in (multinomial) logistic regression
and extensions of it such as neural networks, defined as the negative
log-likelihood of the true labels given a probabilistic classifier's
predictions. The log loss is only defined for two or more labels.
For a single sample with true label yt in {0,1} and
estimated probability yp that yt = 1, the log loss is
-log P(yt|yp) = -(yt log(yp) + (1 - yt) log(1 - yp))
Read more in the :ref:`User Guide <log_loss>`.
Parameters
----------
y_true : array-like or label indicator matrix
Ground truth (correct) labels for n_samples samples.
y_pred : array-like of float, shape = (n_samples, n_classes) or (n_samples,)
Predicted probabilities, as returned by a classifier's
predict_proba method. If ``y_pred.shape = (n_samples,)``
the probabilities provided are assumed to be that of the
positive class. The labels in ``y_pred`` are assumed to be
ordered alphabetically, as done by
:class:`preprocessing.LabelBinarizer`.
eps : float
Log loss is undefined for p=0 or p=1, so probabilities are
clipped to max(eps, min(1 - eps, p)).
normalize : bool, optional (default=True)
If true, return the mean loss per sample.
Otherwise, return the sum of the per-sample losses.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
labels : array-like, optional (default=None)
If not provided, labels will be inferred from y_true. If ``labels``
is ``None`` and ``y_pred`` has shape (n_samples,) the labels are
assumed to be binary and are inferred from ``y_true``.
.. versionadded:: 0.18
Returns
-------
loss : float
Examples
--------
>>> from sklearn.metrics import log_loss
>>> log_loss(["spam", "ham", "ham", "spam"],
... [[.1, .9], [.9, .1], [.8, .2], [.35, .65]])
0.21616...
References
----------
C.M. Bishop (2006). Pattern Recognition and Machine Learning. Springer,
p. 209.
Notes
-----
The logarithm used is the natural logarithm (base-e).
"""
y_pred = check_array(y_pred, ensure_2d=False)
check_consistent_length(y_pred, y_true, sample_weight)
lb = LabelBinarizer()
if labels is not None:
lb.fit(labels)
else:
lb.fit(y_true)
if len(lb.classes_) == 1:
if labels is None:
raise ValueError('y_true contains only one label ({0}). Please '
'provide the true labels explicitly through the '
'labels argument.'.format(lb.classes_[0]))
else:
raise ValueError('The labels array needs to contain at least two '
'labels for log_loss, '
'got {0}.'.format(lb.classes_))
transformed_labels = lb.transform(y_true)
if transformed_labels.shape[1] == 1:
transformed_labels = np.append(1 - transformed_labels,
transformed_labels, axis=1)
# Clipping
y_pred = np.clip(y_pred, eps, 1 - eps)
# If y_pred is of single dimension, assume y_true to be binary
# and then check.
if y_pred.ndim == 1:
y_pred = y_pred[:, np.newaxis]
if y_pred.shape[1] == 1:
y_pred = np.append(1 - y_pred, y_pred, axis=1)
# Check if dimensions are consistent.
transformed_labels = check_array(transformed_labels)
if len(lb.classes_) != y_pred.shape[1]:
if labels is None:
raise ValueError("y_true and y_pred contain different number of "
"classes {0}, {1}. Please provide the true "
"labels explicitly through the labels argument. "
"Classes found in "
"y_true: {2}".format(transformed_labels.shape[1],
y_pred.shape[1],
lb.classes_))
else:
raise ValueError('The number of classes in labels is different '
'from that in y_pred. Classes found in '
'labels: {0}'.format(lb.classes_))
# Renormalize
y_pred /= y_pred.sum(axis=1)[:, np.newaxis]
loss = -(transformed_labels * np.log(y_pred)).sum(axis=1)
return _weighted_sum(loss, sample_weight, normalize)
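# Editorial sketch (not part of the original scikit-learn source): the
# docstring example above can be reproduced by hand. Classes are binarized in
# alphabetical order ('ham' < 'spam'), so the loss is the mean negative log of
# the probability assigned to the true class. Helper name is hypothetical.
def _example_log_loss_by_hand():
    import numpy as np
    y_true = ["spam", "ham", "ham", "spam"]
    y_pred = [[.1, .9], [.9, .1], [.8, .2], [.35, .65]]
    p_true = np.array([0.9, 0.9, 0.8, 0.65])  # probability of the true class
    manual = -np.mean(np.log(p_true))
    assert np.isclose(log_loss(y_true, y_pred), manual)  # ~0.21616
    return manual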
def hinge_loss(y_true, pred_decision, labels=None, sample_weight=None):
"""Average hinge loss (non-regularized)
    In the binary case, assuming labels in y_true are encoded with +1 and -1,
when a prediction mistake is made, ``margin = y_true * pred_decision`` is
always negative (since the signs disagree), implying ``1 - margin`` is
always greater than 1. The cumulated hinge loss is therefore an upper
bound of the number of mistakes made by the classifier.
    In the multiclass case, the function expects that either all the labels are
    included in y_true or an optional labels argument is provided which
    contains all the labels. The multiclass margin is calculated according
to Crammer-Singer's method. As in the binary case, the cumulated hinge loss
is an upper bound of the number of mistakes made by the classifier.
Read more in the :ref:`User Guide <hinge_loss>`.
Parameters
----------
y_true : array, shape = [n_samples]
True target, consisting of integers of two values. The positive label
must be greater than the negative label.
pred_decision : array, shape = [n_samples] or [n_samples, n_classes]
Predicted decisions, as output by decision_function (floats).
labels : array, optional, default None
Contains all the labels for the problem. Used in multiclass hinge loss.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
loss : float
References
----------
.. [1] `Wikipedia entry on the Hinge loss
<https://en.wikipedia.org/wiki/Hinge_loss>`_
.. [2] Koby Crammer, Yoram Singer. On the Algorithmic
Implementation of Multiclass Kernel-based Vector
Machines. Journal of Machine Learning Research 2,
(2001), 265-292
.. [3] `L1 AND L2 Regularization for Multiclass Hinge Loss Models
by Robert C. Moore, John DeNero.
<http://www.ttic.edu/sigml/symposium2011/papers/
Moore+DeNero_Regularization.pdf>`_
Examples
--------
>>> from sklearn import svm
>>> from sklearn.metrics import hinge_loss
>>> X = [[0], [1]]
>>> y = [-1, 1]
>>> est = svm.LinearSVC(random_state=0)
>>> est.fit(X, y)
LinearSVC(random_state=0)
>>> pred_decision = est.decision_function([[-2], [3], [0.5]])
>>> pred_decision
array([-2.18..., 2.36..., 0.09...])
>>> hinge_loss([-1, 1, 1], pred_decision)
0.30...
In the multiclass case:
>>> import numpy as np
>>> X = np.array([[0], [1], [2], [3]])
>>> Y = np.array([0, 1, 2, 3])
>>> labels = np.array([0, 1, 2, 3])
>>> est = svm.LinearSVC()
>>> est.fit(X, Y)
LinearSVC()
>>> pred_decision = est.decision_function([[-1], [2], [3]])
>>> y_true = [0, 2, 3]
>>> hinge_loss(y_true, pred_decision, labels)
0.56...
"""
check_consistent_length(y_true, pred_decision, sample_weight)
pred_decision = check_array(pred_decision, ensure_2d=False)
y_true = column_or_1d(y_true)
y_true_unique = np.unique(y_true)
if y_true_unique.size > 2:
if (labels is None and pred_decision.ndim > 1 and
(np.size(y_true_unique) != pred_decision.shape[1])):
raise ValueError("Please include all labels in y_true "
"or pass labels as third argument")
if labels is None:
labels = y_true_unique
le = LabelEncoder()
le.fit(labels)
y_true = le.transform(y_true)
mask = np.ones_like(pred_decision, dtype=bool)
mask[np.arange(y_true.shape[0]), y_true] = False
margin = pred_decision[~mask]
margin -= np.max(pred_decision[mask].reshape(y_true.shape[0], -1),
axis=1)
else:
# Handles binary class case
# this code assumes that positive and negative labels
# are encoded as +1 and -1 respectively
pred_decision = column_or_1d(pred_decision)
pred_decision = np.ravel(pred_decision)
lbin = LabelBinarizer(neg_label=-1)
y_true = lbin.fit_transform(y_true)[:, 0]
try:
margin = y_true * pred_decision
except TypeError:
raise TypeError("pred_decision should be an array of floats.")
losses = 1 - margin
# The hinge_loss doesn't penalize good enough predictions.
np.clip(losses, 0, None, out=losses)
return np.average(losses, weights=sample_weight)
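# Editorial sketch (not part of the original scikit-learn source): in the
# binary case the hinge loss is just ``mean(max(0, 1 - y * decision))`` with
# labels mapped to {-1, +1}. Helper name is hypothetical, for illustration.
def _example_binary_hinge_loss_by_hand():
    import numpy as np
    y = np.array([-1, 1, 1])
    decision = np.array([-2.18, 2.36, 0.09])
    manual = np.mean(np.maximum(0.0, 1.0 - y * decision))
    assert np.isclose(hinge_loss(y, decision), manual)  # ~0.303
    return manual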
def brier_score_loss(y_true, y_prob, sample_weight=None, pos_label=None):
"""Compute the Brier score.
The smaller the Brier score, the better, hence the naming with "loss".
    Across all items in a set of N predictions, the Brier score measures the
mean squared difference between (1) the predicted probability assigned
to the possible outcomes for item i, and (2) the actual outcome.
Therefore, the lower the Brier score is for a set of predictions, the
better the predictions are calibrated. Note that the Brier score always
takes on a value between zero and one, since this is the largest
possible difference between a predicted probability (which must be
between zero and one) and the actual outcome (which can take on values
of only 0 and 1). The Brier loss is composed of refinement loss and
calibration loss.
The Brier score is appropriate for binary and categorical outcomes that
can be structured as true or false, but is inappropriate for ordinal
variables which can take on three or more values (this is because the
Brier score assumes that all possible outcomes are equivalently
"distant" from one another). Which label is considered to be the positive
label is controlled via the parameter pos_label, which defaults to 1.
Read more in the :ref:`User Guide <calibration>`.
Parameters
----------
y_true : array, shape (n_samples,)
True targets.
y_prob : array, shape (n_samples,)
Probabilities of the positive class.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
pos_label : int or str, default=None
Label of the positive class.
Defaults to the greater label unless y_true is all 0 or all -1
in which case pos_label defaults to 1.
Returns
-------
score : float
Brier score
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import brier_score_loss
>>> y_true = np.array([0, 1, 1, 0])
>>> y_true_categorical = np.array(["spam", "ham", "ham", "spam"])
>>> y_prob = np.array([0.1, 0.9, 0.8, 0.3])
>>> brier_score_loss(y_true, y_prob)
0.037...
>>> brier_score_loss(y_true, 1-y_prob, pos_label=0)
0.037...
>>> brier_score_loss(y_true_categorical, y_prob, pos_label="ham")
0.037...
>>> brier_score_loss(y_true, np.array(y_prob) > 0.5)
0.0
References
----------
.. [1] `Wikipedia entry for the Brier score.
<https://en.wikipedia.org/wiki/Brier_score>`_
"""
y_true = column_or_1d(y_true)
y_prob = column_or_1d(y_prob)
assert_all_finite(y_true)
assert_all_finite(y_prob)
check_consistent_length(y_true, y_prob, sample_weight)
labels = np.unique(y_true)
if len(labels) > 2:
raise ValueError("Only binary classification is supported. "
"Labels in y_true: %s." % labels)
if y_prob.max() > 1:
raise ValueError("y_prob contains values greater than 1.")
if y_prob.min() < 0:
raise ValueError("y_prob contains values less than 0.")
# if pos_label=None, when y_true is in {-1, 1} or {0, 1},
# pos_label is set to 1 (consistent with precision_recall_curve/roc_curve),
# otherwise pos_label is set to the greater label
# (different from precision_recall_curve/roc_curve,
# the purpose is to keep backward compatibility).
if pos_label is None:
if (np.array_equal(labels, [0]) or
np.array_equal(labels, [-1])):
pos_label = 1
else:
pos_label = y_true.max()
y_true = np.array(y_true == pos_label, int)
return np.average((y_true - y_prob) ** 2, weights=sample_weight)
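# Editorial sketch (not part of the original scikit-learn source): with 0/1
# targets and the default ``pos_label`` the Brier score is just the mean
# squared difference between outcome and predicted probability. Helper name
# is hypothetical, for illustration only.
def _example_brier_score_by_hand():
    import numpy as np
    y_true = np.array([0, 1, 1, 0])
    y_prob = np.array([0.1, 0.9, 0.8, 0.3])
    manual = np.mean((y_true - y_prob) ** 2)  # 0.0375
    assert np.isclose(brier_score_loss(y_true, y_prob), manual)
    return manual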
|
the-stack_106_20952
|
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import json
import subprocess
import sys
import time
import pytest
import requests
BASE_URL = "http://0.0.0.0:8080/"
PING_URL = BASE_URL + "ping"
INVOCATION_URL = BASE_URL + "models/{}/invoke"
MODELS_URL = BASE_URL + "models"
DELETE_MODEL_URL = BASE_URL + "models/{}"
@pytest.fixture(scope="module", autouse=True)
def container():
try:
command = (
"docker run --name sagemaker-inference-toolkit-test -p 8080:8080 "
"-e SAGEMAKER_MULTI_MODEL=true "
"sagemaker-inference-toolkit-test:mxnet serve"
)
proc = subprocess.Popen(command.split(), stdout=sys.stdout, stderr=subprocess.STDOUT)
attempts = 0
while attempts < 5:
time.sleep(3)
try:
requests.get(PING_URL)
break
except: # noqa: E722
attempts += 1
pass
yield proc.pid
finally:
subprocess.check_call("docker rm -f sagemaker-inference-toolkit-test".split())
def make_list_model_request():
response = requests.get(MODELS_URL)
return response.status_code, json.loads(response.content.decode("utf-8"))
def make_load_model_request(data, content_type="application/json"):
headers = {"Content-Type": content_type}
response = requests.post(MODELS_URL, data=data, headers=headers)
return response.status_code, json.loads(response.content.decode("utf-8"))
def make_unload_model_request(model_name):
response = requests.delete(DELETE_MODEL_URL.format(model_name))
return response.status_code, json.loads(response.content.decode("utf-8"))
def make_invocation_request(model_name, data, content_type="application/x-image"):
headers = {"Content-Type": content_type}
response = requests.post(INVOCATION_URL.format(model_name), data=data, headers=headers)
return response.status_code, json.loads(response.content.decode("utf-8"))
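# Editorial note (not part of the original test module): the helpers above wrap
# the ping, model-management and invocation endpoints that the container exposes
# when started with SAGEMAKER_MULTI_MODEL=true; the tests below exercise the
# list, load and unload flows against them.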
def test_ping():
res = requests.get(PING_URL)
assert res.status_code == 200
def test_list_models_empty():
code, models = make_list_model_request()
assert code == 200
assert models["models"] == []
def test_load_models():
data1 = {"model_name": "resnet_152", "url": "/opt/ml/models/resnet_152/model"}
code1, content1 = make_load_model_request(data=json.dumps(data1))
assert code1 == 200
assert content1["status"] == "Workers scaled"
code2, content2 = make_list_model_request()
assert code2 == 200
assert content2["models"] == [
{"modelName": "resnet_152", "modelUrl": "/opt/ml/models/resnet_152/model"}
]
data2 = {"model_name": "resnet_18", "url": "/opt/ml/models/resnet_18/model"}
code3, content3 = make_load_model_request(data=json.dumps(data2))
assert code3 == 200
assert content3["status"] == "Workers scaled"
code4, content4 = make_list_model_request()
assert code4 == 200
assert content4["models"] == [
{"modelName": "resnet_152", "modelUrl": "/opt/ml/models/resnet_152/model"},
{"modelName": "resnet_18", "modelUrl": "/opt/ml/models/resnet_18/model"},
]
def test_unload_models():
code1, content1 = make_unload_model_request("resnet_152")
assert code1 == 200
assert content1["status"] == 'Model "resnet_152" unregistered'
code2, content2 = make_list_model_request()
assert code2 == 200
assert content2["models"] == [
{"modelName": "resnet_18", "modelUrl": "/opt/ml/models/resnet_18/model"}
]
def test_load_non_existing_model():
data1 = {"model_name": "banana", "url": "/banana"}
code1, content1 = make_load_model_request(data=json.dumps(data1))
assert code1 == 404
def test_unload_non_existing_model():
# resnet_152 is already unloaded
code1, content1 = make_unload_model_request("resnet_152")
assert code1 == 404
def test_load_model_multiple_times():
# resnet_18 is already loaded
data = {"model_name": "resnet_18", "url": "/opt/ml/models/resnet_18/model"}
code3, content3 = make_load_model_request(data=json.dumps(data))
assert code3 == 409
|
the-stack_106_20955
|
"""
Hyperspy dataset converter to sidpy
part of SciFiReader, a pycroscopy package
author: Gerd Duscher, UTK
First Version 11/19/2021
"""
import sidpy
import numpy as np
try:
import hyperspy.api as hs
except ModuleNotFoundError:
hs = None
def convert_hyperspy(s):
"""
imports a hyperspy signal object into sidpy.Dataset
Parameters
----------
s: hyperspy dataset
Return
------
dataset: sidpy.Dataset
"""
if not hs:
raise ModuleNotFoundError("Hyperspy is not installed")
if not isinstance(s, (hs.signals.Signal1D, hs.signals.Signal2D)):
raise TypeError('This is not a hyperspy signal object')
dataset = sidpy.Dataset.from_array(s, name=s.metadata.General.title)
# Add dimension info
axes = s.axes_manager.as_dictionary()
if isinstance(s, hs.signals.Signal1D):
if s.data.ndim < 2:
dataset.data_type = 'spectrum'
elif s.data.ndim > 1:
if s.data.ndim == 2:
dataset = sidpy.Dataset.from_array(np.expand_dims(s, 2), name=s.metadata.General.title)
dataset.set_dimension(2, sidpy.Dimension([0], name='y', units='pixel',
quantity='distance', dimension_type='spatial'))
dataset.data_type = sidpy.DataType.SPECTRAL_IMAGE
for key, axis in axes.items():
if axis['navigate']:
dimension_type = 'spatial'
else:
dimension_type = 'spectral'
dim_array = np.arange(axis['size']) * axis['scale'] + axis['offset']
if axis['units'] == '':
axis['units'] = 'frame'
dataset.set_dimension(int(key[-1]), sidpy.Dimension(dim_array, name=axis['name'], units=axis['units'],
quantity=axis['name'], dimension_type=dimension_type))
elif isinstance(s, hs.signals.Signal2D):
if s.data.ndim < 4:
if s.data.ndim == 2:
dataset.data_type = 'image'
elif s.data.ndim == 3:
dataset.data_type = 'image_stack'
for key, axis in axes.items():
if axis['navigate']:
dimension_type = 'temporal'
else:
dimension_type = 'spatial'
dim_array = np.arange(axis['size']) * axis['scale'] + axis['offset']
if axis['units'] == '' or not isinstance(axis['units'], str):
axis['units'] = 'pixel'
if not isinstance(axis['name'], str):
axis['name'] = str(key)
dataset.set_dimension(int(key[-1]), sidpy.Dimension(dim_array, name=axis['name'], units=axis['units'],
quantity=axis['name'],
dimension_type=dimension_type))
elif s.data.ndim == 4:
dataset.data_type = 'IMAGE_4D'
for key, axis in axes.items():
if axis['navigate']:
dimension_type = 'spatial'
else:
dimension_type = 'reciprocal'
dim_array = np.arange(axis['size']) * axis['scale'] + axis['offset']
if axis['units'] == '' or not isinstance(axis['units'], str):
axis['units'] = 'pixel'
if not isinstance(axis['name'], str):
axis['name'] = str(key)
dataset.set_dimension(int(key[-1]), sidpy.Dimension(dim_array, name=axis['name'], units=axis['units'],
quantity=axis['name'],
dimension_type=dimension_type))
dataset.metadata = dict(s.metadata)
dataset.original_metadata = dict(s.original_metadata)
dataset.title = dataset.metadata['General']['title']
if 'quantity' in dataset.metadata['Signal']:
dataset.units = dataset.metadata['Signal']['quantity'].split('(')[-1][:-1]
dataset.quantity = dataset.metadata['Signal']['quantity'].split('(')[0]
dataset.source = 'hyperspy'
return dataset
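# Editorial sketch (not part of the original converter): a minimal round trip,
# assuming hyperspy and sidpy are installed. A 3D Signal1D (two navigation axes
# plus one signal axis) is expected to come back as a sidpy spectral image.
# The helper name and the example title are hypothetical.
def _example_convert_signal1d():
    import numpy as np
    s = hs.signals.Signal1D(np.random.random((4, 5, 10)))
    s.metadata.General.title = 'demo spectrum image'
    return convert_hyperspy(s)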
|
the-stack_106_20956
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# diffpy.structure documentation build configuration file, created by
# sphinx-quickstart on Mon Mar 27 11:16:48 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import os
import sys
import time
sys.path.insert(0, os.path.abspath('../../..'))
# abbreviations
ab_authors = 'Pavol Juhás, Christopher L. Farrow, Simon J.L. Billinge group'
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
needs_sphinx = '1.5'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.intersphinx',
# 'sphinx.ext.coverage',
# 'sphinx.ext.doctest',
'm2r',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = ['.rst', '.md']
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'diffpy.structure'
copyright = '%Y, Brookhaven National Laboratory'
author = ab_authors
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
from setup import versiondata
fullversion = versiondata.get('DEFAULT', 'version')
# The short X.Y version.
version = ''.join(fullversion.split('.post')[:1])
# The full version, including alpha/beta/rc tags.
release = fullversion
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
today_seconds = versiondata.getint('DEFAULT', 'timestamp')
today = time.strftime('%B %d, %Y', time.localtime(today_seconds))
year = today.split()[-1]
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# substitute YEAR in the copyright string
copyright = copyright.replace('%Y', year)
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
default_role = 'any'
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
add_module_names = False
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['diffpy.structure']
# Display all warnings for missing links.
nitpicky = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_py3doc_enhanced_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
'collapsiblesidebar' : 'true',
'navigation_with_keys' : 'true',
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'diffpystructuredoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'diffpy.structure_manual.tex', 'diffpy.structure Documentation',
ab_authors, 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'diffpy.structure', 'diffpy.structure Documentation',
ab_authors.split(', '), 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'diffpy.structure', 'diffpy.structure Documentation',
ab_authors, 'diffpy.structure', 'One line description of project.',
'Miscellaneous'),
]
# -- intersphinx configuration --------------------------------------------
intersphinx_mapping = {
'python' : ('https://docs.python.org/3.7', None),
'numpy' : ('https://docs.scipy.org/doc/numpy', None),
}
# -- autodoc configuration ------------------------------------------------
# See http://www.sphinx-doc.org/en/stable/ext/autodoc.html
autodoc_member_order = 'bysource'
# -- napoleon configuration -----------------------------------------------
# See https://sphinxcontrib-napoleon.readthedocs.io.
napoleon_google_docstring = False
napoleon_numpy_docstring = True
napoleon_use_param = False
napoleon_use_rtype = False
|
the-stack_106_20957
|
from sys import exit
def gold_room():
print("This room is full of gold. How much do you take?")
choice = input("> ")
    if choice.isdigit():
how_much = int(choice)
else:
dead("Man, learn to type a number.")
if how_much < 50:
print("Nice, you're not greedy, you win!")
exit(0)
else:
dead("You greedy bastard!")
def bear_room():
print("There is a bear here.")
print("The bear has a bunch of honey.")
print("The fat bear is in front of another door.")
    print("How are you going to move the bear?")
bear_moved = False
while True:
choice = input("> ")
if choice == "take honey":
dead("The bear looks at you then slaps your face off.")
elif choice == "taunt bear" and not bear_moved:
print("The bear has moved from the door.")
print("You can go through it now.")
bear_moved = True
elif choice == "taunt bear" and bear_moved:
dead("The bear gets pissed off and chews your leg off.")
elif choice == "open door" and bear_moved:
gold_room()
else:
print("I got no idea what that means.")
def cthulhu_room():
print("Here you see the great evil Cthulhu.")
print("He, it, whatever stares at you and you go insane.")
print("Do you flee for your life or eat your head?")
choice = input("> ")
if "flee" in choice:
start()
elif "head" in choice:
dead("Well that was tasty!")
else:
cthulhu_room()
def dead(why):
print(why, "Good job!")
exit(0)
def start():
print("You are in a dark room.")
print("There is a door to your right and left.")
print("Which one do you take?")
choice = input("> ")
if choice == "left":
bear_room()
elif choice == "right":
cthulhu_room()
else:
dead("You stumble around the room until you starve.")
start()
|
the-stack_106_20958
|
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
import re, ast
with open('requirements.txt') as f:
install_requires = f.read().strip().split('\n')
# get version from __version__ variable in cai/__init__.py
_version_re = re.compile(r'__version__\s+=\s+(.*)')
with open('cai/__init__.py', 'rb') as f:
version = str(ast.literal_eval(_version_re.search(
f.read().decode('utf-8')).group(1)))
setup(
name='cai',
version=version,
    description='This module is where the CAIs for the different document types handled by the company will be created.',
author='Frappe',
author_email='[email protected]',
packages=find_packages(),
zip_safe=False,
include_package_data=True,
install_requires=install_requires
)
|
the-stack_106_20959
|
import logging
from tornado.options import options
from pymongo import MongoClient
from pymongo.errors import ConnectionFailure
import tornado.web
import tornado.escape
import csv
import re
from oauth.decorator import CheckAuthorized
from pretty_json import PrettyJsonRequestHandler
RESERVED_KEYS = ["output", "output_filename", "sort_by", "sort_direction", "_", "_exclude", "_include"]
RESERVED_COLLECTIONS = ["google_oauth_tokens", "private_userinfo", "admin", "local", "collection_fields"]
class MongoDbQueryHandler(PrettyJsonRequestHandler):
datastores_config = {}
def initialize(self):
self._datastore_map = self.datastores
@CheckAuthorized
def get(self, *uri_path):
try:
if options.verbose: logging.info("GET [uri=%s] [arguments=%s]" % (self.request.uri, self.request.arguments))
sub_path = self.request.path.replace("/datastores", "")
uri_parts = sub_path.split("/")
if options.verbose: logging.info("GET [sub_path=%s] [len=%d]" % (sub_path, len(uri_parts)))
if len(uri_parts) == 1:
self.list_datastores()
self.set_status(200)
return
datastore_id = uri_parts[1]
if not datastore_id in self._datastore_map.keys():
if options.verbose: logging.info("unknown datastore [%s]" % datastore_id)
raise tornado.web.HTTPError(404, ("datastore %s not found" % datastore_id))
if len(uri_parts) == 2:
self.list_databases(datastore_id)
self.set_status(200)
return
db_name = uri_parts[2]
if len(uri_parts) == 3:
self.list_collections(datastore_id, db_name)
self.set_status(200)
return
collection_id = uri_parts[3]
datastore = self._datastore_map[datastore_id]
collection = self.open_collection(datastore_id, db_name, collection_id)
if len(uri_parts) == 4:
datatypes = self.get_datatypes(datastore_id, db_name, collection_id)
query = self.transpose_query_arguments(db_name, datastore, datatypes)
sort_fld = self.get_argument("sort_by", None)
sort_dir = int(self.get_argument("sort_direction", "1"))
json_items = self.query_collection(collection, query, sort_fld, sort_dir)
if self.get_argument("output", "json") == "tsv":
headers = self.collection_fields(datastore_id, db_name, collection)
self.write_tsv(json_items, headers)
self.set_status(200)
return
self.write({"items": json_items, "kind": "addama#collection" })
self.set_status(200)
return
last_part = uri_parts[4]
if last_part == "fields":
flds = self.collection_fields(datastore_id, db_name, collection)
self.write({"items": flds, "kind": "addama#fields" })
self.set_status(200)
return
if last_part == "search":
search_field = uri_parts[5]
search_value = self.get_argument("term")
query = {}
query[search_field] = { "$regex": re.compile(search_value, re.IGNORECASE) }
json_items = self.query_collection(collection, query)
self.write({"items": json_items, "kind": "addama#searchResults" })
self.set_status(200)
return
raise tornado.web.HTTPError(404, ("%s was not found" % self.request.path))
except ConnectionFailure as cfe:
raise tornado.web.HTTPError(500, str(cfe))
def post(self, *uri_path):
try:
if options.verbose: logging.info("POST [uri=%s] [body=%s]" % (self.request.uri, self.request.body))
sub_path = self.request.path.replace("/datastores", "")
uri_parts = sub_path.split("/")
if options.verbose: logging.info("POST [sub_path=%s] [len=%d]" % (sub_path, len(uri_parts)))
if len(uri_parts) == 1:
self.list_datastores()
self.set_status(200)
return
datastore_id = uri_parts[1]
if not datastore_id in self._datastore_map.keys():
if options.verbose: logging.info("unknown datastore [%s]" % datastore_id)
raise tornado.web.HTTPError(404)
if len(uri_parts) == 2:
self.list_databases(datastore_id)
self.set_status(200)
return
db_name = uri_parts[2]
if len(uri_parts) == 3:
self.list_collections(datastore_id, db_name)
self.set_status(200)
return
collection_id = uri_parts[3]
# datastore = self._datastore_map[datastore_id]
collection = self.open_collection(datastore_id, db_name, collection_id)
if len(uri_parts) == 4:
datatypes = self.get_datatypes(datastore_id, db_name, collection_id)
                query = tornado.escape.json_decode(self.request.body)
                sort_fld = query.pop("sort_by", None)
                sort_dir = query.pop("sort_direction", 1)
                # Pop the output format before querying so it is not sent to mongo.
                output_format = query.pop("output", "json")
                json_items = self.query_collection(collection, query, sort_fld, sort_dir)
                if output_format == "tsv":
                    headers = self.collection_fields(datastore_id, db_name, collection)
                    self.write_tsv(json_items, headers)
self.set_status(200)
return
self.write({"items": json_items})
self.set_status(200)
return
raise tornado.web.HTTPError(404)
except ConnectionFailure as cfe:
raise tornado.web.HTTPError(500, str(cfe))
def list_datastores(self):
if options.verbose: logging.info("list_datastores [%s]" % self.request.uri)
items = []
for datastore_id in self._datastore_map.keys():
items.append({ "id": datastore_id, "uri": self.request.path + "/" + datastore_id })
self.write({"items": items, "kind": "addama#datastores" })
def list_databases(self, datastore_id):
if options.verbose: logging.info("list_databases [%s] [%s]" % (self.request.uri, datastore_id))
mongo_uri = self._datastore_map[datastore_id].uri
if options.verbose: logging.info("list_databases [%s] [%s] [%s]" % (self.request.uri, datastore_id, mongo_uri))
mongoClient = MongoClient(mongo_uri)
items = []
for database_name in mongoClient.database_names():
if not database_name in RESERVED_COLLECTIONS:
items.append({ "id": database_name, "uri": self.request.path + "/" + database_name })
self.write({"items": items, "kind": "addama#databases" })
def list_collections(self, datastore_id, database_id):
if options.verbose: logging.info("list_collections [%s] [%s] [%s]" % (self.request.uri, datastore_id, database_id))
mongo_uri = self._datastore_map[datastore_id].uri
if options.verbose: logging.info("list_collections [%s] [%s] [%s] [%s]" % (self.request.uri, datastore_id, database_id, mongo_uri))
mongoClient = MongoClient(mongo_uri)
database = mongoClient[database_id]
items = []
for collection_name in database.collection_names(False):
if not collection_name in RESERVED_COLLECTIONS:
items.append({ "id": collection_name, "uri": self.request.path + "/" + collection_name })
self.write({"items": items, "kind": "addama#collections" })
def open_collection(self, datastore_id, db_name, collection_id, InternalUse=False):
if options.verbose: logging.info("open_collection [%s] [%s] [%s]" % (datastore_id, db_name, collection_id))
if not InternalUse and collection_id in RESERVED_COLLECTIONS: raise tornado.web.HTTPError(403, "This collection is reserved for internal use")
mongo_uri = self._datastore_map[datastore_id].uri
mongoClient = MongoClient(mongo_uri)
database = mongoClient[db_name]
return database[collection_id]
def collection_fields(self, datastore_id, db_name, collection):
if options.verbose: logging.info("collection_fields [%s,%s,%s]" % (datastore_id, db_name, collection.name))
c_fields = self.open_collection(datastore_id, db_name, "collection_fields", InternalUse=True)
c_out = c_fields.find({ "value": collection.name })
        if c_out.count() == 0:
logging.warn("need to run mapreduce_collection_fields.js")
return []
fields = []
for field in c_out:
fields.append(field["_id"])
return fields
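    # Illustrative note (an assumption based on how collection_fields reads the
    # data): each document in the "collection_fields" helper collection, as
    # produced by mapreduce_collection_fields.js, is expected to look like
    #     {"_id": "<field name>", "value": "<collection name>"}
    # e.g. a hypothetical {"_id": "gene", "value": "expression"} would make
    # "gene" appear in the field list returned for the "expression" collection.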
def query_collection(self, collection, query, sort_fld=None, sort_dir=1):
if options.verbose: logging.info("query_collection [%s] [%s] [%s] [%s]" % (collection.name, query, sort_fld, sort_dir))
json_items = []
query_limit = options.mongo_rows_limit
keyword_args = {}
projection = self.get_projection()
if (len(projection.keys()) > 0): keyword_args["fields"] = projection
if sort_fld:
for idx, item in enumerate(collection.find(query, **keyword_args).sort(sort_fld, sort_dir)):
if idx > query_limit: break
json_items.append(self.jsonable(item))
else:
for idx, item in enumerate(collection.find(query, **keyword_args)):
if idx > query_limit: break
json_items.append(self.jsonable(item))
return json_items
def get_projection(self):
projection = {}
if "_exclude" in self.request.arguments:
proj_exclude = self.get_arguments("_exclude", [])
for proj in proj_exclude: projection[proj] = False
elif "_include" in self.request.arguments:
proj_include = self.get_arguments("_include", [])
for proj in proj_include: projection[proj] = True
if options.verbose: logging.info("projection=%s" % projection)
return projection
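    # Illustrative sketch (hypothetical field names) of how request arguments
    # map to a mongo projection via get_projection():
    #     ?_include=gene&_include=score  ->  {"gene": True, "score": True}
    #     ?_exclude=values               ->  {"values": False}
    # Note that "_exclude" wins when both are supplied, because it is checked
    # first.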
def get_datatypes(self, datasource_id, db_name, collection_id):
c_dtypes = {}
if options.verbose: logging.info("get_datatypes(%s, %s, %s)" % (datasource_id, db_name, collection_id))
if not self.datastores_config is None and "datastores" in self.datastores_config:
c_datastores = self.datastores_config["datastores"]
if not c_datastores is None and datasource_id in c_datastores:
if db_name in c_datastores[datasource_id]:
c_db = c_datastores[datasource_id][db_name]
if not c_db is None and "datatypes" in c_db and collection_id in c_db["datatypes"]:
c_dtypes = c_db["datatypes"][collection_id]
if options.verbose: logging.info("get_datatypes(%s, %s, %s): %s" % (datasource_id, db_name, collection_id, str(c_dtypes)))
return c_dtypes
def transpose_query_arguments(self, db_name, datasource, datatypes={}):
# by default, queries are case-insensitive
normalize_fn = lambda x: re.compile("^" + x + "$", re.IGNORECASE)
if datasource.is_case_sensitive_database(db_name):
normalize_fn = lambda x: x
query = {}
args = self.request.arguments
for key in args.keys():
if not key in RESERVED_KEYS:
if len(args[key]) == 1:
if key in datatypes:
if datatypes[key] == "int":
query[key] = int(args[key][0])
elif datatypes[key] == "float":
query[key] = float(args[key][0])
else:
query[key] = normalize_fn(args[key][0])
else:
query[key] = normalize_fn(args[key][0])
else:
if key in datatypes:
if datatypes[key] == "int":
query[key] = {"$in": map(int, args[key])}
elif datatypes[key] == "float":
query[key] = {"$in": map(float, args[key])}
else:
query[key] = {"$in": map(normalize_fn, args[key])}
else:
query[key] = {"$in": map(normalize_fn, args[key])}
return query
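    # Illustrative sketch (hypothetical fields), assuming a case-insensitive
    # database and datatypes == {"score": "float"}, of what
    # transpose_query_arguments() builds from ?gene=TP53&score=1.5&chr=7&chr=8:
    #     {"gene": re.compile("^TP53$", re.IGNORECASE),
    #      "score": 1.5,
    #      "chr": {"$in": [re.compile("^7$", re.IGNORECASE),
    #                      re.compile("^8$", re.IGNORECASE)]}}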
def write_tsv(self, items, headers):
filename = self.get_argument("output_filename", "data_export.tsv")
attachment = "attachment; filename=\"%s\"" % filename
if options.verbose: logging.info("write_tsv [%s]" % attachment)
self.set_header("Content-Type", "text/tab-separated-values")
self.set_header("Content-Disposition", attachment)
tsvwriter = csv.writer(self, delimiter="\t")
projection = self.get_projection()
if len(items) > 0:
colheaders = headers
if "values" in colheaders: colheaders.remove("values")
newheaders = []
if len(projection.keys()) > 0:
for key in projection.keys():
val = projection[key]
logging.info("key,val=%s,%s" % (key, val))
if key in colheaders:
if val: newheaders.append(key)
if not val: colheaders.remove(str(key))
if len(newheaders) > 0: colheaders = newheaders
pivotkeys = []
if "values" in items[0]:
values_keys = items[0]["values"].keys()
for value_key in values_keys: pivotkeys.append(str(value_key))
combinedkeys = []
combinedkeys.extend(colheaders)
if pivotkeys: combinedkeys.extend(pivotkeys)
tsvwriter.writerow(combinedkeys)
for item in items:
vals = []
for colheader in colheaders:
if colheader in item:
vals.append(item[colheader])
else:
vals.append("")
if "values" in item:
item_values = item["values"]
for pivotkey in pivotkeys:
vals.append(item_values[pivotkey])
tsvwriter.writerow(vals)
|
the-stack_106_20960
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-"
# Copyright 2017 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Script to build a component artifact."""
from __future__ import print_function
import json
import os
import re
import zipfile
from distutils.version import LooseVersion
from chromite.lib import commandline
from chromite.lib import cros_build_lib
from chromite.lib import cros_logging as logging
from chromite.lib import gs
from chromite.lib import osutils
logger = logging.getLogger(__name__)
COMPONENT_ZIP = 'files.zip'
MANIFEST_FILE_NAME = 'manifest.json'
MANIFEST_VERSION_FIELD = u'version'
MANIFEST_PACKAGE_VERSION_FIELD = u'package_version'
def GetParser():
parser = commandline.ArgumentParser(description=__doc__)
# Optional arguments:
parser.add_argument('--gsbucket', default=None, metavar='GS_BUCKET_URI',
help='Override the gsbucket field (Google Cloud Storage '
'bucket where component is uploaded to) in config '
'file.')
parser.add_argument('--upload', dest='upload', action='store_true',
default=False,
help='Upload to Omaha gsbucket.')
# Required arguments:
required = parser.add_argument_group('required arguments')
required.add_argument('--board', metavar='BOARD',
help='Board to build the component for.',
required=True)
required.add_argument('--config_path', metavar='CONFIG',
help='Path to the config file.', required=True)
required.add_argument('--platform', metavar='PLATFORM',
help='Name for the platform folder in Omaha.',
choices=['chromeos_arm32-archive',
'chromeos_intel64-archive',
'chromeos_arm32',
'chromeos_intel64'], required=True)
# Positional arguments:
parser.add_argument('component', metavar='COMPONENT',
help='The component to build (key inside the config '
'file).')
return parser
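# A hedged example of how this builder might be invoked (the board, paths and
# component name below are hypothetical placeholders, not real values):
#   <this_script> --board=eve --config_path=/path/to/components.json \
#       --platform=chromeos_intel64 --upload sample-component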
def ParseVersion(version_str):
"""Parse version string into a list with components.
Args:
version_str: (str) version string.
Returns:
[int]: a list with version components.
"""
pattern = re.compile("[0-9]+(\\.[0-9]+){2,3}")
m = pattern.match(version_str)
if m:
return [int(x) for x in m.group().split('.')]
return []
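# Worked examples for ParseVersion (derived from the regex above, shown only
# as an illustration):
#   ParseVersion('10.2.1')    -> [10, 2, 1]
#   ParseVersion('10.2.1.4')  -> [10, 2, 1, 4]
#   ParseVersion('10.2')      -> []   (fewer than three components)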
def CheckGsBucket(gsbucket):
"""Return list of folders in a gs bucket.
Args:
gsbucket: (str) gs bucket url.
Returns:
[str]: a list of folder paths.
"""
ctx = gs.GSContext()
dirs = ctx.LS(gsbucket)
return [x for x in dirs if x != gsbucket]
def GetCurrentVersion(paths, platform):
"""Find the current component version by iterating gsbucket root folder.
Args:
paths: ([str]) a list of folder paths strings.
platform: (str) the platform for which the component is being built
Returns:
str: current component version.
str: gs path for current component version.
"""
current_version = LooseVersion('0.0.0.0')
current_version_path = None
for version_path in paths:
if version_path[-1] != '/':
logger.fatal("version_path (%s) needs to end with '/'.", version_path)
continue
version = os.path.basename(version_path[:-1])
if len(ParseVersion(version)) < 3:
# Path does not contain a component version.
continue
v = LooseVersion(version)
if v > current_version:
# Skip the version if the path for the target platform does not exist.
ctx = gs.GSContext()
src = os.path.join(version_path, platform, COMPONENT_ZIP)
if not ctx.Exists(src):
continue
current_version = v
current_version_path = version_path
return str(current_version), current_version_path
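# Illustrative sketch (hypothetical gs paths) of GetCurrentVersion: given
#   paths = ['gs://bucket/10.2.1.3/', 'gs://bucket/10.2.2.1/']
# and assuming both versions contain <platform>/files.zip, the function would
# return ('10.2.2.1', 'gs://bucket/10.2.2.1/').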
def DecideVersion(version, current_version):
"""Decide the component version
Each version has release.major.minor[.bugfix] format.
  If release.major.minor are equal, use current_version as the base and
  increase its bugfix by 1 (bugfix is set to 1 if it is missing). Otherwise,
  use the package version as the final version and set bugfix to 1.
Args:
version: (str) package version
current_version: (str) current component version
Returns:
str: next component version.
"""
version = ParseVersion(version)
current_version = ParseVersion(current_version)
if (len(version) != 3 and len(version) != 4) or \
(len(current_version) != 3 and len(current_version) != 4):
logger.fatal('version is in wrong format.')
return None
if LooseVersion('.'.join([str(x) for x in version[0:3]])) < \
LooseVersion('.'.join([str(x) for x in current_version[0:3]])):
logger.fatal('component being built is outdated.')
return None
if version[0] == current_version[0] and version[1] == current_version[1] and \
version[2] == current_version[2]:
# Rev bug fix on top of current_version.
version = current_version
if len(version) < 4:
version.append(1)
else:
version[3] = version[3] + 1
else:
# Use package version.1 as next component version.
if len(version) < 4:
version.append(1)
else:
version[3] = 1
return '.'.join([str(x) for x in version])
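# Worked examples for DecideVersion (illustration only):
#   DecideVersion('10.2.1', '10.2.1.3') -> '10.2.1.4'  (same base, bump bugfix)
#   DecideVersion('10.3.0', '10.2.1.3') -> '10.3.0.1'  (new base, bugfix reset)
#   DecideVersion('10.1.0', '10.2.1.3') -> None        (package is outdated)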
def CheckValidMetadata(metadata):
"""Check if metadata in configuration is valid.
Args:
metadata: (str) metadata in component configs.
Returns:
bool: if metadata is valid.
"""
if not "files" in metadata or \
not "gsbucket" in metadata or \
not "pkgpath" in metadata or \
not "name" in metadata or \
not "manifest" in metadata:
cros_build_lib.Die('attribute is missing.')
return False
else:
return True
def CheckComponentFilesExistence(paths):
"""Check if paths exist.
Args:
paths: ([str]) a list of path.
Returns:
bool: true if all paths exists.
"""
for path in paths:
if not os.path.exists(path):
cros_build_lib.Die('component file is missing: %s', path)
return False
logger.info('File to be included to final component: %s', path)
return True
def AddDirectoryToZip(zip_file, dir_path):
"""Adds a directory to a zip file.
This will add the whole directory to the zip, rather than contents of the
directory. Empty (sub)directories will be ignored.
Args:
zip_file: (zipfile.ZipFile) the zip file to which to add the dir.
dir_path: (string) the directory to add to the zip file.
"""
  # The directory's parent path, used to calculate the target paths in the zip
  # file, which should include |dir_path| itself.
dir_parent = os.path.normpath(os.path.join(dir_path, os.pardir))
for current_dir, _subdirs, file_names in os.walk(dir_path):
for file_name in file_names:
file_path = os.path.normpath(os.path.join(current_dir, file_name))
zip_path = os.path.relpath(file_path, dir_parent)
zip_file.write(file_path, zip_path)
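# Minimal usage sketch for AddDirectoryToZip (hypothetical paths):
#   zf = zipfile.ZipFile('/tmp/out.zip', 'w', zipfile.ZIP_DEFLATED)
#   AddDirectoryToZip(zf, '/tmp/component/lib')
#   # entries are stored as 'lib/...' because the directory itself is included
#   zf.close()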
def UploadComponent(component_dir, gsbucket):
"""Upload a component.
Args:
component_dir: (str) location for generated component.
gsbucket: (str) gs bucket to upload.
"""
logger.info('upload %s to %s', component_dir, gsbucket)
ctx = gs.GSContext()
if cros_build_lib.BooleanPrompt(
prompt='Are you sure you want to upload component to official gs bucket',
default=False,
prolog='Once component is uploaded, it can not be modified again.'):
# Upload component to gs.
ctx.Copy(component_dir, gsbucket, recursive=True)
def CreateComponent(manifest_path, version, package_name, package_version,
platform, files, upload, gsbucket):
"""Create component zip file.
Args:
manifest_path: (str) path to raw manifest file.
version: (str) component version.
package_name: (str) the package name
package_version: (str) package version.
platform: (str) platform folder name on Omaha.
files: ([str]) paths for component files.
upload: (bool) whether to upload the generate component to Omaha.
gsbucket: (str) Omaha gsbucket path.
"""
if not os.path.exists(manifest_path):
cros_build_lib.Die('manifest file is missing: %s', manifest_path)
with open(manifest_path) as f:
# Construct final manifest file.
data = json.load(f)
data[MANIFEST_VERSION_FIELD] = version
data[MANIFEST_PACKAGE_VERSION_FIELD] = package_version
# Create final zip file of the component and store it to a temp folder.
with osutils.TempDir(prefix='component_') as tempdir:
component_folder = os.path.join(tempdir, data[MANIFEST_VERSION_FIELD],
platform)
os.makedirs(component_folder)
component_zipfile = os.path.join(component_folder, COMPONENT_ZIP)
zf = zipfile.ZipFile(component_zipfile, 'w', zipfile.ZIP_DEFLATED)
# Move component files into zip file.
for f in files:
if os.path.isdir(f):
AddDirectoryToZip(zf, f)
else:
zf.write(f, os.path.basename(f))
# Write manifest file into zip file.
zf.writestr(MANIFEST_FILE_NAME, json.dumps(data))
logger.info('component is generated at %s', zf.filename)
zf.close()
# Upload component to gs bucket.
if upload:
if '9999' in package_version:
cros_build_lib.Die('Cannot upload component while the %s package '
'is being worked on.', package_name)
UploadComponent(os.path.join(tempdir, data[MANIFEST_VERSION_FIELD]),
gsbucket)
def GetCurrentPackageVersion(current_version_path, platform):
"""Get package version of current component.
Args:
current_version_path: (str) path to current version component.
platform: (str) platform name in omaha.
Returns:
str: package version of current component.
"""
if current_version_path:
ctx = gs.GSContext()
src = os.path.join(current_version_path, platform, COMPONENT_ZIP)
if ctx.Exists(src):
with osutils.TempDir(prefix='component_') as tempdir:
ctx.Copy(src, tempdir)
cros_build_lib.RunCommand(
['unzip', '-o', '-d',
tempdir, os.path.join(tempdir, COMPONENT_ZIP)],
redirect_stdout=True, redirect_stderr=True)
with open(os.path.join(tempdir, MANIFEST_FILE_NAME)) as f:
manifest = json.load(f)
if MANIFEST_PACKAGE_VERSION_FIELD in manifest:
return manifest[MANIFEST_PACKAGE_VERSION_FIELD]
return '0.0.0.0'
def FixPackageVersion(version):
"""Fix version to the format of X.Y.Z-rN
  Package versions in ebuilds have the format X[.Y[.Z]][-rN]; we convert them
  to X.Y.Z[-rN] by padding zeros for any missing Y and Z components.
This function is added because a package like arc++ has version numbers
(X)-rN which is not consistent with the rest of the packages.
Args:
version: (str) version to format.
Returns:
str: fixed version.
Or None: if version is not fixable.
"""
pattern = re.compile('([0-9]+)(\\.[0-9]+)?(\\.[0-9]+)?(-r[0-9]+)?$')
m = pattern.match(version)
if m is None or m.group(1) is None:
logger.info('version %s is in wrong format.', version)
return None
version = m.group(1)
for i in range(2, 4):
version = (version + '.0') if m.group(i) is None else (version + m.group(i))
if m.group(4) is not None:
version += m.group(4)
return version
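# Worked examples for FixPackageVersion (illustration only):
#   FixPackageVersion('25-r3')   -> '25.0.0-r3'
#   FixPackageVersion('1.2-r1')  -> '1.2.0-r1'
#   FixPackageVersion('1.2.3')   -> '1.2.3'
#   FixPackageVersion('bogus')   -> None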
def GetPackageVersion(folder_name, package_name):
"""Get the version of the package.
It checks if the folder is for the package. If yes, return the version of the
package.
Args:
folder_name: (str) name of the folder.
package_name: (str) name of the package.
Returns:
str: fixed version.
"""
pattern = re.compile('(^[\\w-]*)-[0-9]+(\\.[0-9]+){0,2}(-r[0-9]+)?$')
m = pattern.match(folder_name)
if m is not None and m.group(1) == package_name:
return FixPackageVersion(folder_name[len(package_name)+1:])
return None
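# Illustrative sketch for GetPackageVersion (hypothetical package names):
#   GetPackageVersion('demo-component-25-r3', 'demo-component') -> '25.0.0-r3'
#   GetPackageVersion('other-package-1.2.3', 'demo-component')  -> None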
def BuildComponent(component_to_build, components, board, platform,
gsbucket_override=None, upload=False):
"""Build a component.
Args:
component_to_build: (str) component to build.
components: ([object]) a list of components.
board: (str) board to build the component on.
platform: (str) platform name in omaha.
gsbucket_override: (str) gsbucket value to override in components if not
None.
upload: (bool) True if uploading to Omaha; False if not uploading to Omaha.
"""
for component in components:
for pkg, metadata in component.iteritems():
if pkg == component_to_build:
if not CheckValidMetadata(metadata):
continue
if (metadata.get('valid_platforms') and
not platform in metadata['valid_platforms']):
cros_build_lib.Die('Invalid platform')
logger.info('build component:%s', pkg)
# Check if component files are built successfully.
files = [os.path.join(cros_build_lib.GetSysroot(), 'build', board, x) \
for x in metadata["files"]]
if not CheckComponentFilesExistence(files):
cros_build_lib.Die('component files are missing.')
# Check release versions on gs.
if gsbucket_override is not None:
gsbucket = gsbucket_override
else:
gsbucket = metadata['gsbucket']
logger.info('Use %s gsbucket for component.', gsbucket)
dirs = CheckGsBucket(gsbucket)
if len(dirs) == 0:
cros_build_lib.Die('gsbucket %s has no subfolders', gsbucket)
logger.info('Dirs in gsbucket:%s', dirs)
current_version, current_version_path = GetCurrentVersion(dirs,
platform)
logger.info('latest component version on Omaha gs: %s', current_version)
# Get package version of current component.
current_package_version = GetCurrentPackageVersion(current_version_path,
platform)
# Check component (gentoo package) version.
name = metadata["name"]
for f in os.listdir(os.path.join(cros_build_lib.GetSysroot(), 'build',
board, metadata["pkgpath"])):
package_version = GetPackageVersion(f, name)
if package_version is not None:
logger.info('current package version: %s', package_version)
logger.info('package version of current component: %s',
current_package_version)
version = DecideVersion(package_version, current_version)
logger.info('next component version on Omaha gs: %s', version)
manifest_path = os.path.join(cros_build_lib.GetSysroot(), 'build',
board, metadata["manifest"])
CreateComponent(manifest_path, version, name, package_version,
platform, files, upload, gsbucket)
return
  cros_build_lib.Die('Package could not be found, component could not be '
                     'built.')
def GetComponentsToBuild(path):
"""Parse components from config file.
Args:
path: (str) file path to config file.
Returns:
Object: a json object of config file content.
"""
with open(path) as f:
return json.load(f)
def main(argv):
opts = GetParser().parse_args(argv)
BuildComponent(component_to_build=opts.component,
components=GetComponentsToBuild(opts.config_path),
board=opts.board,
platform=opts.platform,
gsbucket_override=opts.gsbucket,
upload=opts.upload)
if __name__ == '__main__':
commandline.ScriptWrapperMain(lambda _: main)
|
the-stack_106_20962
|
from __future__ import absolute_import, division, print_function
import os
import re
import string
import sys
import time
import unittest
from collections import defaultdict, namedtuple, OrderedDict
import numpy as np
import ray
import ray.test.test_functions as test_functions
import ray.test.test_utils
if sys.version_info >= (3, 0):
from importlib import reload
def assert_equal(obj1, obj2):
module_numpy = (type(obj1).__module__ == np.__name__
or type(obj2).__module__ == np.__name__)
if module_numpy:
empty_shape = ((hasattr(obj1, "shape") and obj1.shape == ())
or (hasattr(obj2, "shape") and obj2.shape == ()))
if empty_shape:
# This is a special case because currently np.testing.assert_equal
# fails because we do not properly handle different numerical
# types.
assert obj1 == obj2, ("Objects {} and {} are "
"different.".format(obj1, obj2))
else:
np.testing.assert_equal(obj1, obj2)
elif hasattr(obj1, "__dict__") and hasattr(obj2, "__dict__"):
special_keys = ["_pytype_"]
assert (set(list(obj1.__dict__.keys()) + special_keys) == set(
list(obj2.__dict__.keys()) + special_keys)), ("Objects {} "
"and {} are "
"different.".format(
obj1, obj2))
for key in obj1.__dict__.keys():
if key not in special_keys:
assert_equal(obj1.__dict__[key], obj2.__dict__[key])
elif type(obj1) is dict or type(obj2) is dict:
assert_equal(obj1.keys(), obj2.keys())
for key in obj1.keys():
assert_equal(obj1[key], obj2[key])
elif type(obj1) is list or type(obj2) is list:
assert len(obj1) == len(obj2), ("Objects {} and {} are lists with "
"different lengths.".format(
obj1, obj2))
for i in range(len(obj1)):
assert_equal(obj1[i], obj2[i])
elif type(obj1) is tuple or type(obj2) is tuple:
assert len(obj1) == len(obj2), ("Objects {} and {} are tuples with "
"different lengths.".format(
obj1, obj2))
for i in range(len(obj1)):
assert_equal(obj1[i], obj2[i])
elif (ray.serialization.is_named_tuple(type(obj1))
or ray.serialization.is_named_tuple(type(obj2))):
assert len(obj1) == len(obj2), ("Objects {} and {} are named tuples "
"with different lengths.".format(
obj1, obj2))
for i in range(len(obj1)):
assert_equal(obj1[i], obj2[i])
else:
assert obj1 == obj2, "Objects {} and {} are different.".format(
obj1, obj2)
if sys.version_info >= (3, 0):
long_extras = [0, np.array([["hi", u"hi"], [1.3, 1]])]
else:
long_extras = [
long(0), # noqa: E501,F821
np.array([
["hi", u"hi"],
[1.3, long(1)] # noqa: E501,F821
])
]
PRIMITIVE_OBJECTS = [
0, 0.0, 0.9, 1 << 62, 1 << 100, 1 << 999, [1 << 100, [1 << 100]], "a",
string.printable, "\u262F", u"hello world", u"\xff\xfe\x9c\x001\x000\x00",
None, True, False, [], (), {},
np.int8(3),
np.int32(4),
np.int64(5),
np.uint8(3),
np.uint32(4),
np.uint64(5),
np.float32(1.9),
np.float64(1.9),
np.zeros([100, 100]),
np.random.normal(size=[100, 100]),
np.array(["hi", 3]),
np.array(["hi", 3], dtype=object)
] + long_extras
COMPLEX_OBJECTS = [
[[[[[[[[[[[[]]]]]]]]]]]],
{"obj{}".format(i): np.random.normal(size=[100, 100])
for i in range(10)},
# {(): {(): {(): {(): {(): {(): {(): {(): {(): {(): {
# (): {(): {}}}}}}}}}}}}},
(
(((((((((), ), ), ), ), ), ), ), ), ),
{
"a": {
"b": {
"c": {
"d": {}
}
}
}
}
]
class Foo(object):
def __init__(self, value=0):
self.value = value
def __hash__(self):
return hash(self.value)
def __eq__(self, other):
return other.value == self.value
class Bar(object):
def __init__(self):
for i, val in enumerate(PRIMITIVE_OBJECTS + COMPLEX_OBJECTS):
setattr(self, "field{}".format(i), val)
class Baz(object):
def __init__(self):
self.foo = Foo()
self.bar = Bar()
def method(self, arg):
pass
class Qux(object):
def __init__(self):
self.objs = [Foo(), Bar(), Baz()]
class SubQux(Qux):
def __init__(self):
Qux.__init__(self)
class CustomError(Exception):
pass
Point = namedtuple("Point", ["x", "y"])
NamedTupleExample = namedtuple("Example",
"field1, field2, field3, field4, field5")
CUSTOM_OBJECTS = [
Exception("Test object."),
CustomError(),
Point(11, y=22),
Foo(),
Bar(),
Baz(), # Qux(), SubQux(),
NamedTupleExample(1, 1.0, "hi", np.zeros([3, 5]), [1, 2, 3])
]
BASE_OBJECTS = PRIMITIVE_OBJECTS + COMPLEX_OBJECTS + CUSTOM_OBJECTS
LIST_OBJECTS = [[obj] for obj in BASE_OBJECTS]
TUPLE_OBJECTS = [(obj, ) for obj in BASE_OBJECTS]
# The check that type(obj).__module__ != "numpy" should be unnecessary, but
# otherwise this seems to fail on Mac OS X on Travis.
DICT_OBJECTS = (
[{
obj: obj
} for obj in PRIMITIVE_OBJECTS
if (obj.__hash__ is not None and type(obj).__module__ != "numpy")] + [{
0:
obj
} for obj in BASE_OBJECTS] + [{
Foo(123): Foo(456)
}])
RAY_TEST_OBJECTS = BASE_OBJECTS + LIST_OBJECTS + TUPLE_OBJECTS + DICT_OBJECTS
class SerializationTest(unittest.TestCase):
def tearDown(self):
ray.worker.cleanup()
def testRecursiveObjects(self):
ray.init(num_workers=0)
class ClassA(object):
pass
# Make a list that contains itself.
lst = []
lst.append(lst)
# Make an object that contains itself as a field.
a1 = ClassA()
a1.field = a1
# Make two objects that contain each other as fields.
a2 = ClassA()
a3 = ClassA()
a2.field = a3
a3.field = a2
# Make a dictionary that contains itself.
d1 = {}
d1["key"] = d1
# Create a list of recursive objects.
recursive_objects = [lst, a1, a2, a3, d1]
# Check that exceptions are thrown when we serialize the recursive
# objects.
for obj in recursive_objects:
self.assertRaises(Exception, lambda: ray.put(obj))
def testPassingArgumentsByValue(self):
ray.init(num_workers=1)
@ray.remote
def f(x):
return x
# Check that we can pass arguments by value to remote functions and
# that they are uncorrupted.
for obj in RAY_TEST_OBJECTS:
assert_equal(obj, ray.get(f.remote(obj)))
def testPassingArgumentsByValueOutOfTheBox(self):
ray.init(num_workers=1)
@ray.remote
def f(x):
return x
# Test passing lambdas.
def temp():
return 1
self.assertEqual(ray.get(f.remote(temp))(), 1)
self.assertEqual(ray.get(f.remote(lambda x: x + 1))(3), 4)
# Test sets.
self.assertEqual(ray.get(f.remote(set())), set())
s = set([1, (1, 2, "hi")])
self.assertEqual(ray.get(f.remote(s)), s)
# Test types.
self.assertEqual(ray.get(f.remote(int)), int)
self.assertEqual(ray.get(f.remote(float)), float)
self.assertEqual(ray.get(f.remote(str)), str)
class Foo(object):
def __init__(self):
pass
# Make sure that we can put and get a custom type. Note that the result
# won't be "equal" to Foo.
ray.get(ray.put(Foo))
def testPuttingObjectThatClosesOverObjectID(self):
# This test is here to prevent a regression of
# https://github.com/ray-project/ray/issues/1317.
ray.init(num_workers=0)
class Foo(object):
def __init__(self):
self.val = ray.put(0)
def method(self):
f
f = Foo()
with self.assertRaises(ray.local_scheduler.common_error):
ray.put(f)
class WorkerTest(unittest.TestCase):
def tearDown(self):
ray.worker.cleanup()
def testPythonWorkers(self):
# Test the codepath for starting workers from the Python script,
# instead of the local scheduler. This codepath is for debugging
# purposes only.
num_workers = 4
ray.worker._init(
num_workers=num_workers,
start_workers_from_local_scheduler=False,
start_ray_local=True)
@ray.remote
def f(x):
return x
values = ray.get([f.remote(1) for i in range(num_workers * 2)])
self.assertEqual(values, [1] * (num_workers * 2))
def testPutGet(self):
ray.init(num_workers=0)
for i in range(100):
value_before = i * 10**6
objectid = ray.put(value_before)
value_after = ray.get(objectid)
self.assertEqual(value_before, value_after)
for i in range(100):
value_before = i * 10**6 * 1.0
objectid = ray.put(value_before)
value_after = ray.get(objectid)
self.assertEqual(value_before, value_after)
for i in range(100):
value_before = "h" * i
objectid = ray.put(value_before)
value_after = ray.get(objectid)
self.assertEqual(value_before, value_after)
for i in range(100):
value_before = [1] * i
objectid = ray.put(value_before)
value_after = ray.get(objectid)
self.assertEqual(value_before, value_after)
class APITest(unittest.TestCase):
def init_ray(self, **kwargs):
if kwargs is None:
kwargs = {}
ray.init(**kwargs)
def tearDown(self):
ray.worker.cleanup()
def testCustomSerializers(self):
self.init_ray(num_workers=1)
class Foo(object):
def __init__(self):
self.x = 3
def custom_serializer(obj):
return 3, "string1", type(obj).__name__
def custom_deserializer(serialized_obj):
return serialized_obj, "string2"
ray.register_custom_serializer(Foo, serializer=custom_serializer,
deserializer=custom_deserializer)
self.assertEqual(ray.get(ray.put(Foo())),
((3, "string1", Foo.__name__), "string2"))
class Bar(object):
def __init__(self):
self.x = 3
ray.register_custom_serializer(Bar, serializer=custom_serializer,
deserializer=custom_deserializer)
@ray.remote
def f():
return Bar()
self.assertEqual(ray.get(f.remote()),
((3, "string1", Bar.__name__), "string2"))
def testRegisterClass(self):
self.init_ray(num_workers=2)
        # Check that we can put and get an object of a class that has not been
        # explicitly registered.
class TempClass(object):
pass
ray.get(ray.put(TempClass()))
# Test subtypes of dictionaries.
value_before = OrderedDict([("hello", 1), ("world", 2)])
object_id = ray.put(value_before)
self.assertEqual(value_before, ray.get(object_id))
value_before = defaultdict(lambda: 0, [("hello", 1), ("world", 2)])
object_id = ray.put(value_before)
self.assertEqual(value_before, ray.get(object_id))
value_before = defaultdict(lambda: [], [("hello", 1), ("world", 2)])
object_id = ray.put(value_before)
self.assertEqual(value_before, ray.get(object_id))
# Test passing custom classes into remote functions from the driver.
@ray.remote
def f(x):
return x
foo = ray.get(f.remote(Foo(7)))
self.assertEqual(foo, Foo(7))
regex = re.compile(r"\d+\.\d*")
new_regex = ray.get(f.remote(regex))
# This seems to fail on the system Python 3 that comes with
# Ubuntu, so it is commented out for now:
# self.assertEqual(regex, new_regex)
# Instead, we do this:
self.assertEqual(regex.pattern, new_regex.pattern)
# Test returning custom classes created on workers.
@ray.remote
def g():
return SubQux(), Qux()
subqux, qux = ray.get(g.remote())
self.assertEqual(subqux.objs[2].foo.value, 0)
# Test exporting custom class definitions from one worker to another
# when the worker is blocked in a get.
class NewTempClass(object):
def __init__(self, value):
self.value = value
@ray.remote
def h1(x):
return NewTempClass(x)
@ray.remote
def h2(x):
return ray.get(h1.remote(x))
self.assertEqual(ray.get(h2.remote(10)).value, 10)
# Test registering multiple classes with the same name.
@ray.remote(num_return_vals=3)
def j():
class Class0(object):
def method0(self):
pass
c0 = Class0()
class Class0(object):
def method1(self):
pass
c1 = Class0()
class Class0(object):
def method2(self):
pass
c2 = Class0()
return c0, c1, c2
results = []
for _ in range(5):
results += j.remote()
for i in range(len(results) // 3):
c0, c1, c2 = ray.get(results[(3 * i):(3 * (i + 1))])
c0.method0()
c1.method1()
c2.method2()
self.assertFalse(hasattr(c0, "method1"))
self.assertFalse(hasattr(c0, "method2"))
self.assertFalse(hasattr(c1, "method0"))
self.assertFalse(hasattr(c1, "method2"))
self.assertFalse(hasattr(c2, "method0"))
self.assertFalse(hasattr(c2, "method1"))
@ray.remote
def k():
class Class0(object):
def method0(self):
pass
c0 = Class0()
class Class0(object):
def method1(self):
pass
c1 = Class0()
class Class0(object):
def method2(self):
pass
c2 = Class0()
return c0, c1, c2
results = ray.get([k.remote() for _ in range(5)])
for c0, c1, c2 in results:
c0.method0()
c1.method1()
c2.method2()
self.assertFalse(hasattr(c0, "method1"))
self.assertFalse(hasattr(c0, "method2"))
self.assertFalse(hasattr(c1, "method0"))
self.assertFalse(hasattr(c1, "method2"))
self.assertFalse(hasattr(c2, "method0"))
self.assertFalse(hasattr(c2, "method1"))
def testKeywordArgs(self):
reload(test_functions)
self.init_ray()
x = test_functions.keyword_fct1.remote(1)
self.assertEqual(ray.get(x), "1 hello")
x = test_functions.keyword_fct1.remote(1, "hi")
self.assertEqual(ray.get(x), "1 hi")
x = test_functions.keyword_fct1.remote(1, b="world")
self.assertEqual(ray.get(x), "1 world")
x = test_functions.keyword_fct2.remote(a="w", b="hi")
self.assertEqual(ray.get(x), "w hi")
x = test_functions.keyword_fct2.remote(b="hi", a="w")
self.assertEqual(ray.get(x), "w hi")
x = test_functions.keyword_fct2.remote(a="w")
self.assertEqual(ray.get(x), "w world")
x = test_functions.keyword_fct2.remote(b="hi")
self.assertEqual(ray.get(x), "hello hi")
x = test_functions.keyword_fct2.remote("w")
self.assertEqual(ray.get(x), "w world")
x = test_functions.keyword_fct2.remote("w", "hi")
self.assertEqual(ray.get(x), "w hi")
x = test_functions.keyword_fct3.remote(0, 1, c="w", d="hi")
self.assertEqual(ray.get(x), "0 1 w hi")
x = test_functions.keyword_fct3.remote(0, 1, d="hi", c="w")
self.assertEqual(ray.get(x), "0 1 w hi")
x = test_functions.keyword_fct3.remote(0, 1, c="w")
self.assertEqual(ray.get(x), "0 1 w world")
x = test_functions.keyword_fct3.remote(0, 1, d="hi")
self.assertEqual(ray.get(x), "0 1 hello hi")
x = test_functions.keyword_fct3.remote(0, 1)
self.assertEqual(ray.get(x), "0 1 hello world")
# Check that we cannot pass invalid keyword arguments to functions.
@ray.remote
def f1():
return
@ray.remote
def f2(x, y=0, z=0):
return
# Make sure we get an exception if too many arguments are passed in.
with self.assertRaises(Exception):
f1.remote(3)
with self.assertRaises(Exception):
f1.remote(x=3)
with self.assertRaises(Exception):
f2.remote(0, w=0)
# Make sure we get an exception if too many arguments are passed in.
with self.assertRaises(Exception):
f2.remote(1, 2, 3, 4)
@ray.remote
def f3(x):
return x
self.assertEqual(ray.get(f3.remote(4)), 4)
def testVariableNumberOfArgs(self):
reload(test_functions)
self.init_ray()
x = test_functions.varargs_fct1.remote(0, 1, 2)
self.assertEqual(ray.get(x), "0 1 2")
x = test_functions.varargs_fct2.remote(0, 1, 2)
self.assertEqual(ray.get(x), "1 2")
self.assertTrue(test_functions.kwargs_exception_thrown)
self.assertTrue(test_functions.varargs_and_kwargs_exception_thrown)
@ray.remote
def f1(*args):
return args
@ray.remote
def f2(x, y, *args):
return x, y, args
self.assertEqual(ray.get(f1.remote()), ())
self.assertEqual(ray.get(f1.remote(1)), (1, ))
self.assertEqual(ray.get(f1.remote(1, 2, 3)), (1, 2, 3))
with self.assertRaises(Exception):
f2.remote()
with self.assertRaises(Exception):
f2.remote(1)
self.assertEqual(ray.get(f2.remote(1, 2)), (1, 2, ()))
self.assertEqual(ray.get(f2.remote(1, 2, 3)), (1, 2, (3, )))
self.assertEqual(ray.get(f2.remote(1, 2, 3, 4)), (1, 2, (3, 4)))
def testNoArgs(self):
reload(test_functions)
self.init_ray()
ray.get(test_functions.no_op.remote())
def testDefiningRemoteFunctions(self):
self.init_ray(num_cpus=3)
# Test that we can define a remote function in the shell.
@ray.remote
def f(x):
return x + 1
self.assertEqual(ray.get(f.remote(0)), 1)
# Test that we can redefine the remote function.
@ray.remote
def f(x):
return x + 10
while True:
val = ray.get(f.remote(0))
self.assertTrue(val in [1, 10])
if val == 10:
break
else:
print("Still using old definition of f, trying again.")
# Test that we can close over plain old data.
data = [
np.zeros([3, 5]), (1, 2, "a"), [0.0, 1.0, 1 << 62], 1 << 60, {
"a": np.zeros(3)
}
]
@ray.remote
def g():
return data
ray.get(g.remote())
# Test that we can close over modules.
@ray.remote
def h():
return np.zeros([3, 5])
assert_equal(ray.get(h.remote()), np.zeros([3, 5]))
@ray.remote
def j():
return time.time()
ray.get(j.remote())
# Test that we can define remote functions that call other remote
# functions.
@ray.remote
def k(x):
return x + 1
@ray.remote
def k2(x):
return ray.get(k.remote(x))
@ray.remote
def m(x):
return ray.get(k2.remote(x))
self.assertEqual(ray.get(k.remote(1)), 2)
self.assertEqual(ray.get(k2.remote(1)), 2)
self.assertEqual(ray.get(m.remote(1)), 2)
def testGetMultiple(self):
self.init_ray()
object_ids = [ray.put(i) for i in range(10)]
self.assertEqual(ray.get(object_ids), list(range(10)))
# Get a random choice of object IDs with duplicates.
indices = list(np.random.choice(range(10), 5))
indices += indices
results = ray.get([object_ids[i] for i in indices])
self.assertEqual(results, indices)
def testWait(self):
self.init_ray(num_cpus=1)
@ray.remote
def f(delay):
time.sleep(delay)
return 1
objectids = [
f.remote(1.0),
f.remote(0.5),
f.remote(0.5),
f.remote(0.5)
]
ready_ids, remaining_ids = ray.wait(objectids)
self.assertEqual(len(ready_ids), 1)
self.assertEqual(len(remaining_ids), 3)
ready_ids, remaining_ids = ray.wait(objectids, num_returns=4)
self.assertEqual(set(ready_ids), set(objectids))
self.assertEqual(remaining_ids, [])
objectids = [
f.remote(0.5),
f.remote(0.5),
f.remote(0.5),
f.remote(0.5)
]
start_time = time.time()
ready_ids, remaining_ids = ray.wait(
objectids, timeout=1750, num_returns=4)
self.assertLess(time.time() - start_time, 2)
self.assertEqual(len(ready_ids), 3)
self.assertEqual(len(remaining_ids), 1)
ray.wait(objectids)
objectids = [
f.remote(1.0),
f.remote(0.5),
f.remote(0.5),
f.remote(0.5)
]
start_time = time.time()
ready_ids, remaining_ids = ray.wait(objectids, timeout=5000)
self.assertTrue(time.time() - start_time < 5)
self.assertEqual(len(ready_ids), 1)
self.assertEqual(len(remaining_ids), 3)
# Verify that calling wait with duplicate object IDs throws an
# exception.
x = ray.put(1)
self.assertRaises(Exception, lambda: ray.wait([x, x]))
# Make sure it is possible to call wait with an empty list.
ready_ids, remaining_ids = ray.wait([])
self.assertEqual(ready_ids, [])
self.assertEqual(remaining_ids, [])
# Verify that incorrect usage raises a TypeError.
x = ray.put(1)
with self.assertRaises(TypeError):
ray.wait(x)
with self.assertRaises(TypeError):
ray.wait(1)
with self.assertRaises(TypeError):
ray.wait([1])
def testMultipleWaitsAndGets(self):
# It is important to use three workers here, so that the three tasks
# launched in this experiment can run at the same time.
self.init_ray()
@ray.remote
def f(delay):
time.sleep(delay)
return 1
@ray.remote
def g(l):
# The argument l should be a list containing one object ID.
ray.wait([l[0]])
@ray.remote
def h(l):
# The argument l should be a list containing one object ID.
ray.get(l[0])
# Make sure that multiple wait requests involving the same object ID
# all return.
x = f.remote(1)
ray.get([g.remote([x]), g.remote([x])])
# Make sure that multiple get requests involving the same object ID all
# return.
x = f.remote(1)
ray.get([h.remote([x]), h.remote([x])])
def testCachingFunctionsToRun(self):
# Test that we export functions to run on all workers before the driver
# is connected.
def f(worker_info):
sys.path.append(1)
ray.worker.global_worker.run_function_on_all_workers(f)
def f(worker_info):
sys.path.append(2)
ray.worker.global_worker.run_function_on_all_workers(f)
def g(worker_info):
sys.path.append(3)
ray.worker.global_worker.run_function_on_all_workers(g)
def f(worker_info):
sys.path.append(4)
ray.worker.global_worker.run_function_on_all_workers(f)
self.init_ray()
@ray.remote
def get_state():
time.sleep(1)
return sys.path[-4], sys.path[-3], sys.path[-2], sys.path[-1]
res1 = get_state.remote()
res2 = get_state.remote()
self.assertEqual(ray.get(res1), (1, 2, 3, 4))
self.assertEqual(ray.get(res2), (1, 2, 3, 4))
# Clean up the path on the workers.
def f(worker_info):
sys.path.pop()
sys.path.pop()
sys.path.pop()
sys.path.pop()
ray.worker.global_worker.run_function_on_all_workers(f)
def testRunningFunctionOnAllWorkers(self):
self.init_ray()
def f(worker_info):
sys.path.append("fake_directory")
ray.worker.global_worker.run_function_on_all_workers(f)
@ray.remote
def get_path1():
return sys.path
self.assertEqual("fake_directory", ray.get(get_path1.remote())[-1])
def f(worker_info):
sys.path.pop(-1)
ray.worker.global_worker.run_function_on_all_workers(f)
# Create a second remote function to guarantee that when we call
# get_path2.remote(), the second function to run will have been run on
# the worker.
@ray.remote
def get_path2():
return sys.path
self.assertTrue("fake_directory" not in ray.get(get_path2.remote()))
def testLoggingAPI(self):
self.init_ray(driver_mode=ray.SILENT_MODE)
def events():
# This is a hack for getting the event log. It is not part of the
# API.
keys = ray.worker.global_worker.redis_client.keys("event_log:*")
res = []
for key in keys:
res.extend(
ray.worker.global_worker.redis_client.zrange(key, 0, -1))
return res
def wait_for_num_events(num_events, timeout=10):
start_time = time.time()
while time.time() - start_time < timeout:
if len(events()) >= num_events:
return
time.sleep(0.1)
print("Timing out of wait.")
@ray.remote
def test_log_event():
ray.log_event("event_type1", contents={"key": "val"})
@ray.remote
def test_log_span():
with ray.log_span("event_type2", contents={"key": "val"}):
pass
# Make sure that we can call ray.log_event in a remote function.
ray.get(test_log_event.remote())
# Wait for the event to appear in the event log.
wait_for_num_events(1)
self.assertEqual(len(events()), 1)
# Make sure that we can call ray.log_span in a remote function.
ray.get(test_log_span.remote())
# Wait for the events to appear in the event log.
wait_for_num_events(2)
self.assertEqual(len(events()), 2)
@ray.remote
def test_log_span_exception():
with ray.log_span("event_type2", contents={"key": "val"}):
raise Exception("This failed.")
# Make sure that logging a span works if an exception is thrown.
test_log_span_exception.remote()
# Wait for the events to appear in the event log.
wait_for_num_events(3)
self.assertEqual(len(events()), 3)
def testIdenticalFunctionNames(self):
# Define a bunch of remote functions and make sure that we don't
# accidentally call an older version.
self.init_ray()
num_calls = 200
@ray.remote
def f():
return 1
results1 = [f.remote() for _ in range(num_calls)]
@ray.remote
def f():
return 2
results2 = [f.remote() for _ in range(num_calls)]
@ray.remote
def f():
return 3
results3 = [f.remote() for _ in range(num_calls)]
@ray.remote
def f():
return 4
results4 = [f.remote() for _ in range(num_calls)]
@ray.remote
def f():
return 5
results5 = [f.remote() for _ in range(num_calls)]
self.assertEqual(ray.get(results1), num_calls * [1])
self.assertEqual(ray.get(results2), num_calls * [2])
self.assertEqual(ray.get(results3), num_calls * [3])
self.assertEqual(ray.get(results4), num_calls * [4])
self.assertEqual(ray.get(results5), num_calls * [5])
@ray.remote
def g():
return 1
@ray.remote # noqa: F811
def g():
return 2
@ray.remote # noqa: F811
def g():
return 3
@ray.remote # noqa: F811
def g():
return 4
@ray.remote # noqa: F811
def g():
return 5
result_values = ray.get([g.remote() for _ in range(num_calls)])
self.assertEqual(result_values, num_calls * [5])
def testIllegalAPICalls(self):
self.init_ray()
# Verify that we cannot call put on an ObjectID.
x = ray.put(1)
with self.assertRaises(Exception):
ray.put(x)
# Verify that we cannot call get on a regular value.
with self.assertRaises(Exception):
ray.get(3)
class APITestSharded(APITest):
def init_ray(self, **kwargs):
if kwargs is None:
kwargs = {}
kwargs["start_ray_local"] = True
kwargs["num_redis_shards"] = 20
kwargs["redirect_output"] = True
ray.worker._init(**kwargs)
class PythonModeTest(unittest.TestCase):
def tearDown(self):
ray.worker.cleanup()
def testPythonMode(self):
reload(test_functions)
ray.init(driver_mode=ray.PYTHON_MODE)
@ray.remote
def f():
return np.ones([3, 4, 5])
xref = f.remote()
# Remote functions should return by value.
assert_equal(xref, np.ones([3, 4, 5]))
# Check that ray.get is the identity.
assert_equal(xref, ray.get(xref))
y = np.random.normal(size=[11, 12])
# Check that ray.put is the identity.
assert_equal(y, ray.put(y))
# Make sure objects are immutable, this example is why we need to copy
# arguments before passing them into remote functions in python mode
aref = test_functions.python_mode_f.remote()
assert_equal(aref, np.array([0, 0]))
bref = test_functions.python_mode_g.remote(aref)
# Make sure python_mode_g does not mutate aref.
assert_equal(aref, np.array([0, 0]))
assert_equal(bref, np.array([1, 0]))
# wait should return the first num_returns values passed in as the
# first list and the remaining values as the second list
num_returns = 5
object_ids = [ray.put(i) for i in range(20)]
ready, remaining = ray.wait(
object_ids, num_returns=num_returns, timeout=None)
assert_equal(ready, object_ids[:num_returns])
assert_equal(remaining, object_ids[num_returns:])
# Test actors in PYTHON_MODE.
@ray.remote
class PythonModeTestClass(object):
def __init__(self, array):
self.array = array
def set_array(self, array):
self.array = array
def get_array(self):
return self.array
def modify_and_set_array(self, array):
array[0] = -1
self.array = array
test_actor = PythonModeTestClass.remote(np.arange(10))
# Remote actor functions should return by value
assert_equal(test_actor.get_array.remote(), np.arange(10))
test_array = np.arange(10)
# Remote actor functions should not mutate arguments
test_actor.modify_and_set_array.remote(test_array)
assert_equal(test_array, np.arange(10))
# Remote actor functions should keep state
test_array[0] = -1
assert_equal(test_array, test_actor.get_array.remote())
class ResourcesTest(unittest.TestCase):
def tearDown(self):
ray.worker.cleanup()
def testResourceConstraints(self):
num_workers = 20
ray.init(num_workers=num_workers, num_cpus=10, num_gpus=2)
@ray.remote(num_cpus=0)
def get_worker_id():
time.sleep(0.1)
return os.getpid()
# Attempt to wait for all of the workers to start up.
while True:
if len(
set(
ray.get([
get_worker_id.remote() for _ in range(num_workers)
]))) == num_workers:
break
time_buffer = 0.3
# At most 10 copies of this can run at once.
@ray.remote(num_cpus=1)
def f(n):
time.sleep(n)
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(10)])
duration = time.time() - start_time
self.assertLess(duration, 0.5 + time_buffer)
self.assertGreater(duration, 0.5)
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(11)])
duration = time.time() - start_time
self.assertLess(duration, 1 + time_buffer)
self.assertGreater(duration, 1)
@ray.remote(num_cpus=3)
def f(n):
time.sleep(n)
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(3)])
duration = time.time() - start_time
self.assertLess(duration, 0.5 + time_buffer)
self.assertGreater(duration, 0.5)
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(4)])
duration = time.time() - start_time
self.assertLess(duration, 1 + time_buffer)
self.assertGreater(duration, 1)
@ray.remote(num_gpus=1)
def f(n):
time.sleep(n)
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(2)])
duration = time.time() - start_time
self.assertLess(duration, 0.5 + time_buffer)
self.assertGreater(duration, 0.5)
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(3)])
duration = time.time() - start_time
self.assertLess(duration, 1 + time_buffer)
self.assertGreater(duration, 1)
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(4)])
duration = time.time() - start_time
self.assertLess(duration, 1 + time_buffer)
self.assertGreater(duration, 1)
def testMultiResourceConstraints(self):
num_workers = 20
ray.init(num_workers=num_workers, num_cpus=10, num_gpus=10)
@ray.remote(num_cpus=0)
def get_worker_id():
time.sleep(0.1)
return os.getpid()
# Attempt to wait for all of the workers to start up.
while True:
if len(
set(
ray.get([
get_worker_id.remote() for _ in range(num_workers)
]))) == num_workers:
break
@ray.remote(num_cpus=1, num_gpus=9)
def f(n):
time.sleep(n)
@ray.remote(num_cpus=9, num_gpus=1)
def g(n):
time.sleep(n)
time_buffer = 0.3
start_time = time.time()
ray.get([f.remote(0.5), g.remote(0.5)])
duration = time.time() - start_time
self.assertLess(duration, 0.5 + time_buffer)
self.assertGreater(duration, 0.5)
start_time = time.time()
ray.get([f.remote(0.5), f.remote(0.5)])
duration = time.time() - start_time
self.assertLess(duration, 1 + time_buffer)
self.assertGreater(duration, 1)
start_time = time.time()
ray.get([g.remote(0.5), g.remote(0.5)])
duration = time.time() - start_time
self.assertLess(duration, 1 + time_buffer)
self.assertGreater(duration, 1)
start_time = time.time()
ray.get([f.remote(0.5), f.remote(0.5), g.remote(0.5), g.remote(0.5)])
duration = time.time() - start_time
self.assertLess(duration, 1 + time_buffer)
self.assertGreater(duration, 1)
def testGPUIDs(self):
num_gpus = 10
ray.init(num_cpus=10, num_gpus=num_gpus)
@ray.remote(num_gpus=0)
def f0():
time.sleep(0.1)
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 0
assert (os.environ["CUDA_VISIBLE_DEVICES"] ==
",".join([str(i) for i in gpu_ids]))
for gpu_id in gpu_ids:
assert gpu_id in range(num_gpus)
return gpu_ids
@ray.remote(num_gpus=1)
def f1():
time.sleep(0.1)
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 1
assert (os.environ["CUDA_VISIBLE_DEVICES"] ==
",".join([str(i) for i in gpu_ids]))
for gpu_id in gpu_ids:
assert gpu_id in range(num_gpus)
return gpu_ids
@ray.remote(num_gpus=2)
def f2():
time.sleep(0.1)
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 2
assert (os.environ["CUDA_VISIBLE_DEVICES"] ==
",".join([str(i) for i in gpu_ids]))
for gpu_id in gpu_ids:
assert gpu_id in range(num_gpus)
return gpu_ids
@ray.remote(num_gpus=3)
def f3():
time.sleep(0.1)
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 3
assert (os.environ["CUDA_VISIBLE_DEVICES"] ==
",".join([str(i) for i in gpu_ids]))
for gpu_id in gpu_ids:
assert gpu_id in range(num_gpus)
return gpu_ids
@ray.remote(num_gpus=4)
def f4():
time.sleep(0.1)
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 4
assert (os.environ["CUDA_VISIBLE_DEVICES"] ==
",".join([str(i) for i in gpu_ids]))
for gpu_id in gpu_ids:
assert gpu_id in range(num_gpus)
return gpu_ids
@ray.remote(num_gpus=5)
def f5():
time.sleep(0.1)
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 5
assert (os.environ["CUDA_VISIBLE_DEVICES"] ==
",".join([str(i) for i in gpu_ids]))
for gpu_id in gpu_ids:
assert gpu_id in range(num_gpus)
return gpu_ids
# Wait for all workers to start up.
@ray.remote
def f():
time.sleep(0.1)
return os.getpid()
start_time = time.time()
while True:
if len(set(ray.get([f.remote() for _ in range(10)]))) == 10:
break
if time.time() > start_time + 10:
raise Exception("Timed out while waiting for workers to start "
"up.")
list_of_ids = ray.get([f0.remote() for _ in range(10)])
self.assertEqual(list_of_ids, 10 * [[]])
list_of_ids = ray.get([f1.remote() for _ in range(10)])
set_of_ids = set([tuple(gpu_ids) for gpu_ids in list_of_ids])
self.assertEqual(set_of_ids, set([(i, ) for i in range(10)]))
list_of_ids = ray.get([f2.remote(), f4.remote(), f4.remote()])
all_ids = [gpu_id for gpu_ids in list_of_ids for gpu_id in gpu_ids]
self.assertEqual(set(all_ids), set(range(10)))
remaining = [f5.remote() for _ in range(20)]
for _ in range(10):
t1 = time.time()
ready, remaining = ray.wait(remaining, num_returns=2)
t2 = time.time()
# There are only 10 GPUs, and each task uses 2 GPUs, so there
# should only be 2 tasks scheduled at a given time, so if we wait
# for 2 tasks to finish, then it should take at least 0.1 seconds
# for each pair of tasks to finish.
self.assertGreater(t2 - t1, 0.09)
list_of_ids = ray.get(ready)
all_ids = [gpu_id for gpu_ids in list_of_ids for gpu_id in gpu_ids]
# Commenting out the below assert because it seems to fail a lot.
# self.assertEqual(set(all_ids), set(range(10)))
# Test that actors have CUDA_VISIBLE_DEVICES set properly.
@ray.remote
class Actor0(object):
def __init__(self):
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 0
assert (os.environ["CUDA_VISIBLE_DEVICES"] ==
",".join([str(i) for i in gpu_ids]))
# Set self.x to make sure that we got here.
self.x = 1
def test(self):
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 0
assert (os.environ["CUDA_VISIBLE_DEVICES"] ==
",".join([str(i) for i in gpu_ids]))
return self.x
@ray.remote(num_gpus=1)
class Actor1(object):
def __init__(self):
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 1
assert (os.environ["CUDA_VISIBLE_DEVICES"] ==
",".join([str(i) for i in gpu_ids]))
# Set self.x to make sure that we got here.
self.x = 1
def test(self):
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 1
assert (os.environ["CUDA_VISIBLE_DEVICES"] ==
",".join([str(i) for i in gpu_ids]))
return self.x
a0 = Actor0.remote()
ray.get(a0.test.remote())
a1 = Actor1.remote()
ray.get(a1.test.remote())
def testMultipleLocalSchedulers(self):
# This test will define a bunch of tasks that can only be assigned to
# specific local schedulers, and we will check that they are assigned
# to the correct local schedulers.
address_info = ray.worker._init(
start_ray_local=True,
num_local_schedulers=3,
num_workers=1,
num_cpus=[100, 5, 10],
num_gpus=[0, 5, 1])
# Define a bunch of remote functions that all return the socket name of
# the plasma store. Since there is a one-to-one correspondence between
# plasma stores and local schedulers (at least right now), this can be
# used to identify which local scheduler the task was assigned to.
# This must be run on the zeroth local scheduler.
@ray.remote(num_cpus=11)
def run_on_0():
return ray.worker.global_worker.plasma_client.store_socket_name
# This must be run on the first local scheduler.
@ray.remote(num_gpus=2)
def run_on_1():
return ray.worker.global_worker.plasma_client.store_socket_name
# This must be run on the second local scheduler.
@ray.remote(num_cpus=6, num_gpus=1)
def run_on_2():
return ray.worker.global_worker.plasma_client.store_socket_name
# This can be run anywhere.
@ray.remote(num_cpus=0, num_gpus=0)
def run_on_0_1_2():
return ray.worker.global_worker.plasma_client.store_socket_name
# This must be run on the first or second local scheduler.
@ray.remote(num_gpus=1)
def run_on_1_2():
return ray.worker.global_worker.plasma_client.store_socket_name
# This must be run on the zeroth or second local scheduler.
@ray.remote(num_cpus=8)
def run_on_0_2():
return ray.worker.global_worker.plasma_client.store_socket_name
def run_lots_of_tasks():
names = []
results = []
for i in range(100):
index = np.random.randint(6)
if index == 0:
names.append("run_on_0")
results.append(run_on_0.remote())
elif index == 1:
names.append("run_on_1")
results.append(run_on_1.remote())
elif index == 2:
names.append("run_on_2")
results.append(run_on_2.remote())
elif index == 3:
names.append("run_on_0_1_2")
results.append(run_on_0_1_2.remote())
elif index == 4:
names.append("run_on_1_2")
results.append(run_on_1_2.remote())
elif index == 5:
names.append("run_on_0_2")
results.append(run_on_0_2.remote())
return names, results
store_names = [
object_store_address.name
for object_store_address in address_info["object_store_addresses"]
]
def validate_names_and_results(names, results):
for name, result in zip(names, ray.get(results)):
if name == "run_on_0":
self.assertIn(result, [store_names[0]])
elif name == "run_on_1":
self.assertIn(result, [store_names[1]])
elif name == "run_on_2":
self.assertIn(result, [store_names[2]])
elif name == "run_on_0_1_2":
self.assertIn(result, [
store_names[0], store_names[1], store_names[2]
])
elif name == "run_on_1_2":
self.assertIn(result, [store_names[1], store_names[2]])
elif name == "run_on_0_2":
self.assertIn(result, [store_names[0], store_names[2]])
else:
raise Exception("This should be unreachable.")
self.assertEqual(set(ray.get(results)), set(store_names))
names, results = run_lots_of_tasks()
validate_names_and_results(names, results)
# Make sure the same thing works when this is nested inside of a task.
@ray.remote
def run_nested1():
names, results = run_lots_of_tasks()
return names, results
@ray.remote
def run_nested2():
names, results = ray.get(run_nested1.remote())
return names, results
names, results = ray.get(run_nested2.remote())
validate_names_and_results(names, results)
def testCustomResources(self):
ray.worker._init(
start_ray_local=True,
num_local_schedulers=2,
num_cpus=[3, 3],
resources=[{"CustomResource": 0}, {"CustomResource": 1}])
@ray.remote
def f():
time.sleep(0.001)
return ray.worker.global_worker.plasma_client.store_socket_name
@ray.remote(resources={"CustomResource": 1})
def g():
time.sleep(0.001)
return ray.worker.global_worker.plasma_client.store_socket_name
@ray.remote(resources={"CustomResource": 1})
def h():
ray.get([f.remote() for _ in range(5)])
return ray.worker.global_worker.plasma_client.store_socket_name
# The f tasks should be scheduled on both local schedulers.
self.assertEqual(len(set(ray.get([f.remote() for _ in range(50)]))), 2)
local_plasma = ray.worker.global_worker.plasma_client.store_socket_name
# The g tasks should be scheduled only on the second local scheduler.
local_scheduler_ids = set(ray.get([g.remote() for _ in range(50)]))
self.assertEqual(len(local_scheduler_ids), 1)
self.assertNotEqual(list(local_scheduler_ids)[0], local_plasma)
# Make sure that resource bookkeeping works when a task that uses a
        # custom resource gets blocked.
ray.get([h.remote() for _ in range(5)])
def testTwoCustomResources(self):
ray.worker._init(
start_ray_local=True,
num_local_schedulers=2,
num_cpus=[3, 3],
resources=[{"CustomResource1": 1, "CustomResource2": 2},
{"CustomResource1": 3, "CustomResource2": 4}])
@ray.remote(resources={"CustomResource1": 1})
def f():
time.sleep(0.001)
return ray.worker.global_worker.plasma_client.store_socket_name
@ray.remote(resources={"CustomResource2": 1})
def g():
time.sleep(0.001)
return ray.worker.global_worker.plasma_client.store_socket_name
@ray.remote(resources={"CustomResource1": 1, "CustomResource2": 3})
def h():
time.sleep(0.001)
return ray.worker.global_worker.plasma_client.store_socket_name
@ray.remote(resources={"CustomResource1": 4})
def j():
time.sleep(0.001)
return ray.worker.global_worker.plasma_client.store_socket_name
@ray.remote(resources={"CustomResource3": 1})
def k():
time.sleep(0.001)
return ray.worker.global_worker.plasma_client.store_socket_name
# The f and g tasks should be scheduled on both local schedulers.
self.assertEqual(len(set(ray.get([f.remote() for _ in range(50)]))), 2)
self.assertEqual(len(set(ray.get([g.remote() for _ in range(50)]))), 2)
local_plasma = ray.worker.global_worker.plasma_client.store_socket_name
# The h tasks should be scheduled only on the second local scheduler.
local_scheduler_ids = set(ray.get([h.remote() for _ in range(50)]))
self.assertEqual(len(local_scheduler_ids), 1)
self.assertNotEqual(list(local_scheduler_ids)[0], local_plasma)
# Make sure that tasks with unsatisfied custom resource requirements do
# not get scheduled.
ready_ids, remaining_ids = ray.wait([j.remote(), k.remote()],
timeout=500)
self.assertEqual(ready_ids, [])
def testManyCustomResources(self):
num_custom_resources = 10000
total_resources = {str(i): np.random.randint(1, 7)
for i in range(num_custom_resources)}
ray.init(num_cpus=5, resources=total_resources)
def f():
return 1
remote_functions = []
for _ in range(20):
num_resources = np.random.randint(0, num_custom_resources + 1)
permuted_resources = np.random.permutation(
num_custom_resources)[:num_resources]
random_resources = {str(i): total_resources[str(i)]
for i in permuted_resources}
remote_function = ray.remote(resources=random_resources)(f)
remote_functions.append(remote_function)
remote_functions.append(ray.remote(f))
remote_functions.append(ray.remote(resources=total_resources)(f))
results = []
for remote_function in remote_functions:
results.append(remote_function.remote())
results.append(remote_function.remote())
results.append(remote_function.remote())
ray.get(results)
class CudaVisibleDevicesTest(unittest.TestCase):
def setUp(self):
        # Record the current value of this environment variable so that we can
# reset it after the test.
self.original_gpu_ids = os.environ.get(
"CUDA_VISIBLE_DEVICES", None)
def tearDown(self):
ray.worker.cleanup()
# Reset the environment variable.
if self.original_gpu_ids is not None:
os.environ["CUDA_VISIBLE_DEVICES"] = self.original_gpu_ids
else:
del os.environ["CUDA_VISIBLE_DEVICES"]
def testSpecificGPUs(self):
allowed_gpu_ids = [4, 5, 6]
os.environ["CUDA_VISIBLE_DEVICES"] = ",".join(
[str(i) for i in allowed_gpu_ids])
ray.init(num_gpus=3)
@ray.remote(num_gpus=1)
def f():
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 1
assert gpu_ids[0] in allowed_gpu_ids
@ray.remote(num_gpus=2)
def g():
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 2
assert gpu_ids[0] in allowed_gpu_ids
assert gpu_ids[1] in allowed_gpu_ids
ray.get([f.remote() for _ in range(100)])
ray.get([g.remote() for _ in range(100)])
class WorkerPoolTests(unittest.TestCase):
def tearDown(self):
ray.worker.cleanup()
def testNoWorkers(self):
ray.init(num_workers=0)
@ray.remote
def f():
return 1
# Make sure we can call a remote function. This will require starting a
# new worker.
ray.get(f.remote())
ray.get([f.remote() for _ in range(100)])
def testBlockingTasks(self):
ray.init(num_workers=1)
@ray.remote
def f(i, j):
return (i, j)
@ray.remote
def g(i):
# Each instance of g submits and blocks on the result of another
# remote task.
object_ids = [f.remote(i, j) for j in range(10)]
return ray.get(object_ids)
ray.get([g.remote(i) for i in range(100)])
@ray.remote
def _sleep(i):
time.sleep(1)
            return i
@ray.remote
def sleep():
# Each instance of sleep submits and blocks on the result of
# another remote task, which takes one second to execute.
ray.get([_sleep.remote(i) for i in range(10)])
ray.get(sleep.remote())
def testMaxCallTasks(self):
ray.init(num_cpus=1)
@ray.remote(max_calls=1)
def f():
return os.getpid()
pid = ray.get(f.remote())
ray.test.test_utils.wait_for_pid_to_exit(pid)
@ray.remote(max_calls=2)
def f():
return os.getpid()
pid1 = ray.get(f.remote())
pid2 = ray.get(f.remote())
self.assertEqual(pid1, pid2)
ray.test.test_utils.wait_for_pid_to_exit(pid1)
class SchedulingAlgorithm(unittest.TestCase):
def tearDown(self):
ray.worker.cleanup()
def attempt_to_load_balance(self,
remote_function,
args,
total_tasks,
num_local_schedulers,
minimum_count,
num_attempts=100):
attempts = 0
while attempts < num_attempts:
locations = ray.get(
[remote_function.remote(*args) for _ in range(total_tasks)])
names = set(locations)
counts = [locations.count(name) for name in names]
print("Counts are {}.".format(counts))
if (len(names) == num_local_schedulers
and all([count >= minimum_count for count in counts])):
break
attempts += 1
self.assertLess(attempts, num_attempts)
def testLoadBalancing(self):
# This test ensures that tasks are being assigned to all local
# schedulers in a roughly equal manner.
num_local_schedulers = 3
num_cpus = 7
ray.worker._init(
start_ray_local=True,
num_local_schedulers=num_local_schedulers,
num_cpus=num_cpus)
@ray.remote
def f():
time.sleep(0.01)
return ray.worker.global_worker.plasma_client.store_socket_name
self.attempt_to_load_balance(f, [], 100, num_local_schedulers, 10)
self.attempt_to_load_balance(f, [], 1000, num_local_schedulers, 100)
def testLoadBalancingWithDependencies(self):
# This test ensures that tasks are being assigned to all local
# schedulers in a roughly equal manner even when the tasks have
# dependencies.
num_workers = 3
num_local_schedulers = 3
ray.worker._init(
start_ray_local=True,
num_workers=num_workers,
num_local_schedulers=num_local_schedulers)
@ray.remote
def f(x):
return ray.worker.global_worker.plasma_client.store_socket_name
# This object will be local to one of the local schedulers. Make sure
# this doesn't prevent tasks from being scheduled on other local
# schedulers.
x = ray.put(np.zeros(1000000))
self.attempt_to_load_balance(f, [x], 100, num_local_schedulers, 25)
def wait_for_num_tasks(num_tasks, timeout=10):
start_time = time.time()
while time.time() - start_time < timeout:
if len(ray.global_state.task_table()) >= num_tasks:
return
time.sleep(0.1)
raise Exception("Timed out while waiting for global state.")
def wait_for_num_objects(num_objects, timeout=10):
start_time = time.time()
while time.time() - start_time < timeout:
if len(ray.global_state.object_table()) >= num_objects:
return
time.sleep(0.1)
raise Exception("Timed out while waiting for global state.")
@unittest.skipIf(
os.environ.get('RAY_USE_NEW_GCS', False),
"New GCS API doesn't have a Python API yet.")
class GlobalStateAPI(unittest.TestCase):
def tearDown(self):
ray.worker.cleanup()
def testGlobalStateAPI(self):
with self.assertRaises(Exception):
ray.global_state.object_table()
with self.assertRaises(Exception):
ray.global_state.task_table()
with self.assertRaises(Exception):
ray.global_state.client_table()
with self.assertRaises(Exception):
ray.global_state.function_table()
with self.assertRaises(Exception):
ray.global_state.log_files()
ray.init()
self.assertEqual(ray.global_state.object_table(), dict())
ID_SIZE = 20
driver_id = ray.experimental.state.binary_to_hex(
ray.worker.global_worker.worker_id)
driver_task_id = ray.experimental.state.binary_to_hex(
ray.worker.global_worker.current_task_id.id())
# One task is put in the task table which corresponds to this driver.
wait_for_num_tasks(1)
task_table = ray.global_state.task_table()
self.assertEqual(len(task_table), 1)
self.assertEqual(driver_task_id, list(task_table.keys())[0])
self.assertEqual(task_table[driver_task_id]["State"],
ray.experimental.state.TASK_STATUS_RUNNING)
self.assertEqual(task_table[driver_task_id]["TaskSpec"]["TaskID"],
driver_task_id)
self.assertEqual(task_table[driver_task_id]["TaskSpec"]["ActorID"],
ID_SIZE * "ff")
self.assertEqual(task_table[driver_task_id]["TaskSpec"]["Args"], [])
self.assertEqual(task_table[driver_task_id]["TaskSpec"]["DriverID"],
driver_id)
self.assertEqual(task_table[driver_task_id]["TaskSpec"]["FunctionID"],
ID_SIZE * "ff")
self.assertEqual(
(task_table[driver_task_id]["TaskSpec"]["ReturnObjectIDs"]), [])
client_table = ray.global_state.client_table()
node_ip_address = ray.worker.global_worker.node_ip_address
self.assertEqual(len(client_table[node_ip_address]), 3)
manager_client = [
c for c in client_table[node_ip_address]
if c["ClientType"] == "plasma_manager"
][0]
@ray.remote
def f(*xs):
return 1
x_id = ray.put(1)
result_id = f.remote(1, "hi", x_id)
# Wait for one additional task to complete.
start_time = time.time()
while time.time() - start_time < 10:
wait_for_num_tasks(1 + 1)
task_table = ray.global_state.task_table()
self.assertEqual(len(task_table), 1 + 1)
task_id_set = set(task_table.keys())
task_id_set.remove(driver_task_id)
task_id = list(task_id_set)[0]
if task_table[task_id]["State"] == "DONE":
break
time.sleep(0.1)
function_table = ray.global_state.function_table()
task_spec = task_table[task_id]["TaskSpec"]
self.assertEqual(task_spec["ActorID"], ID_SIZE * "ff")
self.assertEqual(task_spec["Args"], [1, "hi", x_id])
self.assertEqual(task_spec["DriverID"], driver_id)
self.assertEqual(task_spec["ReturnObjectIDs"], [result_id])
function_table_entry = function_table[task_spec["FunctionID"]]
self.assertEqual(function_table_entry["Name"], "__main__.f")
self.assertEqual(function_table_entry["DriverID"], driver_id)
self.assertEqual(function_table_entry["Module"], "__main__")
self.assertEqual(task_table[task_id],
ray.global_state.task_table(task_id))
# Wait for two objects, one for the x_id and one for result_id.
wait_for_num_objects(2)
def wait_for_object_table():
timeout = 10
start_time = time.time()
while time.time() - start_time < timeout:
object_table = ray.global_state.object_table()
tables_ready = (
object_table[x_id]["ManagerIDs"] is not None
and object_table[result_id]["ManagerIDs"] is not None)
if tables_ready:
return
time.sleep(0.1)
raise Exception("Timed out while waiting for object table to "
"update.")
# Wait for the object table to be updated.
wait_for_object_table()
object_table = ray.global_state.object_table()
self.assertEqual(len(object_table), 2)
self.assertEqual(object_table[x_id]["IsPut"], True)
self.assertEqual(object_table[x_id]["TaskID"], driver_task_id)
self.assertEqual(object_table[x_id]["ManagerIDs"],
[manager_client["DBClientID"]])
self.assertEqual(object_table[result_id]["IsPut"], False)
self.assertEqual(object_table[result_id]["TaskID"], task_id)
self.assertEqual(object_table[result_id]["ManagerIDs"],
[manager_client["DBClientID"]])
self.assertEqual(object_table[x_id],
ray.global_state.object_table(x_id))
self.assertEqual(object_table[result_id],
ray.global_state.object_table(result_id))
def testLogFileAPI(self):
ray.init(redirect_output=True)
message = "unique message"
@ray.remote
def f():
print(message)
# The call to sys.stdout.flush() seems to be necessary when using
# the system Python 2.7 on Ubuntu.
sys.stdout.flush()
ray.get(f.remote())
# Make sure that the message appears in the log files.
start_time = time.time()
found_message = False
while time.time() - start_time < 10:
log_files = ray.global_state.log_files()
for ip, innerdict in log_files.items():
for filename, contents in innerdict.items():
contents_str = "".join(contents)
if message in contents_str:
found_message = True
if found_message:
break
time.sleep(0.1)
self.assertEqual(found_message, True)
def testTaskProfileAPI(self):
ray.init(redirect_output=True)
@ray.remote
def f():
return 1
num_calls = 5
[f.remote() for _ in range(num_calls)]
# Make sure the event log has the correct number of events.
start_time = time.time()
while time.time() - start_time < 10:
profiles = ray.global_state.task_profiles(
100, start=0, end=time.time())
limited_profiles = ray.global_state.task_profiles(
1, start=0, end=time.time())
if len(profiles) == num_calls and len(limited_profiles) == 1:
break
time.sleep(0.1)
self.assertEqual(len(profiles), num_calls)
self.assertEqual(len(limited_profiles), 1)
# Make sure that each entry is properly formatted.
for task_id, data in profiles.items():
self.assertIn("execute_start", data)
self.assertIn("execute_end", data)
self.assertIn("get_arguments_start", data)
self.assertIn("get_arguments_end", data)
self.assertIn("store_outputs_start", data)
self.assertIn("store_outputs_end", data)
def testWorkers(self):
num_workers = 3
ray.init(
redirect_output=True,
num_cpus=num_workers,
num_workers=num_workers)
@ray.remote
def f():
return id(ray.worker.global_worker)
# Wait until all of the workers have started.
worker_ids = set()
while len(worker_ids) != num_workers:
worker_ids = set(ray.get([f.remote() for _ in range(10)]))
worker_info = ray.global_state.workers()
self.assertEqual(len(worker_info), num_workers)
for worker_id, info in worker_info.items():
self.assertEqual(info["node_ip_address"], "127.0.0.1")
self.assertIn("local_scheduler_socket", info)
self.assertIn("plasma_manager_socket", info)
self.assertIn("plasma_store_socket", info)
self.assertIn("stderr_file", info)
self.assertIn("stdout_file", info)
def testDumpTraceFile(self):
ray.init(redirect_output=True)
@ray.remote
def f():
return 1
@ray.remote
class Foo(object):
def __init__(self):
pass
def method(self):
pass
ray.get([f.remote() for _ in range(10)])
actors = [Foo.remote() for _ in range(5)]
ray.get([actor.method.remote() for actor in actors])
ray.get([actor.method.remote() for actor in actors])
        path = "/tmp/ray_test_trace"
task_info = ray.global_state.task_profiles(
100, start=0, end=time.time())
ray.global_state.dump_catapult_trace(path, task_info)
# TODO(rkn): This test is not perfect because it does not verify that
        # the visualization actually renders (e.g., the contents of the dumped
# trace could be malformed).
if __name__ == "__main__":
unittest.main(verbosity=2)
|
the-stack_106_20963
|
from unittest.mock import Mock
from django.apps import apps
from cms import app_registration
from cms.utils.setup import setup_cms_apps
from djangocms_internalsearch.base import BaseSearchConfig
from .utils import TestCase
class TestModelConfig(BaseSearchConfig):
pass
class InternalSearchInvalidConfigTestCase(TestCase):
def test_missing_prepare_text(self):
with self.assertRaises(NotImplementedError):
TestModelConfig.prepare_text(self, Mock())
def test_missing_model(self):
with self.assertRaises(NotImplementedError):
TestModelConfig().model
def test_missing_list_display(self):
with self.assertRaises(NotImplementedError):
TestModelConfig().list_display
class InternalSearchValidConfigTestCase(TestCase):
def setUp(self):
app_registration.get_cms_extension_apps.cache_clear()
app_registration.get_cms_config_apps.cache_clear()
def test_search_config_with_expected_method(self):
setup_cms_apps()
internalsearch_config = apps.get_app_config('djangocms_internalsearch')
registered_configs = internalsearch_config.cms_extension.internalsearch_apps_config
expected_method = ['prepare_text', ]
with self.assertNotRaises(NotImplementedError):
for config in registered_configs:
for attr in expected_method:
self.assertTrue(hasattr(config, attr))
def test_search_config_with_expected_attributes(self):
setup_cms_apps()
internalsearch_config = apps.get_app_config('djangocms_internalsearch')
registered_configs = internalsearch_config.cms_extension.internalsearch_apps_config
expected_attributes = ['model', 'list_display']
with self.assertNotRaises(NotImplementedError):
for config in registered_configs:
for attr in expected_attributes:
self.assertTrue(hasattr(config, attr))
|
the-stack_106_20965
|
# coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ListSlowlogRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'instance_id': 'str',
'offset': 'int',
'limit': 'int',
'sort_key': 'str',
'sort_dir': 'str',
'start_time': 'str',
'end_time': 'str'
}
attribute_map = {
'instance_id': 'instance_id',
'offset': 'offset',
'limit': 'limit',
'sort_key': 'sort_key',
'sort_dir': 'sort_dir',
'start_time': 'start_time',
'end_time': 'end_time'
}
def __init__(self, instance_id=None, offset=None, limit=None, sort_key=None, sort_dir=None, start_time=None, end_time=None):
"""ListSlowlogRequest - a model defined in huaweicloud sdk"""
self._instance_id = None
self._offset = None
self._limit = None
self._sort_key = None
self._sort_dir = None
self._start_time = None
self._end_time = None
self.discriminator = None
self.instance_id = instance_id
if offset is not None:
self.offset = offset
if limit is not None:
self.limit = limit
if sort_key is not None:
self.sort_key = sort_key
if sort_dir is not None:
self.sort_dir = sort_dir
self.start_time = start_time
self.end_time = end_time
@property
def instance_id(self):
"""Gets the instance_id of this ListSlowlogRequest.
        The instance ID.
:return: The instance_id of this ListSlowlogRequest.
:rtype: str
"""
return self._instance_id
@instance_id.setter
def instance_id(self, instance_id):
"""Sets the instance_id of this ListSlowlogRequest.
        The instance ID.
:param instance_id: The instance_id of this ListSlowlogRequest.
:type: str
"""
self._instance_id = instance_id
@property
def offset(self):
"""Gets the offset of this ListSlowlogRequest.
        Offset from which the query starts; must be greater than or equal to 0.
:return: The offset of this ListSlowlogRequest.
:rtype: int
"""
return self._offset
@offset.setter
def offset(self, offset):
"""Sets the offset of this ListSlowlogRequest.
        Offset from which the query starts; must be greater than or equal to 0.
:param offset: The offset of this ListSlowlogRequest.
:type: int
"""
self._offset = offset
@property
def limit(self):
"""Gets the limit of this ListSlowlogRequest.
        Number of records displayed per page.
:return: The limit of this ListSlowlogRequest.
:rtype: int
"""
return self._limit
@limit.setter
def limit(self, limit):
"""Sets the limit of this ListSlowlogRequest.
        Number of records displayed per page.
:param limit: The limit of this ListSlowlogRequest.
:type: int
"""
self._limit = limit
@property
def sort_key(self):
"""Gets the sort_key of this ListSlowlogRequest.
        Key by which results are sorted; start_time and duration are supported, and the default is "start_time".
:return: The sort_key of this ListSlowlogRequest.
:rtype: str
"""
return self._sort_key
@sort_key.setter
def sort_key(self, sort_key):
"""Sets the sort_key of this ListSlowlogRequest.
        Key by which results are sorted; start_time and duration are supported, and the default is "start_time".
:param sort_key: The sort_key of this ListSlowlogRequest.
:type: str
"""
self._sort_key = sort_key
@property
def sort_dir(self):
"""Gets the sort_dir of this ListSlowlogRequest.
        Descending or ascending order (desc and asc respectively; the default is "desc").
:return: The sort_dir of this ListSlowlogRequest.
:rtype: str
"""
return self._sort_dir
@sort_dir.setter
def sort_dir(self, sort_dir):
"""Sets the sort_dir of this ListSlowlogRequest.
        Descending or ascending order (desc and asc respectively; the default is "desc").
:param sort_dir: The sort_dir of this ListSlowlogRequest.
:type: str
"""
self._sort_dir = sort_dir
@property
def start_time(self):
"""Gets the start_time of this ListSlowlogRequest.
        Query start time, as a Unix timestamp in UTC, e.g. 1598803200000.
:return: The start_time of this ListSlowlogRequest.
:rtype: str
"""
return self._start_time
@start_time.setter
def start_time(self, start_time):
"""Sets the start_time of this ListSlowlogRequest.
        Query start time, as a Unix timestamp in UTC, e.g. 1598803200000.
:param start_time: The start_time of this ListSlowlogRequest.
:type: str
"""
self._start_time = start_time
@property
def end_time(self):
"""Gets the end_time of this ListSlowlogRequest.
        Query end time, as a Unix timestamp in UTC, e.g. 1599494399000.
:return: The end_time of this ListSlowlogRequest.
:rtype: str
"""
return self._end_time
@end_time.setter
def end_time(self, end_time):
"""Sets the end_time of this ListSlowlogRequest.
        Query end time, as a Unix timestamp in UTC, e.g. 1599494399000.
:param end_time: The end_time of this ListSlowlogRequest.
:type: str
"""
self._end_time = end_time
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ListSlowlogRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
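
# A minimal usage sketch (illustrative only, not part of the generated SDK):
# the instance id and the timestamps below are placeholders, and in practice
# the request object is passed to the service client's corresponding slow-log
# listing call rather than printed.
if __name__ == "__main__":
    example_request = ListSlowlogRequest(
        instance_id="your-instance-id",   # placeholder
        offset=0,
        limit=10,
        sort_key="start_time",
        sort_dir="desc",
        start_time="1598803200000",       # UTC Unix timestamp
        end_time="1599494399000")
    # to_dict()/to_str() serialize the request via openapi_types/attribute_map.
    print(example_request.to_str())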
|
the-stack_106_20967
|
import logging
from typing import Any, Dict, Optional, Type
import pydantic
from datahub.configuration.common import (
ConfigModel,
ConfigurationError,
DynamicTypedConfig,
)
from datahub.ingestion.api.common import PipelineContext
from datahub.ingestion.api.ingestion_state_provider import IngestionStateProvider, JobId
from datahub.ingestion.api.source import Source
from datahub.ingestion.source.state.checkpoint import Checkpoint, CheckpointStateBase
from datahub.ingestion.source.state_provider.datahub_ingestion_state_provider import (
DatahubIngestionStateProviderConfig,
)
from datahub.ingestion.source.state_provider.state_provider_registry import (
ingestion_state_provider_registry,
)
from datahub.metadata.schema_classes import DatahubIngestionCheckpointClass
logger: logging.Logger = logging.getLogger(__name__)
class StatefulIngestionConfig(ConfigModel):
"""
Basic Stateful Ingestion Specific Configuration for any source.
"""
enabled: bool = False
max_checkpoint_state_size: int = 2 ** 24 # 16MB
state_provider: Optional[DynamicTypedConfig] = DynamicTypedConfig(
type="datahub", config=DatahubIngestionStateProviderConfig()
)
ignore_old_state: bool = False
ignore_new_state: bool = False
@pydantic.root_validator()
def validate_config(cls, values: Dict[str, Any]) -> Dict[str, Any]:
if values.get("enabled"):
if values.get("state_provider") is None:
raise ConfigurationError(
"Must specify state_provider configuration if stateful ingestion is enabled."
)
return values
class StatefulIngestionConfigBase(ConfigModel):
"""
Base configuration class for stateful ingestion for source configs to inherit from.
"""
stateful_ingestion: Optional[StatefulIngestionConfig] = None
class StatefulIngestionSourceBase(Source):
"""
Defines the base class for all stateful sources.
"""
def __init__(
self, config: StatefulIngestionConfigBase, ctx: PipelineContext
) -> None:
super().__init__(ctx)
self.stateful_ingestion_config = config.stateful_ingestion
self.source_config_type = type(config)
self.last_checkpoints: Dict[JobId, Optional[Checkpoint]] = {}
self.cur_checkpoints: Dict[JobId, Optional[Checkpoint]] = {}
self._initialize_state_provider()
def _initialize_state_provider(self) -> None:
self.ingestion_state_provider: Optional[IngestionStateProvider] = None
if (
self.stateful_ingestion_config is not None
and self.stateful_ingestion_config.state_provider is not None
and self.stateful_ingestion_config.enabled
):
if self.ctx.pipeline_name is None:
raise ConfigurationError(
"pipeline_name must be provided if stateful ingestion is enabled."
)
state_provider_class = ingestion_state_provider_registry.get(
self.stateful_ingestion_config.state_provider.type
)
self.ingestion_state_provider = state_provider_class.create(
self.stateful_ingestion_config.state_provider.dict().get("config", {}),
self.ctx,
)
if self.stateful_ingestion_config.ignore_old_state:
logger.warning(
"The 'ignore_old_state' config is True. The old checkpoint state will not be provided."
)
if self.stateful_ingestion_config.ignore_new_state:
logger.warning(
"The 'ignore_new_state' config is True. The new checkpoint state will not be created."
)
logger.debug(
f"Successfully created {self.stateful_ingestion_config.state_provider.type} state provider."
)
def is_stateful_ingestion_configured(self) -> bool:
if (
self.stateful_ingestion_config is not None
and self.stateful_ingestion_config.enabled
and self.ingestion_state_provider is not None
):
return True
return False
# Basic methods that sub-classes must implement
def create_checkpoint(self, job_id: JobId) -> Optional[Checkpoint]:
raise NotImplementedError("Sub-classes must implement this method.")
def get_platform_instance_id(self) -> str:
raise NotImplementedError("Sub-classes must implement this method.")
def is_checkpointing_enabled(self, job_id: JobId) -> bool:
"""
Sub-classes should override this method to tell if checkpointing is enabled for this run.
For instance, currently all of the SQL based sources use checkpointing for stale entity removal.
They would turn it on only if remove_stale_metadata=True. Otherwise, the feature won't work correctly.
"""
raise NotImplementedError("Sub-classes must implement this method.")
def _get_last_checkpoint(
self, job_id: JobId, checkpoint_state_class: Type[CheckpointStateBase]
) -> Optional[Checkpoint]:
"""
This is a template method implementation for querying the last checkpoint state.
"""
last_checkpoint: Optional[Checkpoint] = None
if self.is_stateful_ingestion_configured():
# Obtain the latest checkpoint from GMS for this job.
last_checkpoint_aspect = self.ingestion_state_provider.get_latest_checkpoint( # type: ignore
pipeline_name=self.ctx.pipeline_name, # type: ignore
platform_instance_id=self.get_platform_instance_id(),
job_name=job_id,
)
# Convert it to a first-class Checkpoint object.
last_checkpoint = Checkpoint.create_from_checkpoint_aspect(
job_name=job_id,
checkpoint_aspect=last_checkpoint_aspect,
config_class=self.source_config_type,
state_class=checkpoint_state_class,
)
return last_checkpoint
# Base-class implementations for common state management tasks.
def get_last_checkpoint(
self, job_id: JobId, checkpoint_state_class: Type[CheckpointStateBase]
) -> Optional[Checkpoint]:
if not self.is_stateful_ingestion_configured() or (
self.stateful_ingestion_config
and self.stateful_ingestion_config.ignore_old_state
):
return None
        if job_id not in self.last_checkpoints:
self.last_checkpoints[job_id] = self._get_last_checkpoint(
job_id, checkpoint_state_class
)
return self.last_checkpoints[job_id]
def get_current_checkpoint(self, job_id: JobId) -> Optional[Checkpoint]:
if not self.is_stateful_ingestion_configured():
return None
if job_id not in self.cur_checkpoints:
self.cur_checkpoints[job_id] = (
self.create_checkpoint(job_id)
if self.is_checkpointing_enabled(job_id)
else None
)
return self.cur_checkpoints[job_id]
def commit_checkpoints(self) -> None:
if not self.is_stateful_ingestion_configured():
return None
if (
self.stateful_ingestion_config
and self.stateful_ingestion_config.ignore_new_state
):
logger.info(
"The `ignore_new_state` config is True. Not committing current checkpoint."
)
return None
if self.ctx.dry_run_mode or self.ctx.preview_mode:
logger.warning(
f"Will not be committing checkpoints in dry_run_mode(={self.ctx.dry_run_mode})"
f" or preview_mode(={self.ctx.preview_mode})."
)
return None
job_checkpoint_aspects: Dict[JobId, DatahubIngestionCheckpointClass] = {}
for job_name, job_checkpoint in self.cur_checkpoints.items():
if job_checkpoint is None:
continue
try:
checkpoint_aspect = job_checkpoint.to_checkpoint_aspect(
self.stateful_ingestion_config.max_checkpoint_state_size # type: ignore
)
except Exception as e:
logger.error(
f"Failed to convert checkpoint to aspect for job {job_name}. It will not be committed.",
e,
)
else:
if checkpoint_aspect is not None:
job_checkpoint_aspects[job_name] = checkpoint_aspect
self.ingestion_state_provider.commit_checkpoints( # type: ignore
job_checkpoints=job_checkpoint_aspects
)
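
# A minimal subclass sketch (illustrative only, not part of this module): the
# class name and the platform instance id are assumptions, and the checkpoint
# construction is elided because its exact arguments depend on the Checkpoint
# and state classes a real source would use.
class _ExampleStatefulSource(StatefulIngestionSourceBase):
    def get_platform_instance_id(self) -> str:
        # Anything that uniquely identifies the scanned platform instance.
        return "example_platform_instance"

    def is_checkpointing_enabled(self, job_id: JobId) -> bool:
        # Enable checkpointing whenever stateful ingestion is configured.
        return self.is_stateful_ingestion_configured()

    def create_checkpoint(self, job_id: JobId) -> Optional[Checkpoint]:
        # A real source would build and return a Checkpoint carrying this run's
        # state here; returning None simply disables the checkpoint for the job.
        return None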
|
the-stack_106_20968
|
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.3.3
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Rendering with serialized assets
#
# This example explains how to render images with the serialized assets.
import lmenv
env = lmenv.load('.lmenv')
import os
import traceback
import numpy as np
import imageio
# %matplotlib inline
import matplotlib.pyplot as plt
import lightmetrica as lm
# %load_ext lightmetrica_jupyter
lm.init()
lm.log.init('jupyter')
lm.progress.init('jupyter')
lm.info()
if not lm.Release:
lm.debug.attach_to_debugger()
# ### Visualizing asset tree
#
# An asset can hold other assets inside its instance. As a result, a set of created assets constitutes an *asset tree*. We can visualize the structure of the tree using the ``lm.debug.print_asset_tree()`` function.
accel = lm.load_accel('accel', 'sahbvh')
scene = lm.load_scene('scene', 'default', accel=accel)
lm.debug.print_asset_tree()
# Clear the internal state
lm.reset()
lm.debug.print_asset_tree()
# ### Asset group
#
# *Asset group* is a special type of asset that can hold multiple instances of assets. By means of an asset group, we can hierarchically manage the assets. An asset group can be created with the ``lm.load_asset_group()`` function.
g = lm.load_asset_group('fireplace_room', 'default')
# Another asset can be loaded as a child of the asset group by calling the ``lm.AssetGroup.load_*()`` member functions. The arguments are the same as for the ``lm.load_*()`` functions. Note that the locator of the asset includes the id of the group.
camera = g.load_camera('camera1', 'pinhole',
position=[5.101118, 1.083746, -2.756308],
center=[4.167568, 1.078925, -2.397892],
up=[0,1,0],
vfov=43.001194,
aspect=16/9)
model = g.load_model('model', 'wavefrontobj',
path=os.path.join(env.scene_path, 'fireplace_room/fireplace_room.obj'))
accel = g.load_accel('accel', 'sahbvh')
scene = g.load_scene('scene', 'default', accel=accel)
scene.add_primitive(camera=camera)
scene.add_primitive(model=model)
scene.build()
lm.debug.print_asset_tree()
# ### Serialization of asset
# An asset can be serialized to disk as a binary stream. This is useful, for instance, to accelerate asset loading in debug mode or in repetitive experiments, since we can skip the precomputation that accompanies loading of the asset.
#
# Serialization to a file can be done with the ``lm.Component.save_to_file()`` function. We give the path to the output file as an argument.
g.save_to_file('fireplace_room.serialized')
# Reset the internal state
lm.reset()
lm.debug.print_asset_tree()
# Note that serializing an asset group means serializing a subtree of the entire asset tree. The serialization process can fail if an asset being serialized (incl. child assets) contains an external reference outside of the subtree.
accel = lm.load_accel('accel', 'sahbvh')
g = lm.load_asset_group('fireplace_room', 'default')
scene = g.load_scene('scene', 'default', accel=accel)
lm.debug.print_asset_tree()
# Serialization will fail because
# accel is outside of the subtree rooted at g.
try:
g.save_to_file('failed.serialized')
except Exception:
traceback.print_exc()
# Reset the internal state
lm.reset()
lm.debug.print_asset_tree()
# ### Loading serialized asset
#
# The serialized asset can be loaded with the ``lm.load_serialized()`` function, where the first argument specifies the id of the asset and the second argument specifies the path to the serialized asset. Note that the id of the asset is not necessarily the same as that of the original asset before serialization.
lm.load_serialized('fireplace_room', 'fireplace_room.serialized')
lm.debug.print_asset_tree()
# Reset the internal state
lm.reset()
lm.debug.print_asset_tree()
# Also note that the serialized asset can be loaded at a different location in the asset tree, for instance, as a child of a different asset group.
g = lm.load_asset_group('another_group', 'default')
g.load_serialized('fireplace_room', 'fireplace_room.serialized')
lm.debug.print_asset_tree()
# ### Rendering with serialized asset
#
# We can render the image using the serialized asset. Here we use a locator directly instead of passing the instance, since the previously obtained reference (``scene``) has become invalid.
# Rendering
film = lm.load_film('film', 'bitmap', w=1920, h=1080)
renderer = lm.load_renderer('renderer', 'pt',
scene='$.assets.another_group.fireplace_room.scene',
output=film,
scheduler='sample',
spp=1,
max_verts=20)
renderer.render()
img = np.copy(film.buffer())
f = plt.figure(figsize=(15,15))
ax = f.add_subplot(111)
ax.imshow(np.clip(np.power(img,1/2.2),0,1), origin='lower')
plt.show()
|
the-stack_106_20969
|
import sys
def compute_min_refills(distance, tank, stops):
    """Return the minimum number of refills needed to travel `distance` with a
    tank range of `tank`, given the stop positions in `stops`, or -1 if the
    trip is impossible."""
    # Treat the start (position 0) and the destination as stops.
    stops = [0] + sorted(stops) + [distance]
    refills = 0
    current = 0  # index of the stop where the tank was last filled
    while current < len(stops) - 1:
        last = current
        # Drive to the farthest stop reachable on the current tank.
        while (current < len(stops) - 1
               and stops[current + 1] - stops[last] <= tank):
            current += 1
        if current == last:
            return -1  # the next stop is out of reach
        if current < len(stops) - 1:
            refills += 1
    return refills
if __name__ == '__main__':
# distance, tank, _, *stops = map(int, sys.stdin.read().split())
# print(compute_min_refills(distance, tank, stops))
distance = 240
tank = 50
stops = [50,100 ,150, 190]
print(compute_min_refills(distance, tank, stops))
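
# Worked example: with distance=950, tank=400 and stops=[200, 375, 550, 750],
# the greedy strategy drives to the farthest reachable stop before each refill,
# refuelling at 375 and again at 750, so
# compute_min_refills(950, 400, [200, 375, 550, 750]) returns 2.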
|
the-stack_106_20970
|
#!/usr/bin/env python
"""A keyword index of client machines.
An index of client machines, associating likely identifiers to client IDs.
"""
from grr.lib import keyword_index
from grr.lib import rdfvalue
from grr.lib import utils
# The system's primary client index.
MAIN_INDEX = rdfvalue.RDFURN("aff4:/client_index")
class ClientIndex(keyword_index.AFF4KeywordIndex):
"""An index of client machines.
"""
START_TIME_PREFIX = "start_date:"
START_TIME_PREFIX_LEN = len(START_TIME_PREFIX)
END_TIME_PREFIX = "end_date:"
END_TIME_PREFIX_LEN = len(END_TIME_PREFIX)
# We accept and return client URNs, but store client ids,
# e.g. "C.00aaeccbb45f33a3".
def _ClientIdFromURN(self, urn):
return urn.Basename()
def _URNFromClientID(self, client_id):
return rdfvalue.ClientURN(client_id)
def _NormalizeKeyword(self, keyword):
return keyword.lower()
def LookupClients(self, keywords):
"""Returns a list of client URNs associated with keywords.
Args:
keywords: The list of keywords to search by.
Returns:
A list of client URNs.
"""
start_time = rdfvalue.RDFDatetime().Now() - rdfvalue.Duration("180d")
end_time = rdfvalue.RDFDatetime(self.LAST_TIMESTAMP)
filtered_keywords = []
for k in keywords:
if k.startswith(self.START_TIME_PREFIX):
try:
start_time = rdfvalue.RDFDatetime(k[self.START_TIME_PREFIX_LEN:])
except ValueError:
pass
elif k.startswith(self.END_TIME_PREFIX):
try:
time = rdfvalue.RDFDatetime()
time.ParseFromHumanReadable(k[self.END_TIME_PREFIX_LEN:], eoy=True)
end_time = time
except ValueError:
pass
else:
filtered_keywords.append(k)
if not filtered_keywords:
filtered_keywords.append(".")
return map(self._URNFromClientID,
self.Lookup(map(self._NormalizeKeyword, filtered_keywords),
start_time=start_time,
end_time=end_time))
def AnalyzeClient(self, client):
"""Finds the client_id and keywords for a client.
Args:
client: A VFSGRRClient record to find keywords for.
Returns:
A tuple (client_id, keywords) where client_id is the client identifier and
keywords is a list of keywords related to client.
"""
client_id = self._ClientIdFromURN(client.urn)
# Start with both the client id itself, and a universal keyword, used to
# find all clients.
#
# TODO(user): Remove the universal keyword once we have a better way
# to do this, i.e., once we have a storage library which can list all
# clients directly.
keywords = [self._NormalizeKeyword(client_id), "."]
def TryAppend(prefix, keyword):
if keyword:
keyword_string = self._NormalizeKeyword(utils.SmartStr(keyword))
keywords.append(keyword_string)
if prefix:
keywords.append(prefix + ":" + keyword_string)
def TryAppendPrefixes(prefix, keyword, delimiter):
TryAppend(prefix, keyword)
segments = str(keyword).split(delimiter)
for i in range(1, len(segments)):
TryAppend(prefix, delimiter.join(segments[0:i]))
return len(segments)
def TryAppendIP(ip):
TryAppend("ip", ip)
# IP4v?
if TryAppendPrefixes("ip", str(ip), ".") == 4:
return
# IP6v?
TryAppendPrefixes("ip", str(ip), ":")
def TryAppendMac(mac):
TryAppend("mac", mac)
if len(mac) == 12:
# If looks like a mac address without ":" symbols, also add the keyword
# with them.
TryAppend("mac", ":".join([mac[i:i + 2] for i in range(0, 12, 2)]))
s = client.Schema
TryAppend("host", client.Get(s.HOSTNAME))
TryAppendPrefixes("host", client.Get(s.HOSTNAME), "-")
TryAppend("fdqn", client.Get(s.FQDN))
TryAppendPrefixes("host", client.Get(s.FQDN), ".")
TryAppend("", client.Get(s.SYSTEM))
TryAppend("", client.Get(s.UNAME))
TryAppend("", client.Get(s.OS_RELEASE))
TryAppend("", client.Get(s.OS_VERSION))
TryAppend("", client.Get(s.KERNEL))
TryAppend("", client.Get(s.ARCH))
for user in client.Get(s.USER, []):
TryAppend("user", user.username)
TryAppend("", user.full_name)
if user.full_name:
for name in user.full_name.split():
# full_name often includes nicknames and similar, wrapped in
# punctuation, e.g. "Thomas 'TJ' Jones". We remove the most common
# wrapping characters.
TryAppend("", name.strip("\"'()"))
for username in client.Get(s.USERNAMES, []):
TryAppend("user", username)
for interface in client.Get(s.LAST_INTERFACES, []):
if interface.mac_address:
TryAppendMac(interface.mac_address.human_readable_address)
for ip in interface.GetIPAddresses():
TryAppendIP(ip)
# We should have all mac and ip addresses already, but some test data only
# has it attached directly, so just in case we look there also.
if client.Get(s.MAC_ADDRESS):
for mac in str(client.Get(s.MAC_ADDRESS)).split("\n"):
TryAppendMac(mac)
for ip_list in client.Get(s.HOST_IPS, []):
for ip in str(ip_list).split("\n"):
TryAppendIP(ip)
client_info = client.Get(s.CLIENT_INFO)
if client_info:
TryAppend("client", client_info.client_name)
if client_info.labels:
for label in client_info.labels:
TryAppend("label", label)
for label in client.GetLabelsNames():
TryAppend("label", label)
return (client_id, keywords)
def AddClient(self, client, **kwargs):
"""Adds a client to the index.
Args:
client: A VFSGRRClient record to add or update.
**kwargs: Additional arguments to pass to the datastore.
"""
self.AddKeywordsForName(*self.AnalyzeClient(client), **kwargs)
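
# Illustrative query sketch: how a ClientIndex instance is obtained depends on
# the surrounding AFF4 infrastructure and is not shown here. LookupClients
# accepts plain keywords plus the optional time-window prefixes handled above,
# e.g.
#
#   urns = index.LookupClients(
#       ["host:mybox", "user:alice", "start_date:2014-01-01"])
#
# while AnalyzeClient/AddClient derive the matching keywords ("host:...",
# "ip:...", "mac:...", "user:...", "label:...") from a VFSGRRClient record.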
|
the-stack_106_20973
|
from neuwon.database.time import TimeSeries
from neuwon.examples.HH import make_model_with_hh
import matplotlib.pyplot as plt
import numpy as np
class Model:
def __init__(self,
time_step = 1e-3,
stagger = True,
# These parameters approximately match Figure 4.9 & 4.10 of the NEURON book.
soma_diameter = 5.642,
stimulus = 0.025e-9,
):
self.time_step = time_step
self.stagger = stagger
self.soma_diameter = soma_diameter
self.stimulus = stimulus
self.make_model()
self.init_steady_state()
self.run_experiment()
def make_model(self):
self.model = m = make_model_with_hh(self.time_step)
hh = m.get_reaction("hh")
self.soma = m.Segment(None, [0,0,0], self.soma_diameter)
self.hh = hh(self.soma, scale=1)
if True:
sa_units = self.soma.get_database_class().get("surface_area").get_units()
print("Soma surface area:", self.soma.surface_area, sa_units)
def init_steady_state(self):
while self.model.clock() < 40:
self.model.advance()
self.model.clock.reset()
def run_experiment(self):
self.v_data = TimeSeries().record(self.soma, "voltage")
self.m_data = TimeSeries().record(self.hh, "m")
ap_times = [10, 25, 40]
while self.model.clock() < 50:
if ap_times and self.model.clock() >= ap_times[0]:
ap_times.pop(0)
self.soma.inject_current(self.stimulus, duration=1)
if self.stagger:
self.model.advance()
else:
self.model._advance_lockstep()
self.v_data.stop()
self.m_data.stop()
self.model.check()
def main():
gold = Model(time_step = 1e-3)
def measure_error(model):
model.v_data.interpolate(gold.v_data)
model.m_data.interpolate(gold.m_data)
error_v = np.abs(np.subtract(model.v_data.get_data(), gold.v_data.get_data()))
error_m = np.abs(np.subtract(model.m_data.get_data(), gold.m_data.get_data()))
return (model.v_data.get_timestamps(), error_v, error_m)
def make_label(x):
if x.stagger: return "staggered, dt = %g ms"%(x.time_step)
else: return "unstaggered, dt = %g ms"%(x.time_step)
def make_figure(stagger):
slow = Model(time_step = 2*200e-3, stagger=stagger)
medium = Model(time_step = 2*100e-3, stagger=stagger)
fast = Model(time_step = 2* 50e-3, stagger=stagger)
slow_times, slow_error_v, slow_error_m = measure_error(slow)
medium_times, medium_error_v, medium_error_m = measure_error(medium)
fast_times, fast_error_v, fast_error_m = measure_error(fast)
plt.subplot(2,2,1)
plt.plot(slow.v_data.get_timestamps(), slow.v_data.get_data(), 'r',
label=make_label(slow))
plt.plot(medium.v_data.get_timestamps(), medium.v_data.get_data(), 'g',
label=make_label(medium))
plt.plot(fast.v_data.get_timestamps(), fast.v_data.get_data(), 'b',
label=make_label(fast))
plt.plot(gold.v_data.get_timestamps(), gold.v_data.get_data(), 'k',
label=make_label(gold))
gold.v_data.label_axes()
plt.legend()
plt.subplot(2,2,3)
plt.plot(slow.m_data.get_timestamps(), slow.m_data.get_data(), 'r',
label=make_label(slow))
plt.plot(medium.m_data.get_timestamps(), medium.m_data.get_data(), 'g',
label=make_label(medium))
plt.plot(fast.m_data.get_timestamps(), fast.m_data.get_data(), 'b',
label=make_label(fast))
plt.plot(gold.m_data.get_timestamps(), gold.m_data.get_data(), 'k',
label=make_label(gold))
plt.legend()
gold.m_data.label_axes()
plt.subplot(2,2,2)
plt.plot(slow_times, slow_error_v, 'r',
label=make_label(slow))
plt.plot(medium_times, medium_error_v, 'g',
label=make_label(medium))
plt.plot(fast_times, fast_error_v, 'b',
label=make_label(fast))
plt.legend()
plt.xlabel('ms')
plt.ylabel('|mV error|')
plt.subplot(2,2,4)
plt.plot(slow_times, slow_error_m, 'r',
label=make_label(slow))
plt.plot(medium_times, medium_error_m, 'g',
label=make_label(medium))
plt.plot(fast_times, fast_error_m, 'b',
label=make_label(fast))
plt.legend()
plt.xlabel('ms')
plt.ylabel('|m error|')
plt.figure("Unstaggered Time Steps")
make_figure(False)
plt.figure("Staggered Time Steps")
make_figure(True)
plt.show()
if __name__ == "__main__": main()
|
the-stack_106_20974
|
from django.core.urlresolvers import reverse
from _index.models import Ids as _IndexIds
from avatar.templatetags.avatar import avatar
from _commons.helpers.types import TypesHelper
from tutorial.models import Author as TutorialAuthor
class ContentHelper(object):
"""
    A helper for content operations
"""
@staticmethod
def get(item_id, item_type):
"""
Get indexed content
"""
index = None
try:
index = _IndexIds.objects.get(item_id=item_id, item_type=TypesHelper.encode(item_type))
except _IndexIds.DoesNotExist:
pass
finally:
return index
@classmethod
def validate(_class, item_id, item_type):
"""
Validate indexed content
"""
        # Import some stuff here (prevents circular imports)
from tutorial.helpers.process import ProcessHelper as TutorialProcessHelper
content_exists, content_author_id = False, None
index = _class.get(item_id, item_type)
if index:
content_exists = True
item_type = TypesHelper.reverse(index.item_type) if content_exists else None
if item_type == 'tutorial':
content_author_id = TutorialProcessHelper.author(item_id)
return content_exists, content_author_id
@staticmethod
def read(item):
"""
Read indexed content data
"""
        # Import some stuff here (prevents circular imports)
from tutorial.helpers.process import ProcessHelper as TutorialProcessHelper
from book.helpers import BookHelper
content_data = {
'type': None,
'id': None,
'content': {},
'author': {}
}
# Read content relations
user = None
if item is not None:
content_data['id'] = item.item_id
content_data['type'] = TypesHelper.reverse(item.item_type)
content_data_url, content_data_title = None, None
if content_data['type'] == 'tutorial':
content_data_url = TutorialProcessHelper.url_full(content_data['id'])
content_data_title = TutorialProcessHelper.title(content_data['id'])
elif content_data['type'] == 'book':
book = BookHelper.resolve(content_data['id'])
if book:
content_data_url = BookHelper.url_full(book)
content_data_title = BookHelper.title(book)
content_data['content'] = {
'url': content_data_url,
'title': content_data_title,
}
# Get author data
if user is not None:
profile = user.profile
content_data['author'] = {
'avatar': avatar(user),
'rank': profile.rank,
'first_name': user.first_name,
'last_name': user.last_name,
'url': reverse('user.views.main', args=[user.username]),
'specialty': profile.specialty,
}
return content_data
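
    # Illustrative usage sketch (the item id below is hypothetical):
    #
    #   exists, author_id = ContentHelper.validate(42, 'tutorial')
    #   if exists:
    #       data = ContentHelper.read(ContentHelper.get(42, 'tutorial'))
    #       url, title = data['content']['url'], data['content']['title']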
|
the-stack_106_20975
|
import sys
from functools import update_wrapper
from future.utils import iteritems
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db.models.base import ModelBase
from django.utils import six
from django.views.decorators.cache import never_cache
from django.template.engine import Engine
import inspect
if six.PY2 and sys.getdefaultencoding() == 'ascii':
import imp
imp.reload(sys)
sys.setdefaultencoding("utf-8")
class AlreadyRegistered(Exception):
pass
class NotRegistered(Exception):
pass
class MergeAdminMetaclass(type):
def __new__(cls, name, bases, attrs):
return type.__new__(cls, str(name), bases, attrs)
class AdminSite(object):
def __init__(self, name='xadmin'):
self.name = name
self.app_name = 'xadmin'
self._registry = {} # model_class class -> admin_class class
self._registry_avs = {} # admin_view_class class -> admin_class class
self._registry_settings = {} # settings name -> admin_class class
self._registry_views = []
# url instance contains (path, admin_view class, name)
self._registry_modelviews = []
# url instance contains (path, admin_view class, name)
self._registry_plugins = {} # view_class class -> plugin_class class
self._admin_view_cache = {}
# self.check_dependencies()
self.model_admins_order = 0
def copy_registry(self):
import copy
return {
'models': copy.copy(self._registry),
'avs': copy.copy(self._registry_avs),
'views': copy.copy(self._registry_views),
'settings': copy.copy(self._registry_settings),
'modelviews': copy.copy(self._registry_modelviews),
'plugins': copy.copy(self._registry_plugins),
}
def restore_registry(self, data):
self._registry = data['models']
self._registry_avs = data['avs']
self._registry_views = data['views']
self._registry_settings = data['settings']
self._registry_modelviews = data['modelviews']
self._registry_plugins = data['plugins']
def register_modelview(self, path, admin_view_class, name):
from xadmin.views.base import BaseAdminView
if issubclass(admin_view_class, BaseAdminView):
self._registry_modelviews.append((path, admin_view_class, name))
else:
raise ImproperlyConfigured(u'The registered view class %s isn\'t subclass of %s' %
(admin_view_class.__name__, BaseAdminView.__name__))
def register_view(self, path, admin_view_class, name):
self._registry_views.append((path, admin_view_class, name))
def register_plugin(self, plugin_class, admin_view_class):
from xadmin.views.base import BaseAdminPlugin
if issubclass(plugin_class, BaseAdminPlugin):
self._registry_plugins.setdefault(
admin_view_class, []).append(plugin_class)
else:
raise ImproperlyConfigured(u'The registered plugin class %s isn\'t subclass of %s' %
(plugin_class.__name__, BaseAdminPlugin.__name__))
def register_settings(self, name, admin_class):
self._registry_settings[name.lower()] = admin_class
def register(self, model_or_iterable, admin_class=object, **options):
from xadmin.views.base import BaseAdminView
if isinstance(model_or_iterable, ModelBase) or issubclass(model_or_iterable, BaseAdminView):
model_or_iterable = [model_or_iterable]
for model in model_or_iterable:
if isinstance(model, ModelBase):
if model._meta.abstract:
raise ImproperlyConfigured('The model %s is abstract, so it '
'cannot be registered with customer_admin.' % model.__name__)
if model in self._registry:
raise AlreadyRegistered(
'The model %s is already registered' % model.__name__)
# If we got **options then dynamically construct a subclass of
# admin_class with those **options.
if options:
# For reasons I don't quite understand, without a __module__
# the created class appears to "live" in the wrong place,
# which causes issues later on.
options['__module__'] = __name__
admin_class = type(str("%s%sAdmin" % (model._meta.app_label, model._meta.model_name)), (admin_class,), options or {})
admin_class.model = model
admin_class.order = self.model_admins_order
self.model_admins_order += 1
self._registry[model] = admin_class
else:
if model in self._registry_avs:
raise AlreadyRegistered('The admin_view_class %s is already registered' % model.__name__)
if options:
options['__module__'] = __name__
admin_class = type(str(
"%sAdmin" % model.__name__), (admin_class,), options)
# Instantiate the customer_admin class to save in the registry
self._registry_avs[model] = admin_class
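
    # Illustrative sketch (the model and option names are hypothetical): passing
    # **options to register() builds a "<app_label><model_name>Admin" subclass on
    # the fly,
    #
    #   site.register(Article, list_display=('title', 'created'))
    #
    # which is equivalent to declaring an options class with those attributes and
    # registering it explicitly via site.register(Article, ArticleOptions).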
def unregister(self, model_or_iterable):
"""
Unregisters the given model(s).
If a model isn't already registered, this will raise NotRegistered.
"""
from xadmin.views.base import BaseAdminView
if isinstance(model_or_iterable, (ModelBase, BaseAdminView)):
model_or_iterable = [model_or_iterable]
for model in model_or_iterable:
if isinstance(model, ModelBase):
if model not in self._registry:
raise NotRegistered(
'The model %s is not registered' % model.__name__)
del self._registry[model]
else:
if model not in self._registry_avs:
raise NotRegistered('The admin_view_class %s is not registered' % model.__name__)
del self._registry_avs[model]
def set_loginview(self, login_view):
self.login_view = login_view
def has_permission(self, request):
"""
Returns True if the given HttpRequest has permission to view
*at least one* page in the customer_admin site.
"""
return request.user.is_active and request.user.is_staff
def check_dependencies(self):
"""
Check that all things needed to run the customer_admin have been correctly installed.
The default implementation checks that LogEntry, ContentType and the
auth context processor are installed.
"""
from django.contrib.contenttypes.models import ContentType
if not ContentType._meta.installed:
raise ImproperlyConfigured("Put 'django.contrib.contenttypes' in "
"your INSTALLED_APPS setting in order to use the customer_admin application.")
default_template_engine = Engine.get_default()
if not ('django.contrib.auth.context_processors.auth' in default_template_engine.context_processors or
'django.core.context_processors.auth' in default_template_engine.context_processors):
raise ImproperlyConfigured("Put 'django.contrib.auth.context_processors.auth' "
"in your TEMPLATE_CONTEXT_PROCESSORS setting in order to use the customer_admin application.")
def admin_view(self, view, cacheable=False):
"""
Decorator to create an customer_admin view attached to this ``AdminSite``. This
wraps the view and provides permission checking by calling
``self.has_permission``.
You'll want to use this from within ``AdminSite.get_urls()``:
class MyAdminSite(AdminSite):
def get_urls(self):
from django.conf.urls import url
urls = super(MyAdminSite, self).get_urls()
urls += [
url(r'^my_view/$', self.admin_view(some_view))
]
return urls
By default, admin_views are marked non-cacheable using the
``never_cache`` decorator. If the view can be safely cached, set
cacheable=True.
"""
def inner(request, *args, **kwargs):
if not self.has_permission(request) and getattr(view, 'need_site_permission', True):
return self.create_admin_view(self.login_view)(request, *args, **kwargs)
return view(request, *args, **kwargs)
if not cacheable:
inner = never_cache(inner)
return update_wrapper(inner, view)
def _get_merge_attrs(self, option_class, plugin_class):
return dict([(name, getattr(option_class, name)) for name in dir(option_class)
if name[0] != '_' and not callable(getattr(option_class, name)) and hasattr(plugin_class, name)])
def _get_settings_class(self, admin_view_class):
name = admin_view_class.__name__.lower()
if name in self._registry_settings:
return self._registry_settings[name]
        elif name.endswith('admin') and name[0:-5] in self._registry_settings:
return self._registry_settings[name[0:-5]]
elif name.endswith('adminview') and name[0:-9] in self._registry_settings:
return self._registry_settings[name[0:-9]]
return None
def _create_plugin(self, option_classes):
def merge_class(plugin_class):
if option_classes:
attrs = {}
bases = [plugin_class]
for oc in option_classes:
attrs.update(self._get_merge_attrs(oc, plugin_class))
meta_class = getattr(oc, plugin_class.__name__, getattr(oc, plugin_class.__name__.replace('Plugin', ''), None))
if meta_class:
bases.insert(0, meta_class)
if attrs:
plugin_class = MergeAdminMetaclass(
'%s%s' % (''.join([oc.__name__ for oc in option_classes]), plugin_class.__name__),
tuple(bases), attrs)
return plugin_class
return merge_class
def get_plugins(self, admin_view_class, *option_classes):
from xadmin.views import BaseAdminView
plugins = []
opts = [oc for oc in option_classes if oc]
for klass in admin_view_class.mro():
if klass == BaseAdminView or issubclass(klass, BaseAdminView):
merge_opts = []
reg_class = self._registry_avs.get(klass)
if reg_class:
merge_opts.append(reg_class)
settings_class = self._get_settings_class(klass)
if settings_class:
merge_opts.append(settings_class)
merge_opts.extend(opts)
ps = self._registry_plugins.get(klass, [])
plugins.extend(map(self._create_plugin(
merge_opts), ps) if merge_opts else ps)
return plugins
def get_view_class(self, view_class, option_class=None, **opts):
merges = [option_class] if option_class else []
for klass in view_class.mro():
reg_class = self._registry_avs.get(klass)
if reg_class:
merges.append(reg_class)
settings_class = self._get_settings_class(klass)
if settings_class:
merges.append(settings_class)
merges.append(klass)
new_class_name = ''.join([c.__name__ for c in merges])
if new_class_name not in self._admin_view_cache:
plugins = self.get_plugins(view_class, option_class)
self._admin_view_cache[new_class_name] = MergeAdminMetaclass(
new_class_name, tuple(merges),
dict({'plugin_classes': plugins, 'admin_site': self}, **opts))
return self._admin_view_cache[new_class_name]
def create_admin_view(self, admin_view_class):
return self.get_view_class(admin_view_class).as_view()
def create_model_admin_view(self, admin_view_class, model, option_class):
return self.get_view_class(admin_view_class, option_class).as_view()
def get_urls(self):
from django.urls import include, path, re_path
from xadmin.views.base import BaseAdminView
if settings.DEBUG:
self.check_dependencies()
def wrap(view, cacheable=False):
def wrapper(*args, **kwargs):
return self.admin_view(view, cacheable)(*args, **kwargs)
wrapper.admin_site = self
return update_wrapper(wrapper, view)
# Admin-site-wide views.
urlpatterns = [
path('jsi18n/', wrap(self.i18n_javascript, cacheable=True), name='jsi18n')
]
        # Registered customer_admin views.
        # inspect.isclass only checks whether the registered object is a class; this lets a
        # registered entry be either a BaseAdminView subclass (wrapped as an admin view) or a
        # callable that returns its own url patterns, which are mounted via include() below.
urlpatterns += [
re_path(
_path,
wrap(self.create_admin_view(clz_or_func))
if inspect.isclass(clz_or_func) and issubclass(clz_or_func, BaseAdminView)
else include(clz_or_func(self)),
name=name
)
for _path, clz_or_func, name in self._registry_views
]
# Add in each model's views.
for model, admin_class in iteritems(self._registry):
view_urls = [
re_path(
_path,
wrap(self.create_model_admin_view(clz, model, admin_class)),
name=name % (model._meta.app_label, model._meta.model_name)
)
for _path, clz, name in self._registry_modelviews
]
urlpatterns += [
re_path(r'^%s/%s/' % (model._meta.app_label, model._meta.model_name), include(view_urls))
]
return urlpatterns
@property
def urls(self):
return self.get_urls(), self.name, self.app_name
    def i18n_javascript(self, request):
        """
        Displays the i18n JavaScript that the Django customer_admin requires.
        This takes into account the USE_I18N setting. If it's set to False, the
        generated JavaScript will be leaner and faster.
        """
        from django.views.i18n import JavaScriptCatalog
        return JavaScriptCatalog.as_view(packages=['django.contrib.admin'])(request)
# This global object represents the default customer_admin site, for the common case.
# You can instantiate AdminSite in your own code to create a custom customer_admin site.
site = AdminSite()
def register(models, **kwargs):
    def _model_admin_wrapper(admin_class):
        site.register(models, admin_class)
        # Return the class so the decorator leaves the admin class bound to its name.
        return admin_class
    return _model_admin_wrapper
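# Illustrative usage sketch (not part of the original module): the ``register``
# decorator above registers a ModelAdmin-style options class with the default
# ``site``. ``MyModel`` and ``MyModelAdmin`` are hypothetical placeholders.
#
#     from myapp.models import MyModel
#
#     @register(MyModel)
#     class MyModelAdmin(object):
#         list_display = ('id',)
#
# which is equivalent to calling ``site.register(MyModel, MyModelAdmin)`` directly.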
|
the-stack_106_20977
|
from __future__ import unicode_literals
from xml.dom.minidom import getDOMImplementation, parseString, Node
from rd.core import RD, Element, Link, Property, Title
XRD_NAMESPACE = "http://docs.oasis-open.org/ns/xri/xrd-1.0"
def _get_text(root):
text = ''
for node in root.childNodes:
if node.nodeType == Node.TEXT_NODE and node.nodeValue:
text += node.nodeValue
else:
text += _get_text(node)
return text.strip() or None
def loads(content):
import isodate
def expires_handler(node, obj):
obj.expires = isodate.parse_datetime(_get_text(node))
def subject_handler(node, obj):
obj.subject = _get_text(node)
def alias_handler(node, obj):
obj.aliases.append(_get_text(node))
def property_handler(node, obj):
obj.properties.append(Property(node.getAttribute('type'), _get_text(node)))
def title_handler(node, obj):
obj.titles.append(Title(_get_text(node), node.getAttribute('xml:lang')))
def link_handler(node, obj):
l = Link()
l.rel = node.getAttribute('rel')
l.type = node.getAttribute('type')
l.href = node.getAttribute('href')
l.template = node.getAttribute('template')
obj.links.append(l)
handlers = {
'Expires': expires_handler,
'Subject': subject_handler,
'Alias': alias_handler,
'Property': property_handler,
'Link': link_handler,
'Title': title_handler,
}
def unknown_handler(node, obj):
obj.elements.append(Element(
name=node.tagName,
value=_get_text(node),
))
def handle_node(node, obj):
handler = handlers.get(node.nodeName, unknown_handler)
if handler and node.nodeType == node.ELEMENT_NODE:
handler(node, obj)
doc = parseString(content)
root = doc.documentElement
rd = RD(root.getAttribute('xml:id'))
for name, value in root.attributes.items():
if name != 'xml:id':
rd.attributes.append((name, value))
for node in root.childNodes:
handle_node(node, rd)
if node.nodeName == 'Link':
link = rd.links[-1]
for child in node.childNodes:
handle_node(child, link)
return rd
def dumps(xrd):
doc = getDOMImplementation().createDocument(XRD_NAMESPACE, "XRD", None)
root = doc.documentElement
root.setAttribute('xmlns', XRD_NAMESPACE)
if xrd.xml_id:
root.setAttribute('xml:id', xrd.xml_id)
for attr in xrd.attributes:
root.setAttribute(attr.name, attr.value)
if xrd.expires:
node = doc.createElement('Expires')
node.appendChild(doc.createTextNode(xrd.expires.isoformat()))
root.appendChild(node)
if xrd.subject:
node = doc.createElement('Subject')
node.appendChild(doc.createTextNode(xrd.subject))
root.appendChild(node)
for alias in xrd.aliases:
node = doc.createElement('Alias')
node.appendChild(doc.createTextNode(alias))
root.appendChild(node)
for prop in xrd.properties:
node = doc.createElement('Property')
node.setAttribute('type', prop.type)
if prop.value:
node.appendChild(doc.createTextNode(str(prop.value)))
else:
node.setAttribute('xsi:nil', 'true')
root.appendChild(node)
for element in xrd.elements:
node = doc.createElement(element.name)
node.appendChild(doc.createTextNode(element.value))
root.appendChild(node)
for link in xrd.links:
if link.href and link.template:
raise ValueError('only one of href or template attributes may be specified')
link_node = doc.createElement('Link')
if link.rel:
link_node.setAttribute('rel', link.rel)
if link.type:
link_node.setAttribute('type', link.type)
if link.href:
link_node.setAttribute('href', link.href)
if link.template:
link_node.setAttribute('template', link.template)
for title in link.titles:
node = doc.createElement('Title')
node.appendChild(doc.createTextNode(str(title)))
if title.lang:
node.setAttribute('xml:lang', title.lang)
link_node.appendChild(node)
for prop in link.properties:
node = doc.createElement('Property')
node.setAttribute('type', prop.type)
if prop.value:
node.appendChild(doc.createTextNode(str(prop.value)))
else:
node.setAttribute('xsi:nil', 'true')
link_node.appendChild(node)
root.appendChild(link_node)
return doc
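# Usage sketch (illustrative; assumes the ``rd.core`` classes imported above
# behave as their names suggest):
#
#     xml_in = ('<XRD xmlns="http://docs.oasis-open.org/ns/xri/xrd-1.0">'
#               '<Subject>http://example.com/</Subject>'
#               '<Link rel="lrdd" template="http://example.com/lrdd?uri={uri}"/>'
#               '</XRD>')
#     xrd = loads(xml_in)
#     print(xrd.subject)               # -> 'http://example.com/'
#     print(dumps(xrd).toprettyxml())  # serialize the object tree back to XML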
|
the-stack_106_20980
|
import requests
from app.modules.error_messages import error_messages
def get_all_contries(url):
# Requests: HTTP for Humans
# Documentation: https://docs.python-requests.org/en/latest/
    # Fetch the raw country data from the given URL and return the parsed JSON without further processing.
try:
print('Obtaining information from countries... 🌎')
r = requests.get(url)
        return r.json()
except requests.exceptions.MissingSchema:
return error_messages('MSG_MISSING_SCHEMA')
except requests.exceptions.InvalidURL:
return error_messages('MSG_INVALID_URL')
except requests.exceptions.InvalidSchema:
return error_messages('MSG_INVALID_SCHEMA')
except requests.exceptions.ConnectionError:
return error_messages('MSG_CONNECTION_ERROR')
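# Usage sketch (the URL below is only an example; any REST endpoint returning
# JSON country data works the same way):
#
#     countries = get_all_contries('https://restcountries.com/v3.1/all')
#     if isinstance(countries, list):
#         print('Received data for', len(countries), 'countries')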
|
the-stack_106_20982
|
#!/usr/bin/env python3
#
# Copyright (c) 2014-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import json
import time
from typing import Dict, List
import tabulate
from openr.cli.utils import utils
from openr.cli.utils.commands import OpenrCtrlCmd
from openr.OpenrCtrl import OpenrCtrl
from openr.utils import printing
class MonitorCmd(OpenrCtrlCmd):
def print_log_list_type(self, llist: List) -> str:
idx = 1
str_txt = "{}".format("".join(llist[0]) + "\n")
while idx < len(llist):
str_txt += "{:<18} {}".format("", "".join(llist[idx]) + "\n")
idx += 1
return str_txt
def print_log_sample(self, log_sample: Dict) -> None:
columns = ["", ""]
rows = []
for _, value0 in log_sample.items():
for key, value in value0.items():
if key == "time":
value = time.strftime("%H:%M:%S %Y-%m-%d", time.localtime(value))
if type(value) is list:
value = self.print_log_list_type(value)
rows.append(["{:<17} {}".format(key, value)])
print(tabulate.tabulate(rows, headers=columns, tablefmt="plain"))
class CountersCmd(MonitorCmd):
def _run(
self, client: OpenrCtrl.Client, prefix: str = "", json: bool = False
) -> None:
resp = client.getCounters()
self.print_counters(client, resp, prefix, json)
def print_counters(
self, client: OpenrCtrl.Client, resp: Dict, prefix: str, json: bool
) -> None:
""" print the Kv Store counters """
host_id = client.getMyNodeName()
caption = "{}'s counters".format(host_id)
rows = []
for key, counter in sorted(resp.items()):
if not key.startswith(prefix):
continue
rows.append([key, ":", counter])
if json:
json_data = {k: v for k, _, v in rows}
print(utils.json_dumps(json_data))
else:
print(
printing.render_horizontal_table(
rows, caption=caption, tablefmt="plain"
)
)
print()
class LogCmd(MonitorCmd):
def _run(self, client: OpenrCtrl.Client, json_opt: bool = False) -> None:
try:
resp = client.getEventLogs()
self.print_log_data(resp, json_opt)
except TypeError:
host_id = client.getMyNodeName()
print(
"Incompatible return type. Please upgrade Open/R binary on {}".format(
host_id
)
)
def print_log_data(self, resp, json_opt):
""" print the log data"""
if json_opt:
event_logs = []
for event_log in resp:
event_logs.append(json.loads(event_log))
print(utils.json_dumps(event_logs))
else:
for event_log in resp:
self.print_log_sample(json.loads(event_log))
class StatisticsCmd(MonitorCmd):
def _run(self, client: OpenrCtrl.Client) -> None:
stats_templates = [
{
"title": "KvStore Stats",
"counters": [
("KeyVals", "kvstore.num_keys"),
("Peering Sessions", "kvstore.num_peers"),
("Pending Sync", "kvstore.pending_full_sync"),
],
"stats": [
("Rcvd Publications", "kvstore.received_publications.count"),
("Rcvd KeyVals", "kvstore.received_key_vals.sum"),
("Updates KeyVals", "kvstore.updated_key_vals.sum"),
],
},
{
"title": "LinkMonitor/Spark Stats",
"counters": [
("Adjacent Neighbors", "spark.num_adjacent_neighbors"),
("Tracked Neighbors", "spark.num_tracked_neighbors"),
],
"stats": [
("Updates AdjDb", "link_monitor.advertise_adjacencies.sum"),
("Rcvd Hello Pkts", "spark.hello.packet_recv.sum"),
("Sent Hello Pkts", "spark.hello.packet_sent.sum"),
],
},
{
"title": "Decision/Fib Stats",
"counters": [],
"stats": [
("Updates AdjDbs", "decision.adj_db_update.count"),
("Updates PrefixDbs", "decision.prefix_db_update.count"),
("SPF Runs", "decision.spf_runs.count"),
("SPF Avg Duration (ms)", "decision.spf_ms.avg"),
("Convergence Duration (ms)", "fib.convergence_time_ms.avg"),
("Updates RouteDb", "fib.process_route_db.count"),
("Full Route Sync", "fib.sync_fib_calls.count"),
],
},
]
counters = client.getCounters()
self.print_stats(stats_templates, counters)
def print_stats(self, stats_templates, counters):
"""
Print in pretty format
"""
suffixes = [".60", ".600", ".3600", ""]
for template in stats_templates:
counters_rows = []
for title, key in template["counters"]:
val = counters.get(key, None)
counters_rows.append([title, "N/A" if not val and val != 0 else val])
stats_cols = ["Stat", "1 min", "10 mins", "1 hour", "All Time"]
stats_rows = []
for title, key_prefix in template["stats"]:
row = [title]
for key in ["{}{}".format(key_prefix, s) for s in suffixes]:
val = counters.get(key, None)
row.append("N/A" if not val and val != 0 else val)
stats_rows.append(row)
print("\n> {} ".format(template["title"]))
if counters_rows:
print()
print(
printing.render_horizontal_table(
counters_rows, tablefmt="plain"
).strip("\n")
)
if stats_rows:
print()
print(
printing.render_horizontal_table(
stats_rows, column_labels=stats_cols, tablefmt="simple"
).strip("\n")
)
|
the-stack_106_20984
|
from setuptools import setup, find_packages
from pathlib import Path
import os
directory = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'yumi_gym', 'envs', 'assets')
data_files = []
for root, dirs, files in os.walk(directory):
for file in files:
data_files.append(os.path.join(root, file))
setup(
name='yumi-gym',
version='0.0.3',
packages=find_packages(),
package_data={'yumi_gym': data_files},
include_package_data=True,
install_requires=['gym', 'pybullet', 'numpy'],
description="Physics simulation for ABB's collaborative robot yumi",
long_description=Path("README.md").read_text(),
long_description_content_type="text/markdown",
url='https://github.com/0aqz0/yumi-gym',
author='Haodong Zhang',
author_email='[email protected]',
license='MIT',
)
|
the-stack_106_20986
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Gplates(CMakePackage):
"""GPlates is desktop software for the interactive visualisation of
plate-tectonics. GPlates offers a novel combination of interactive
plate-tectonic reconstructions, geographic information system (GIS)
functionality and raster data visualisation. GPlates enables both the
visualisation and the manipulation of plate-tectonic reconstructions
and associated data through geological time."""
homepage = 'https://www.gplates.org'
url = 'https://sourceforge.net/projects/gplates/files/gplates/2.0/gplates-2.0.0-unixsrc.tar.bz2/download'
version('2.1.0', sha256='5a52242520d7e243c541e164c8417b23f4e17fcd79ed81f865b2c13628bb0e07', deprecated=True)
version('2.0.0', sha256='1c27d3932a851153baee7cec48e57c2bbc87e4eea02f8a986882515ba4b44c0b', deprecated=True)
depends_on('[email protected]:', type='build')
depends_on('ninja', type='build')
# Qt 5 does not support (at least) the Q_WS_* constants.
depends_on('[email protected]:4')
depends_on('[email protected]:')
depends_on('glu')
depends_on('glew')
# GDAL's OGRSFDriverRegistrar is not compatible anymore starting with 2.0.
depends_on('[email protected]:1')
depends_on('[email protected]:')
# The latest release of gplates came out before PROJ.6 was released,
# so I'm assuming it's not supported.
depends_on('[email protected]:5')
# Boost's Python library has a different name starting with 1.67.
# There were changes to Boost's optional in 1.61 that make the build fail.
depends_on('[email protected]:1.60')
depends_on('[email protected]:2')
# When built in parallel, headers are not generated before they are used
# (specifically, ViewportWindowUi.h) with the Makefiles generator.
generator = 'Ninja'
def url_for_version(self, version):
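        # For example, version 2.0.0 resolves to
        # https://sourceforge.net/projects/gplates/files/gplates/2.0/gplates-2.0.0-unixsrc.tar.bz2/download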
url = 'https://sourceforge.net/projects/gplates/files/gplates/{0}/gplates-{1}-unixsrc.tar.bz2/download'
return url.format(version.up_to(2), version)
def patch(self):
# GPlates overrides FindPythonLibs and finds the static library, which
# can not be used easily. Fall back to CMake's version, which finds
# the shared library instead.
force_remove('cmake/modules/FindPythonLibs.cmake')
# GPlates only installs its binary for the Release configuration.
filter_file('CONFIGURATIONS release',
'CONFIGURATIONS Debug Release RelWithDebInfo MinSizeRel',
'src/CMakeLists.txt')
|
the-stack_106_20988
|
#!/usr/bin/python2.4
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility class to manage notifications."""
import settings
from django import template
from django.template import loader
from django.utils import translation
from google.appengine.api import mail
from google.appengine.ext import db
from core import models
from core import utils
_ = translation.ugettext
class NotificationType(utils.RegistrationStatus):
"""Different types of user notifications.
Attributes:
ENROLL_REJECTED: An enroll request can get rejected if for example an online
contention leads to a WAITING state, but the offline rule does not allow
WAITING on an activity.
Another example could be an activity which freezes ENROLLED list 24 hours
before activity start, and rejects all WAITING.
    REMINDER: Used to send email reminders as the activity's scheduled time approaches.
MANAGER_APPROVAL_REQUEST: Notification to manager that approval for a user
is required before joining a course.
REGISTRATION_UPDATE: Used when the registration has been updated.
"""
ENROLL_REJECTED = 'enroll_rejected'
REMINDER = 'reminder'
MANAGER_APPROVAL_REQUEST = 'manager_approval_request'
REGISTRATION_UPDATE = 'update'
def SendMail(user_registration, notification_type, to=None,
reply_to=None, cc=None, bcc=None, extra_context=None):
"""Sends mail about a particular event.
Args:
user_registration: The models.UserRegistration for which we need to send the
latest email notification.
notification_type: A NotificationType.
    to: An optional string address override to send the notification email to.
reply_to: An optional string for the reply-to address.
cc: An optional string or list of string for emails to be cc-ed.
bcc: An optional string or list of string for emails to be bcc-ed.
extra_context: A dict to pass in extra context to the email templates. The
context passed to the templates is updated with this dict.
"""
bcc = bcc or []
cc = cc or []
# Get contact list users.
contact_list = user_registration.program.contact_list
if contact_list:
reply_to = reply_to or contact_list[0].email()
to = to or user_registration.user.email()
datastore_user = models.GlearnUser.FromAppengineUser(user_registration.user)
access_points = db.get(user_registration.access_point_list)
schedules = db.get(user_registration.schedule_list)
# Get locations and times.
locations_and_times = []
for access_point, schedule in zip(access_points, schedules):
locations_and_times.append({
'start_time_local': datastore_user.GetLocalTime(schedule.start_time),
'location': access_point.uri})
locations_and_times = sorted(locations_and_times,
key=lambda x: x['start_time_local'])
# Get possible reasons for status.
status_reasons = [_(str(cfg.GetDescription()))
for cfg in user_registration.affecting_rule_configs]
context_values = {'register': user_registration,
'locations_and_times': locations_and_times,
'hostname': settings.DATABASE_OPTIONS['remote_host'],
'contact_list': contact_list,
'status_reasons': status_reasons}
# Add extra_context to the template context.
extra_context = extra_context or {}
context_values.update(extra_context)
if notification_type == NotificationType.ENROLLED:
template_name = 'email_enroll.html'
subject = _('Registration confirmation: %s')
elif notification_type == NotificationType.REGISTRATION_UPDATE:
template_name = 'email_registration_update.html'
subject = _('Updated activity: %s')
elif notification_type == NotificationType.ENROLL_REJECTED:
template_name = 'email_enroll_rejected.html'
if user_registration.creator != user_registration.user:
# The registration was rejected and was initiated by somebody else.
# We send the email to the person who initiated the registration, not
# to the user who wasn't allowed to register, to notify the person who
# attempted this action of failure.
to = user_registration.creator.email()
subject = _('Sign-up denied: %s')
elif notification_type == NotificationType.WAITLISTED:
# Determine if the user is waiting only for max people in activity rule.
if user_registration.OnlyWaitingForMaxPeopleActivity():
template_name = 'email_waitlisted.html'
subject = _('Waitlist notification: %s')
context_values['rank'] = models.UserRegistration.WaitlistRankForUser(
user_registration.activity, user_registration.user)
context_values['capacity'] = user_registration.activity.MaxCapacity()
else:
# Waitlisted, but rules other than max rule are also in play.
# Mention to user that registration is pending and give reasons.
template_name = 'email_pending.html'
subject = _('Enroll request pending: %s')
elif notification_type == NotificationType.UNREGISTERED:
if not user_registration.activity.to_be_deleted:
# Ordinary unregistration.
subject = _('Cancellation confirmation: %s')
else: # Special system unregistration.
subject = _('Unregistered due to session cancellation: %s')
template_name = 'email_unregister.html'
elif notification_type == NotificationType.MANAGER_APPROVAL_REQUEST:
template_name = 'email_manager_approval.html'
subject = _('Approval required for %s to attend %s')
subject %= (user_registration.user.nickname(), '%s')
else:
assert False
subject %= user_registration.program.name
context = template.Context(context_values)
body = loader.render_to_string(template_name,
context_instance=context)
message = mail.EmailMessage(sender=settings.ADMIN_EMAIL)
message.to = to
message.body = body
message.html = body
if cc: message.cc = cc
if bcc: message.bcc = bcc
if reply_to: message.reply_to = reply_to
message.subject = subject
message.send()
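# Usage sketch (illustrative only): ``registration`` would be a models.UserRegistration
# loaded elsewhere, and the cc address and extra context are hypothetical.
#
#     SendMail(registration, NotificationType.ENROLLED,
#              cc='[email protected]',
#              extra_context={'note': 'Seats are limited.'})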
|
the-stack_106_20990
|
import torch
import torch.nn as nn
from torch.utils.data import Dataset
class MetricDataset(Dataset):
def __init__(self, data):
super(MetricDataset, self).__init__()
self.data = data[0].squeeze()
self.targets = data[1].squeeze()
def __getitem__(self, index):
return (
torch.from_numpy(self.data[index]).float().flatten(),
torch.tensor(self.targets[index]).float().flatten(),
)
def __len__(self):
return self.data.shape[0]
class MetaNN(nn.Module):
def __init__(self, input_size):
super(MetaNN, self).__init__()
self.act = nn.ReLU()
self.layers = nn.Sequential(
nn.Linear(input_size, 50),
self.act,
nn.Linear(50, 40),
self.act,
nn.Linear(40, 30),
self.act,
nn.Linear(30, 20),
self.act,
nn.Linear(20, 10),
self.act,
nn.Linear(10, 1),
)
def forward(self, x):
return self.layers(x).view(x.shape[0], -1)
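# Minimal usage sketch (not part of the original module): build a MetricDataset
# from a (features, targets) tuple of numpy arrays and run one forward pass
# through MetaNN. The array shapes are illustrative assumptions.
if __name__ == "__main__":
    import numpy as np
    from torch.utils.data import DataLoader

    features = np.random.rand(32, 1, 10)   # squeezed to (32, 10) by MetricDataset
    targets = np.random.rand(32, 1)        # squeezed to (32,)
    loader = DataLoader(MetricDataset((features, targets)), batch_size=8)

    model = MetaNN(input_size=10)
    x, y = next(iter(loader))
    print(model(x).shape, y.shape)          # torch.Size([8, 1]) torch.Size([8, 1])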
|
the-stack_106_20991
|
"""The FSI for zipfiles"""
import zipfile
import os
import tempfile
import time
import shutil
import datetime
import stat
from io import BytesIO
from stashutils.fsi import base
from stashutils.fsi import errors
# TODO: check filename bug when writing
class ZipfileFSI(base.BaseFSI):
"""FSI for zipfiles"""
def __init__(self, logger):
base.BaseFSI.__init__(self, logger)
self.logger = logger
self.path = "/"
self.zf = None
self.is_new = True
self.dirs = ["/"] # list of dirs with no files in them
self.log("Warning: The ZipfileFSI has some unfixed bugs!\n")
# ^^^ These bugs are beyond my abilities (and they seem to be case
# dependent)
def abspath(self, path):
"""returns the absolute path for path."""
p = os.path.join(self.path, path)
while p.startswith("/"):
p = p[1:]
return p
def _getdirs(self):
"""returns a list of all dirs"""
dirs = ["/"] + self.dirs
for name in self.zf.namelist():
dirpath = os.path.dirname(name)
if dirpath not in dirs:
dirs.append(dirpath)
return dirs
def _update(self, remove=[]):
"""create a new zipfile with some changes"""
nzfp = os.path.join(
tempfile.gettempdir(), "tempzip_{t}.zip".format(t=time.time())
)
op = self.zf.fp.name
pswd = self.zf.pwd
comment = self.zf.comment
nzf = zipfile.ZipFile(nzfp, "w", self.zf.compression, True)
infos = self.zf.infolist()
for zipinfo in infos:
add = True
for rm in remove:
if zipinfo.filename.startswith(rm):
add = False
break
if not add:
continue
ofo = self.zf.open(zipinfo)
nzf.writestr(zipinfo, ofo.read())
self.zf.close()
os.remove(op)
nzf.close()
shutil.copy(nzfp, op)
self.zf = zipfile.ZipFile(op, "a", zipfile.ZIP_DEFLATED, True)
self.zf.setpassword(pswd)
self.zf.comment = comment
def connect(self, *args):
"""open the zipfile"""
        if len(args) not in (1, 2):
return "expected one or two arguments!"
ap = os.path.abspath(args[0])
if os.path.exists(ap):
if not zipfile.is_zipfile(ap):
return "not a zipfile"
try:
self.zf = zipfile.ZipFile(ap, "a", zipfile.ZIP_DEFLATED, True)
self.is_new = False
except Exception as e:
                return str(e)
if len(args) == 2:
self.zf.setpassword(args[1])
return True
else:
try:
self.zf = zipfile.ZipFile(ap, "w", zipfile.ZIP_DEFLATED, True)
self.is_new = True
except Exception as e:
                return str(e)
return True
def repr(self):
"""returns a string representing this fsi"""
template = "{inz} Zipfile at '{p}'"
inz = "New" if self.is_new else "Open"
return template.format(inz=inz, p=self.zf.fp.name)
def listdir(self, path="."):
ap = self.abspath(path)
dirlist = self._getdirs()
namelist = self.zf.namelist()
names = dirlist + namelist
content = []
for name in names:
dirname = os.path.dirname(name)
if dirname == ap:
content.append(name.replace(dirname, ""))
return content
def cd(self, path):
np = self.abspath(path)
dirs = self._getdirs()
if np not in dirs:
raise errors.OperationFailure("Dir does not exists!")
self.path = np
def get_path(self):
return self.path
def remove(self, path):
ap = self.abspath(path)
self._update(remove=[ap])
def mkdir(self, name):
ap = self.abspath(name)
self.dirs.append(ap)
def close(self):
self.zf.close()
def isdir(self, name):
ap = self.abspath(name)
return (ap in self._getdirs()) and not self.isfile(name)
def isfile(self, name):
ap = self.abspath(name)
return ap in self.zf.namelist()
def stat(self, name):
ap = self.abspath(name)
self.log("stat: {ap}\n".format(ap=ap))
isdir = self.isdir(name)
isfile = self.isfile(name)
if not (isdir or isfile):
self.log("stat-target not found.\n")
raise errors.OperationFailure("Not found!")
if isdir:
size = 1
mtime = None
else:
zipinfo = self.zf.getinfo(ap)
size = zipinfo.file_size
timestamp = zipinfo.date_time
dt = datetime.datetime(*timestamp)
mtime = (dt - datetime.datetime(1970, 1, 1)).total_seconds()
type_ = stat.S_IFREG if isfile else stat.S_IFDIR
mode = base.calc_mode(type=type_)
self.log("stat return\n")
return base.make_stat(size=size, mtime=mtime, ctime=mtime, mode=mode)
def open(self, name, mode="r", buffering=0):
ap = self.abspath(name)
self.log("open {ap} with mode {m}\n".format(ap=ap, m=mode))
if "r" in mode:
try:
reader = ZipReader(self, ap, mode, buffering)
except:
raise errors.OperationFailure("Not found!")
else:
return reader
elif "w" in mode:
if ap in self.zf.namelist():
self._update(remove=[ap])
return ZipWriter(self, ap, mode, buffering)
else:
raise errors.OperationFailure("Unsupported mode!")
class ZipWriter(object):
"""utility class used for writing to a ZipFile."""
def __init__(self, root, fp, mode, buffering):
self.root = root
self.fp = fp
self.name = fp
self.buffering = buffering
self.mode = mode
self.sio = BytesIO()
self.closed = False
def close(self):
"""called on file close"""
if self.closed:
return
self.closed = True
content = self.sio.getvalue()
self.sio.close()
self.root.zf.writestr(self.fp, content)
def __getattr__(self, name):
return getattr(self.sio, name)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_tb):
self.close()
def __del__(self):
self.close()
class ZipReader(ZipWriter):
"""utility class for reading a file from a zip."""
def __init__(self, root, fp, mode, buffering):
self.root = root
self.fp = fp
self.name = fp
self.buffering = buffering
self.mode = mode
self.sio = BytesIO(self.root.zf.read(fp))
self.closed = False
def close(self):
if self.closed:
return
self.closed = True
self.sio.close()
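# Usage sketch (illustrative; ``log`` stands for whatever logger object
# stashutils.fsi.base.BaseFSI expects, and 'archive.zip' is a placeholder path):
#
#     fsi = ZipfileFSI(log)
#     fsi.connect('archive.zip')            # opens an existing zip or creates a new one
#     with fsi.open('notes.txt', 'w') as f:
#         f.write(b'hello')                 # buffered in memory, written on close
#     print(fsi.listdir('/'))
#     fsi.close()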
|
the-stack_106_20995
|
# -*- coding: utf-8 -*-
from .common import *
from ..test_program import ProgramTestBase
class NodeTreeCreatorTestCase(TestCase):
def build_xml(self, node):
xml_str = BlocklyXmlBuilder().build(node)
xml_str = xml_str.replace('<xml>', '<xml xmlns="http://www.w3.org/1999/xhtml">')
return xml_str
def build_dict(self, node):
xml_str = self.build_xml(node)
return BlocklyXmlParser().parse(xml_str)[0]
def tree_diff(self, tree1, tree2):
return cleanup_xml_ids(BlocklyXmlBuilder().build(tree1)) != cleanup_xml_ids(BlocklyXmlBuilder().build(tree2))
class NodeTreeCreatorTest(NodeTreeCreatorTestCase):
def test_test_case_diff(self):
tree1 = variable_assign_value()
tree2 = variable_assign_value()
tree3 = variable_assign_value(variable_name='B')
self.assertFalse(self.tree_diff(tree1, tree1))
self.assertFalse(self.tree_diff(tree1, tree2))
self.assertTrue(self.tree_diff(tree1, tree3))
def test_create_content_object(self):
tree1 = variable_assign_value()
dict1 = self.build_dict(tree1)
assignmet_data = dict1['data']
NodeTreeCreator().create_content_object(assignmet_data)
self.assertIn('object_id', assignmet_data)
Assignment.objects.get(id=assignmet_data['object_id'])
constant_data = dict1['children'][1]['data']
NodeTreeCreator().create_content_object(constant_data)
number_constantant = NumberConstant.objects.get(id=constant_data['object_id'])
self.assertEqual(1, number_constantant.value)
self.assertNotIn('value', constant_data)
def test_create_content_object_sould_not_create_variable_definition(self):
tree1 = variable_assign_value()
dict1 = self.build_dict(tree1)
variable_definitions = NodeTreeCreator().create_variable_definitions(dict1)
NodeTreeCreator().create_content_object(variable_definitions[0]['data'])
self.assertEqual(2, VariableDefinition.objects.count())
def test_create_variable_definitions(self):
tree1 = variable_assign_value()
dict1 = self.build_dict(tree1)
self.assertEqual(1, VariableDefinition.objects.count())
variable, = NodeTreeCreator().collect_objects(dict1, get_content_type_id(Variable))
variable_definitions = NodeTreeCreator().create_variable_definitions(dict1)
self.assertIsInstance(variable_definitions, list)
self.assertEqual(2, VariableDefinition.objects.count())
self.assertEqual(1, len(variable_definitions))
variable_definition, = variable_definitions
self.assertEqual(variable['data']['definition_id'], variable_definition['data']['object_id'])
self.assertEqual('A', VariableDefinition.objects.get(id=variable_definition['data']['object_id']).name)
self.assertEqual(get_content_type_id(VariableDefinition), variable_definition['data']['content_type'])
self.assertNotIn('name', variable['data'])
def test_collect_objects(self):
tree1 = variable_assign_value()
dict1 = self.build_dict(tree1)
dict2 = {'data': {}, 'children': [dict1]}
for data in (dict1, dict2):
objects = NodeTreeCreator().collect_objects(data, get_content_type_id(Assignment))
self.assertIsInstance(objects, list)
self.assertEqual([
dict1,
], objects)
objects = NodeTreeCreator().collect_objects(data, get_content_type_id(Variable))
self.assertIsInstance(objects, list)
self.assertEqual([
dict1['children'][0],
], objects)
def test_create_assignment(self):
tree1 = variable_assign_value()
dict1 = self.build_dict(tree1)
tree2 = NodeTreeCreator().create(dict1)
self.assertIsInstance(tree2, Node)
self.assertIsNot(tree1, tree2)
self.assertFalse(self.tree_diff(tree1, tree2))
def test_create_reference_constant(self):
tree1 = Node.add_root()
constant1 = ReferenceConstant.objects.create()
test_model1 = Model.objects.create()
node = tree1.add_child(content_object=constant1)
node.add_child(content_object=test_model1)
tree1 = Node.objects.get(id=tree1.id)
dict1 = self.build_dict(tree1)
tree2 = NodeTreeCreator().create(dict1)
self.assertIsInstance(tree2, Node)
self.assertIsNot(tree1, tree2)
self.assertFalse(self.tree_diff(tree1, tree2))
def test_create_date(self):
today = datetime.date.today()
tree1 = variable_assign_value(value=DateConstant(value=today))
dict1 = self.build_dict(tree1)
tree2 = NodeTreeCreator().create(dict1)
self.assertIsInstance(tree2, Node)
self.assertIsNot(tree1, tree2)
self.assertFalse(self.tree_diff(tree1, tree2))
def test_create_function(self):
function_definition = PythonCodeFunctionDefinition.objects.create(title='xxx')
tree1 = Node.add_root(content_object=Function(definition=function_definition))
tree1.add_child(content_object=NumberConstant(value=3))
tree1 = Node.objects.get(id=tree1.id)
dict1 = self.build_dict(tree1)
tree2 = NodeTreeCreator().create(dict1)
self.assertFalse(self.tree_diff(tree1, tree2))
class NodeTreeCreatorProgramVersionTest(ProgramTestBase, NodeTreeCreatorTestCase):
def test_create_variable_definitions_should_use_program_variable_definitions(self):
tree1 = variable_assign_value(variable_name='test_model.int_value')
dict1 = self.build_dict(tree1)
variable_definitions_count = VariableDefinition.objects.count()
external_variable_definitions = VariableDefinition.objects.filter(
Q(program_argument__program_interface=self.program_interface) |
Q(program_argument_field__program_argument__program_interface=self.program_interface)).order_by(
'name').distinct()
variable_definitions = NodeTreeCreator().create_variable_definitions(dict1, external_variable_definitions)
self.assertEqual(variable_definitions_count, VariableDefinition.objects.count())
self.assertEqual([], variable_definitions)
def test_create_variable_definitions_should_check_program_variable_definitions_type(self):
tree1 = variable_assign_value(variable_name='test_model.int_value')
dict1 = self.build_dict(tree1)
self.assertRaises(NodeTreeCreatorException, NodeTreeCreator().create_variable_definitions, dict1, [None])
self.assertRaises(NodeTreeCreatorException,
NodeTreeCreator().create_variable_definitions, dict1, [self.program_interface])
def test_create_should_check_program_version_type(self):
tree1 = variable_assign_value(variable_name='test_model.int_value')
dict1 = self.build_dict(tree1)
variable_definitions_count = VariableDefinition.objects.count()
self.assertRaises(NodeTreeCreatorException, NodeTreeCreator().create, dict1, self.program_interface)
self.assertEqual(variable_definitions_count, VariableDefinition.objects.count())
|
the-stack_106_20996
|
import logging
from FapgansControleBot.Exceptions.database_exceptions import NoResult
from FapgansControleBot.Models.credit import Credit
from FapgansControleBot.Models.gans import Gans
from FapgansControleBot.Models.user import User
from FapgansControleBot.Repository.i_unit_of_work import IUnitOfWork
from FapgansControleBot.Services.user_service import UserService
from FapgansControleBot.Views.WarningView import WarningView
logger = logging.getLogger(__name__)
class FapgansService:
def __init__(self, unit_of_work: IUnitOfWork, warning_view: WarningView, user_service: UserService):
self.unit_of_work = unit_of_work
self.warning_view = warning_view
self.user_service = user_service
def handle_fapgans(self, tg_user_id: int, tg_username: str, chat_id: int):
user = self.user_service.find_user_or_register(tg_user_id, tg_username)
gans = self.register_gans(user)
self.is_valid_gans(chat_id, user, gans)
def is_valid_gans(self, chat_id: int, user: User, gans: Gans) -> bool:
try:
current_period: Credit = self.current_gans_period()
except NoResult:
logger.info("Not in a gans period")
self.warning_view.not_in_gans_period(chat_id, user)
return False
gans.credit_id = current_period.credit_id
amount_of_ganzen = self.amount_of_ganzen_in_credit(user.user_id, current_period.credit_id)
if amount_of_ganzen > current_period.amount_of_stickers:
logger.info(f'User ({user.user_username}) sent too many fapganzen')
self.warning_view.too_many_ganzen(chat_id, user, amount_of_ganzen)
return False
return True
def current_gans_period(self) -> Credit:
return self.unit_of_work.get_credit_repository().active_gans_periods()
def amount_of_ganzen_in_credit(self, user_id: int, credit_id: int) -> int:
return self.unit_of_work.get_gans_repository().amount_of_ganzen_by_user_id(user_id, credit_id)
def register_gans(self, user: User) -> Gans:
fapgans = Gans(user.user_id)
self.unit_of_work.get_gans_repository().add(fapgans)
return fapgans
def start_gans_period(self, start_price):
try:
result: Credit = self.unit_of_work.get_credit_repository().find_credit_by_price(start_price)
except NoResult:
logger.info("No credit at this price!")
return
result.start()
self.unit_of_work.complete()
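# Usage sketch (illustrative; the constructor arguments are concrete
# implementations of IUnitOfWork, WarningView and UserService wired up elsewhere,
# and the Telegram ids below are made up):
#
#     service = FapgansService(unit_of_work, warning_view, user_service)
#     service.handle_fapgans(tg_user_id=12345, tg_username='alice', chat_id=-1001)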
|
the-stack_106_20997
|
"""Read CSV file from 2021 Digital Science Maturity and Skills Survey
and rank skills by importance.
Example:
python rank_skills.py DSMS_Survey_20210803.csv
The file argument is as downloaded (All Responses Data) from the
GA Digital Science Maturity and Skills Deep Dive on Survey Monkey.
The script is specific to this survey only.
Ole Nielsen - 2021
"""
import sys
import pandas
import numpy
from skills_analysis_helpers import extract_data,\
SFIA_abbreviations, response_values, print_sorted_skills
# Get filename
if len(sys.argv) != 2:
msg = 'Filename with survey data must be supplied as command line argument'
raise Exception(msg)
filename = sys.argv[1]
# Read data from CSV file
# There are three responses for each skill:
# - How much it is needed,
# - How well we can access it and
# - How sustainable is it.
# Therefore we read three columns for each skill.
skills_dict = {} # Dictionary to keep track of skills and rankings
dataframe = pandas.read_csv(filename) # Input data
# Find the number of respondents. The Survey Monkey export has two header rows;
# pandas consumes the first as the column header, so subtract the remaining one
# from the row count.
number_of_respondents = dataframe.shape[0] - 1
print(f'Number of respondents: {number_of_respondents}')
number_of_responses = -1 # Flag keeping track of columns
for skill in SFIA_abbreviations:
# Collect responses for each skill
# Find columns with responses for this skill.
# This is done through linear searching, but data is small
# so it doesn't pose a performance issue.
additional_columns_to_collect = 0
for item in dataframe.items():
if additional_columns_to_collect > 0:
# Collect remaining responses for current skill
if additional_columns_to_collect == 2:
entry['ACCESS'] = extract_data('ACCESS', item)
if additional_columns_to_collect == 1:
entry['SUSTAIN'] = extract_data('SUSTAIN', item)
additional_columns_to_collect -= 1
if skill in item[0]:
# Collect first response for this skill
additional_columns_to_collect = 2
skills_dict[skill] = {} # Create new entry
entry = skills_dict[skill] # Shorthand for this entry
entry['NEED'] = extract_data('NEED', item)
# Record number of responses and test that it is the same across
# all columns.
if number_of_responses == -1:
number_of_responses = len(entry['NEED'])
else:
msg = 'Number of responses were not the same across this data'
assert number_of_responses == len(entry['NEED']), msg
# Convert responses to numerical values
responses = {}
for skill in SFIA_abbreviations:
responses[skill] = {} # Create new entry for this skill
skill_response = skills_dict[skill] # Responses for this skill
for key in skill_response:
responses[skill][key] = numpy.zeros(number_of_responses)
for i, response in enumerate(skill_response[key]):
# Make sure all indices are strings and get value
# from response values
val = response_values[str(response)]
responses[skill][key][i] = val
# Calculate, for each skill, the skills gap (G) and the sustainability gap (U)
# from the mean responses:
#
#   G = mean(N) - min(mean(A), mean(S))
#   U = mean(A) - mean(S)
#
# where
#   N is how much the skill is needed
#   A is how much access we have to it
#   S is how sustainable that access is
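#
# For example, with (hypothetical) mean responses N = 4.0, A = 3.0 and S = 2.5,
# the skills gap is G = 4.0 - min(3.0, 2.5) = 1.5 and the sustainability gap is
# U = 3.0 - 2.5 = 0.5.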
skills_gap = {}
need = {}
unsustainable_skills = {}
for skill in SFIA_abbreviations:
response = responses[skill]
N = response['NEED']
A = response['ACCESS']
S = response['SUSTAIN']
# Remove "N/A" and Don't Know
N = N[~numpy.isnan(N)]
A = A[~numpy.isnan(A)]
S = S[~numpy.isnan(S)]
# Find the mean values of each metric
N = numpy.mean(N)
A = numpy.mean(A)
S = numpy.mean(S)
# Calculate Gaps
G = N - min(A, S)
U = A - S
# Save the values
skills_gap[skill] = G
need[skill] = N
unsustainable_skills[skill] = U
print_sorted_skills(need, label='need')
print_sorted_skills(skills_gap, label='gap')
print_sorted_skills(unsustainable_skills, label='unsustainability')
|