filename (string, 13–19 chars) | text (string, 134–1.04M chars)
---|---|
the-stack_106_17534
|
# rasterio
from collections import namedtuple
import logging
import os
import warnings
from rasterio._base import eval_window, window_shape, window_index
from rasterio._drivers import driver_count, GDALEnv
import rasterio.dtypes
from rasterio.dtypes import (
bool_, ubyte, uint8, uint16, int16, uint32, int32, float32, float64,
complex_)
from rasterio.five import string_types
from rasterio.transform import Affine, guard_transform
# Classes in rasterio._io are imported below just before we need them.
__all__ = [
'band', 'open', 'drivers', 'copy', 'pad']
__version__ = "0.15.1"
log = logging.getLogger('rasterio')
class NullHandler(logging.Handler):
def emit(self, record):
pass
log.addHandler(NullHandler())
def open(
path, mode='r',
driver=None,
width=None, height=None,
count=None,
crs=None, transform=None,
dtype=None,
nodata=None,
**kwargs):
"""Open file at ``path`` in ``mode`` "r" (read), "r+" (read/write),
or "w" (write) and return a ``Reader`` or ``Updater`` object.
In write mode, a driver name such as "GTiff" or "JPEG" (see GDAL
docs or ``gdal_translate --help`` on the command line), ``width``
(number of pixels per line), ``height`` (number of lines), and
``count`` (the number of bands) of the new file must be specified.
Additionally, the data type for bands such as ``rasterio.ubyte`` for
8-bit bands or ``rasterio.uint16`` for 16-bit bands must be
specified using the ``dtype`` argument.
A coordinate reference system for raster datasets in write mode can
be defined by the ``crs`` argument. It takes Proj4 style mappings
like
{'proj': 'longlat', 'ellps': 'WGS84', 'datum': 'WGS84',
'no_defs': True}
An affine transformation that maps ``col,row`` pixel coordinates to
``x,y`` coordinates in the coordinate reference system can be
specified using the ``transform`` argument. The value may be either
an instance of ``affine.Affine`` or a 6-element sequence of the
affine transformation matrix coefficients ``a, b, c, d, e, f``.
These coefficients are shown in the figure below.
| x | | a b c | | c |
| y | = | d e f | | r |
| 1 | | 0 0 1 | | 1 |
a: rate of change of X with respect to increasing column, i.e.
pixel width
b: rotation, 0 if the raster is oriented "north up"
c: X coordinate of the top left corner of the top left pixel
d: rotation, 0 if the raster is oriented "north up"
e: rate of change of Y with respect to increasing row, usually
a negative number i.e. -1 * pixel height
f: Y coordinate of the top left corner of the top left pixel
Finally, additional kwargs are passed to GDAL as driver-specific
dataset creation parameters.
"""
if not isinstance(path, string_types):
raise TypeError("invalid path: %r" % path)
if mode and not isinstance(mode, string_types):
raise TypeError("invalid mode: %r" % mode)
if driver and not isinstance(driver, string_types):
raise TypeError("invalid driver: %r" % driver)
if mode in ('r', 'r+'):
if not os.path.exists(path):
raise IOError("no such file or directory: %r" % path)
if transform:
transform = guard_transform(transform)
if mode == 'r':
from rasterio._io import RasterReader
s = RasterReader(path)
elif mode == 'r+':
from rasterio._io import writer
s = writer(path, mode)
elif mode == 'r-':
from rasterio._base import DatasetReader
s = DatasetReader(path)
elif mode == 'w':
from rasterio._io import writer
s = writer(path, mode, driver=driver,
width=width, height=height, count=count,
crs=crs, transform=transform, dtype=dtype,
nodata=nodata,
**kwargs)
else:
raise ValueError(
"mode string must be one of 'r', 'r+', or 'w', not %s" % mode)
s.start()
return s
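# A minimal usage sketch for ``open()`` in write mode (hypothetical file name and
# coordinate values; the band-level ``write_band`` call follows the pre-1.0
# rasterio API this module belongs to, so treat it as an assumption):
#
#   import numpy as np
#   import rasterio
#   from affine import Affine
#
#   data = np.zeros((256, 256), dtype=rasterio.ubyte)
#   transform = Affine(30.0, 0.0, 100000.0, 0.0, -30.0, 200000.0)
#   with rasterio.open(
#           "example.tif", "w", driver="GTiff",
#           width=256, height=256, count=1, dtype=rasterio.ubyte,
#           crs={'proj': 'longlat', 'ellps': 'WGS84', 'datum': 'WGS84',
#                'no_defs': True},
#           transform=transform) as dst:
#       dst.write_band(1, data)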
def copy(src, dst, **kw):
"""Copy a source dataset to a new destination with driver specific
creation options.
``src`` must be an existing file and ``dst`` a valid output file.
A ``driver`` keyword argument with value like 'GTiff' or 'JPEG' is
used to control the output format.
This is the one way to create write-once files like JPEGs.
"""
from rasterio._copy import RasterCopier
with drivers():
return RasterCopier()(src, dst, **kw)
def drivers(**kwargs):
"""Returns a gdal environment with registered drivers."""
if driver_count() == 0:
log.debug("Creating a chief GDALEnv in drivers()")
return GDALEnv(True, **kwargs)
else:
log.debug("Creating a not-responsible GDALEnv in drivers()")
return GDALEnv(False, **kwargs)
Band = namedtuple('Band', ['ds', 'bidx', 'dtype', 'shape'])
def band(ds, bidx):
"""Wraps a dataset and a band index up as a 'Band'"""
return Band(
ds,
bidx,
set(ds.dtypes).pop(),
ds.shape)
def pad(array, transform, pad_width, mode=None, **kwargs):
"""Returns a padded array and shifted affine transform matrix.
Array is padded using `numpy.pad()`."""
import numpy
transform = guard_transform(transform)
padded_array = numpy.pad(array, pad_width, mode, **kwargs)
padded_trans = list(transform)
padded_trans[2] -= pad_width*padded_trans[0]
padded_trans[5] -= pad_width*padded_trans[4]
return padded_array, Affine(*padded_trans[:6])
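# A minimal sketch of ``pad()`` (illustrative values; assumes numpy and the
# ``affine`` package used above):
#
#   import numpy as np
#   from affine import Affine
#   from rasterio import pad
#
#   arr = np.ones((4, 4))
#   t = Affine(1.0, 0.0, 10.0, 0.0, -1.0, 20.0)   # 1-unit pixels, origin (10, 20)
#   padded, padded_t = pad(arr, t, pad_width=2, mode="constant")
#   # padded is 8x8; the origin shifts out by 2 pixels on each axis:
#   # padded_t.c == 8.0 and padded_t.f == 22.0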
|
the-stack_106_17535
|
"""
URL: https://codeforces.com/problemset/problem/1417/A
Author: Safiul Kabir [safiulanik at gmail.com]
Tags: greedy, math, *800
"""
t = int(input())
for _ in range(t):
n, k = map(int, input().split())
a = list(map(int, input().split()))
a.sort()
count = 0
for i in range(1, n):
if a[i] <= k:
while a[0] + a[i] <= k:
a[i] += a[0]
count += 1
print(count)
|
the-stack_106_17536
|
#
# Copyright (c) 2020 Idiap Research Institute, http://www.idiap.ch/
# Written by Angelos Katharopoulos <[email protected]>,
# Apoorv Vyas <[email protected]>
#
"""Similar to the corresponding module in fast_transformers.attention, this
module performs all the query, key, value projections and output projections
leaving the implementation of the attention to the inner attention module."""
from torch.nn import Linear, Module
from ....events import EventDispatcher
from ..._utils import check_state
class RecurrentAttentionLayer(Module):
"""See fast_transformers.attention.attention_layer.AttentionLayer.
The only difference with the corresponding module is that this projects
only one input and then calls the inner attention with the provided
previous state.
Arguments
---------
attention: Specific inner attention implementation that just computes a
weighted average of values given a similarity of queries and
keys.
d_model: The input feature dimensionality
n_heads: The number of heads for the multi head attention
d_keys: The dimensionality of the keys/queries
(default: d_model/n_heads)
d_values: The dimensionality of the values (default: d_model/n_heads)
event_dispatcher: str or EventDispatcher instance to be used by this
module for dispatching events (default: the default
global dispatcher)
"""
def __init__(self, attention, d_model, n_heads, d_keys=None,
d_values=None, d_model_keys=None, event_dispatcher=""):
super(RecurrentAttentionLayer, self).__init__()
# Fill d_keys and d_values
d_keys = d_keys or (d_model//n_heads)
d_values = d_values or (d_model//n_heads)
d_model_keys = d_model_keys or d_model
self.inner_attention = attention
self.query_projection = Linear(d_model, d_keys * n_heads)
self.key_projection = Linear(d_model_keys, d_keys * n_heads)
self.value_projection = Linear(d_model_keys, d_values * n_heads)
self.out_projection = Linear(d_values * n_heads, d_model)
self.n_heads = n_heads
self.event_dispatcher = EventDispatcher.get(event_dispatcher)
def forward(self, query, key, value, state=None, memory=None):
"""Apply attention to the passed in query/key/value after projecting
them to multiple heads.
In the argument description we make use of the following sizes
- N: the batch size
- D: The input feature dimensionality passed in the constructor as
'd_model'
Arguments
---------
query: (N, D) The tensor containing the queries
key: (N, D) The tensor containing the keys
value: (N, D) The tensor containing the values
state: The state varies depending on the inner attention implementation
memory: **Deprecated** and replaced by state
Returns
-------
The new value for each query as a tensor of shape (N, D).
"""
# Normalize the state/memory
state = check_state(state, memory)
# Project the queries/keys/values
query = self.query_projection(query)
key = self.key_projection(key)
value = self.value_projection(value)
# Reshape them into many heads and compute the attention
N, D = query.shape
H = self.n_heads
new_value, state = self.inner_attention(
query.view(N, H, -1),
key.view(N, H, -1),
value.view(N, H, -1),
state
)
new_value = new_value.view(N, -1)
# Project the output and return
return self.out_projection(new_value), state
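# A minimal usage sketch (DummyAttention below is a hypothetical stand-in for a
# real inner attention module; it only illustrates the expected shapes and the
# (new_value, state) return convention):
#
#   import torch
#   from torch.nn import Module
#
#   class DummyAttention(Module):
#       def forward(self, q, k, v, state=None):
#           # q, k, v: (N, H, E); return the values unchanged plus the state
#           return v, state
#
#   layer = RecurrentAttentionLayer(DummyAttention(), d_model=64, n_heads=4)
#   x = torch.randn(2, 64)                    # (N, D)
#   out, state = layer(x, x, x, state=None)   # out has shape (2, 64)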
|
the-stack_106_17537
|
# Copyright (c) 2015-2016, 2018, 2020 Claudiu Popa <[email protected]>
# Copyright (c) 2016 Ceridwen <[email protected]>
# Copyright (c) 2017 Roy Wright <[email protected]>
# Copyright (c) 2018 Ashley Whetter <[email protected]>
# Copyright (c) 2019 Antoine Boellinger <[email protected]>
# Copyright (c) 2020-2021 hippo91 <[email protected]>
# Copyright (c) 2021 Pierre Sassoulas <[email protected]>
# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
# For details: https://github.com/PyCQA/astroid/blob/master/LICENSE
"""Astroid hooks for the PyQT library."""
from astroid import nodes, parse
from astroid.brain.helpers import register_module_extender
from astroid.builder import AstroidBuilder
from astroid.manager import AstroidManager
def _looks_like_signal(node, signal_name="pyqtSignal"):
if "__class__" in node.instance_attrs:
try:
cls = node.instance_attrs["__class__"][0]
return cls.name == signal_name
except AttributeError:
# return False if the cls does not have a name attribute
pass
return False
def transform_pyqt_signal(node):
module = parse(
"""
class pyqtSignal(object):
def connect(self, slot, type=None, no_receiver_check=False):
pass
def disconnect(self, slot):
pass
def emit(self, *args):
pass
"""
)
signal_cls = module["pyqtSignal"]
node.instance_attrs["emit"] = signal_cls["emit"]
node.instance_attrs["disconnect"] = signal_cls["disconnect"]
node.instance_attrs["connect"] = signal_cls["connect"]
def transform_pyside_signal(node):
module = parse(
"""
class NotPySideSignal(object):
def connect(self, receiver, type=None):
pass
def disconnect(self, receiver):
pass
def emit(self, *args):
pass
"""
)
signal_cls = module["NotPySideSignal"]
node.instance_attrs["connect"] = signal_cls["connect"]
node.instance_attrs["disconnect"] = signal_cls["disconnect"]
node.instance_attrs["emit"] = signal_cls["emit"]
def pyqt4_qtcore_transform():
return AstroidBuilder(AstroidManager()).string_build(
"""
def SIGNAL(signal_name): pass
class QObject(object):
def emit(self, signal): pass
"""
)
register_module_extender(AstroidManager(), "PyQt4.QtCore", pyqt4_qtcore_transform)
AstroidManager().register_transform(
nodes.FunctionDef, transform_pyqt_signal, _looks_like_signal
)
AstroidManager().register_transform(
nodes.ClassDef,
transform_pyside_signal,
lambda node: node.qname() in ("PySide.QtCore.Signal", "PySide2.QtCore.Signal"),
)
|
the-stack_106_17538
|
from unittest import mock
import pytest
from h.search import (
DeletedFilter,
Limiter,
Search,
TopLevelAnnotationsFilter,
UserFilter,
)
from h.services.annotation_stats import AnnotationStatsService, annotation_stats_factory
class TestAnnotationStatsService:
def test_total_user_annotation_count_calls_search_with_request(
self, svc, search, pyramid_request
):
svc.total_user_annotation_count("userid")
search.assert_called_with(pyramid_request)
def test_total_user_annotation_count_calls_run_with_userid_and_limit(
self, svc, search
):
svc.total_user_annotation_count("userid")
search.return_value.run.assert_called_with({"limit": 0, "user": "userid"})
def test_total_user_annotation_count_attaches_correct_modifiers(
self, svc, search, limiter, deleted_filter, user_filter
):
svc.total_user_annotation_count("userid")
assert search.return_value.clear.called
assert search.return_value.append_modifier.call_count == 3
search.return_value.append_modifier.assert_has_calls(
[
mock.call(limiter.return_value),
mock.call(deleted_filter.return_value),
mock.call(user_filter.return_value),
]
)
def test_total_user_annotation_count_returns_total(self, svc, search):
search.return_value.run.return_value.total = 3
anns = svc.total_user_annotation_count("userid")
assert anns == 3
def test_user_annotation_count_calls_search_with_request(
self, svc, search, pyramid_request
):
svc.user_annotation_count("userid")
search.assert_called_with(pyramid_request)
def test_user_annotation_count_calls_run_with_userid_and_limit(self, svc, search):
svc.user_annotation_count("userid")
search.return_value.run.assert_called_with({"limit": 0, "user": "userid"})
def test_user_annotation_count_excludes_replies(
self, svc, search, top_level_annotation_filter
):
svc.user_annotation_count("userid")
search.return_value.append_modifier.assert_called_with(
top_level_annotation_filter.return_value
)
def test_user_annotation_count_returns_total(self, svc, search):
search.return_value.run.return_value.total = 3
anns = svc.user_annotation_count("userid")
assert anns == 3
def test_group_annotation_count_calls_search_with_request(
self, svc, search, pyramid_request
):
svc.group_annotation_count("groupid")
search.assert_called_with(pyramid_request)
def test_group_annotation_count_calls_run_with_groupid_and_limit(self, svc, search):
svc.group_annotation_count("groupid")
search.return_value.run.assert_called_with({"limit": 0, "group": "groupid"})
def test_group_annotation_count_excludes_replies(
self, svc, search, top_level_annotation_filter
):
svc.group_annotation_count("groupid")
search.return_value.append_modifier.assert_called_with(
top_level_annotation_filter.return_value
)
def test_group_annotation_count_returns_total(self, svc, search):
search.return_value.run.return_value.total = 3
anns = svc.group_annotation_count("groupid")
assert anns == 3
class TestAnnotationStatsFactory:
def test_returns_service(self):
svc = annotation_stats_factory(mock.Mock(), mock.Mock())
assert isinstance(svc, AnnotationStatsService)
def test_sets_request(self):
request = mock.Mock()
svc = annotation_stats_factory(mock.Mock(), request)
assert svc.request == request
@pytest.fixture
def svc(pyramid_request):
return AnnotationStatsService(request=pyramid_request)
@pytest.fixture
def search(patch):
return patch("h.services.annotation_stats.Search", autospec=Search, spec_set=True)
@pytest.fixture
def top_level_annotation_filter(patch):
return patch(
"h.services.annotation_stats.TopLevelAnnotationsFilter",
autospec=TopLevelAnnotationsFilter,
spec_set=True,
)
@pytest.fixture
def limiter(patch):
return patch("h.services.annotation_stats.Limiter", autospec=Limiter, spec_set=True)
@pytest.fixture
def deleted_filter(patch):
return patch(
"h.services.annotation_stats.DeletedFilter",
autospec=DeletedFilter,
spec_set=True,
)
@pytest.fixture
def user_filter(patch):
return patch(
"h.services.annotation_stats.UserFilter", autospec=UserFilter, spec_set=True
)
|
the-stack_106_17540
|
"""
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import s1ap_types
import time
from integ_tests.s1aptests import s1ap_wrapper
from integ_tests.s1aptests.s1ap_utils import SpgwUtil
class TestAttachDetachDedicatedInvalidImsi(unittest.TestCase):
def setUp(self):
self._s1ap_wrapper = s1ap_wrapper.TestWrapper()
self._spgw_util = SpgwUtil()
def tearDown(self):
self._s1ap_wrapper.cleanup()
def test_attach_detach(self):
""" attach/detach + invalid IMSI in dedicated bearer test with a
single UE """
num_ues = 1
detach_type = [s1ap_types.ueDetachType_t.UE_NORMAL_DETACH.value,
s1ap_types.ueDetachType_t.UE_SWITCHOFF_DETACH.value]
wait_for_s1 = [True, False]
self._s1ap_wrapper.configUEDevice(num_ues)
for i in range(num_ues):
req = self._s1ap_wrapper.ue_req
print("********************** Running End to End attach for ",
"UE id ", req.ue_id)
# Now actually complete the attach
self._s1ap_wrapper._s1_util.attach(
req.ue_id, s1ap_types.tfwCmd.UE_END_TO_END_ATTACH_REQUEST,
s1ap_types.tfwCmd.UE_ATTACH_ACCEPT_IND,
s1ap_types.ueAttachAccept_t)
# Wait on EMM Information from MME
self._s1ap_wrapper._s1_util.receive_emm_info()
time.sleep(5)
print("********************** Adding dedicated bearer to IMSI",
''.join('001010000000004'))
# Send wrong IMSI
self._spgw_util.create_bearer(
'IMSI' + ''.join('001010000000004'), 5)
time.sleep(5)
print("********************** Running UE detach for UE id ",
req.ue_id)
# Now detach the UE
self._s1ap_wrapper.s1_util.detach(
req.ue_id, detach_type[i], wait_for_s1[i])
if __name__ == "__main__":
unittest.main()
|
the-stack_106_17541
|
#
# The Python Imaging Library.
# $Id$
#
# BMP file handler
#
# Windows (and OS/2) native bitmap storage format.
#
# history:
# 1995-09-01 fl Created
# 1996-04-30 fl Added save
# 1997-08-27 fl Fixed save of 1-bit images
# 1998-03-06 fl Load P images as L where possible
# 1998-07-03 fl Load P images as 1 where possible
# 1998-12-29 fl Handle small palettes
# 2002-12-30 fl Fixed load of 1-bit palette images
# 2003-04-21 fl Fixed load of 1-bit monochrome images
# 2003-04-23 fl Added limited support for BI_BITFIELDS compression
#
# Copyright (c) 1997-2003 by Secret Labs AB
# Copyright (c) 1995-2003 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
from . import Image, ImageFile, ImagePalette
from ._binary import i8, i16le as i16, i32le as i32, \
o8, o16le as o16, o32le as o32
import math
__version__ = "0.7"
#
# --------------------------------------------------------------------
# Read BMP file
BIT2MODE = {
# bits => mode, rawmode
1: ("P", "P;1"),
4: ("P", "P;4"),
8: ("P", "P"),
16: ("RGB", "BGR;15"),
24: ("RGB", "BGR"),
32: ("RGB", "BGRX"),
}
def _accept(prefix):
return prefix[:2] == b"BM"
# ==============================================================================
# Image plugin for the Windows BMP format.
# ==============================================================================
class BmpImageFile(ImageFile.ImageFile):
""" Image plugin for the Windows Bitmap format (BMP) """
# -------------------------------------------------------------- Description
format_description = "Windows Bitmap"
format = "BMP"
# --------------------------------------------------- BMP Compression values
COMPRESSIONS = {'RAW': 0, 'RLE8': 1, 'RLE4': 2, 'BITFIELDS': 3, 'JPEG': 4, 'PNG': 5}
RAW, RLE8, RLE4, BITFIELDS, JPEG, PNG = 0, 1, 2, 3, 4, 5
def _bitmap(self, header=0, offset=0):
""" Read relevant info about the BMP """
read, seek = self.fp.read, self.fp.seek
if header:
seek(header)
file_info = {}
file_info['header_size'] = i32(read(4)) # read bmp header size @offset 14 (this is part of the header size)
file_info['direction'] = -1
# --------------------- If requested, read header at a specific position
header_data = ImageFile._safe_read(self.fp, file_info['header_size'] - 4) # read the rest of the bmp header, without its size
# --------------------------------------------------- IBM OS/2 Bitmap v1
# ------ This format has different offsets because of width/height types
if file_info['header_size'] == 12:
file_info['width'] = i16(header_data[0:2])
file_info['height'] = i16(header_data[2:4])
file_info['planes'] = i16(header_data[4:6])
file_info['bits'] = i16(header_data[6:8])
file_info['compression'] = self.RAW
file_info['palette_padding'] = 3
# ---------------------------------------------- Windows Bitmap v2 to v5
elif file_info['header_size'] in (40, 64, 108, 124): # v3, OS/2 v2, v4, v5
if file_info['header_size'] >= 40: # v3 and OS/2
file_info['y_flip'] = i8(header_data[7]) == 0xff
file_info['direction'] = 1 if file_info['y_flip'] else -1
file_info['width'] = i32(header_data[0:4])
file_info['height'] = i32(header_data[4:8]) if not file_info['y_flip'] else 2**32 - i32(header_data[4:8])
file_info['planes'] = i16(header_data[8:10])
file_info['bits'] = i16(header_data[10:12])
file_info['compression'] = i32(header_data[12:16])
file_info['data_size'] = i32(header_data[16:20]) # byte size of pixel data
file_info['pixels_per_meter'] = (i32(header_data[20:24]), i32(header_data[24:28]))
file_info['colors'] = i32(header_data[28:32])
file_info['palette_padding'] = 4
self.info["dpi"] = tuple(
map(lambda x: int(math.ceil(x / 39.3701)),
file_info['pixels_per_meter']))
if file_info['compression'] == self.BITFIELDS:
if len(header_data) >= 52:
for idx, mask in enumerate(['r_mask', 'g_mask', 'b_mask', 'a_mask']):
file_info[mask] = i32(header_data[36+idx*4:40+idx*4])
else:
# 40 byte headers only have the three components in the bitfields masks,
# ref: https://msdn.microsoft.com/en-us/library/windows/desktop/dd183376(v=vs.85).aspx
# See also https://github.com/python-pillow/Pillow/issues/1293
# There is a 4th component in the RGBQuad, in the alpha location, but it
# is listed as a reserved component, and it is not generally an alpha channel
file_info['a_mask'] = 0x0
for mask in ['r_mask', 'g_mask', 'b_mask']:
file_info[mask] = i32(read(4))
file_info['rgb_mask'] = (file_info['r_mask'], file_info['g_mask'], file_info['b_mask'])
file_info['rgba_mask'] = (file_info['r_mask'], file_info['g_mask'], file_info['b_mask'], file_info['a_mask'])
else:
raise IOError("Unsupported BMP header type (%d)" % file_info['header_size'])
# ------------------ Special case : header is reported 40, which
# ---------------------- is shorter than real size for bpp >= 16
self.size = file_info['width'], file_info['height']
# -------- If color count was not found in the header, compute from bits
file_info['colors'] = file_info['colors'] if file_info.get('colors', 0) else (1 << file_info['bits'])
# -------------------------------- Check abnormal values for DOS attacks
if file_info['width'] * file_info['height'] > 2**31:
raise IOError("Unsupported BMP Size: (%dx%d)" % self.size)
# ----------------------- Check bit depth for unusual unsupported values
self.mode, raw_mode = BIT2MODE.get(file_info['bits'], (None, None))
if self.mode is None:
raise IOError("Unsupported BMP pixel depth (%d)" % file_info['bits'])
# ----------------- Process BMP with Bitfields compression (not palette)
if file_info['compression'] == self.BITFIELDS:
SUPPORTED = {
32: [(0xff0000, 0xff00, 0xff, 0x0), (0xff0000, 0xff00, 0xff, 0xff000000), (0x0, 0x0, 0x0, 0x0), (0xff000000, 0xff0000, 0xff00, 0x0)],
24: [(0xff0000, 0xff00, 0xff)],
16: [(0xf800, 0x7e0, 0x1f), (0x7c00, 0x3e0, 0x1f)]
}
MASK_MODES = {
(32, (0xff0000, 0xff00, 0xff, 0x0)): "BGRX",
(32, (0xff000000, 0xff0000, 0xff00, 0x0)): "XBGR",
(32, (0xff0000, 0xff00, 0xff, 0xff000000)): "BGRA",
(32, (0x0, 0x0, 0x0, 0x0)): "BGRA",
(24, (0xff0000, 0xff00, 0xff)): "BGR",
(16, (0xf800, 0x7e0, 0x1f)): "BGR;16",
(16, (0x7c00, 0x3e0, 0x1f)): "BGR;15"
}
if file_info['bits'] in SUPPORTED:
if file_info['bits'] == 32 and file_info['rgba_mask'] in SUPPORTED[file_info['bits']]:
raw_mode = MASK_MODES[(file_info['bits'], file_info['rgba_mask'])]
self.mode = "RGBA" if raw_mode in ("BGRA",) else self.mode
elif file_info['bits'] in (24, 16) and file_info['rgb_mask'] in SUPPORTED[file_info['bits']]:
raw_mode = MASK_MODES[(file_info['bits'], file_info['rgb_mask'])]
else:
raise IOError("Unsupported BMP bitfields layout")
else:
raise IOError("Unsupported BMP bitfields layout")
elif file_info['compression'] == self.RAW:
if file_info['bits'] == 32 and header == 22: # 32-bit .cur offset
raw_mode, self.mode = "BGRA", "RGBA"
else:
raise IOError("Unsupported BMP compression (%d)" % file_info['compression'])
# ---------------- Once the header is processed, process the palette/LUT
if self.mode == "P": # Paletted for 1, 4 and 8 bit images
# ----------------------------------------------------- 1-bit images
if not (0 < file_info['colors'] <= 65536):
raise IOError("Unsupported BMP Palette size (%d)" % file_info['colors'])
else:
padding = file_info['palette_padding']
palette = read(padding * file_info['colors'])
greyscale = True
indices = (0, 255) if file_info['colors'] == 2 else list(range(file_info['colors']))
# ------------------ Check if greyscale and ignore palette if so
for ind, val in enumerate(indices):
rgb = palette[ind*padding:ind*padding + 3]
if rgb != o8(val) * 3:
greyscale = False
# -------- If all colors are grey, white or black, ditch palette
if greyscale:
self.mode = "1" if file_info['colors'] == 2 else "L"
raw_mode = self.mode
else:
self.mode = "P"
self.palette = ImagePalette.raw("BGRX" if padding == 4 else "BGR", palette)
# ----------------------------- Finally set the tile data for the plugin
self.info['compression'] = file_info['compression']
self.tile = [('raw', (0, 0, file_info['width'], file_info['height']), offset or self.fp.tell(),
(raw_mode, ((file_info['width'] * file_info['bits'] + 31) >> 3) & (~3), file_info['direction'])
)]
def _open(self):
""" Open file, check magic number and read header """
# read 14 bytes: magic number, filesize, reserved, header final offset
head_data = self.fp.read(14)
# choke if the file does not have the required magic bytes
if head_data[0:2] != b"BM":
raise SyntaxError("Not a BMP file")
# read the start position of the BMP image data (u32)
offset = i32(head_data[10:14])
# load bitmap information (offset=raster info)
self._bitmap(offset=offset)
# ==============================================================================
# Image plugin for the DIB format (BMP alias)
# ==============================================================================
class DibImageFile(BmpImageFile):
format = "DIB"
format_description = "Windows Bitmap"
def _open(self):
self._bitmap()
#
# --------------------------------------------------------------------
# Write BMP file
SAVE = {
"1": ("1", 1, 2),
"L": ("L", 8, 256),
"P": ("P", 8, 256),
"RGB": ("BGR", 24, 0),
"RGBA": ("BGRA", 32, 0),
}
def _save(im, fp, filename):
try:
rawmode, bits, colors = SAVE[im.mode]
except KeyError:
raise IOError("cannot write mode %s as BMP" % im.mode)
info = im.encoderinfo
dpi = info.get("dpi", (96, 96))
# 1 meter == 39.3701 inches
ppm = tuple(map(lambda x: int(x * 39.3701), dpi))
stride = ((im.size[0]*bits+7)//8+3) & (~3)
header = 40 # or 64 for OS/2 version 2
offset = 14 + header + colors * 4
image = stride * im.size[1]
# bitmap header
fp.write(b"BM" + # file type (magic)
o32(offset+image) + # file size
o32(0) + # reserved
o32(offset)) # image data offset
# bitmap info header
fp.write(o32(header) + # info header size
o32(im.size[0]) + # width
o32(im.size[1]) + # height
o16(1) + # planes
o16(bits) + # depth
o32(0) + # compression (0=uncompressed)
o32(image) + # size of bitmap
o32(ppm[0]) + o32(ppm[1]) + # resolution
o32(colors) + # colors used
o32(colors)) # colors important
fp.write(b"\0" * (header - 40)) # padding (for OS/2 format)
if im.mode == "1":
for i in (0, 255):
fp.write(o8(i) * 4)
elif im.mode == "L":
for i in range(256):
fp.write(o8(i) * 4)
elif im.mode == "P":
fp.write(im.im.getpalette("RGB", "BGRX"))
ImageFile._save(im, fp, [("raw", (0, 0)+im.size, 0,
(rawmode, stride, -1))])
#
# --------------------------------------------------------------------
# Registry
Image.register_open(BmpImageFile.format, BmpImageFile, _accept)
Image.register_save(BmpImageFile.format, _save)
Image.register_extension(BmpImageFile.format, ".bmp")
Image.register_mime(BmpImageFile.format, "image/bmp")
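# A minimal round-trip sketch using the plugin registered above (hypothetical
# file name; relies only on the public PIL.Image API):
#
#   from PIL import Image
#   im = Image.new("RGB", (8, 8), (255, 0, 0))
#   im.save("example.bmp")                  # dispatched to _save() above
#   reopened = Image.open("example.bmp")
#   assert reopened.format == "BMP" and reopened.size == (8, 8)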
|
the-stack_106_17542
|
import torch
from PyTorch_VAE.models import BaseVAE
from torch import nn
from torch.nn import functional as F
from .types_ import *
class VanillaVAE(BaseVAE):
def __init__(self,
in_channels: int,
latent_dim: int,
hidden_dims: List = None,
**kwargs) -> None:
super(VanillaVAE, self).__init__()
self.latent_dim = latent_dim
modules = []
if hidden_dims is None:
hidden_dims = [32, 64, 128, 256, 512]
# Build Encoder
for h_dim in hidden_dims:
modules.append(
nn.Sequential(
nn.Conv2d(in_channels, out_channels=h_dim,
kernel_size= 3, stride= 2, padding = 1),
nn.BatchNorm2d(h_dim),
nn.LeakyReLU())
)
in_channels = h_dim
self.encoder = nn.Sequential(*modules)
self.fc_mu = nn.Linear(hidden_dims[-1]*4, latent_dim)
self.fc_var = nn.Linear(hidden_dims[-1]*4, latent_dim)
# Build Decoder
modules = []
self.decoder_input = nn.Linear(latent_dim, hidden_dims[-1] * 4)
hidden_dims.reverse()
for i in range(len(hidden_dims) - 1):
modules.append(
nn.Sequential(
nn.ConvTranspose2d(hidden_dims[i],
hidden_dims[i + 1],
kernel_size=3,
stride = 2,
padding=1,
output_padding=1),
nn.BatchNorm2d(hidden_dims[i + 1]),
nn.LeakyReLU())
)
self.decoder = nn.Sequential(*modules)
self.final_layer = nn.Sequential(
nn.ConvTranspose2d(hidden_dims[-1],
hidden_dims[-1],
kernel_size=3,
stride=2,
padding=1,
output_padding=1),
nn.BatchNorm2d(hidden_dims[-1]),
nn.LeakyReLU(),
nn.Conv2d(hidden_dims[-1], out_channels= 3,
kernel_size= 3, padding= 1),
nn.Tanh())
def encode(self, input: Tensor) -> List[Tensor]:
"""
Encodes the input by passing through the encoder network
and returns the latent codes.
:param input: (Tensor) Input tensor to encoder [N x C x H x W]
:return: (Tensor) List of latent codes
"""
result = self.encoder(input)
result = torch.flatten(result, start_dim=1)
# Split the result into mu and var components
# of the latent Gaussian distribution
mu = self.fc_mu(result)
log_var = self.fc_var(result)
return [mu, log_var]
def decode(self, z: Tensor) -> Tensor:
"""
Maps the given latent codes
onto the image space.
:param z: (Tensor) [B x D]
:return: (Tensor) [B x C x H x W]
"""
result = self.decoder_input(z)
result = result.view(-1, 512, 2, 2)
result = self.decoder(result)
result = self.final_layer(result)
return result
def reparameterize(self, mu: Tensor, logvar: Tensor) -> Tensor:
"""
Reparameterization trick to sample from N(mu, var) from
N(0,1).
:param mu: (Tensor) Mean of the latent Gaussian [B x D]
:param logvar: (Tensor) Log variance of the latent Gaussian [B x D]
:return: (Tensor) [B x D]
"""
std = torch.exp(0.5 * logvar)
eps = torch.randn_like(std)
return eps * std + mu
def forward(self, input: Tensor, **kwargs) -> List[Tensor]:
mu, log_var = self.encode(input)
z = self.reparameterize(mu, log_var)
return [self.decode(z), input, mu, log_var]
def loss_function(self,
*args,
**kwargs) -> dict:
"""
Computes the VAE loss function.
KL(N(\mu, \sigma), N(0, 1)) = \log \frac{1}{\sigma} + \frac{\sigma^2 + \mu^2}{2} - \frac{1}{2}
:param args:
:param kwargs:
:return:
"""
recons = args[0]
input = args[1]
mu = args[2]
log_var = args[3]
kld_weight = kwargs['M_N'] # Account for the minibatch samples from the dataset
recons_loss =F.mse_loss(recons, input)
kld_loss = torch.mean(-0.5 * torch.sum(1 + log_var - mu ** 2 - log_var.exp(), dim = 1), dim = 0)
loss = recons_loss + kld_weight * kld_loss
return {'loss': loss, 'Reconstruction_Loss':recons_loss, 'KLD':-kld_loss}
def sample(self,
num_samples:int,
current_device: int, **kwargs) -> Tensor:
"""
Samples from the latent space and return the corresponding
image space map.
:param num_samples: (Int) Number of samples
:param current_device: (Int) Device to run the model
:return: (Tensor)
"""
z = torch.randn(num_samples,
self.latent_dim)
z = z.to(current_device)
samples = self.decode(z)
return samples
def generate(self, x: Tensor, **kwargs) -> Tensor:
"""
Given an input image x, returns the reconstructed image
:param x: (Tensor) [B x C x H x W]
:return: (Tensor) [B x C x H x W]
"""
return self.forward(x)[0]
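# A minimal usage sketch (input resolution 64x64 is assumed because decode()
# hard-codes result.view(-1, 512, 2, 2); batch size and M_N are illustrative):
#
#   model = VanillaVAE(in_channels=3, latent_dim=128)
#   x = torch.randn(16, 3, 64, 64)
#   recons, inp, mu, log_var = model(x)
#   losses = model.loss_function(recons, inp, mu, log_var, M_N=16 / 10000)
#   # recons has shape (16, 3, 64, 64); losses['loss'] is a scalar tensor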
|
the-stack_106_17543
|
import os
import io
from google.oauth2 import service_account
from google.cloud import speech_v1
def recognize(filepath, language_code='en_US', model='default', cred_file=None):
# if cred_file is not None:
# cred = service_account.Credentials.from_service_account_file(
# cred_file,
# scopes=["https://www.googleapis.com/auth/cloud-platform"]
# )
#
# client = speech_v1.SpeechClient(credentials=cred)
# else:
# client = speech_v1.SpeechClient()
if cred_file is not None:
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = cred_file
client = speech_v1.SpeechClient()
config = {
"model": model,
"language_code": language_code
}
with io.open(filepath, 'rb') as fd:
content = fd.read()
audio = {"content": content}
response = client.recognize(config, audio)
for result in response.results:
alternative = result.alternatives[0]
yield alternative.transcript
def main():
import argparse
supported_models = ["command_and_search", "phone_call", "video", "default"]
parser = argparse.ArgumentParser()
parser.add_argument("-l", "--lang", type=str, default="en_US", help="audio language")
parser.add_argument("-m", "--model", type=str,
choices=supported_models,
default="default",
help="transcription model")
parser.add_argument("--cred", type=str, help="credentials file")
parser.add_argument("audiofile", type=str, help="path to the audio file")
args = parser.parse_args()
if args.cred is not None:
print("Using credentials file:", args.cred)
print()
for transcript in recognize(args.audiofile, args.lang, args.model, cred_file=args.cred):
print(transcript)
print()
if __name__ == '__main__':
main()
|
the-stack_106_17545
|
# killableprocess - subprocesses which can be reliably killed
#
# Parts of this module are copied from the subprocess.py file contained
# in the Python distribution.
#
# Copyright (c) 2003-2004 by Peter Astrand <[email protected]>
#
# Additions and modifications written by Benjamin Smedberg
# <[email protected]> are Copyright (c) 2006 by the Mozilla Foundation
# <http://www.mozilla.org/>
#
# More Modifications
# Copyright (c) 2006-2007 by Mike Taylor <[email protected]>
# Copyright (c) 2007-2008 by Mikeal Rogers <[email protected]>
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of the
# author not be used in advertising or publicity pertaining to
# distribution of the software without specific, written prior
# permission.
#
# THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, INDIRECT OR
# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""killableprocess - Subprocesses which can be reliably killed
This module is a subclass of the builtin "subprocess" module. It allows
processes that launch subprocesses to be reliably killed on Windows (via the
Popen.kill() method).
It also adds a timeout argument to wait() for a limited period of time before
forcefully killing the process.
Note: On Windows, this module requires Windows 2000 or higher (no support for
Windows 95, 98, or NT 4.0). It also requires ctypes, which is bundled with
Python 2.5+ or available from http://python.net/crew/theller/ctypes/
"""
import subprocess
import sys
import os
import time
import datetime
import types
import exceptions
try:
from subprocess import CalledProcessError
except ImportError:
# Python 2.4 doesn't implement CalledProcessError
class CalledProcessError(Exception):
"""This exception is raised when a process run by check_call() returns
a non-zero exit status. The exit status will be stored in the
returncode attribute."""
def __init__(self, returncode, cmd):
self.returncode = returncode
self.cmd = cmd
def __str__(self):
return "Command '%s' returned non-zero exit status %d" % (self.cmd, self.returncode)
mswindows = (sys.platform == "win32")
if mswindows:
import winprocess
else:
import signal
def call(*args, **kwargs):
waitargs = {}
if "timeout" in kwargs:
waitargs["timeout"] = kwargs.pop("timeout")
return Popen(*args, **kwargs).wait(**waitargs)
def check_call(*args, **kwargs):
"""Call a program with an optional timeout. If the program has a non-zero
exit status, raises a CalledProcessError."""
retcode = call(*args, **kwargs)
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = args[0]
raise CalledProcessError(retcode, cmd)
if not mswindows:
def DoNothing(*args):
pass
class Popen(subprocess.Popen):
if mswindows:
def _execute_child(self, args, executable, preexec_fn, close_fds,
cwd, env, universal_newlines, startupinfo,
creationflags, shell,
p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite):
if not isinstance(args, types.StringTypes):
args = subprocess.list2cmdline(args)
if startupinfo is None:
startupinfo = winprocess.STARTUPINFO()
if None not in (p2cread, c2pwrite, errwrite):
startupinfo.dwFlags |= winprocess.STARTF_USESTDHANDLES
startupinfo.hStdInput = int(p2cread)
startupinfo.hStdOutput = int(c2pwrite)
startupinfo.hStdError = int(errwrite)
if shell:
startupinfo.dwFlags |= winprocess.STARTF_USESHOWWINDOW
startupinfo.wShowWindow = winprocess.SW_HIDE
comspec = os.environ.get("COMSPEC", "cmd.exe")
args = comspec + " /c " + args
# We create a new job for this process, so that we can kill
# the process and any sub-processes
self._job = winprocess.CreateJobObject()
creationflags |= winprocess.CREATE_SUSPENDED
creationflags |= winprocess.CREATE_UNICODE_ENVIRONMENT
hp, ht, pid, tid = winprocess.CreateProcess(
executable, args,
None, None, # No special security
1, # Must inherit handles!
creationflags,
winprocess.EnvironmentBlock(env),
cwd, startupinfo)
self._child_created = True
self._handle = hp
self._thread = ht
self.pid = pid
self.tid = tid
winprocess.AssignProcessToJobObject(self._job, hp)
winprocess.ResumeThread(ht)
if p2cread is not None:
p2cread.Close()
if c2pwrite is not None:
c2pwrite.Close()
if errwrite is not None:
errwrite.Close()
time.sleep(.1)
def kill(self, group=True):
"""Kill the process. If group=True, all sub-processes will also be killed."""
if mswindows:
if group:
winprocess.TerminateJobObject(self._job, 127)
else:
winprocess.TerminateProcess(self._handle, 127)
self.returncode = 127
else:
if group:
try:
os.killpg(self.pid, signal.SIGKILL)
except: pass
else:
os.kill(self.pid, signal.SIGKILL)
self.returncode = -9
def wait(self, timeout=None, group=True):
"""Wait for the process to terminate. Returns returncode attribute.
If timeout seconds are reached and the process has not terminated,
it will be forcefully killed. If timeout is -1, wait will not
time out."""
if timeout is not None:
timeout = timeout * 1000
if self.returncode is not None:
return self.returncode
starttime = datetime.datetime.now()
if mswindows:
if timeout is None:
timeout = -1
rc = winprocess.WaitForSingleObject(self._handle, timeout)
if rc != winprocess.WAIT_TIMEOUT:
while (starttime - datetime.datetime.now()).microseconds < timeout or ( winprocess.QueryInformationJobObject(self._job, 8)['BasicInfo']['ActiveProcesses'] > 0 ):
time.sleep(.5)
if (starttime - datetime.datetime.now()).microseconds > timeout:
self.kill(group)
else:
self.returncode = winprocess.GetExitCodeProcess(self._handle)
else:
if sys.platform == 'linux2' or sys.platform == 'cygwin':
def group_wait():
os.waitpid(self.pid, 0)
return self.returncode
elif sys.platform == 'darwin':
def group_wait():
try:
while 1:
os.killpg(self.pid, signal.SIG_DFL)
time.sleep(.5)
except exceptions.OSError:
return self.returncode
if timeout is None:
if group is True:
return group_wait()
else:
subprocess.Popen.wait(self)
return self.returncode
returncode = False
while (starttime - datetime.datetime.now()).microseconds < timeout or ( returncode is False ):
if group is True:
return group_wait()
else:
if self.poll() is not None:
returncode = self.returncode
time.sleep(.5)
return self.returncode
return self.returncode
# We get random maxint errors from subprocesses __del__
__del__ = lambda self: None
def setpgid_preexec_fn():
os.setpgid(0, 0)
def runCommand(cmd, **kwargs):
if sys.platform != "win32":
return Popen(cmd, preexec_fn=setpgid_preexec_fn, **kwargs)
else:
return Popen(cmd, **kwargs)
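# A minimal usage sketch (hypothetical command; shows the timeout-based kill):
#
#   p = runCommand(["sleep", "30"])
#   p.wait(timeout=5)        # process (and its group) is killed once the timeout elapses
#   print(p.returncode)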
|
the-stack_106_17547
|
import pandas as pd
from abc import abstractmethod
import benchutils, os, time
class Preprocessor:
"""Super class of all preprocessor implementations.
Inherit from this class and implement :meth:`preprocessing.Preprocessor.preprocess()` if you want to add a new preprocessor class.
:param input: absolute path to the input file.
:type input: str
:param metadata: absolute path to the metadata file.
:type metadata: str
:param output: absolute path to the output directory.
:type output: str
"""
def __init__(self, input, metadata, output):
self.input = input
self.metadata = metadata
self.output = output
super().__init__()
@abstractmethod
def preprocess(self):
"""Abstract method.
Interface method that is invoked externally to trigger preprocessing.
:return: absolute path to the preprocessed output file.
:rtype: str
"""
pass
class MappingPreprocessor(Preprocessor):
"""Maps the input data set to a desired format.
:param input: absolute path to the input file.
:type input: str
:param output: absolute path to the output directory.
:type output: str
:param currentFormat: current identifier format.
:type currentFormat: str
:param desiredFormat: desired identifier format.
:type desiredFormat: str
:param labeled: boolean value if the input data is labeled.
:type labeled: bool
"""
def __init__(self, input, output, currentFormat, desiredFormat, labeled):
self.currentFormat = currentFormat
self.desiredFormat = desiredFormat
self.labeled = labeled
super().__init__(input, None, output)
def preprocess(self):
"""Maps the identifiers in the input dataset to the desired format that was specified when constructing the preprocessor.
:return: absolute path to the mapped file.
:rtype: str
"""
inputMatrix = pd.read_csv(self.input, index_col = 0)
original_filename = self.input.split("/")[-1]
mapped_filename = "mapped_" + self.desiredFormat + "_" + original_filename
output = self.input
output_filepath = "/".join(self.input.split("/")[0:-1])
#as the DataFormatter always transposes the data before any further processing, we can expect all genes to be in the columns
genesInColumn = "true"
#only map genes if the current format is not the desired format
if (self.currentFormat != self.desiredFormat):
output = output_filepath + "/" + mapped_filename
benchutils.mapDataMatrix(inputMatrix, genesInColumn, self.currentFormat, self.desiredFormat, output, self.labeled)
return output
class FilterPreprocessor(Preprocessor):
"""Filters features or samples above a user-defined threshold of missing values.
:param input: absolute path to the input file.
:type input: str
:param metadata: absolute path to the metadata file.
:type metadata: str
:param output: absolute path to the output directory.
:type output: str
:param config: configuration parameter for preprocessing as specified in the config file.
:type config: str
"""
def __init__(self, input, metadata, output):
self.config = benchutils.getConfig("Preprocessing")
super().__init__(input, metadata, output)
def preprocess(self):
"""Depending on what is specified in the config file, filter samples and/or features.
Remove all samples/features that have missing values above the threshold specified in the config.
:return: absolute path to the filtered output file.
:rtype: str
"""
filtered_data = pd.read_csv(self.input)
if self.config.getboolean("filterMissingsInGenes"):
# first filter out the genes that have more missings than threshold
filtered_data = self.filterMissings(self.config["threshold"], filtered_data)
if self.config.getboolean("filterMissingsInSamples"):
# second transpose matrix and filter out samples that have more missings than threshold
filtered_samples = self.filterMissings(self.config["threshold"], filtered_data.T)
filtered_data = filtered_samples.T
# transpose back into original orientation and save
filePrefix = self.input.split("/")[-1].split(".")[
0] # split path by / to receive filename, split filename by . to receive filename without ending
filename = self.output + filePrefix + "_filtered.csv"
filtered_data.to_csv(filename, index=False)
return filename
def filterMissings(self, threshold, data):
"""Filter the data for entries that have missing information above the given threshold.
:param threshold: maximum percentage of allowed missing items as string.
:type threshold: str
:param data: a DataFrame to be filtered
:type data: :class:`pandas.DataFrame`
:return: filtered DataFrame.
:rtype: :class:`pandas.DataFrame`
"""
#replace NAs by 0 for counting (assign the result so missing values are not counted as non-zero below)
data = data.fillna(0)
filtered_columns = data.columns
#find out threshold, i.e. minimum number of non-zero in real numbers
rowNumber = data.shape[0]
min_nonZeros = int(rowNumber - ((rowNumber * int(threshold))/100))
zero_counts = data.astype(bool).sum(axis=0)
for columnID, nonZeros in zero_counts.items():
if nonZeros <= min_nonZeros:
filtered_columns = filtered_columns.drop(columnID)
return data[filtered_columns]
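# A minimal sketch of the filtering rule in filterMissings() (illustrative values):
#
#   import pandas as pd
#   df = pd.DataFrame({"geneA": [1, 0, 0, 0], "geneB": [1, 2, 3, 4]})
#   # with threshold "50": min_nonZeros = int(4 - (4 * 50) / 100) = 2
#   # df.astype(bool).sum(axis=0) gives geneA: 1, geneB: 4
#   # geneA (1 <= 2) is dropped, geneB (4 > 2) is kept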
class DataTransformationPreprocessor(Preprocessor):
"""Transform the input data to have features in the columns for subsequent processing.
:param input: absolute path to the input file.
:type input: str
:param metadata: absolute path to the metadata file.
:type metadata: str
:param output: absolute path to the output directory.
:type output: str
:param dataSeparator: delimiter to use when parsing the input file.
:type dataSeparator: str
"""
def __init__(self, input, metadata, output, dataSeparator):
self.transposeMatrix = not benchutils.getConfigBoolean("Dataset", "genesInColumns")
self.dataSeparator = dataSeparator
super().__init__(input, metadata, output)
def preprocess(self):
"""If not already so, transpose the input data to have the features in the columns.
:return: absolute path to the correctly formatted output file.
:rtype: str
"""
df = pd.read_csv(self.input, sep=self.dataSeparator, index_col = 0)
#ATTENTION: this processing assumes that the data is formatted in a way that header and index are automatically recognized. remove trailing commas/separators at first line of the file for this to be achieved
if self.transposeMatrix:
df = df.T
filePrefix = self.input.split("/")[-1].split(".")[
0] # split path by / to receive filename, split filename by . to receive filename without ending
filename = self.output + filePrefix + "_transposed.csv"
df.to_csv(filename)
return filename
class MetaDataPreprocessor(Preprocessor):
"""Add labels to input data.
Get labels from meta data attribute that was specified in the user config.
:param input: absolute path to the input file.
:type input: str
:param metadata: absolute path to the metadata file.
:type metadata: str
:param output: absolute path to the output directory.
:type output: str
:param dataSeparator: delimiter to use when parsing the input and metadata file.
:type dataSeparator: str
:param diseaseColumn: column name of the class labels.
:type diseaseColumn: str
:param transposeMetadataMatrix: boolean value if the identifier names are located in the columns, as specified in the config file.
:type transposeMetadataMatrix: bool
"""
def __init__(self, input, metadata, output, separator):
self.diseaseColumn = benchutils.getConfigValue("Dataset", "classLabelName")
self.transposeMetadataMatrix = not benchutils.getConfigBoolean("Dataset", "metadataIDsInColumns")
self.separator = separator
super().__init__(input, metadata, output)
def preprocess(self):
"""Labels all samples of a data set.
Labels are taken from the corresponding metadata file and the metadata attribute that was specified in the config file.
Samples without metadata information well be assigned to class "NotAvailable".
:return: absolute path to the labeled data set.
:rtype: str
"""
df = pd.read_csv(self.input, index_col = 0)
diseaseCodes = pd.read_csv(self.metadata, sep = self.separator, index_col = 0, quotechar = '"')
diseaseColumn = []
if self.transposeMetadataMatrix:
diseaseCodes = diseaseCodes.T
#iterate through all sample IDs and select the corresponding disease/annotation from the metadata for it
for sample in df.index:
try:
diseaseCode = diseaseCodes[sample][self.diseaseColumn]
except:
diseaseCode = "NotAvailable"
benchutils.logWarning("WARNING: No classLabel code found for sample " + str(sample) + ". Assign class NotAvailable.")
diseaseColumn.append(diseaseCode)
df.insert(0, column="classLabel", value=diseaseColumn)
df_without_missings = df.dropna(subset=['classLabel'])
filePrefix = self.input.split("/")[-1].split(".")[
0] # split path by / to receive filename, split filename by . to receive filename without ending
filename = self.output + filePrefix + "_withClassLabels.csv"
df_without_missings.to_csv(filename)
return filename
####### PREPROCESSOR: moves a dataset into its respective folder #######
class DataMovePreprocessor(Preprocessor):
"""Moves the input data set to the specified location.
:param input: absolute path to the input file.
:type input: str
:param output: absolute path to the output directory.
:type output: str
"""
def __init__(self, input, output):
super().__init__(input, None, output)
def preprocess(self):
"""Moves a file (self.input) to another location (self.output).
Typically used at the end of preprocessing, when the final data set is moved to a new location for the actual analysis.
:return: absolute path to the new file location.
:rtype: str
"""
os.system("cp " + self.input + " " + self.output)
return self.output
|
the-stack_106_17550
|
import unittest
from euchre.data_model import FaceCard, Suite, CardDeck, Card, Trick
from euchre.move_simulations import update_possible_cards, possible_cards_in_hand
from euchre.game_controller import GameController
from euchre.players.RandomPlayer import RandomPlayer
from itertools import chain
class TestMoveSimulations(unittest.TestCase):
def test_possible_cards_in_hand(self):
cards = [[Card(Suite.DIAMOND, FaceCard.JACK), Card(Suite.DIAMOND, FaceCard.TEN),
Card(Suite.DIAMOND, FaceCard.QUEEN), Card(Suite.DIAMOND, FaceCard.ACE),
Card(Suite.DIAMOND, FaceCard.KING)],
[Card(Suite.CLUB, FaceCard.JACK), Card(Suite.CLUB, FaceCard.TEN),
Card(Suite.CLUB, FaceCard.QUEEN), Card(Suite.CLUB, FaceCard.ACE),
Card(Suite.CLUB, FaceCard.KING)],
[Card(Suite.HEART, FaceCard.JACK), Card(Suite.HEART, FaceCard.TEN),
Card(Suite.HEART, FaceCard.QUEEN), Card(Suite.HEART, FaceCard.ACE),
Card(Suite.HEART, FaceCard.KING)],
[Card(Suite.SPADE, FaceCard.JACK), Card(Suite.SPADE, FaceCard.TEN),
Card(Suite.SPADE, FaceCard.QUEEN), Card(Suite.SPADE, FaceCard.ACE),
Card(Suite.SPADE, FaceCard.KING)],
[Card(Suite.DIAMOND, FaceCard.NINE), Card(Suite.SPADE, FaceCard.NINE),
Card(Suite.HEART, FaceCard.NINE), Card(Suite.CLUB, FaceCard.NINE)]]
possible_cards = possible_cards_in_hand(cards[0], 0, 0, cards[4][0], Suite.DIAMOND, [])
self.assertTrue(len(possible_cards[0]) == 0, "First player cards should be known")
self.assertTrue(len([card for card in possible_cards[1] if card.suite == Suite.DIAMOND]) == 0,
"Check that the visible players hand is taken out of each persons possible hands")
tricks = [Trick(2, [Card(Suite.HEART, FaceCard.JACK), Card(Suite.SPADE, FaceCard.JACK),
Card(Suite.DIAMOND, FaceCard.JACK), Card(Suite.CLUB, FaceCard.JACK)])]
possible_cards = possible_cards_in_hand(cards[0], 0, 2, cards[4][0], Suite.SPADE, tricks)
self.assertTrue(len(possible_cards[0]) == 0, "First player cards should be known")
self.assertTrue(len([card for card in possible_cards[1] if card.suite == Suite.DIAMOND]) == 0,
"Check that the visible players hand is taken out of each persons possible hands")
self.assertTrue(len([card for card in possible_cards[1] if card.suite == Suite.HEART]) == 0,
"Check that if a player could not follow suit card was removed from hand")
self.assertTrue(len([card for card in possible_cards[4] if card.face_card == FaceCard.JACK]) == 0,
"Check that all visible trick cards are removed from possible cards in the deck")
def test_known_cards_in_hand(self):
"""Idea to test function is to simulate a whole bunch of games and check at each point
if the set of known cards and possible cards calculated is valid
"""
whole_deck = sorted([Card(suite, face_card) for face_card in FaceCard for suite in Suite])
controller = GameController([RandomPlayer(0) for _ in range(4)], CardDeck(0))
controller.play()
for g_round in controller.game.rounds:
if len(g_round.tricks) > 0:
player_cards = g_round.players_cards[0]
possible_cards = possible_cards_in_hand(player_cards.hand, player_cards.index, player_cards.order,
g_round.kitty[0], g_round.trump, g_round.tricks)
known_cards, possible_cards = update_possible_cards(player_cards.hand, player_cards.index,
g_round.tricks, possible_cards)
self.assertTrue(len(list(chain(*list(possible_cards)))) == 0,
"Entire game should be played, possible cards are 0")
self.assertListEqual(sorted(list(chain(*list(known_cards)))), whole_deck)
self.assertListEqual(sorted(known_cards[4]), sorted(g_round.kitty))
for i in range(5):
possible_cards = possible_cards_in_hand(player_cards.hand, player_cards.index, player_cards.order
, g_round.kitty[0], g_round.trump, g_round.tricks)
known_cards, possible_cards = update_possible_cards(player_cards.hand, player_cards.index,
g_round.tricks, possible_cards)
# No repeats in known cards
all_known_cards = list(chain(*list(known_cards)))
self.assertEqual(len(all_known_cards), len(set(all_known_cards)))
# all cards accounted for
self.assertEqual(set(list(chain(*list(possible_cards))) + all_known_cards), set(whole_deck))
if __name__ == '__main__':
unittest.main()
|
the-stack_106_17551
|
# -*- encoding: utf-8 -*-
# pylint: disable=E0203,E1101,C0111
"""
@file
@brief Runtime operator.
"""
import numpy
from ._op import OpRun
from ..shape_object import ShapeObjectFct
from .op_conv_ import ConvFloat, ConvDouble # pylint: disable=E0611,E0401
class Conv(OpRun):
atts = {'auto_pad': 'NOTSET', 'group': 1,
'dilations': [1, 1],
'kernel_shape': [],
'pads': [],
'strides': [1, 1]}
def __init__(self, onnx_node, desc=None, **options):
OpRun.__init__(self, onnx_node, desc=desc,
expected_attributes=Conv.atts,
**options)
self._init()
def _init(self):
self.rt32_ = ConvFloat()
self.rt64_ = ConvDouble()
for rt in [self.rt32_, self.rt64_]:
rt.init(self.auto_pad,
numpy.array(self.dilations, dtype=numpy.int64),
self.group,
numpy.array(self.kernel_shape, dtype=numpy.int64),
numpy.array(self.pads, dtype=numpy.int64),
numpy.array(self.strides, dtype=numpy.int64))
def _run(self, X, W, B=None): # pylint: disable=W0221
if X is None:
raise ValueError( # pragma: no cover
"X cannot be None for operator %r, ONNX=%r" % (
type(self), self.onnx_node))
if min(X.shape) == 0:
raise RuntimeError(
"Unable to run operator Conv on an empty matrix. "
"X.shape=%r." % (X.shape, ))
if min(W.shape) == 0:
raise RuntimeError(
"Unable to run operator Conv on an empty matrix. "
"W.shape=%r." % (W.shape, ))
if B is not None and min(B.shape) == 0:
raise RuntimeError(
"Unable to run operator Conv on an empty matrix. "
"B.shape=%r." % (B.shape, ))
if X.dtype == numpy.float32:
return (self.rt32_.compute(X, W, B), )
return (self.rt64_.compute(X, W, B), )
def _infer_shapes(self, X, W, B=None): # pylint: disable=W0221
def compute_shape(xshape, wshape, bshape):
xs = numpy.ones(xshape, dtype=numpy.float32)
ws = numpy.ones(wshape, dtype=numpy.float32)
bs = (numpy.ones(bshape, dtype=numpy.float32)
if bshape is not None else None)
res = self.rt32_.compute(xs, ws, bs)
return res.shape
return (ShapeObjectFct(
compute_shape, X, W, B, name="Conv", dtype=X.dtype), )
def _infer_types(self, X, W, B=None): # pylint: disable=W0221
return (X, )
def _infer_sizes(self, X, W, B=None): # pylint: disable=W0221
res = self.run(X, W, B=None)
C = X.shape[1]
kernel_size = numpy.prod(self.kernel_shape)
kernel_dim = C / self.group * kernel_size
temp = kernel_dim * res[0].size
return (dict(temp=temp * X.dtype.itemsize), ) + res
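# Rough size arithmetic behind _infer_sizes() above (illustrative numbers): for
# X of shape (1, 3, 32, 32), kernel_shape=[3, 3] and group=1, kernel_size = 9,
# kernel_dim = 3 / 1 * 9 = 27, and the temporary buffer holds kernel_dim times
# the number of output elements (scaled by the dtype's item size).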
|
the-stack_106_17554
|
"""
Ansible action plugin to ensure inventory variables are set
appropriately and no conflicting options have been provided.
"""
import re
from ansible.plugins.action import ActionBase
from ansible import errors
# Valid values for openshift_deployment_type
VALID_DEPLOYMENT_TYPES = ('origin', 'openshift-enterprise')
# Tuple of variable names and default values if undefined.
NET_PLUGIN_LIST = (('openshift_use_openshift_sdn', True),
('openshift_use_flannel', False),
('openshift_use_nuage', False),
('openshift_use_contiv', False),
('openshift_use_calico', False),
('openshift_use_kuryr', False))
ENTERPRISE_TAG_REGEX_ERROR = """openshift_image_tag must be in the format
v#.#[.#[.#]]. Examples: v1.2, v3.4.1, v3.5.1.3,
v3.5.1.3.4, v1.2-1, v1.2.3-4, v1.2.3-4.5, v1.2.3-4.5.6
You specified openshift_image_tag={}"""
ORIGIN_TAG_REGEX_ERROR = """openshift_image_tag must be in the format
v#.#[.#-optional.#]. Examples: v1.2.3, v3.5.1-alpha.1
You specified openshift_image_tag={}"""
ORIGIN_TAG_REGEX = {'re': '(^v?\\d+\\.\\d+.*)',
'error_msg': ORIGIN_TAG_REGEX_ERROR}
ENTERPRISE_TAG_REGEX = {'re': '(^v\\d+\\.\\d+(\\.\\d+)*(-\\d+(\\.\\d+)*)?$)',
'error_msg': ENTERPRISE_TAG_REGEX_ERROR}
IMAGE_TAG_REGEX = {'origin': ORIGIN_TAG_REGEX,
'openshift-enterprise': ENTERPRISE_TAG_REGEX}
UNSUPPORTED_OCP_VERSIONS = {
'^3.8.*$': 'OCP 3.8 is not supported and cannot be installed'
}
CONTAINERIZED_NO_TAG_ERROR_MSG = """To install a containerized Origin release,
you must set openshift_release or openshift_image_tag in your inventory to
specify which version of the OpenShift component images to use.
(Suggestion: add openshift_release="x.y" to inventory.)"""
STORAGE_KIND_TUPLE = (
'openshift_hosted_registry_storage_kind',
'openshift_loggingops_storage_kind',
'openshift_logging_storage_kind',
'openshift_metrics_storage_kind',
'openshift_prometheus_alertbuffer_storage_kind',
'openshift_prometheus_alertmanager_storage_kind',
'openshift_prometheus_storage_kind')
def to_bool(var_to_check):
"""Determine a boolean value given the multiple
ways bools can be specified in ansible."""
# http://yaml.org/type/bool.html
yes_list = (True, 1, "True", "1", "true", "TRUE",
"Yes", "yes", "Y", "y", "YES",
"on", "ON", "On")
return var_to_check in yes_list
class ActionModule(ActionBase):
"""Action plugin to execute sanity checks."""
def template_var(self, hostvars, host, varname):
"""Retrieve a variable from hostvars and template it.
If undefined, return None type."""
# We will set the current host and variable checked for easy debugging
# if there are any unhandled exceptions.
# pylint: disable=W0201
self.last_checked_var = varname
# pylint: disable=W0201
self.last_checked_host = host
res = hostvars[host].get(varname)
if res is None:
return None
return self._templar.template(res)
def check_openshift_deployment_type(self, hostvars, host):
"""Ensure a valid openshift_deployment_type is set"""
openshift_deployment_type = self.template_var(hostvars, host,
'openshift_deployment_type')
if openshift_deployment_type not in VALID_DEPLOYMENT_TYPES:
type_strings = ", ".join(VALID_DEPLOYMENT_TYPES)
msg = "openshift_deployment_type must be defined and one of {}".format(type_strings)
raise errors.AnsibleModuleError(msg)
return openshift_deployment_type
def check_python_version(self, hostvars, host, distro):
"""Ensure python version is 3 for Fedora and python 2 for others"""
ansible_python = self.template_var(hostvars, host, 'ansible_python')
if distro == "Fedora":
if ansible_python['version']['major'] != 3:
msg = "openshift-ansible requires Python 3 for {};".format(distro)
msg += " For information on enabling Python 3 with Ansible,"
msg += " see https://docs.ansible.com/ansible/python_3_support.html"
raise errors.AnsibleModuleError(msg)
else:
if ansible_python['version']['major'] != 2:
msg = "openshift-ansible requires Python 2 for {};".format(distro)
def check_image_tag_format(self, hostvars, host, openshift_deployment_type):
"""Ensure openshift_image_tag is formatted correctly"""
openshift_image_tag = self.template_var(hostvars, host, 'openshift_image_tag')
if not openshift_image_tag or openshift_image_tag == 'latest':
return None
regex_to_match = IMAGE_TAG_REGEX[openshift_deployment_type]['re']
res = re.match(regex_to_match, str(openshift_image_tag))
if res is None:
msg = IMAGE_TAG_REGEX[openshift_deployment_type]['error_msg']
msg = msg.format(str(openshift_image_tag))
raise errors.AnsibleModuleError(msg)
def no_origin_image_version(self, hostvars, host, openshift_deployment_type):
"""Ensure we can determine what image version to use with origin
fail when:
- openshift_is_containerized
- openshift_deployment_type == 'origin'
- openshift_release is not defined
- openshift_image_tag is not defined"""
if not openshift_deployment_type == 'origin':
return None
oic = self.template_var(hostvars, host, 'openshift_is_containerized')
if not to_bool(oic):
return None
orelease = self.template_var(hostvars, host, 'openshift_release')
oitag = self.template_var(hostvars, host, 'openshift_image_tag')
if not orelease and not oitag:
raise errors.AnsibleModuleError(CONTAINERIZED_NO_TAG_ERROR_MSG)
def network_plugin_check(self, hostvars, host):
"""Ensure only one type of network plugin is enabled"""
res = []
# Loop through each possible network plugin boolean, determine the
# actual boolean value, and append results into a list.
for plugin, default_val in NET_PLUGIN_LIST:
res_temp = self.template_var(hostvars, host, plugin)
if res_temp is None:
res_temp = default_val
res.append(to_bool(res_temp))
if sum(res) not in (0, 1):
plugin_str = list(zip([x[0] for x in NET_PLUGIN_LIST], res))
msg = "Host Checked: {} Only one of must be true. Found: {}".format(host, plugin_str)
raise errors.AnsibleModuleError(msg)
def check_hostname_vars(self, hostvars, host):
"""Checks to ensure openshift_hostname
and openshift_public_hostname
conform to the proper length of 63 characters or less"""
for varname in ('openshift_public_hostname', 'openshift_hostname'):
var_value = self.template_var(hostvars, host, varname)
if var_value and len(var_value) > 63:
msg = '{} must be 63 characters or less'.format(varname)
raise errors.AnsibleModuleError(msg)
def check_supported_ocp_version(self, hostvars, host, openshift_deployment_type):
"""Checks that the OCP version supported"""
if openshift_deployment_type == 'origin':
return None
openshift_version = self.template_var(hostvars, host, 'openshift_version')
for regex_to_match, error_msg in UNSUPPORTED_OCP_VERSIONS.items():
res = re.match(regex_to_match, str(openshift_version))
if res is not None:
raise errors.AnsibleModuleError(error_msg)
return None
def check_session_auth_secrets(self, hostvars, host):
"""Checks session_auth_secrets is correctly formatted"""
sas = self.template_var(hostvars, host,
'openshift_master_session_auth_secrets')
ses = self.template_var(hostvars, host,
'openshift_master_session_encryption_secrets')
# This variable isn't mandatory, only check if set.
if sas is None and ses is None:
return None
if not (
issubclass(type(sas), list) and issubclass(type(ses), list)
) or len(sas) != len(ses):
raise errors.AnsibleModuleError(
'Expects openshift_master_session_auth_secrets and '
'openshift_master_session_encryption_secrets are equal length lists')
for secret in sas:
if len(secret) < 32:
raise errors.AnsibleModuleError(
'Invalid secret in openshift_master_session_auth_secrets. '
'Secrets must be at least 32 characters in length.')
for secret in ses:
if len(secret) not in [16, 24, 32]:
raise errors.AnsibleModuleError(
'Invalid secret in openshift_master_session_encryption_secrets. '
'Secrets must be 16, 24, or 32 characters in length.')
return None
def check_unsupported_nfs_configs(self, hostvars, host):
"""Fails if nfs storage is in use for any components. This check is
ignored if openshift_enable_unsupported_configurations=True"""
enable_unsupported = self.template_var(
hostvars, host, 'openshift_enable_unsupported_configurations')
if to_bool(enable_unsupported):
return None
for storage in STORAGE_KIND_TUPLE:
kind = self.template_var(hostvars, host, storage)
if kind == 'nfs':
raise errors.AnsibleModuleError(
'nfs is an unsupported type for {}. '
                    'openshift_enable_unsupported_configurations=True must '
                    'be specified to continue with this configuration.'
''.format(storage))
return None
def run_checks(self, hostvars, host):
"""Execute the hostvars validations against host"""
distro = self.template_var(hostvars, host, 'ansible_distribution')
odt = self.check_openshift_deployment_type(hostvars, host)
self.check_python_version(hostvars, host, distro)
self.check_image_tag_format(hostvars, host, odt)
self.no_origin_image_version(hostvars, host, odt)
self.network_plugin_check(hostvars, host)
self.check_hostname_vars(hostvars, host)
self.check_supported_ocp_version(hostvars, host, odt)
self.check_session_auth_secrets(hostvars, host)
self.check_unsupported_nfs_configs(hostvars, host)
def run(self, tmp=None, task_vars=None):
result = super(ActionModule, self).run(tmp, task_vars)
# self.task_vars holds all in-scope variables.
        # Ignore setting self.task_vars outside of init.
# pylint: disable=W0201
self.task_vars = task_vars or {}
# pylint: disable=W0201
self.last_checked_host = "none"
# pylint: disable=W0201
self.last_checked_var = "none"
# self._task.args holds task parameters.
# check_hosts is a parameter to this plugin, and should provide
# a list of hosts.
check_hosts = self._task.args.get('check_hosts')
if not check_hosts:
msg = "check_hosts is required"
raise errors.AnsibleModuleError(msg)
# We need to access each host's variables
hostvars = self.task_vars.get('hostvars')
if not hostvars:
            msg = "hostvars is required but was not found in task_vars"
raise errors.AnsibleModuleError(msg)
# We loop through each host in the provided list check_hosts
for host in check_hosts:
try:
self.run_checks(hostvars, host)
except Exception as uncaught_e:
msg = "last_checked_host: {}, last_checked_var: {};"
msg = msg.format(self.last_checked_host, self.last_checked_var)
msg += str(uncaught_e)
raise errors.AnsibleModuleError(msg)
result["changed"] = False
result["failed"] = False
result["msg"] = "Sanity Checks passed"
return result
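# --- Hedged usage note (illustrative only) ----------------------------------
# A task that runs this action plugin is expected to look roughly like the
# YAML below; the module name "sanity_checks" and the host group are
# assumptions made for illustration, not something defined in this file.
#
#   - name: Run inventory variable sanity checks
#     sanity_checks:
#       check_hosts: "{{ groups['oo_all_hosts'] }}"
#     run_once: true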
|
the-stack_106_17555
|
from shapely.wkb import loads
import json
from ... import getTile
from ...Core import KnownUnknown
def get_tiles(names, config, coord):
''' Retrieve a list of named TopoJSON layer tiles from a TileStache config.
Check integrity and compatibility of each, looking at known layers,
correct JSON mime-types, "Topology" in the type attributes, and
matching affine transformations.
'''
unknown_layers = set(names) - set(config.layers.keys())
if unknown_layers:
raise KnownUnknown("%s.get_tiles didn't recognize %s when trying to load %s." % (__name__, ', '.join(unknown_layers), ', '.join(names)))
layers = [config.layers[name] for name in names]
mimes, bodies = zip(*[getTile(layer, coord, 'topojson') for layer in layers])
bad_mimes = [(name, mime) for (mime, name) in zip(mimes, names) if not mime.endswith('/json')]
if bad_mimes:
raise KnownUnknown('%s.get_tiles encountered a non-JSON mime-type in %s sub-layer: "%s"' % ((__name__, ) + bad_mimes[0]))
    # Materialize the list; it is iterated more than once below.
    topojsons = list(map(json.loads, bodies))
bad_types = [(name, topo['type']) for (topo, name) in zip(topojsons, names) if topo['type'] != 'Topology']
if bad_types:
raise KnownUnknown('%s.get_tiles encountered a non-Topology type in %s sub-layer: "%s"' % ((__name__, ) + bad_types[0]))
transforms = [topo['transform'] for topo in topojsons]
unique_xforms = set([tuple(xform['scale'] + xform['translate']) for xform in transforms])
if len(unique_xforms) > 1:
raise KnownUnknown('%s.get_tiles encountered incompatible transforms: %s' % (__name__, list(unique_xforms)))
return topojsons
def update_arc_indexes(geometry, merged_arcs, old_arcs):
    ''' Update geometry arc indexes, and add arcs to merged_arcs along the way.
Arguments are modified in-place, and nothing is returned.
'''
if geometry['type'] in ('Point', 'MultiPoint'):
return
elif geometry['type'] == 'LineString':
for (arc_index, old_arc) in enumerate(geometry['arcs']):
geometry['arcs'][arc_index] = len(merged_arcs)
merged_arcs.append(old_arcs[old_arc])
elif geometry['type'] == 'Polygon':
for ring in geometry['arcs']:
for (arc_index, old_arc) in enumerate(ring):
ring[arc_index] = len(merged_arcs)
merged_arcs.append(old_arcs[old_arc])
elif geometry['type'] == 'MultiLineString':
for part in geometry['arcs']:
for (arc_index, old_arc) in enumerate(part):
part[arc_index] = len(merged_arcs)
merged_arcs.append(old_arcs[old_arc])
elif geometry['type'] == 'MultiPolygon':
for part in geometry['arcs']:
for ring in part:
for (arc_index, old_arc) in enumerate(ring):
ring[arc_index] = len(merged_arcs)
merged_arcs.append(old_arcs[old_arc])
else:
raise NotImplementedError("Can't do %s geometries" % geometry['type'])
def get_transform(bounds, size=1024):
''' Return a TopoJSON transform dictionary and a point-transforming function.
Size is the tile size in pixels and sets the implicit output resolution.
'''
tx, ty = bounds[0], bounds[1]
sx, sy = (bounds[2] - bounds[0]) / size, (bounds[3] - bounds[1]) / size
def forward(lon, lat):
''' Transform a longitude and latitude to TopoJSON integer space.
'''
return int(round((lon - tx) / sx)), int(round((lat - ty) / sy))
return dict(translate=(tx, ty), scale=(sx, sy)), forward
def diff_encode(line, transform):
''' Differentially encode a shapely linestring or ring.
'''
coords = [transform(x, y) for (x, y) in line.coords]
pairs = zip(coords[:], coords[1:])
diffs = [(x2 - x1, y2 - y1) for ((x1, y1), (x2, y2)) in pairs]
return coords[:1] + [(x, y) for (x, y) in diffs if (x, y) != (0, 0)]
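# Hedged example (illustrative only, not used by TileStache itself): shows how
# get_transform() and diff_encode() cooperate; the helper name, bounds and
# coordinates below are made up.
def _example_diff_encode():
    from shapely.geometry import LineString
    transform, forward = get_transform((-122.5, 37.7, -122.3, 37.9), size=1024)
    line = LineString([(-122.45, 37.75), (-122.40, 37.78), (-122.35, 37.80)])
    # The first element is an absolute position, the rest are non-zero deltas.
    return transform, diff_encode(line, forward)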
def decode(file):
''' Stub function to decode a TopoJSON file into a list of features.
Not currently implemented, modeled on geojson.decode().
'''
raise NotImplementedError('topojson.decode() not yet written')
def encode(file, features, bounds, is_clipped):
''' Encode a list of (WKB, property dict) features into a TopoJSON stream.
Also accept three-element tuples as features: (WKB, property dict, id).
Geometries in the features list are assumed to be unprojected lon, lats.
Bounds are given in geographic coordinates as (xmin, ymin, xmax, ymax).
'''
transform, forward = get_transform(bounds)
geometries, arcs = list(), list()
for feature in features:
shape = loads(feature[0])
geometry = dict(properties=feature[1])
geometries.append(geometry)
if is_clipped:
geometry.update(dict(clipped=True))
        if len(feature) >= 3:
# ID is an optional third element in the feature tuple
geometry.update(dict(id=feature[2]))
if shape.type == 'GeometryCollection':
geometries.pop()
continue
elif shape.type == 'Point':
geometry.update(dict(type='Point', coordinates=forward(shape.x, shape.y)))
elif shape.type == 'LineString':
geometry.update(dict(type='LineString', arcs=[len(arcs)]))
arcs.append(diff_encode(shape, forward))
elif shape.type == 'Polygon':
geometry.update(dict(type='Polygon', arcs=[]))
rings = [shape.exterior] + list(shape.interiors)
for ring in rings:
geometry['arcs'].append([len(arcs)])
arcs.append(diff_encode(ring, forward))
elif shape.type == 'MultiPoint':
geometry.update(dict(type='MultiPoint', coordinates=[]))
for point in shape.geoms:
geometry['coordinates'].append(forward(point.x, point.y))
elif shape.type == 'MultiLineString':
geometry.update(dict(type='MultiLineString', arcs=[]))
for line in shape.geoms:
geometry['arcs'].append([len(arcs)])
arcs.append(diff_encode(line, forward))
elif shape.type == 'MultiPolygon':
geometry.update(dict(type='MultiPolygon', arcs=[]))
for polygon in shape.geoms:
rings = [polygon.exterior] + list(polygon.interiors)
polygon_arcs = []
for ring in rings:
polygon_arcs.append([len(arcs)])
arcs.append(diff_encode(ring, forward))
geometry['arcs'].append(polygon_arcs)
else:
raise NotImplementedError("Can't do %s geometries" % shape.type)
result = {
'type': 'Topology',
'transform': transform,
'objects': {
'vectile': {
'type': 'GeometryCollection',
'geometries': geometries
}
},
'arcs': arcs
}
json.dump(result, file, separators=(',', ':'))
def merge(file, names, config, coord):
''' Retrieve a list of TopoJSON tile responses and merge them into one.
get_tiles() retrieves data and performs basic integrity checks.
'''
inputs = get_tiles(names, config, coord)
output = {
'type': 'Topology',
'transform': inputs[0]['transform'],
'objects': dict(),
'arcs': list()
}
for (name, input) in zip(names, inputs):
for (index, object) in enumerate(input['objects'].values()):
if len(input['objects']) > 1:
output['objects']['%(name)s-%(index)d' % locals()] = object
else:
output['objects'][name] = object
for geometry in object['geometries']:
update_arc_indexes(geometry, output['arcs'], input['arcs'])
json.dump(output, file, separators=(',', ':'))
|
the-stack_106_17557
|
import argparse
from glob import glob
import cv2
import numpy as np
from tensorflow.keras.applications import InceptionV3
from tensorflow.keras.applications.inception_v3 import preprocess_input
from tensorflow.keras.models import model_from_json
from utils import paths_to_tensor
# Construct the argument parse and parse the arguments.
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True, help="Path to the input image")
args = vars(ap.parse_args())
# Input image.
input_image = args["image"]
""" Transfer learning using Inception V3 """
# Load the Inception V3 model as well as the network weights from disk.
print("[INFO] loading {}...".format("CNN Model"))
transfer_model = InceptionV3(include_top=False, weights="imagenet")
""" Retrieve the saved CNN model """
# Load json and create model.
json_file = open("models/CNN_model.json", "r")
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
# Load weights into new model.
loaded_model.load_weights("weights/CNN_model.h5")
CNN_model = loaded_model
print("[INFO] Loaded model from the disk.")
# Prediction.
tensor = paths_to_tensor([input_image])
preprocessed_image = preprocess_input(tensor)
feature_extracted_image = transfer_model.predict(preprocessed_image)
prediction = CNN_model.predict(feature_extracted_image)
# Load labels.
label_name = [item[11:-1] for item in sorted(glob("data/train/*/"))]
# print("[INFO] Label names are: {}".format(label_name))
print("[INFO] Analyzing the skin lesion.")
print("[INFO] Please Wait...")
# Show output.
cv2.namedWindow("Classification", cv2.WINDOW_NORMAL)
cv2.resizeWindow("Classification", 1920, 1080)
orig = cv2.imread(args["image"])
label_index = np.argmax(prediction)
label = label_name[label_index]
prob = prediction[0][label_index]
print("[INFO] Analysis Completed!")
print("[INFO] {} detected in the image.".format(label))
cv2.putText(
orig, "Label: {}, {:.2f}%".format(label, prob * 100), (50, 300), cv2.FONT_HERSHEY_SIMPLEX, 5, (255, 255, 255), 2
)
cv2.imshow("Classification", orig)
cv2.waitKey(0)
|
the-stack_106_17559
|
import unittest
import numpy as np
from operator import itemgetter
from AlphaGo.go import GameState
from AlphaGo.mcts import MCTS, TreeNode
class TestTreeNode(unittest.TestCase):
def setUp(self):
self.gs = GameState()
self.node = TreeNode(None, 1.0)
def test_selection(self):
self.node.expand(dummy_policy(self.gs))
action, next_node = self.node.select()
self.assertEqual(action, (18, 18)) # according to the dummy policy below
self.assertIsNotNone(next_node)
def test_expansion(self):
self.assertEqual(0, len(self.node._children))
self.node.expand(dummy_policy(self.gs))
self.assertEqual(19 * 19, len(self.node._children))
for a, p in dummy_policy(self.gs):
self.assertEqual(p, self.node._children[a]._P)
def test_update(self):
self.node.expand(dummy_policy(self.gs))
child = self.node._children[(18, 18)]
# Note: the root must be updated first for the visit count to work.
self.node.update(leaf_value=1.0, c_puct=5.0)
child.update(leaf_value=1.0, c_puct=5.0)
expected_score = 1.0 + 5.0 * dummy_distribution[-1] * 0.5
self.assertEqual(expected_score, child.get_value())
# After a second update, the Q value should be the average of the two, and the u value
# should be multiplied by sqrt(parent visits) / (node visits + 1) (which was simply equal
# to 0.5 before)
self.node.update(leaf_value=0.0, c_puct=5.0)
child.update(leaf_value=0.0, c_puct=5.0)
expected_score = 0.5 + 5.0 * dummy_distribution[-1] * np.sqrt(2.0) / 3.0
self.assertEqual(expected_score, child.get_value())
def test_update_recursive(self):
# Assertions are identical to test_treenode_update.
self.node.expand(dummy_policy(self.gs))
child = self.node._children[(18, 18)]
child.update_recursive(leaf_value=1.0, c_puct=5.0)
expected_score = 1.0 + 5.0 * dummy_distribution[-1] / 2.0
self.assertEqual(expected_score, child.get_value())
child.update_recursive(leaf_value=0.0, c_puct=5.0)
expected_score = 0.5 + 5.0 * dummy_distribution[-1] * np.sqrt(2.0) / 3.0
self.assertEqual(expected_score, child.get_value())
class TestMCTS(unittest.TestCase):
def setUp(self):
self.gs = GameState()
self.mcts = MCTS(dummy_value, dummy_policy, dummy_rollout, n_playout=2)
def _count_expansions(self):
"""Helper function to count the number of expansions past the root using the dummy policy
"""
node = self.mcts._root
expansions = 0
# Loop over actions in decreasing probability.
for action, _ in sorted(dummy_policy(self.gs), key=itemgetter(1), reverse=True):
if action in node._children:
expansions += 1
node = node._children[action]
else:
break
return expansions
def test_playout(self):
self.mcts._playout(self.gs.copy(), 8)
# Assert that the most likely child was visited (according to the dummy policy below).
self.assertEqual(1, self.mcts._root._children[(18, 18)]._n_visits)
# Assert that the search depth expanded nodes 8 times.
self.assertEqual(8, self._count_expansions())
def test_playout_with_pass(self):
# Test that playout handles the end of the game (i.e. passing/no moves). Mock this by
# creating a policy that returns nothing after 4 moves.
def stop_early_policy(state):
if len(state.get_history()) <= 4:
return dummy_policy(state)
else:
return []
self.mcts = MCTS(dummy_value, stop_early_policy, stop_early_policy, n_playout=2)
self.mcts._playout(self.gs.copy(), 8)
        # Assert that (18, 18) is still only visited once.
self.assertEqual(1, self.mcts._root._children[(18, 18)]._n_visits)
# Assert that no expansions happened after reaching the "end" in 4 moves.
self.assertEqual(5, self._count_expansions())
def test_get_move(self):
move = self.mcts.get_move(self.gs)
self.mcts.update_with_move(move)
# success if no errors
def test_update_with_move(self):
move = self.mcts.get_move(self.gs)
self.gs.do_move(move)
self.mcts.update_with_move(move)
# Assert that the new root still has children.
self.assertTrue(len(self.mcts._root._children) > 0)
# Assert that the new root has no parent (the rest of the tree will be garbage collected).
self.assertIsNone(self.mcts._root._parent)
# Assert that the next best move according to the root is (18, 17), according to the
# dummy policy below.
self.assertEqual((18, 17), self.mcts._root.select()[0])
# A distribution over positions that is smallest at (0,0) and largest at (18,18)
dummy_distribution = np.arange(361, dtype=np.float64)
dummy_distribution = dummy_distribution / dummy_distribution.sum()
def dummy_policy(state):
moves = state.get_legal_moves(include_eyes=False)
return list(zip(moves, dummy_distribution))
# Rollout is a clone of the policy function.
dummy_rollout = dummy_policy
def dummy_value(state):
# it's not very confident
return 0.0
if __name__ == '__main__':
unittest.main()
|
the-stack_106_17560
|
import speech_recognition
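# recognize_sphinx() relies on the optional pocketsphinx package for offline
# recognition; recognize_google() needs network access instead.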
recognizer = speech_recognition.Recognizer()
def listen():
with speech_recognition.Microphone() as source:
recognizer.adjust_for_ambient_noise(source)
audio = recognizer.listen(source)
try:
return recognizer.recognize_sphinx(audio)
# or: return recognizer.recognize_google(audio)
except speech_recognition.UnknownValueError:
print("Could not understand audio")
except speech_recognition.RequestError as e:
print("Recog Error; {0}".format(e))
return ""
print("Say something!")
print("I heard you say " + listen())
|
the-stack_106_17561
|
#!/usr/bin/python
#-*- coding: utf-8 -*-
# Cache directory
CACHE_PATH = "./cache/"
# Path of the cached contract
CACHE_CONTRACT_PATH = "./cache/temp.sol"
# Path of the cached path-info file
CACHE_PATHINFO_PATH = "./cache/temp_sol.json"
# Path of the cached abstract syntax tree file
CACHE_AST_PATH = "./cache/temp.sol_json.ast"
# Directory of the source contracts
CONTRACT_PATH = "../../contractExtractor/NonpublicVarAccessdByPublicFuncExtractor/result"
# Directory of the injection information
INJECT_INFO_PATH = "../../contractExtractor/NonpublicVarAccessdByPublicFuncExtractor/injectInfo"
# Suffix of .sol files
SOL_SUFFIX = ".sol"
# Suffix of json AST files
JSON_AST_SUFFIX = "_json.ast"
from NonpublicVarAccessdByPublicFuncInjector import NonpublicVarAccessdByPublicFuncInjector  # the injector
import os
import time
class NonpublicVarAccessdByPublicFunc:
def __init__(self, _injectInfo, _contractPath):
		self.injectInfo = _injectInfo  # path info of all target files
self.targetInfoFile = self.targetPathInfo(self.injectInfo)
		self.targetContract = self.targetContractList(self.targetInfoFile, _contractPath)  # list of contracts
		self.targetAstFile = self.targetAstList(self.targetInfoFile, _contractPath)  # list of AST files
self.nowNum = 0
try:
			os.mkdir(CACHE_PATH)  # create the cache folder
except:
#print("The cache folder already exists.")
pass
def targetAstList(self, _fileList, _contractPath):
result = list()
for filename in _fileList:
jsonAstName = os.path.splitext(os.path.split(filename)[1])[0] + SOL_SUFFIX + JSON_AST_SUFFIX
result.append(os.path.join(_contractPath, jsonAstName))
return result
def targetContractList(self, _fileList, _contractPath):
result = list()
for filename in _fileList:
contractName = os.path.splitext(os.path.split(filename)[1])[0] + SOL_SUFFIX
result.append(os.path.join(_contractPath, contractName))
return result
def targetPathInfo(self, _pathInfo):
fileList = os.listdir(_pathInfo)
result = list()
for item in fileList:
result.append(os.path.join(_pathInfo, item))
return result
def getInfoFile(self, _contractName, _infoFileList):
preName = os.path.splitext(os.path.split(_contractName)[1])[0]
for file in _infoFileList:
if preName in file:
return file
else:
continue
return str()
def getAstFile(self, _contractName, _astFileList):
preName = os.path.splitext(os.path.split(_contractName)[1])[0]
for file in _astFileList:
if preName in file:
return file
else:
continue
return str()
def cacheFile(self, _contract, _pathInfo, _astPath):
try:
with open(CACHE_CONTRACT_PATH, "w+", encoding = "utf-8") as f:
f.write(open(_contract).read())
with open(CACHE_PATHINFO_PATH, "w+", encoding = "utf-8") as f:
f.write(open(_pathInfo).read())
with open(CACHE_AST_PATH, "w+", encoding = "utf-8") as f:
f.write(open(_astPath).read())
return
except:
raise Exception("Failed to cache contract.")
def run(self):
stime = time.time()
contractNum = 0
for contractFile in self.targetContract:
contractNum += 1
try:
				# 1. Get the source code, AST and injection info of each contract
pathInfoFile = self.getInfoFile(contractFile, self.targetInfoFile)
astFile = self.getAstFile(contractFile, self.targetAstFile)
print("\r\t Injecting contract: ", os.path.split(contractFile)[1], end = "")
				# 2. Cache the current files
self.cacheFile(contractFile, pathInfoFile, astFile)
				# 3. Inject the bug according to the target paths and the source code
NI = NonpublicVarAccessdByPublicFuncInjector(CACHE_CONTRACT_PATH, CACHE_PATHINFO_PATH, astFile, self.getOriginalContractName(contractFile))
NI.inject()
NI.output()
				# 4. Report progress
self.nowNum += 1
#print("\r当前注入进度: %.2f" % (self.nowNum / len(self.targetContract)))
except Exception as e:
self.nowNum += 1
#print(e)
continue
print()
#print(time.time() - stime)
#print(contractNum)
def getOriginalContractName(self, _contractPath):
return os.path.splitext(os.path.split(_contractPath)[1])[0]
# Unit test
if __name__ == "__main__":
nvabpf = NonpublicVarAccessdByPublicFunc(INJECT_INFO_PATH, CONTRACT_PATH)
nvabpf.run()
|
the-stack_106_17562
|
from setuptools import setup
description = "Simple command line pomodoro app with visualization of statistics"
long_description = """
Simple command line pomodoro app with visualization of statistics.
The Pomodoro technique is a time management technique for improving productivity.
Check (https://en.wikipedia.org/wiki/Pomodoro_Technique) for more details.
The code is based on : http://code.activestate.com/recipes/577358-pomodoro-timer/
"""
setup(
name="pomodoro-cli",
version="0.2.0",
author="mehdi cherti",
author_email="[email protected]",
description=description,
long_description=long_description,
license="MIT",
keywords="pomodoro productivity",
url="https://github.com/mehdidc/pomodoro",
zip_safe=False, # the package can run out of an .egg file
classifiers=['Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'License :: OSI Approved',
'Programming Language :: Python',
'Topic :: Software Development',
'Topic :: Scientific/Engineering',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS'],
platforms='any',
scripts=['pomodoro', 'pomostat'],
include_package_data=True,
data_files=['clock.mp3'],
install_requires=['clize', 'sigtools', 'pandas', 'matplotlib'],
)
|
the-stack_106_17564
|
"""保守的指针规划算法"""
import math
from typing import Any
from .algo_base import TouchAction, TouchEvent
from chart import Chart
from note import Note
from utils import distance_of, recalc_pos
class Pointer:
pid: int
pos: tuple[float, float]
timestamp: int
occupied: int
def __init__(self, pid: int, pos: tuple[float, float], timestamp: int):
self.pid = pid
self.pos = pos
self.timestamp = timestamp
self.occupied = 0
def __repr__(self):
return f'Pointer(pid={self.pid}, pos={self.pos}, timestamp={self.timestamp})'
class Pointers:
max_pointer_id: int
pointers: dict[int, Pointer]
begin: int
direction: int
now: int
recycled: set[int]
unused: dict[int, Pointer]
unused_now: dict[int, Pointer]
mark_as_released: list[int]
def __init__(self, begin: int, direction: int = 1):
self.begin = begin
self.max_pointer_id = begin
self.pointers = {}
self.recycled = set()
self.unused = {}
self.direction = direction
self.unused_now = {}
self.mark_as_released = []
def _new(self) -> int:
if not self.recycled:
pid = self.max_pointer_id
self.max_pointer_id += self.direction
return pid
return self.recycled.pop()
def _del(self, pointer_id: int):
self.recycled.add(pointer_id)
if len(self.recycled) == (self.max_pointer_id - self.begin) / self.direction:
self.max_pointer_id = self.begin
self.recycled.clear()
def acquire(self, note: dict[str, Any], new: bool = True) -> tuple[int, bool]:
event_id = note['i']
if event_id in self.pointers:
ptr = self.pointers[event_id]
ptr.timestamp = self.now
ptr.pos = note['p']
return ptr.pid, False
if not new:
nearest_distance = 200
nearest_pid = None
for pid, ptr in self.unused.items():
if (d := distance_of(note['p'], ptr.pos)) < nearest_distance:
nearest_pid = ptr.pid
nearest_distance = d
if nearest_pid is not None:
ptr = self.unused[nearest_pid]
del self.unused[nearest_pid]
ptr.timestamp = self.now
ptr.pos = note['p']
ptr.occupied = 0
self.pointers[event_id] = ptr
return ptr.pid, False
pid = self._new()
self.pointers[event_id] = Pointer(pid, note['p'], self.now)
return pid, True
def release(self, note: dict[str, Any]):
event_id = note['i']
if event_id in self.pointers:
ptr = self.pointers[event_id]
self.unused_now[ptr.pid] = ptr
self.mark_as_released.append(event_id)
def recycle(self, is_keyframe: bool):
marked = []
for event_id in self.mark_as_released:
del self.pointers[event_id]
self.mark_as_released = []
if is_keyframe:
for ptr in self.unused.values():
ptr.occupied += 1
if ptr.occupied > 0:
yield ptr.pid, ptr.timestamp + 1, ptr.pos
self._del(ptr.pid)
marked.append(ptr.pid)
for pid in marked:
del self.unused[pid]
self.unused |= self.unused_now
self.unused_now = {}
if len(self.unused) + len(self.pointers) > 10:
raise RuntimeError(
f'unused: {len(self.unused)} & pointers: {len(self.pointers)} are on screen @ {self.now}')
def finish(self):
for ptr in self.unused.values():
yield ptr.pid, ptr.timestamp + 1, ptr.pos
for ptr in self.unused_now.values():
yield ptr.pid, ptr.timestamp + 1, ptr.pos
for ptr in self.pointers.values():
yield ptr.pid, ptr.timestamp + 1, ptr.pos
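# Hedged illustration (not used by solve below): the Pointers bookkeeping can
# be exercised with hand-written note dicts, where 'i' is the event id and
# 'p' the screen position; the helper name and the values are made up.
def _pointers_demo() -> None:
    ptrs = Pointers(0)
    ptrs.now = 0
    pid, is_new = ptrs.acquire({'i': 1, 'p': (100.0, 200.0)})  # a DOWN event would use pid
    ptrs.release({'i': 1, 'p': (100.0, 200.0)})
    for released_pid, ts, pos in ptrs.recycle(is_keyframe=True):
        pass  # an UP touch event would be emitted for each recycled pointer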
def solve(chart: Chart) -> dict[int, list[TouchEvent]]:
flick_start = -30
flick_end = 30
flick_scale_factor = 100
frames: dict[int, list] = {}
result: dict[int, list[TouchEvent]] = {}
def insert(milliseconds: int, event: dict):
if milliseconds not in frames:
frames[milliseconds] = []
frames[milliseconds].append(event)
def ins(milliseconds: int, event: TouchEvent):
if milliseconds not in result:
result[milliseconds] = []
result[milliseconds].append(event)
current_event_id = 0
def flick_pos(px: float, py: float, offset: int) -> tuple[float, float]:
return px + math.sin(offset * math.pi / 10) * flick_scale_factor * sa, py + math.sin(
offset * math.pi / 10) * flick_scale_factor * ca
    print('Collecting frames...', end='')
    # Collect the frames
for line in chart.judge_lines:
for note in line.notes_above + line.notes_below:
ms = round(line.seconds(note.time) * 1000)
off_x = note.x * 72
x, y = line.pos(note.time)
alpha = - line.angle(note.time) * math.pi / 180
sa = math.sin(alpha)
ca = math.cos(alpha)
px, py = x + off_x * ca, y + off_x * sa
if note.typ == Note.TAP:
insert(ms, {
'a': 'tap',
'p': recalc_pos((px, py), sa, ca),
'i': current_event_id
})
elif note.typ == Note.DRAG:
insert(ms, {
'a': 'drag',
'p': recalc_pos((px, py), sa, ca),
'i': current_event_id
})
elif note.typ == Note.FLICK:
insert(ms + flick_start, {
'a': 'flick_start',
# 'p': flick_pos(*line.pos_of(note, line.time(ms + flick_start) / 1000), flick_start),
'p': recalc_pos(flick_pos(px, py, flick_start), sa, ca),
'i': current_event_id
})
for offset in range(flick_start + 1, flick_end):
insert(ms + offset, {
'a': 'flick',
# 'p': flick_pos(*line.pos_of(note, line.time(ms + offset) / 1000), offset),
'p': recalc_pos(flick_pos(px, py, offset), sa, ca),
'i': current_event_id
})
insert(ms + flick_end, {
'a': 'flick_end',
# 'p': flick_pos(*line.pos_of(note, line.time(ms + flick_end) / 1000), flick_end),
'p': recalc_pos(flick_pos(px, py, flick_end), sa, ca),
'i': current_event_id
})
elif note.typ == Note.HOLD:
hold_ms = math.ceil(line.seconds(note.hold) * 1000)
insert(ms, {
'a': 'hold_start',
'p': recalc_pos((px, py), sa, ca),
'i': current_event_id
})
for offset in range(1, hold_ms):
insert(ms + offset, {
'a': 'hold',
'p': recalc_pos(line.pos_of(note, line.time((ms + offset) / 1000)), sa, ca),
'i': current_event_id
})
insert(ms + hold_ms, {
'a': 'hold_end',
'p': recalc_pos(line.pos_of(note, line.time((ms + hold_ms) / 1000)), sa, ca),
'i': current_event_id
})
current_event_id += 1
    print(f'Done. The chart has {len(frames)} frames in total.')
pointers = Pointers(0)
    print('Planning touch events...', end='')
for ms, frame in sorted(frames.items()):
pointers.now = ms
is_keyframe = False
for note in frame:
action = note['a']
if action == 'tap':
ins(ms, TouchEvent(note['p'], TouchAction.DOWN, pointers.acquire(note)[0]))
pointers.release(note)
is_keyframe = True
elif action == 'drag':
pid, new = pointers.acquire(note, new=False)
act = TouchAction.DOWN if new else TouchAction.MOVE
ins(ms, TouchEvent(note['p'], act, pid))
pointers.release(note)
# is_keyframe = True
elif action == 'flick_start':
pid, new = pointers.acquire(note, new=False)
act = TouchAction.DOWN if new else TouchAction.MOVE
ins(ms, TouchEvent(note['p'], act, pid))
elif action == 'flick':
ins(ms, TouchEvent(note['p'], TouchAction.MOVE, pointers.acquire(note)[0]))
elif action == 'flick_end':
ins(ms, TouchEvent(note['p'], TouchAction.MOVE, pointers.acquire(note)[0]))
pointers.release(note)
elif action == 'hold_start':
ins(ms, TouchEvent(note['p'], TouchAction.DOWN, pointers.acquire(note)[0]))
is_keyframe = True
elif action == 'hold':
ins(ms, TouchEvent(note['p'], TouchAction.MOVE, pointers.acquire(note)[0]))
elif action == 'hold_end':
ins(ms, TouchEvent(note['p'], TouchAction.MOVE, pointers.acquire(note)[0]))
pointers.release(note)
for pid, ts, pos in pointers.recycle(is_keyframe):
ins(ts, TouchEvent(pos, TouchAction.UP, pid))
for pid, ts, pos in pointers.finish():
ins(ts, TouchEvent(pos, TouchAction.UP, pid))
    print('Planning finished.')
return result
|
the-stack_106_17572
|
"""Unit tests for case detection and application."""
from case_restorer import case
import unittest
class CaseTests(unittest.TestCase):
"""Tests of the helper methods for the casing system."""
# Helpers.
def assertEmpty(self, arg):
self.assertTrue(not arg)
def assertSimpleTcEqual(self, token, expected_case):
(tc, pattern) = case.get_tc(token)
self.assertEqual(tc, expected_case)
self.assertEmpty(pattern)
def assertMixedTcEqual(self, token, expected_pattern):
(tc, pattern) = case.get_tc(token)
self.assertEqual(tc, case.TokenCase.MIXED)
self.assertEqual(pattern, expected_pattern)
# Data definitions.
lower = [
"über",
"año",
"cöoperation",
"résumé",
"être",
"očudit",
"pająk",
"fracoð",
"þæt",
"açai",
"ealneġ",
"2pac",
]
upper = [
"ÜBER",
"AÑO",
"CÖOPERATION",
"RÉSUMÉ",
"ÊTRE",
"OČUDIT",
"PAJĄK",
"FRACOÐ",
"ÞÆT",
"AÇAI",
"EALNEĠ",
"2PAC",
]
title = [
"Über",
"Año",
"Cöoperation",
"Résumé",
"Être",
"Očudit",
"Pająk",
"Fracoð",
"Þæt",
"Açai",
"Ealneġ",
"2Pac",
]
mixed = [
(
"SMiLE",
[
case.CharCase.UPPER,
case.CharCase.UPPER,
case.CharCase.LOWER,
case.CharCase.UPPER,
case.CharCase.UPPER,
],
),
(
"m.A.A.d",
[
case.CharCase.LOWER,
case.CharCase.DC,
case.CharCase.UPPER,
case.CharCase.DC,
case.CharCase.UPPER,
case.CharCase.DC,
case.CharCase.LOWER,
],
),
(
"iFoo",
[
case.CharCase.LOWER,
case.CharCase.UPPER,
case.CharCase.LOWER,
case.CharCase.LOWER,
],
),
(
"IJmuiden",
[
case.CharCase.UPPER,
case.CharCase.UPPER,
case.CharCase.LOWER,
case.CharCase.LOWER,
case.CharCase.LOWER,
case.CharCase.LOWER,
case.CharCase.LOWER,
case.CharCase.LOWER,
],
),
(
"tRuEcasIng",
[
case.CharCase.LOWER,
case.CharCase.UPPER,
case.CharCase.LOWER,
case.CharCase.UPPER,
case.CharCase.LOWER,
case.CharCase.LOWER,
case.CharCase.LOWER,
case.CharCase.UPPER,
case.CharCase.LOWER,
case.CharCase.LOWER,
],
),
]
# Basics.
def testLower(self):
for token in self.lower:
self.assertSimpleTcEqual(token, case.TokenCase.LOWER)
def testUpper(self):
for token in self.upper:
self.assertSimpleTcEqual(token, case.TokenCase.UPPER)
def testTitle(self):
for token in self.title:
self.assertSimpleTcEqual(token, case.TokenCase.TITLE)
def testMixed(self):
for (token, expected_pattern) in self.mixed:
self.assertMixedTcEqual(token, expected_pattern)
# Conversions.
def testLowerToUpper(self):
for (ltoken, utoken) in zip(self.lower, self.upper):
self.assertEqual(
case.apply_tc(utoken, case.TokenCase.LOWER), ltoken
)
self.assertEqual(
case.apply_tc(ltoken, case.TokenCase.UPPER), utoken
)
def testLowerToTitle(self):
for (ltoken, ttoken) in zip(self.lower, self.title):
self.assertEqual(
case.apply_tc(ttoken, case.TokenCase.LOWER), ltoken
)
self.assertEqual(
case.apply_tc(ltoken, case.TokenCase.TITLE), ttoken
)
def testLowerToMixed(self):
for (token, pattern) in self.mixed:
self.assertEqual(
case.apply_tc(token.casefold(), case.TokenCase.MIXED, pattern),
token,
)
# Hard cases.
def testFiLigature(self):
finger = "finger"
self.assertEqual(case.apply_tc(finger, case.TokenCase.UPPER), "FINGER")
self.assertEqual(case.apply_tc(finger, case.TokenCase.TITLE), "Finger")
def testEszet(self):
strasse = "Straße"
self.assertEqual(
case.apply_tc(strasse, case.TokenCase.UPPER), "STRASSE"
)
def testUnicodeFonts(self):
quirky_titlecase = ["The", "𝐓𝐡𝐞", "𝕿𝖍𝖊", "𝑻𝒉𝒆", "𝓣𝓱𝓮", "𝕋𝕙𝕖", "𝚃𝚑𝚎"]
for token in quirky_titlecase:
self.assertSimpleTcEqual(token, case.TokenCase.TITLE)
def testNumbers(self):
numbers = ["212", "97000", "123", "١٢٣"]
for token in numbers:
self.assertSimpleTcEqual(token, case.TokenCase.DC)
def testFuzz(self):
fuzz = [
# Wide characters.
"田中さんにあげて下さい" "パーティーへ行かないか",
"和製漢語",
"部落格",
"사회과학원어학연구소",
"찦차를타고온펲시맨과쑛다리똠방각하",
"社會科學院語學研究所",
"울란바토르",
"𠜎𠜱𠝹𠱓𠱸𠲖𠳏",
"新しい日の誕生",
# Emoji and kaomoji.
"ヽ༼ຈل͜ຈ༽ノ ヽ༼ຈل͜ຈ༽ノ",
"(。◕ ∀ ◕。)",
"`ィ( ́∀`∩",
"__ロ(,_,*)",
"・( ̄∀ ̄)・:*:",
"・✿ヾ╲(。◕‿◕。)╱✿・゚",
"(╯°□°)╯( ┻━┻)",
"😍",
"👾 🙇 💁 🙅 🙆 🙋 🙎 🙍",
"🐵 🙈 🙉 ❤️ 💔 💌 💕 💞 💓 💗 💖 💘 💝 💟 💜 💛 💚 💙",
]
for token in fuzz:
self.assertSimpleTcEqual(token, case.TokenCase.DC)
if __name__ == "__main__":
unittest.main()
|
the-stack_106_17573
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2019 Palo Alto Networks, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: panos_vlan_interface
short_description: configure VLAN interfaces
description:
- Configure VLAN interfaces.
author: "Garfield Lee Freeman (@shinmog)"
version_added: '1.0.0'
requirements:
- pan-python
- pandevice
notes:
- Checkmode is supported.
- If the PAN-OS device is a firewall and I(vsys) is not specified, then
the vsys will default to I(vsys=vsys1).
extends_documentation_fragment:
- paloaltonetworks.panos.fragments.transitional_provider
- paloaltonetworks.panos.fragments.state
- paloaltonetworks.panos.fragments.vsys_import
- paloaltonetworks.panos.fragments.template_only
options:
name:
description:
- Name of the interface to configure.
- This should be in the format "vlan.<some_number>".
type: str
required: true
ip:
description:
- List of static IP addresses.
type: list
elements: str
ipv6_enabled:
description:
- Enable IPv6.
type: bool
management_profile:
description:
- Interface management profile name.
type: str
mtu:
description:
- MTU for layer3 interface.
type: int
adjust_tcp_mss:
description:
- Adjust TCP MSS for layer3 interface.
type: bool
netflow_profile:
description:
- Netflow profile for layer3 interface.
type: str
comment:
description:
- Interface comment.
type: str
ipv4_mss_adjust:
description:
- (7.1+) TCP MSS adjustment for IPv4.
type: int
ipv6_mss_adjust:
description:
- (7.1+) TCP MSS adjustment for IPv6.
type: int
enable_dhcp:
description:
- Enable DHCP on this interface.
type: bool
create_dhcp_default_route:
description:
- Whether or not to add default route with router learned via DHCP.
type: bool
dhcp_default_route_metric:
description:
- Metric for the DHCP default route.
type: int
zone_name:
description:
- Name of the zone for the interface.
- If the zone does not exist it is created.
- If the zone already exists it should be I(mode=layer3).
type: str
vlan_name:
description:
- The VLAN to put this interface in.
- If the VLAN does not exist it is created.
type: str
vr_name:
description:
- Name of the virtual router
type: str
'''
EXAMPLES = '''
# Create vlan.2 as DHCP
- name: enable DHCP client on vlan.2 in zone public
panos_vlan_interface:
provider: '{{ provider }}'
name: "vlan.2"
zone_name: "public"
enable_dhcp: true
    create_dhcp_default_route: true
# Set vlan.7 with a static IP
- name: Configure vlan.7
panos_vlan_interface:
provider: '{{ provider }}'
name: "vlan.7"
ip: ["10.1.1.1/24"]
management_profile: "allow ping"
vlan_name: "dmz"
zone_name: "L3-untrust"
vr_name: "default"
'''
RETURN = '''
# Default return values
'''
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.paloaltonetworks.panos.plugins.module_utils.panos import get_connection
try:
from panos.network import Vlan
from panos.network import VlanInterface
from panos.errors import PanDeviceError
except ImportError:
try:
from pandevice.network import Vlan
from pandevice.network import VlanInterface
from pandevice.errors import PanDeviceError
except ImportError:
pass
def main():
helper = get_connection(
vsys_importable=True,
template=True,
with_classic_provider_spec=True,
with_state=True,
min_pandevice_version=(0, 9, 0),
argument_spec=dict(
name=dict(required=True),
ip=dict(type='list', elements='str'),
ipv6_enabled=dict(type='bool'),
management_profile=dict(),
mtu=dict(type='int'),
adjust_tcp_mss=dict(type='bool'),
netflow_profile=dict(),
comment=dict(),
ipv4_mss_adjust=dict(type='int'),
ipv6_mss_adjust=dict(type='int'),
enable_dhcp=dict(type='bool'),
create_dhcp_default_route=dict(type='bool'),
dhcp_default_route_metric=dict(type='int'),
zone_name=dict(),
vlan_name=dict(),
vr_name=dict(),
),
)
module = AnsibleModule(
argument_spec=helper.argument_spec,
supports_check_mode=True,
required_one_of=helper.required_one_of,
)
# Get the object params.
spec = {
'name': module.params['name'],
'ip': module.params['ip'],
'ipv6_enabled': module.params['ipv6_enabled'],
'management_profile': module.params['management_profile'],
'mtu': module.params['mtu'],
'adjust_tcp_mss': module.params['adjust_tcp_mss'],
'netflow_profile': module.params['netflow_profile'],
'comment': module.params['comment'],
'ipv4_mss_adjust': module.params['ipv4_mss_adjust'],
'ipv6_mss_adjust': module.params['ipv6_mss_adjust'],
'enable_dhcp': True if module.params['enable_dhcp'] else None,
'create_dhcp_default_route': True if module.params['create_dhcp_default_route'] else None,
'dhcp_default_route_metric': module.params['dhcp_default_route_metric'],
}
# Get other info.
zone_name = module.params['zone_name']
vlan_name = module.params['vlan_name']
vr_name = module.params['vr_name']
vsys = module.params['vsys']
if vsys is None:
vsys = 'vsys1'
module.params['vsys'] = vsys
# Verify libs are present, get the parent object.
parent = helper.get_pandevice_parent(module)
# Retrieve the current config.
try:
listing = VlanInterface.refreshall(
parent, add=False, matching_vsys=False)
except PanDeviceError as e:
module.fail_json(msg='Failed refresh: {0}'.format(e))
# Build the object based on the user spec.
obj = VlanInterface(**spec)
parent.add(obj)
# Which action should we take on the interface?
changed = False
reference_params = {
'refresh': True,
'update': not module.check_mode,
'return_type': 'bool',
}
if module.params['state'] == 'present':
for item in listing:
if item.name != obj.name:
continue
# Interfaces have children, so don't compare them.
if not item.equal(obj, compare_children=False):
changed = True
obj.extend(item.children)
if not module.check_mode:
try:
obj.apply()
except PanDeviceError as e:
module.fail_json(msg='Failed apply: {0}'.format(e))
break
else:
changed = True
if not module.check_mode:
try:
obj.create()
except PanDeviceError as e:
module.fail_json(msg='Failed create: {0}'.format(e))
# Set references.
try:
changed |= obj.set_vsys(vsys, **reference_params)
changed |= obj.set_vlan_interface(vlan_name, **reference_params)
changed |= obj.set_zone(zone_name, mode='layer3', **reference_params)
changed |= obj.set_virtual_router(vr_name, **reference_params)
except PanDeviceError as e:
module.fail_json(msg='Failed setref: {0}'.format(e))
elif module.params['state'] == 'absent':
# Remove references.
try:
changed |= obj.set_virtual_router(None, **reference_params)
changed |= obj.set_zone(None, mode='layer3', **reference_params)
changed |= obj.set_vlan_interface(None, **reference_params)
changed |= obj.set_vsys(None, **reference_params)
except PanDeviceError as e:
module.fail_json(msg='Failed setref: {0}'.format(e))
# Remove the interface.
if obj.name in [x.name for x in listing]:
changed = True
if not module.check_mode:
try:
obj.delete()
except PanDeviceError as e:
module.fail_json(msg='Failed delete: {0}'.format(e))
# Done!
module.exit_json(changed=changed, msg='done')
if __name__ == '__main__':
main()
|
the-stack_106_17574
|
import re
from base64 import b64encode
from functools import partial
from typing import Any, Callable, Dict, Optional, Tuple, Union
from urllib.parse import quote_plus
from hypothesis import strategies as st
from hypothesis_jsonschema import from_schema
from requests.auth import _basic_auth_str
from ... import utils
from ...exceptions import InvalidSchema
from ...hooks import GLOBAL_HOOK_DISPATCHER, HookContext, HookDispatcher
from ...models import Case, Endpoint
from ...stateful import Feedback
PARAMETERS = frozenset(("path_parameters", "headers", "cookies", "query", "body", "form_data"))
SLASH = "/"
STRING_FORMATS = {}
def register_string_format(name: str, strategy: st.SearchStrategy) -> None:
"""Register a new strategy for generating data for specific string "format"."""
if not isinstance(name, str):
raise TypeError(f"name must be of type {str}, not {type(name)}")
if not isinstance(strategy, st.SearchStrategy):
raise TypeError(f"strategy must be of type {st.SearchStrategy}, not {type(strategy)}")
STRING_FORMATS[name] = strategy
def init_default_strategies() -> None:
"""Register all default "format" strategies."""
register_string_format("binary", st.binary())
register_string_format("byte", st.binary().map(lambda x: b64encode(x).decode()))
def make_basic_auth_str(item: Tuple[str, str]) -> str:
return _basic_auth_str(*item)
latin1_text = st.text(alphabet=st.characters(min_codepoint=0, max_codepoint=255))
register_string_format("_basic_auth", st.tuples(latin1_text, latin1_text).map(make_basic_auth_str)) # type: ignore
register_string_format("_bearer_auth", st.text().map("Bearer {}".format))
def is_valid_header(headers: Dict[str, Any]) -> bool:
"""Verify if the generated headers are valid."""
for name, value in headers.items():
if not utils.is_latin_1_encodable(value):
return False
if utils.has_invalid_characters(name, value):
return False
return True
def is_illegal_surrogate(item: Any) -> bool:
return isinstance(item, str) and bool(re.search(r"[\ud800-\udfff]", item))
def is_valid_query(query: Dict[str, Any]) -> bool:
"""Surrogates are not allowed in a query string.
`requests` and `werkzeug` will fail to send it to the application.
"""
for name, value in query.items():
if is_illegal_surrogate(name) or is_illegal_surrogate(value):
return False
return True
def get_case_strategy(
endpoint: Endpoint, hooks: Optional[HookDispatcher] = None, feedback: Optional[Feedback] = None
) -> st.SearchStrategy:
"""Create a strategy for a complete test case.
Path & endpoint are static, the others are JSON schemas.
"""
strategies = {}
static_kwargs: Dict[str, Any] = {"feedback": feedback}
for parameter in PARAMETERS:
value = getattr(endpoint, parameter)
if value is not None:
location = {"headers": "header", "cookies": "cookie", "path_parameters": "path"}.get(parameter, parameter)
strategies[parameter] = prepare_strategy(parameter, value, endpoint.get_hypothesis_conversions(location))
else:
static_kwargs[parameter] = None
return _get_case_strategy(endpoint, static_kwargs, strategies, hooks)
def to_bytes(value: Union[str, bytes, int, bool, float]) -> bytes:
return str(value).encode(errors="ignore")
def prepare_form_data(form_data: Dict[str, Any]) -> Dict[str, Any]:
for name, value in form_data.items():
if isinstance(value, list):
form_data[name] = [to_bytes(item) if not isinstance(item, (bytes, str, int)) else item for item in value]
elif not isinstance(value, (bytes, str, int)):
form_data[name] = to_bytes(value)
return form_data
def prepare_headers_schema(value: Dict[str, Any]) -> Dict[str, Any]:
"""Improve schemas for headers.
Headers are strings, but it is not always explicitly defined in the schema. By preparing them properly we
can achieve significant performance improvements for such cases.
For reference (my machine) - running a single test with 100 examples with the resulting strategy:
- without: 4.37 s
- with: 294 ms
It also reduces the number of cases when the "filter_too_much" health check fails during testing.
"""
for schema in value.get("properties", {}).values():
schema.setdefault("type", "string")
return value
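def _example_prepare_headers_schema() -> Dict[str, Any]:
    # Hedged illustration (not used elsewhere): a header schema without an
    # explicit type gets "type": "string" filled in by prepare_headers_schema.
    schema = {"properties": {"X-API-Key": {}}, "required": ["X-API-Key"]}
    return prepare_headers_schema(schema)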
def prepare_strategy(parameter: str, value: Dict[str, Any], map_func: Optional[Callable]) -> st.SearchStrategy:
"""Create a strategy for a schema and add location-specific filters & maps."""
if parameter in ("headers", "cookies"):
value = prepare_headers_schema(value)
if parameter == "form_data":
value.setdefault("type", "object")
strategy = from_schema(value, custom_formats=STRING_FORMATS)
if map_func is not None:
strategy = strategy.map(map_func)
if parameter == "path_parameters":
strategy = strategy.filter(filter_path_parameters).map(quote_all) # type: ignore
elif parameter in ("headers", "cookies"):
strategy = strategy.filter(is_valid_header) # type: ignore
elif parameter == "query":
strategy = strategy.filter(is_valid_query) # type: ignore
elif parameter == "form_data":
strategy = strategy.map(prepare_form_data) # type: ignore
return strategy
def filter_path_parameters(parameters: Dict[str, Any]) -> bool:
"""Single "." chars and empty strings "" are excluded from path by urllib3.
    A path containing "/" or "%2F" will lead to ambiguous path resolution in
    many frameworks and libraries; such behaviour has been observed in both
    WSGI and ASGI applications.
    In this case one variable in the path template will be empty, which will lead to a 404 in most cases.
    Because of this, such cases don't bring much value and might lead to false-positive results in Schemathesis runs.
"""
path_parameter_blacklist = (".", SLASH, "")
return not any(
(value in path_parameter_blacklist or is_illegal_surrogate(value) or isinstance(value, str) and SLASH in value)
for value in parameters.values()
)
def quote_all(parameters: Dict[str, Any]) -> Dict[str, Any]:
"""Apply URL quotation for all values in a dictionary."""
return {key: quote_plus(value) if isinstance(value, str) else value for key, value in parameters.items()}
def _get_case_strategy(
endpoint: Endpoint,
extra_static_parameters: Dict[str, Any],
strategies: Dict[str, st.SearchStrategy],
hook_dispatcher: Optional[HookDispatcher] = None,
) -> st.SearchStrategy[Case]:
static_parameters: Dict[str, Any] = {"endpoint": endpoint, **extra_static_parameters}
if endpoint.schema.validate_schema and endpoint.method.upper() == "GET":
if endpoint.body is not None:
raise InvalidSchema("Body parameters are defined for GET request.")
static_parameters["body"] = None
strategies.pop("body", None)
context = HookContext(endpoint)
_apply_hooks(strategies, GLOBAL_HOOK_DISPATCHER, context)
_apply_hooks(strategies, endpoint.schema.hooks, context)
if hook_dispatcher is not None:
_apply_hooks(strategies, hook_dispatcher, context)
return st.builds(partial(Case, **static_parameters), **strategies)
def _apply_hooks(strategies: Dict[str, st.SearchStrategy], dispatcher: HookDispatcher, context: HookContext) -> None:
for key in strategies:
for hook in dispatcher.get_all_by_name(f"before_generate_{key}"):
# Get the strategy on each hook to pass the first hook output as an input to the next one
strategy = strategies[key]
strategies[key] = hook(context, strategy)
|
the-stack_106_17575
|
#!/usr/bin/env python3
import sys
import os
sys.path.append(os.path.abspath(os.path.dirname(__file__) + '/..'))
from asyncdbus import MessageBus
import anyio
async def main():
async with MessageBus().connect() as bus:
# the introspection xml would normally be included in your project, but
# this is convenient for development
introspection = await bus.introspect('org.mpris.MediaPlayer2.vlc',
'/org/mpris/MediaPlayer2')
obj = await bus.get_proxy_object('org.mpris.MediaPlayer2.vlc', '/org/mpris/MediaPlayer2',
introspection)
player = await obj.get_interface('org.mpris.MediaPlayer2.Player')
properties = await obj.get_interface('org.freedesktop.DBus.Properties')
# call methods on the interface (this causes the media player to play)
await player.call_play()
volume = await player.get_volume()
print(f'current volume: {volume}, setting to 0.5')
await player.set_volume(0.5)
# listen to signals
def on_properties_changed(interface_name, changed_properties, invalidated_properties):
for changed, variant in changed_properties.items():
print(f'property changed: {changed} - {variant.value}')
properties.on_properties_changed(on_properties_changed)
await anyio.sleep(99999)
anyio.run(main)
|
the-stack_106_17576
|
import uuid
from datetime import datetime
from django.test import TestCase
from pillowtop.es_utils import initialize_index_and_mapping
from corehq.apps.domain.shortcuts import create_domain
from corehq.apps.locations.models import LocationType, SQLLocation
from corehq.apps.userreports.app_manager.helpers import clean_table_name
from corehq.apps.userreports.models import DataSourceConfiguration
from corehq.apps.userreports.pillow import get_location_pillow
from corehq.apps.userreports.tasks import rebuild_indicators
from corehq.apps.userreports.util import get_indicator_adapter
from corehq.apps.users.models import CommCareUser
from corehq.elastic import get_es_new
from corehq.pillows.mappings.user_mapping import USER_INDEX_INFO
from corehq.util.elastic import ensure_index_deleted
from corehq.util.test_utils import trap_extra_setup
class TestLocationDataSource(TestCase):
domain = "delos_corp"
def setUp(self):
self.domain_obj = create_domain(self.domain)
es = get_es_new()
initialize_index_and_mapping(es, USER_INDEX_INFO)
self.region = LocationType.objects.create(domain=self.domain, name="region")
self.town = LocationType.objects.create(domain=self.domain, name="town", parent_type=self.region)
self.data_source_config = DataSourceConfiguration(
domain=self.domain,
display_name='Locations in Westworld',
referenced_doc_type='Location',
table_id=clean_table_name(self.domain, str(uuid.uuid4().hex)),
configured_filter={},
configured_indicators=[{
"type": "expression",
"expression": {
"type": "property_name",
"property_name": "name"
},
"column_id": "location_name",
"display_name": "location_name",
"datatype": "string"
}],
)
self.data_source_config.validate()
self.data_source_config.save()
self.pillow = get_location_pillow(ucr_configs=[self.data_source_config])
self.pillow.get_change_feed().get_latest_offsets()
def tearDown(self):
ensure_index_deleted(USER_INDEX_INFO.index)
self.domain_obj.delete()
self.data_source_config.delete()
def _make_loc(self, name, location_type):
return SQLLocation.objects.create(
domain=self.domain, name=name, site_code=name, location_type=location_type)
def assertDataSourceAccurate(self, expected_locations):
adapter = get_indicator_adapter(self.data_source_config)
query = adapter.get_query_object()
data_source = query.all()
        self.assertCountEqual(
expected_locations,
[row[-1] for row in data_source]
)
def test_location_data_source(self):
self._make_loc("Westworld", self.region)
sweetwater = self._make_loc("Sweetwater", self.town)
las_mudas = self._make_loc("Las Mudas", self.town)
rebuild_indicators(self.data_source_config._id)
self.assertDataSourceAccurate(["Westworld", "Sweetwater", "Las Mudas"])
# Insert new location
since = self.pillow.get_change_feed().get_latest_offsets()
self._make_loc("Blood Arroyo", self.town)
# Change an existing location
sweetwater.name = "Pariah"
sweetwater.save()
# Process both changes together and verify that they went through
self.pillow.process_changes(since=since, forever=False)
self.assertDataSourceAccurate(["Westworld", "Pariah", "Las Mudas", "Blood Arroyo"])
# Delete a location
since = self.pillow.get_change_feed().get_latest_offsets()
las_mudas.delete()
self.pillow.process_changes(since=since, forever=False)
self.assertDataSourceAccurate(["Westworld", "Pariah", "Blood Arroyo"])
|
the-stack_106_17581
|
"""
To learn more, visit my Python tutorial page: https://morvanzhou.github.io/tutorials/
My Youtube Channel: https://www.youtube.com/user/MorvanZhou
Dependencies:
tensorflow: 1.1.0
matplotlib
numpy
"""
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
LR = 0.1
REAL_PARAMS = [1.2, 2.5]
INIT_PARAMS = [[5, 4],
[5, 1],
[2, 4.5]][2]
x = np.linspace(-1, 1, 200, dtype=np.float32) # x data
# Test (1): Visualize a simple linear function with two parameters;
# change LR to 1 to see a different gradient descent pattern.
# y_fun = lambda a, b: a * x + b
# tf_y_fun = lambda a, b: a * x + b
# Test (2): Use TensorFlow as a calibration tool for an empirical formula like the following.
# y_fun = lambda a, b: a * x**3 + b * x**2
# tf_y_fun = lambda a, b: a * x**3 + b * x**2
# Test (3): The simplest two-parameter, two-layer neural net with its local & global minima;
# try different INIT_PARAMS sets to visualize the gradient descent.
def y_fun(a, b):
return np.sin(b*np.cos(a*x))
def tf_y_fun(a, b):
return tf.sin(b*tf.cos(a*x))
noise = np.random.randn(200)/10
y = y_fun(*REAL_PARAMS) + noise # target
# tensorflow graph
a, b = [tf.Variable(initial_value=p, dtype=tf.float32) for p in INIT_PARAMS]
pred = tf_y_fun(a, b)
mse = tf.reduce_mean(tf.square(y-pred))
train_op = tf.train.GradientDescentOptimizer(LR).minimize(mse)
a_list, b_list, cost_list = [], [], []
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for t in range(400):
a_, b_, mse_ = sess.run([a, b, mse])
a_list.append(a_)
b_list.append(b_)
cost_list.append(mse_) # record parameter changes
result, _ = sess.run([pred, train_op]) # training
# visualization codes:
print('a=', a_, 'b=', b_)
plt.figure(1)
plt.scatter(x, y, c='b') # plot data
plt.plot(x, result, 'r-', lw=2) # plot line fitting
# 3D cost figure
fig = plt.figure(2)
ax = Axes3D(fig)
a3D, b3D = np.meshgrid(np.linspace(-2, 7, 30), np.linspace(-2, 7, 30)) # parameter space
cost3D = np.array([np.mean(np.square(y_fun(a_, b_) - y)) for a_, b_ in zip(a3D.flatten(), b3D.flatten())])
cost3D = cost3D.reshape(a3D.shape)
ax.plot_surface(a3D, b3D, cost3D, rstride=1, cstride=1, cmap=plt.get_cmap('rainbow'), alpha=0.5)
ax.scatter(a_list[0], b_list[0], zs=cost_list[0], s=300, c='r') # initial parameter place
ax.set_xlabel('a')
ax.set_ylabel('b')
ax.plot(a_list, b_list, zs=cost_list, zdir='z', c='r', lw=3) # plot 3D gradient descent
plt.show()
sess.close()
|
the-stack_106_17582
|
# -*- coding: utf-8 -*-
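# Scrape Ctrip's domestic flight search endpoint for a single route/date and
# dump each flight's basic info, transfer details, and per-fare/hotel-bundle
# data (as JSON lines) to a local file.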
import json
import urllib.request
headers = {
"Host": "flights.ctrip.com",
"User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:56.0) Gecko/20100101 Firefox/56.0",
"Referer": "http://flights.ctrip.com/booking/SHA-BJS-day-1.html?DDate1=2018-2-16",
"Connection": "keep-alive",
}
url = "http://flights.ctrip.com/domesticsearch/search/SearchFirstRouteFlights?DCity1=SHA&ACity1=CTU&SearchType=S&DDate1=2018-03-05"
res = urllib.request.Request(url, headers=headers)
data = urllib.request.urlopen(res).read().decode("gb2312")  # fetch the raw response (GB2312-encoded JSON)
jsondata = json.loads(data)  # decode the JSON text into a Python dict
fh = open("E:/allFile/Python_Project/Spider/ctripSpider_Normal/data/data.json",
"w", encoding="utf-8")
airLineMarketingDict = {}  # airline (marketing carrier) names keyed by airline code
for key in jsondata["als"]:
airLineMarketingDict[key] = jsondata["als"][key]
spaceDict = {  # cabin-class display names keyed by class code
"F": "头等舱",
"Y": "经济舱",
"C": "公务舱",
}
i = 1
for eachinfo in jsondata["fis"]:
    airLineMarketing = eachinfo["alc"]  # airline code
    takeoffOTD = json.loads(eachinfo["confort"])["HistoryPunctuality"]  # departure on-time rate
    arriveOTD = json.loads(eachinfo["confort"])[
        "HistoryPunctualityArr"]  # arrival on-time rate
    flightNumber = eachinfo["fn"]  # flight number
    takeoffPlace = eachinfo["dpbn"]  # departure airport
    takeoffTime = eachinfo["dt"]  # departure time
    arrivePlace = eachinfo["apbn"]  # arrival airport
    arriveTime = eachinfo["at"]  # arrival time
    shareState = eachinfo["sdft"]  # codeshare flight status
    planType1 = eachinfo["cf"]["c"]  # aircraft model
    planType2 = eachinfo["cf"]["dn"]  # aircraft model description
    planTypeSize = eachinfo["cf"]["s"]  # aircraft size
    transfer = {}  # transfer (connection) info; stays empty for non-stop flights
    if (eachinfo["sts"]):
        for transferlist in eachinfo["sts"]:
            transfer["转机到达时间"] = transferlist["at"]  # transfer arrival time
            transfer["转机起飞时间"] = transferlist["dt"]  # transfer departure time
            transfer["转机地点"] = transferlist["cn"]  # transfer location
basicInfo = {
"航空公司": airLineMarketingDict.get(airLineMarketing),
"航班号": flightNumber,
"起飞机场": takeoffPlace,
"起飞时间": takeoffTime,
"达到机场": arrivePlace,
"到达时间": arriveTime,
"出发准点率": takeoffOTD,
"到达准点率": arriveOTD,
"飞机型号": planType1 + "(" + planTypeSize + ")",
"飞机型号备注": planType2,
"共享航班状态": shareState,
}
if (transfer):
        basicInfo = dict(basicInfo, **transfer)  # merge transfer info into the basic flight info
fh.write('——————————————————————第'+str(i)+'个航班——————————————————'+'\n')
#print("——————————————————————第%d个航班——————————————————" %i)
fh.write(json.dumps(basicInfo, ensure_ascii=False)+'\n')
# print(basicInfo)
i += 1
fh.writelines('——————————————————————票价信息————————————————————'+'\n')
# print("——————————————————————票价信息————————————————————")
    priceInfo = {}  # per-fare (cabin class / price) details
for pricelist in eachinfo["scs"]:
        priceInfo["舱位类型"] = spaceDict.get(pricelist["c"])  # cabin class
        priceInfo["舱位类型备注"] = pricelist["son"]  # cabin class note
        priceInfo["折扣"] = pricelist["rt"]  # discount rate
        priceInfo["票价"] = pricelist["isair"]["p"]  # ticket price
        priceInfo["退改手续费说明"] = pricelist["tgq"]["rrn"]  # refund/change fee description
        priceInfo["产品退订费包含内容"] = pricelist["tgq"]["rfn"]  # what the cancellation fee covers
        priceInfo["产品签转条件"] = pricelist["tgq"]["edn"]  # endorsement/transfer conditions
        priceInfo["产品更改费"] = pricelist["tgq"]["mef"]  # change fee
        if (pricelist["hotel"]):
            priceInfo["机票所含酒店名称"] = pricelist["hotel"]["hn"]  # bundled hotel name
            priceInfo["机票所含酒店地址"] = pricelist["hotel"]["ads"]  # bundled hotel address
            priceInfo["机票所含酒店地址备注"] = pricelist["hotel"]["bd"]  # bundled hotel address note
            priceInfo["酒店电话"] = pricelist["hotel"]["tel"]  # hotel phone number
            priceInfo["酒店星级"] = pricelist["hotel"]["star"]  # hotel star rating
            for roomlist in pricelist["hotel"]["rooms"]:
                priceInfo["酒店房间类型"] = roomlist["name"]  # room type
                priceInfo["酒店房间类型备注"] = roomlist["bed"]  # room type note (bed)
                priceInfo["入住时间"] = roomlist["ci"]  # check-in time
                priceInfo["离店时间"] = roomlist["co"]  # check-out time
fh.writelines(json.dumps(priceInfo, ensure_ascii=False)+'\n')
# print(priceInfo)
# print("\n")
priceInfo.clear()
transfer.clear()
fh.close()
#
|
the-stack_106_17584
|
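# In-place quicksort using the Lomuto partition scheme: the last element of the
# current slice is taken as the pivot, elements <= pivot are swapped to the
# front, the pivot is moved to its final position, and both halves are sorted
# recursively.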
def quicksort(lista, inicio=0, fim=None):
if fim is None:
fim = len(lista) - 1
if inicio < fim:
p = lista[fim]
indice = inicio
for j in range(inicio, fim):
if lista[j] <= p:
lista[j], lista[indice] = lista[indice], lista[j]
indice += 1
lista[indice], lista[fim] = lista[fim], lista[indice]
quicksort(lista, inicio, indice-1)
quicksort(lista, indice + 1, fim)
if __name__ == "__main__":
lista = [2, 1, 3, 4, 6, 5]
quicksort(lista)
print(lista)
|
the-stack_106_17585
|
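# Minimal Keras MNIST classifier (it appears to follow the TensorFlow 2
# beginner quickstart): train a small dense network that outputs raw logits,
# then wrap it with a Softmax layer to report class probabilities.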
import tensorflow as tf
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
model = tf.keras.models.Sequential([
tf.keras.layers.Flatten(input_shape=(28, 28)),
tf.keras.layers.Dense(128, activation='relu'),
tf.keras.layers.Dropout(0.2),
tf.keras.layers.Dense(10)
])
predictions = model(x_train[:1]).numpy()
print(predictions)
print(tf.nn.softmax(predictions).numpy())
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
print(loss_fn)
loss_fn(y_train[:1], predictions).numpy()
model.compile(optimizer='adam', loss=loss_fn, metrics=['accuracy'])  # compile the model before training
model.fit(x_train, y_train, epochs=5)
model.evaluate(x_test, y_test, verbose=2)
probability_model = tf.keras.Sequential([
model,
tf.keras.layers.Softmax()
])
print(probability_model(x_test[:5]))
|
the-stack_106_17586
|
import importlib.abc
import importlib.util
import os
import platform
import re
import string
import sys
import tokenize
import traceback
import webbrowser
from tkinter import *
from tkinter.font import Font
from tkinter.ttk import Scrollbar
import tkinter.simpledialog as tkSimpleDialog
import tkinter.messagebox as tkMessageBox
from idlelib.config import idleConf
from idlelib import configdialog
from idlelib import grep
from idlelib import help
from idlelib import help_about
from idlelib import macosx
from idlelib.multicall import MultiCallCreator
from idlelib import pyparse
from idlelib import query
from idlelib import replace
from idlelib import search
from idlelib.tree import wheel_event
from idlelib import window
# The default tab setting for a Text widget, in average-width characters.
TK_TABWIDTH_DEFAULT = 8
_py_version = ' (%s)' % platform.python_version()
darwin = sys.platform == 'darwin'
def _sphinx_version():
"Format sys.version_info to produce the Sphinx version string used to install the chm docs"
major, minor, micro, level, serial = sys.version_info
release = '%s%s' % (major, minor)
release += '%s' % (micro,)
if level == 'candidate':
release += 'rc%s' % (serial,)
elif level != 'final':
release += '%s%s' % (level[0], serial)
return release
class EditorWindow(object):
from idlelib.percolator import Percolator
from idlelib.colorizer import ColorDelegator, color_config
from idlelib.undo import UndoDelegator
from idlelib.iomenu import IOBinding, encoding
from idlelib import mainmenu
from idlelib.statusbar import MultiStatusBar
from idlelib.autocomplete import AutoComplete
from idlelib.autoexpand import AutoExpand
from idlelib.calltip import Calltip
from idlelib.codecontext import CodeContext
from idlelib.sidebar import LineNumbers
from idlelib.format import FormatParagraph, FormatRegion, Indents, Rstrip
from idlelib.parenmatch import ParenMatch
from idlelib.squeezer import Squeezer
from idlelib.zoomheight import ZoomHeight
filesystemencoding = sys.getfilesystemencoding() # for file names
help_url = None
allow_code_context = True
allow_line_numbers = True
def __init__(self, flist=None, filename=None, key=None, root=None):
# Delay import: runscript imports pyshell imports EditorWindow.
from idlelib.runscript import ScriptBinding
if EditorWindow.help_url is None:
dochome = os.path.join(sys.base_prefix, 'Doc', 'index.html')
if sys.platform.count('linux'):
# look for html docs in a couple of standard places
pyver = 'python-docs-' + '%s.%s.%s' % sys.version_info[:3]
if os.path.isdir('/var/www/html/python/'): # "python2" rpm
dochome = '/var/www/html/python/index.html'
else:
basepath = '/usr/share/doc/' # standard location
dochome = os.path.join(basepath, pyver,
'Doc', 'index.html')
elif sys.platform[:3] == 'win':
chmfile = os.path.join(sys.base_prefix, 'Doc',
'Python%s.chm' % _sphinx_version())
if os.path.isfile(chmfile):
dochome = chmfile
elif sys.platform == 'darwin':
# documentation may be stored inside a python framework
dochome = os.path.join(sys.base_prefix,
'Resources/English.lproj/Documentation/index.html')
dochome = os.path.normpath(dochome)
if os.path.isfile(dochome):
EditorWindow.help_url = dochome
if sys.platform == 'darwin':
# Safari requires real file:-URLs
EditorWindow.help_url = 'file://' + EditorWindow.help_url
else:
EditorWindow.help_url = ("https://docs.python.org/%d.%d/"
% sys.version_info[:2])
self.flist = flist
root = root or flist.root
self.root = root
self.menubar = Menu(root)
self.top = top = window.ListedToplevel(root, menu=self.menubar)
if flist:
self.tkinter_vars = flist.vars
#self.top.instance_dict makes flist.inversedict available to
#configdialog.py so it can access all EditorWindow instances
self.top.instance_dict = flist.inversedict
else:
self.tkinter_vars = {} # keys: Tkinter event names
# values: Tkinter variable instances
self.top.instance_dict = {}
self.recent_files_path = idleConf.userdir and os.path.join(
idleConf.userdir, 'recent-files.lst')
self.prompt_last_line = '' # Override in PyShell
self.text_frame = text_frame = Frame(top)
self.vbar = vbar = Scrollbar(text_frame, name='vbar')
width = idleConf.GetOption('main', 'EditorWindow', 'width', type='int')
text_options = {
'name': 'text',
'padx': 5,
'wrap': 'none',
'highlightthickness': 0,
'width': width,
'tabstyle': 'wordprocessor', # new in 8.5
'height': idleConf.GetOption(
'main', 'EditorWindow', 'height', type='int'),
}
self.text = text = MultiCallCreator(Text)(text_frame, **text_options)
self.top.focused_widget = self.text
self.createmenubar()
self.apply_bindings()
self.top.protocol("WM_DELETE_WINDOW", self.close)
self.top.bind("<<close-window>>", self.close_event)
if macosx.isAquaTk():
# Command-W on editor windows doesn't work without this.
text.bind('<<close-window>>', self.close_event)
# Some OS X systems have only one mouse button, so use
# control-click for popup context menus there. For two
# buttons, AquaTk defines <2> as the right button, not <3>.
text.bind("<Control-Button-1>",self.right_menu_event)
text.bind("<2>", self.right_menu_event)
else:
# Elsewhere, use right-click for popup menus.
text.bind("<3>",self.right_menu_event)
text.bind('<MouseWheel>', wheel_event)
text.bind('<Button-4>', wheel_event)
text.bind('<Button-5>', wheel_event)
text.bind('<Configure>', self.handle_winconfig)
text.bind("<<cut>>", self.cut)
text.bind("<<copy>>", self.copy)
text.bind("<<paste>>", self.paste)
text.bind("<<center-insert>>", self.center_insert_event)
text.bind("<<help>>", self.help_dialog)
text.bind("<<python-docs>>", self.python_docs)
text.bind("<<about-idle>>", self.about_dialog)
text.bind("<<open-config-dialog>>", self.config_dialog)
text.bind("<<open-module>>", self.open_module_event)
text.bind("<<do-nothing>>", lambda event: "break")
text.bind("<<select-all>>", self.select_all)
text.bind("<<remove-selection>>", self.remove_selection)
text.bind("<<find>>", self.find_event)
text.bind("<<find-again>>", self.find_again_event)
text.bind("<<find-in-files>>", self.find_in_files_event)
text.bind("<<find-selection>>", self.find_selection_event)
text.bind("<<replace>>", self.replace_event)
text.bind("<<goto-line>>", self.goto_line_event)
text.bind("<<smart-backspace>>",self.smart_backspace_event)
text.bind("<<newline-and-indent>>",self.newline_and_indent_event)
text.bind("<<smart-indent>>",self.smart_indent_event)
self.fregion = fregion = self.FormatRegion(self)
# self.fregion used in smart_indent_event to access indent_region.
text.bind("<<indent-region>>", fregion.indent_region_event)
text.bind("<<dedent-region>>", fregion.dedent_region_event)
text.bind("<<comment-region>>", fregion.comment_region_event)
text.bind("<<uncomment-region>>", fregion.uncomment_region_event)
text.bind("<<tabify-region>>", fregion.tabify_region_event)
text.bind("<<untabify-region>>", fregion.untabify_region_event)
indents = self.Indents(self)
text.bind("<<toggle-tabs>>", indents.toggle_tabs_event)
text.bind("<<change-indentwidth>>", indents.change_indentwidth_event)
text.bind("<Left>", self.move_at_edge_if_selection(0))
text.bind("<Right>", self.move_at_edge_if_selection(1))
text.bind("<<del-word-left>>", self.del_word_left)
text.bind("<<del-word-right>>", self.del_word_right)
text.bind("<<beginning-of-line>>", self.home_callback)
if flist:
flist.inversedict[self] = key
if key:
flist.dict[key] = self
text.bind("<<open-new-window>>", self.new_callback)
text.bind("<<close-all-windows>>", self.flist.close_all_callback)
text.bind("<<open-class-browser>>", self.open_module_browser)
text.bind("<<open-path-browser>>", self.open_path_browser)
text.bind("<<open-turtle-demo>>", self.open_turtle_demo)
self.set_status_bar()
text_frame.pack(side=LEFT, fill=BOTH, expand=1)
text_frame.rowconfigure(1, weight=1)
text_frame.columnconfigure(1, weight=1)
vbar['command'] = self.handle_yview
vbar.grid(row=1, column=2, sticky=NSEW)
text['yscrollcommand'] = vbar.set
text['font'] = idleConf.GetFont(self.root, 'main', 'EditorWindow')
text.grid(row=1, column=1, sticky=NSEW)
text.focus_set()
self.set_width()
# usetabs true -> literal tab characters are used by indent and
# dedent cmds, possibly mixed with spaces if
# indentwidth is not a multiple of tabwidth,
# which will cause Tabnanny to nag!
# false -> tab characters are converted to spaces by indent
# and dedent cmds, and ditto TAB keystrokes
# Although use-spaces=0 can be configured manually in config-main.def,
# configuration of tabs v. spaces is not supported in the configuration
# dialog. IDLE promotes the preferred Python indentation: use spaces!
usespaces = idleConf.GetOption('main', 'Indent',
'use-spaces', type='bool')
self.usetabs = not usespaces
# tabwidth is the display width of a literal tab character.
# CAUTION: telling Tk to use anything other than its default
# tab setting causes it to use an entirely different tabbing algorithm,
# treating tab stops as fixed distances from the left margin.
# Nobody expects this, so for now tabwidth should never be changed.
self.tabwidth = 8 # must remain 8 until Tk is fixed.
# indentwidth is the number of screen characters per indent level.
# The recommended Python indentation is four spaces.
self.indentwidth = self.tabwidth
self.set_notabs_indentwidth()
# Store the current value of the insertofftime now so we can restore
# it if needed.
if not hasattr(idleConf, 'blink_off_time'):
idleConf.blink_off_time = self.text['insertofftime']
self.update_cursor_blink()
# When searching backwards for a reliable place to begin parsing,
# first start num_context_lines[0] lines back, then
# num_context_lines[1] lines back if that didn't work, and so on.
# The last value should be huge (larger than the # of lines in a
# conceivable file).
# Making the initial values larger slows things down more often.
self.num_context_lines = 50, 500, 5000000
self.per = per = self.Percolator(text)
self.undo = undo = self.UndoDelegator()
per.insertfilter(undo)
text.undo_block_start = undo.undo_block_start
text.undo_block_stop = undo.undo_block_stop
undo.set_saved_change_hook(self.saved_change_hook)
# IOBinding implements file I/O and printing functionality
self.io = io = self.IOBinding(self)
io.set_filename_change_hook(self.filename_change_hook)
self.good_load = False
self.set_indentation_params(False)
self.color = None # initialized below in self.ResetColorizer
self.code_context = None # optionally initialized later below
self.line_numbers = None # optionally initialized later below
if filename:
if os.path.exists(filename) and not os.path.isdir(filename):
if io.loadfile(filename):
self.good_load = True
is_py_src = self.ispythonsource(filename)
self.set_indentation_params(is_py_src)
else:
io.set_filename(filename)
self.good_load = True
self.ResetColorizer()
self.saved_change_hook()
self.update_recent_files_list()
self.load_extensions()
menu = self.menudict.get('window')
if menu:
end = menu.index("end")
if end is None:
end = -1
if end >= 0:
menu.add_separator()
end = end + 1
self.wmenu_end = end
window.register_callback(self.postwindowsmenu)
# Some abstractions so IDLE extensions are cross-IDE
self.askyesno = tkMessageBox.askyesno
self.askinteger = tkSimpleDialog.askinteger
self.showerror = tkMessageBox.showerror
# Add pseudoevents for former extension fixed keys.
# (This probably needs to be done once in the process.)
text.event_add('<<autocomplete>>', '<Key-Tab>')
text.event_add('<<try-open-completions>>', '<KeyRelease-period>',
'<KeyRelease-slash>', '<KeyRelease-backslash>')
text.event_add('<<try-open-calltip>>', '<KeyRelease-parenleft>')
text.event_add('<<refresh-calltip>>', '<KeyRelease-parenright>')
text.event_add('<<paren-closed>>', '<KeyRelease-parenright>',
'<KeyRelease-bracketright>', '<KeyRelease-braceright>')
# Former extension bindings depends on frame.text being packed
# (called from self.ResetColorizer()).
autocomplete = self.AutoComplete(self)
text.bind("<<autocomplete>>", autocomplete.autocomplete_event)
text.bind("<<try-open-completions>>",
autocomplete.try_open_completions_event)
text.bind("<<force-open-completions>>",
autocomplete.force_open_completions_event)
text.bind("<<expand-word>>", self.AutoExpand(self).expand_word_event)
text.bind("<<format-paragraph>>",
self.FormatParagraph(self).format_paragraph_event)
parenmatch = self.ParenMatch(self)
text.bind("<<flash-paren>>", parenmatch.flash_paren_event)
text.bind("<<paren-closed>>", parenmatch.paren_closed_event)
scriptbinding = ScriptBinding(self)
text.bind("<<check-module>>", scriptbinding.check_module_event)
text.bind("<<run-module>>", scriptbinding.run_module_event)
text.bind("<<run-custom>>", scriptbinding.run_custom_event)
text.bind("<<do-rstrip>>", self.Rstrip(self).do_rstrip)
self.ctip = ctip = self.Calltip(self)
text.bind("<<try-open-calltip>>", ctip.try_open_calltip_event)
#refresh-calltip must come after paren-closed to work right
text.bind("<<refresh-calltip>>", ctip.refresh_calltip_event)
text.bind("<<force-open-calltip>>", ctip.force_open_calltip_event)
text.bind("<<zoom-height>>", self.ZoomHeight(self).zoom_height_event)
if self.allow_code_context:
self.code_context = self.CodeContext(self)
text.bind("<<toggle-code-context>>",
self.code_context.toggle_code_context_event)
else:
self.update_menu_state('options', '*Code Context', 'disabled')
if self.allow_line_numbers:
self.line_numbers = self.LineNumbers(self)
if idleConf.GetOption('main', 'EditorWindow',
'line-numbers-default', type='bool'):
self.toggle_line_numbers_event()
text.bind("<<toggle-line-numbers>>", self.toggle_line_numbers_event)
else:
self.update_menu_state('options', '*Line Numbers', 'disabled')
def handle_winconfig(self, event=None):
self.set_width()
def set_width(self):
text = self.text
inner_padding = sum(map(text.tk.getint, [text.cget('border'),
text.cget('padx')]))
pixel_width = text.winfo_width() - 2 * inner_padding
# Divide the width of the Text widget by the font width,
# which is taken to be the width of '0' (zero).
# http://www.tcl.tk/man/tcl8.6/TkCmd/text.htm#M21
zero_char_width = \
Font(text, font=text.cget('font')).measure('0')
self.width = pixel_width // zero_char_width
def new_callback(self, event):
dirname, basename = self.io.defaultfilename()
self.flist.new(dirname)
return "break"
def home_callback(self, event):
if (event.state & 4) != 0 and event.keysym == "Home":
# state&4==Control. If <Control-Home>, use the Tk binding.
return None
if self.text.index("iomark") and \
self.text.compare("iomark", "<=", "insert lineend") and \
self.text.compare("insert linestart", "<=", "iomark"):
# In Shell on input line, go to just after prompt
insertpt = int(self.text.index("iomark").split(".")[1])
else:
line = self.text.get("insert linestart", "insert lineend")
for insertpt in range(len(line)):
if line[insertpt] not in (' ','\t'):
break
else:
insertpt=len(line)
lineat = int(self.text.index("insert").split('.')[1])
if insertpt == lineat:
insertpt = 0
dest = "insert linestart+"+str(insertpt)+"c"
if (event.state&1) == 0:
# shift was not pressed
self.text.tag_remove("sel", "1.0", "end")
else:
if not self.text.index("sel.first"):
# there was no previous selection
self.text.mark_set("my_anchor", "insert")
else:
if self.text.compare(self.text.index("sel.first"), "<",
self.text.index("insert")):
self.text.mark_set("my_anchor", "sel.first") # extend back
else:
self.text.mark_set("my_anchor", "sel.last") # extend forward
first = self.text.index(dest)
last = self.text.index("my_anchor")
if self.text.compare(first,">",last):
first,last = last,first
self.text.tag_remove("sel", "1.0", "end")
self.text.tag_add("sel", first, last)
self.text.mark_set("insert", dest)
self.text.see("insert")
return "break"
def set_status_bar(self):
self.status_bar = self.MultiStatusBar(self.top)
sep = Frame(self.top, height=1, borderwidth=1, background='grey75')
if sys.platform == "darwin":
# Insert some padding to avoid obscuring some of the statusbar
# by the resize widget.
self.status_bar.set_label('_padding1', ' ', side=RIGHT)
self.status_bar.set_label('column', 'Col: ?', side=RIGHT)
self.status_bar.set_label('line', 'Ln: ?', side=RIGHT)
self.status_bar.pack(side=BOTTOM, fill=X)
sep.pack(side=BOTTOM, fill=X)
self.text.bind("<<set-line-and-column>>", self.set_line_and_column)
self.text.event_add("<<set-line-and-column>>",
"<KeyRelease>", "<ButtonRelease>")
self.text.after_idle(self.set_line_and_column)
def set_line_and_column(self, event=None):
line, column = self.text.index(INSERT).split('.')
self.status_bar.set_label('column', 'Col: %s' % column)
self.status_bar.set_label('line', 'Ln: %s' % line)
menu_specs = [
("file", "_File"),
("edit", "_Edit"),
("format", "F_ormat"),
("run", "_Run"),
("options", "_Options"),
("window", "_Window"),
("help", "_Help"),
]
def createmenubar(self):
mbar = self.menubar
self.menudict = menudict = {}
for name, label in self.menu_specs:
underline, label = prepstr(label)
menudict[name] = menu = Menu(mbar, name=name, tearoff=0)
mbar.add_cascade(label=label, menu=menu, underline=underline)
if macosx.isCarbonTk():
# Insert the application menu
menudict['application'] = menu = Menu(mbar, name='apple',
tearoff=0)
mbar.add_cascade(label='IDLE', menu=menu)
self.fill_menus()
self.recent_files_menu = Menu(self.menubar, tearoff=0)
self.menudict['file'].insert_cascade(3, label='Recent Files',
underline=0,
menu=self.recent_files_menu)
self.base_helpmenu_length = self.menudict['help'].index(END)
self.reset_help_menu_entries()
def postwindowsmenu(self):
# Only called when Window menu exists
menu = self.menudict['window']
end = menu.index("end")
if end is None:
end = -1
if end > self.wmenu_end:
menu.delete(self.wmenu_end+1, end)
window.add_windows_to_menu(menu)
def update_menu_label(self, menu, index, label):
"Update label for menu item at index."
menuitem = self.menudict[menu]
menuitem.entryconfig(index, label=label)
def update_menu_state(self, menu, index, state):
"Update state for menu item at index."
menuitem = self.menudict[menu]
menuitem.entryconfig(index, state=state)
def handle_yview(self, event, *args):
"Handle scrollbar."
if event == 'moveto':
fraction = float(args[0])
lines = (round(self.getlineno('end') * fraction) -
self.getlineno('@0,0'))
event = 'scroll'
args = (lines, 'units')
self.text.yview(event, *args)
return 'break'
rmenu = None
def right_menu_event(self, event):
self.text.mark_set("insert", "@%d,%d" % (event.x, event.y))
if not self.rmenu:
self.make_rmenu()
rmenu = self.rmenu
self.event = event
iswin = sys.platform[:3] == 'win'
if iswin:
self.text.config(cursor="arrow")
for item in self.rmenu_specs:
try:
label, eventname, verify_state = item
except ValueError: # see issue1207589
continue
if verify_state is None:
continue
state = getattr(self, verify_state)()
rmenu.entryconfigure(label, state=state)
rmenu.tk_popup(event.x_root, event.y_root)
if iswin:
self.text.config(cursor="ibeam")
return "break"
rmenu_specs = [
# ("Label", "<<virtual-event>>", "statefuncname"), ...
("Close", "<<close-window>>", None), # Example
]
def make_rmenu(self):
rmenu = Menu(self.text, tearoff=0)
for item in self.rmenu_specs:
label, eventname = item[0], item[1]
if label is not None:
def command(text=self.text, eventname=eventname):
text.event_generate(eventname)
rmenu.add_command(label=label, command=command)
else:
rmenu.add_separator()
self.rmenu = rmenu
def rmenu_check_cut(self):
return self.rmenu_check_copy()
def rmenu_check_copy(self):
try:
indx = self.text.index('sel.first')
except TclError:
return 'disabled'
else:
return 'normal' if indx else 'disabled'
def rmenu_check_paste(self):
try:
self.text.tk.call('tk::GetSelection', self.text, 'CLIPBOARD')
except TclError:
return 'disabled'
else:
return 'normal'
def about_dialog(self, event=None):
"Handle Help 'About IDLE' event."
# Synchronize with macosx.overrideRootMenu.about_dialog.
help_about.AboutDialog(self.top)
return "break"
def config_dialog(self, event=None):
"Handle Options 'Configure IDLE' event."
# Synchronize with macosx.overrideRootMenu.config_dialog.
configdialog.ConfigDialog(self.top,'Settings')
return "break"
def help_dialog(self, event=None):
"Handle Help 'IDLE Help' event."
# Synchronize with macosx.overrideRootMenu.help_dialog.
if self.root:
parent = self.root
else:
parent = self.top
help.show_idlehelp(parent)
return "break"
def python_docs(self, event=None):
if sys.platform[:3] == 'win':
try:
os.startfile(self.help_url)
except OSError as why:
tkMessageBox.showerror(title='Document Start Failure',
message=str(why), parent=self.text)
else:
webbrowser.open(self.help_url)
return "break"
def cut(self,event):
self.text.event_generate("<<Cut>>")
return "break"
def copy(self,event):
if not self.text.tag_ranges("sel"):
# There is no selection, so do nothing and maybe interrupt.
return None
self.text.event_generate("<<Copy>>")
return "break"
def paste(self,event):
self.text.event_generate("<<Paste>>")
self.text.see("insert")
return "break"
def select_all(self, event=None):
self.text.tag_add("sel", "1.0", "end-1c")
self.text.mark_set("insert", "1.0")
self.text.see("insert")
return "break"
def remove_selection(self, event=None):
self.text.tag_remove("sel", "1.0", "end")
self.text.see("insert")
return "break"
def move_at_edge_if_selection(self, edge_index):
"""Cursor move begins at start or end of selection
When a left/right cursor key is pressed create and return to Tkinter a
function which causes a cursor move from the associated edge of the
selection.
"""
self_text_index = self.text.index
self_text_mark_set = self.text.mark_set
edges_table = ("sel.first+1c", "sel.last-1c")
def move_at_edge(event):
if (event.state & 5) == 0: # no shift(==1) or control(==4) pressed
try:
self_text_index("sel.first")
self_text_mark_set("insert", edges_table[edge_index])
except TclError:
pass
return move_at_edge
def del_word_left(self, event):
self.text.event_generate('<Meta-Delete>')
return "break"
def del_word_right(self, event):
self.text.event_generate('<Meta-d>')
return "break"
def find_event(self, event):
search.find(self.text)
return "break"
def find_again_event(self, event):
search.find_again(self.text)
return "break"
def find_selection_event(self, event):
search.find_selection(self.text)
return "break"
def find_in_files_event(self, event):
grep.grep(self.text, self.io, self.flist)
return "break"
def replace_event(self, event):
replace.replace(self.text)
return "break"
def goto_line_event(self, event):
text = self.text
lineno = tkSimpleDialog.askinteger("Goto",
"Go to line number:",parent=text)
if lineno is None:
return "break"
if lineno <= 0:
text.bell()
return "break"
text.mark_set("insert", "%d.0" % lineno)
text.see("insert")
return "break"
def open_module(self):
"""Get module name from user and open it.
Return module path or None for calls by open_module_browser
when latter is not invoked in named editor window.
"""
# XXX This, open_module_browser, and open_path_browser
# would fit better in iomenu.IOBinding.
try:
name = self.text.get("sel.first", "sel.last").strip()
except TclError:
name = ''
file_path = query.ModuleName(
self.text, "Open Module",
"Enter the name of a Python module\n"
"to search on sys.path and open:",
name).result
if file_path is not None:
if self.flist:
self.flist.open(file_path)
else:
self.io.loadfile(file_path)
return file_path
def open_module_event(self, event):
self.open_module()
return "break"
def open_module_browser(self, event=None):
filename = self.io.filename
if not (self.__class__.__name__ == 'PyShellEditorWindow'
and filename):
filename = self.open_module()
if filename is None:
return "break"
from idlelib import browser
browser.ModuleBrowser(self.root, filename)
return "break"
def open_path_browser(self, event=None):
from idlelib import pathbrowser
pathbrowser.PathBrowser(self.root)
return "break"
def open_turtle_demo(self, event = None):
import subprocess
cmd = [sys.executable,
'-c',
'from turtledemo.__main__ import main; main()']
subprocess.Popen(cmd, shell=False)
return "break"
def gotoline(self, lineno):
if lineno is not None and lineno > 0:
self.text.mark_set("insert", "%d.0" % lineno)
self.text.tag_remove("sel", "1.0", "end")
self.text.tag_add("sel", "insert", "insert +1l")
self.center()
def ispythonsource(self, filename):
if not filename or os.path.isdir(filename):
return True
base, ext = os.path.splitext(os.path.basename(filename))
if os.path.normcase(ext) in (".py", ".pyw"):
return True
line = self.text.get('1.0', '1.0 lineend')
return line.startswith('#!') and 'python' in line
def close_hook(self):
if self.flist:
self.flist.unregister_maybe_terminate(self)
self.flist = None
def set_close_hook(self, close_hook):
self.close_hook = close_hook
def filename_change_hook(self):
if self.flist:
self.flist.filename_changed_edit(self)
self.saved_change_hook()
self.top.update_windowlist_registry(self)
self.ResetColorizer()
def _addcolorizer(self):
if self.color:
return
if self.ispythonsource(self.io.filename):
self.color = self.ColorDelegator()
# can add more colorizers here...
if self.color:
self.per.removefilter(self.undo)
self.per.insertfilter(self.color)
self.per.insertfilter(self.undo)
def _rmcolorizer(self):
if not self.color:
return
self.color.removecolors()
self.per.removefilter(self.color)
self.color = None
def ResetColorizer(self):
"Update the color theme"
# Called from self.filename_change_hook and from configdialog.py
self._rmcolorizer()
self._addcolorizer()
EditorWindow.color_config(self.text)
if self.code_context is not None:
self.code_context.update_highlight_colors()
if self.line_numbers is not None:
self.line_numbers.update_colors()
IDENTCHARS = string.ascii_letters + string.digits + "_"
def colorize_syntax_error(self, text, pos):
text.tag_add("ERROR", pos)
char = text.get(pos)
if char and char in self.IDENTCHARS:
text.tag_add("ERROR", pos + " wordstart", pos)
if '\n' == text.get(pos): # error at line end
text.mark_set("insert", pos)
else:
text.mark_set("insert", pos + "+1c")
text.see(pos)
def update_cursor_blink(self):
"Update the cursor blink configuration."
cursorblink = idleConf.GetOption(
'main', 'EditorWindow', 'cursor-blink', type='bool')
if not cursorblink:
self.text['insertofftime'] = 0
else:
# Restore the original value
self.text['insertofftime'] = idleConf.blink_off_time
def ResetFont(self):
"Update the text widgets' font if it is changed"
# Called from configdialog.py
# Update the code context widget first, since its height affects
# the height of the text widget. This avoids double re-rendering.
if self.code_context is not None:
self.code_context.update_font()
# Next, update the line numbers widget, since its width affects
# the width of the text widget.
if self.line_numbers is not None:
self.line_numbers.update_font()
# Finally, update the main text widget.
new_font = idleConf.GetFont(self.root, 'main', 'EditorWindow')
self.text['font'] = new_font
self.set_width()
def RemoveKeybindings(self):
"Remove the keybindings before they are changed."
# Called from configdialog.py
self.mainmenu.default_keydefs = keydefs = idleConf.GetCurrentKeySet()
for event, keylist in keydefs.items():
self.text.event_delete(event, *keylist)
for extensionName in self.get_standard_extension_names():
xkeydefs = idleConf.GetExtensionBindings(extensionName)
if xkeydefs:
for event, keylist in xkeydefs.items():
self.text.event_delete(event, *keylist)
def ApplyKeybindings(self):
"Update the keybindings after they are changed"
# Called from configdialog.py
self.mainmenu.default_keydefs = keydefs = idleConf.GetCurrentKeySet()
self.apply_bindings()
for extensionName in self.get_standard_extension_names():
xkeydefs = idleConf.GetExtensionBindings(extensionName)
if xkeydefs:
self.apply_bindings(xkeydefs)
#update menu accelerators
menuEventDict = {}
for menu in self.mainmenu.menudefs:
menuEventDict[menu[0]] = {}
for item in menu[1]:
if item:
menuEventDict[menu[0]][prepstr(item[0])[1]] = item[1]
for menubarItem in self.menudict:
menu = self.menudict[menubarItem]
end = menu.index(END)
if end is None:
# Skip empty menus
continue
end += 1
for index in range(0, end):
if menu.type(index) == 'command':
accel = menu.entrycget(index, 'accelerator')
if accel:
itemName = menu.entrycget(index, 'label')
event = ''
if menubarItem in menuEventDict:
if itemName in menuEventDict[menubarItem]:
event = menuEventDict[menubarItem][itemName]
if event:
accel = get_accelerator(keydefs, event)
menu.entryconfig(index, accelerator=accel)
def set_notabs_indentwidth(self):
"Update the indentwidth if changed and not using tabs in this window"
# Called from configdialog.py
if not self.usetabs:
self.indentwidth = idleConf.GetOption('main', 'Indent','num-spaces',
type='int')
def reset_help_menu_entries(self):
"Update the additional help entries on the Help menu"
help_list = idleConf.GetAllExtraHelpSourcesList()
helpmenu = self.menudict['help']
# first delete the extra help entries, if any
helpmenu_length = helpmenu.index(END)
if helpmenu_length > self.base_helpmenu_length:
helpmenu.delete((self.base_helpmenu_length + 1), helpmenu_length)
# then rebuild them
if help_list:
helpmenu.add_separator()
for entry in help_list:
cmd = self.__extra_help_callback(entry[1])
helpmenu.add_command(label=entry[0], command=cmd)
# and update the menu dictionary
self.menudict['help'] = helpmenu
def __extra_help_callback(self, helpfile):
"Create a callback with the helpfile value frozen at definition time"
def display_extra_help(helpfile=helpfile):
if not helpfile.startswith(('www', 'http')):
helpfile = os.path.normpath(helpfile)
if sys.platform[:3] == 'win':
try:
os.startfile(helpfile)
except OSError as why:
tkMessageBox.showerror(title='Document Start Failure',
message=str(why), parent=self.text)
else:
webbrowser.open(helpfile)
return display_extra_help
def update_recent_files_list(self, new_file=None):
"Load and update the recent files list and menus"
# TODO: move to iomenu.
rf_list = []
file_path = self.recent_files_path
if file_path and os.path.exists(file_path):
with open(file_path, 'r',
encoding='utf_8', errors='replace') as rf_list_file:
rf_list = rf_list_file.readlines()
if new_file:
new_file = os.path.abspath(new_file) + '\n'
if new_file in rf_list:
rf_list.remove(new_file) # move to top
rf_list.insert(0, new_file)
# clean and save the recent files list
bad_paths = []
for path in rf_list:
if '\0' in path or not os.path.exists(path[0:-1]):
bad_paths.append(path)
rf_list = [path for path in rf_list if path not in bad_paths]
ulchars = "1234567890ABCDEFGHIJK"
rf_list = rf_list[0:len(ulchars)]
if file_path:
try:
with open(file_path, 'w',
encoding='utf_8', errors='replace') as rf_file:
rf_file.writelines(rf_list)
except OSError as err:
if not getattr(self.root, "recentfiles_message", False):
self.root.recentfiles_message = True
tkMessageBox.showwarning(title='IDLE Warning',
message="Cannot save Recent Files list to disk.\n"
f" {err}\n"
"Select OK to continue.",
parent=self.text)
# for each edit window instance, construct the recent files menu
for instance in self.top.instance_dict:
menu = instance.recent_files_menu
menu.delete(0, END) # clear, and rebuild:
for i, file_name in enumerate(rf_list):
file_name = file_name.rstrip() # zap \n
callback = instance.__recent_file_callback(file_name)
menu.add_command(label=ulchars[i] + " " + file_name,
command=callback,
underline=0)
def __recent_file_callback(self, file_name):
def open_recent_file(fn_closure=file_name):
self.io.open(editFile=fn_closure)
return open_recent_file
def saved_change_hook(self):
short = self.short_title()
long = self.long_title()
if short and long:
title = short + " - " + long + _py_version
elif short:
title = short
elif long:
title = long
else:
title = "untitled"
icon = short or long or title
if not self.get_saved():
title = "*%s*" % title
icon = "*%s" % icon
self.top.wm_title(title)
self.top.wm_iconname(icon)
def get_saved(self):
return self.undo.get_saved()
def set_saved(self, flag):
self.undo.set_saved(flag)
def reset_undo(self):
self.undo.reset_undo()
def short_title(self):
filename = self.io.filename
return os.path.basename(filename) if filename else "untitled"
def long_title(self):
return self.io.filename or ""
def center_insert_event(self, event):
self.center()
return "break"
def center(self, mark="insert"):
text = self.text
top, bot = self.getwindowlines()
lineno = self.getlineno(mark)
height = bot - top
newtop = max(1, lineno - height//2)
text.yview(float(newtop))
def getwindowlines(self):
text = self.text
top = self.getlineno("@0,0")
bot = self.getlineno("@0,65535")
if top == bot and text.winfo_height() == 1:
# Geometry manager hasn't run yet
height = int(text['height'])
bot = top + height - 1
return top, bot
def getlineno(self, mark="insert"):
text = self.text
return int(float(text.index(mark)))
def get_geometry(self):
"Return (width, height, x, y)"
geom = self.top.wm_geometry()
m = re.match(r"(\d+)x(\d+)\+(-?\d+)\+(-?\d+)", geom)
return list(map(int, m.groups()))
def close_event(self, event):
self.close()
return "break"
def maybesave(self):
if self.io:
if not self.get_saved():
if self.top.state()!='normal':
self.top.deiconify()
self.top.lower()
self.top.lift()
return self.io.maybesave()
def close(self):
try:
reply = self.maybesave()
if str(reply) != "cancel":
self._close()
return reply
except AttributeError: # bpo-35379: close called twice
pass
def _close(self):
if self.io.filename:
self.update_recent_files_list(new_file=self.io.filename)
window.unregister_callback(self.postwindowsmenu)
self.unload_extensions()
self.io.close()
self.io = None
self.undo = None
if self.color:
self.color.close()
self.color = None
self.text = None
self.tkinter_vars = None
self.per.close()
self.per = None
self.top.destroy()
if self.close_hook:
# unless override: unregister from flist, terminate if last window
self.close_hook()
def load_extensions(self):
self.extensions = {}
self.load_standard_extensions()
def unload_extensions(self):
for ins in list(self.extensions.values()):
if hasattr(ins, "close"):
ins.close()
self.extensions = {}
def load_standard_extensions(self):
for name in self.get_standard_extension_names():
try:
self.load_extension(name)
except:
print("Failed to load extension", repr(name))
traceback.print_exc()
def get_standard_extension_names(self):
return idleConf.GetExtensions(editor_only=True)
extfiles = { # Map built-in config-extension section names to file names.
'ZzDummy': 'zzdummy',
}
def load_extension(self, name):
fname = self.extfiles.get(name, name)
try:
try:
mod = importlib.import_module('.' + fname, package=__package__)
except (ImportError, TypeError):
mod = importlib.import_module(fname)
except ImportError:
print("\nFailed to import extension: ", name)
raise
cls = getattr(mod, name)
keydefs = idleConf.GetExtensionBindings(name)
if hasattr(cls, "menudefs"):
self.fill_menus(cls.menudefs, keydefs)
ins = cls(self)
self.extensions[name] = ins
if keydefs:
self.apply_bindings(keydefs)
for vevent in keydefs:
methodname = vevent.replace("-", "_")
while methodname[:1] == '<':
methodname = methodname[1:]
while methodname[-1:] == '>':
methodname = methodname[:-1]
methodname = methodname + "_event"
if hasattr(ins, methodname):
self.text.bind(vevent, getattr(ins, methodname))
def apply_bindings(self, keydefs=None):
if keydefs is None:
keydefs = self.mainmenu.default_keydefs
text = self.text
text.keydefs = keydefs
for event, keylist in keydefs.items():
if keylist:
text.event_add(event, *keylist)
def fill_menus(self, menudefs=None, keydefs=None):
"""Add appropriate entries to the menus and submenus
Menus that are absent or None in self.menudict are ignored.
"""
if menudefs is None:
menudefs = self.mainmenu.menudefs
if keydefs is None:
keydefs = self.mainmenu.default_keydefs
menudict = self.menudict
text = self.text
for mname, entrylist in menudefs:
menu = menudict.get(mname)
if not menu:
continue
for entry in entrylist:
if not entry:
menu.add_separator()
else:
label, eventname = entry
checkbutton = (label[:1] == '!')
if checkbutton:
label = label[1:]
underline, label = prepstr(label)
accelerator = get_accelerator(keydefs, eventname)
def command(text=text, eventname=eventname):
text.event_generate(eventname)
if checkbutton:
var = self.get_var_obj(eventname, BooleanVar)
menu.add_checkbutton(label=label, underline=underline,
command=command, accelerator=accelerator,
variable=var)
else:
menu.add_command(label=label, underline=underline,
command=command,
accelerator=accelerator)
def getvar(self, name):
var = self.get_var_obj(name)
if var:
value = var.get()
return value
else:
raise NameError(name)
def setvar(self, name, value, vartype=None):
var = self.get_var_obj(name, vartype)
if var:
var.set(value)
else:
raise NameError(name)
def get_var_obj(self, name, vartype=None):
var = self.tkinter_vars.get(name)
if not var and vartype:
# create a Tkinter variable object with self.text as master:
self.tkinter_vars[name] = var = vartype(self.text)
return var
# Tk implementations of "virtual text methods" -- each platform
# reusing IDLE's support code needs to define these for its GUI's
# flavor of widget.
# Is character at text_index in a Python string? Return 0 for
# "guaranteed no", true for anything else. This info is expensive
# to compute ab initio, but is probably already known by the
# platform's colorizer.
def is_char_in_string(self, text_index):
if self.color:
# Return true iff colorizer hasn't (re)gotten this far
# yet, or the character is tagged as being in a string
return self.text.tag_prevrange("TODO", text_index) or \
"STRING" in self.text.tag_names(text_index)
else:
# The colorizer is missing: assume the worst
return 1
# If a selection is defined in the text widget, return (start,
# end) as Tkinter text indices, otherwise return (None, None)
def get_selection_indices(self):
try:
first = self.text.index("sel.first")
last = self.text.index("sel.last")
return first, last
except TclError:
return None, None
# Return the text widget's current view of what a tab stop means
# (equivalent width in spaces).
def get_tk_tabwidth(self):
current = self.text['tabs'] or TK_TABWIDTH_DEFAULT
return int(current)
# Set the text widget's current view of what a tab stop means.
def set_tk_tabwidth(self, newtabwidth):
text = self.text
if self.get_tk_tabwidth() != newtabwidth:
# Set text widget tab width
pixels = text.tk.call("font", "measure", text["font"],
"-displayof", text.master,
"n" * newtabwidth)
text.configure(tabs=pixels)
### begin autoindent code ### (configuration was moved to beginning of class)
def set_indentation_params(self, is_py_src, guess=True):
if is_py_src and guess:
i = self.guess_indent()
if 2 <= i <= 8:
self.indentwidth = i
if self.indentwidth != self.tabwidth:
self.usetabs = False
self.set_tk_tabwidth(self.tabwidth)
def smart_backspace_event(self, event):
text = self.text
first, last = self.get_selection_indices()
if first and last:
text.delete(first, last)
text.mark_set("insert", first)
return "break"
# Delete whitespace left, until hitting a real char or closest
# preceding virtual tab stop.
chars = text.get("insert linestart", "insert")
if chars == '':
if text.compare("insert", ">", "1.0"):
# easy: delete preceding newline
text.delete("insert-1c")
else:
text.bell() # at start of buffer
return "break"
if chars[-1] not in " \t":
# easy: delete preceding real char
text.delete("insert-1c")
return "break"
# Ick. It may require *inserting* spaces if we back up over a
# tab character! This is written to be clear, not fast.
tabwidth = self.tabwidth
have = len(chars.expandtabs(tabwidth))
assert have > 0
want = ((have - 1) // self.indentwidth) * self.indentwidth
# Debug prompt is multilined....
ncharsdeleted = 0
while 1:
if chars == self.prompt_last_line: # '' unless PyShell
break
chars = chars[:-1]
ncharsdeleted = ncharsdeleted + 1
have = len(chars.expandtabs(tabwidth))
if have <= want or chars[-1] not in " \t":
break
text.undo_block_start()
text.delete("insert-%dc" % ncharsdeleted, "insert")
if have < want:
text.insert("insert", ' ' * (want - have))
text.undo_block_stop()
return "break"
def smart_indent_event(self, event):
# if intraline selection:
# delete it
# elif multiline selection:
# do indent-region
# else:
# indent one level
text = self.text
first, last = self.get_selection_indices()
text.undo_block_start()
try:
if first and last:
if index2line(first) != index2line(last):
return self.fregion.indent_region_event(event)
text.delete(first, last)
text.mark_set("insert", first)
prefix = text.get("insert linestart", "insert")
raw, effective = get_line_indent(prefix, self.tabwidth)
if raw == len(prefix):
# only whitespace to the left
self.reindent_to(effective + self.indentwidth)
else:
# tab to the next 'stop' within or to right of line's text:
if self.usetabs:
pad = '\t'
else:
effective = len(prefix.expandtabs(self.tabwidth))
n = self.indentwidth
pad = ' ' * (n - effective % n)
text.insert("insert", pad)
text.see("insert")
return "break"
finally:
text.undo_block_stop()
def newline_and_indent_event(self, event):
"""Insert a newline and indentation after Enter keypress event.
Properly position the cursor on the new line based on information
from the current line. This takes into account if the current line
is a shell prompt, is empty, has selected text, contains a block
opener, contains a block closer, is a continuation line, or
is inside a string.
"""
text = self.text
first, last = self.get_selection_indices()
text.undo_block_start()
try: # Close undo block and expose new line in finally clause.
if first and last:
text.delete(first, last)
text.mark_set("insert", first)
line = text.get("insert linestart", "insert")
# Count leading whitespace for indent size.
i, n = 0, len(line)
while i < n and line[i] in " \t":
i += 1
if i == n:
# The cursor is in or at leading indentation in a continuation
# line; just inject an empty line at the start.
text.insert("insert linestart", '\n')
return "break"
indent = line[:i]
# Strip whitespace before insert point unless it's in the prompt.
i = 0
while line and line[-1] in " \t" and line != self.prompt_last_line:
line = line[:-1]
i += 1
if i:
text.delete("insert - %d chars" % i, "insert")
# Strip whitespace after insert point.
while text.get("insert") in " \t":
text.delete("insert")
# Insert new line.
text.insert("insert", '\n')
# Adjust indentation for continuations and block open/close.
# First need to find the last statement.
lno = index2line(text.index('insert'))
y = pyparse.Parser(self.indentwidth, self.tabwidth)
if not self.prompt_last_line:
for context in self.num_context_lines:
startat = max(lno - context, 1)
startatindex = repr(startat) + ".0"
rawtext = text.get(startatindex, "insert")
y.set_code(rawtext)
bod = y.find_good_parse_start(
self._build_char_in_string_func(startatindex))
if bod is not None or startat == 1:
break
y.set_lo(bod or 0)
else:
r = text.tag_prevrange("console", "insert")
if r:
startatindex = r[1]
else:
startatindex = "1.0"
rawtext = text.get(startatindex, "insert")
y.set_code(rawtext)
y.set_lo(0)
c = y.get_continuation_type()
if c != pyparse.C_NONE:
# The current statement hasn't ended yet.
if c == pyparse.C_STRING_FIRST_LINE:
# After the first line of a string do not indent at all.
pass
elif c == pyparse.C_STRING_NEXT_LINES:
# Inside a string which started before this line;
# just mimic the current indent.
text.insert("insert", indent)
elif c == pyparse.C_BRACKET:
# Line up with the first (if any) element of the
# last open bracket structure; else indent one
# level beyond the indent of the line with the
# last open bracket.
self.reindent_to(y.compute_bracket_indent())
elif c == pyparse.C_BACKSLASH:
# If more than one line in this statement already, just
# mimic the current indent; else if initial line
# has a start on an assignment stmt, indent to
# beyond leftmost =; else to beyond first chunk of
# non-whitespace on initial line.
if y.get_num_lines_in_stmt() > 1:
text.insert("insert", indent)
else:
self.reindent_to(y.compute_backslash_indent())
else:
assert 0, "bogus continuation type %r" % (c,)
return "break"
# This line starts a brand new statement; indent relative to
# indentation of initial line of closest preceding
# interesting statement.
indent = y.get_base_indent_string()
text.insert("insert", indent)
if y.is_block_opener():
self.smart_indent_event(event)
elif indent and y.is_block_closer():
self.smart_backspace_event(event)
return "break"
finally:
text.see("insert")
text.undo_block_stop()
# Our editwin provides an is_char_in_string function that works
# with a Tk text index, but PyParse only knows about offsets into
# a string. This builds a function for PyParse that accepts an
# offset.
def _build_char_in_string_func(self, startindex):
def inner(offset, _startindex=startindex,
_icis=self.is_char_in_string):
return _icis(_startindex + "+%dc" % offset)
return inner
# XXX this isn't bound to anything -- see tabwidth comments
## def change_tabwidth_event(self, event):
## new = self._asktabwidth()
## if new != self.tabwidth:
## self.tabwidth = new
## self.set_indentation_params(0, guess=0)
## return "break"
# Make string that displays as n leading blanks.
def _make_blanks(self, n):
if self.usetabs:
ntabs, nspaces = divmod(n, self.tabwidth)
return '\t' * ntabs + ' ' * nspaces
else:
return ' ' * n
# Delete from beginning of line to insert point, then reinsert
# column logical (meaning use tabs if appropriate) spaces.
def reindent_to(self, column):
text = self.text
text.undo_block_start()
if text.compare("insert linestart", "!=", "insert"):
text.delete("insert linestart", "insert")
if column:
text.insert("insert", self._make_blanks(column))
text.undo_block_stop()
# Guess indentwidth from text content.
# Return guessed indentwidth. This should not be believed unless
# it's in a reasonable range (e.g., it will be 0 if no indented
# blocks are found).
def guess_indent(self):
opener, indented = IndentSearcher(self.text, self.tabwidth).run()
if opener and indented:
raw, indentsmall = get_line_indent(opener, self.tabwidth)
raw, indentlarge = get_line_indent(indented, self.tabwidth)
else:
indentsmall = indentlarge = 0
return indentlarge - indentsmall
def toggle_line_numbers_event(self, event=None):
if self.line_numbers is None:
return
if self.line_numbers.is_shown:
self.line_numbers.hide_sidebar()
menu_label = "Show"
else:
self.line_numbers.show_sidebar()
menu_label = "Hide"
self.update_menu_label(menu='options', index='*Line Numbers',
label=f'{menu_label} Line Numbers')
# "line.col" -> line, as an int
def index2line(index):
return int(float(index))
_line_indent_re = re.compile(r'[ \t]*')
def get_line_indent(line, tabwidth):
"""Return a line's indentation as (# chars, effective # of spaces).
The effective # of spaces is the length after properly "expanding"
the tabs into spaces, as done by str.expandtabs(tabwidth).
"""
m = _line_indent_re.match(line)
return m.end(), len(m.group().expandtabs(tabwidth))
class IndentSearcher(object):
# .run() chews over the Text widget, looking for a block opener
# and the stmt following it. Returns a pair,
# (line containing block opener, line containing stmt)
# Either or both may be None.
def __init__(self, text, tabwidth):
self.text = text
self.tabwidth = tabwidth
self.i = self.finished = 0
self.blkopenline = self.indentedline = None
def readline(self):
if self.finished:
return ""
i = self.i = self.i + 1
mark = repr(i) + ".0"
if self.text.compare(mark, ">=", "end"):
return ""
return self.text.get(mark, mark + " lineend+1c")
def tokeneater(self, type, token, start, end, line,
INDENT=tokenize.INDENT,
NAME=tokenize.NAME,
OPENERS=('class', 'def', 'for', 'if', 'try', 'while')):
if self.finished:
pass
elif type == NAME and token in OPENERS:
self.blkopenline = line
elif type == INDENT and self.blkopenline:
self.indentedline = line
self.finished = 1
def run(self):
save_tabsize = tokenize.tabsize
tokenize.tabsize = self.tabwidth
try:
try:
tokens = tokenize.generate_tokens(self.readline)
for token in tokens:
self.tokeneater(*token)
except (tokenize.TokenError, SyntaxError):
# since we cut off the tokenizer early, we can trigger
# spurious errors
pass
finally:
tokenize.tabsize = save_tabsize
return self.blkopenline, self.indentedline
### end autoindent code ###
def prepstr(s):
# Helper to extract the underscore from a string, e.g.
# prepstr("Co_py") returns (2, "Copy").
i = s.find('_')
if i >= 0:
s = s[:i] + s[i+1:]
return i, s
keynames = {
'bracketleft': '[',
'bracketright': ']',
'slash': '/',
}
def get_accelerator(keydefs, eventname):
keylist = keydefs.get(eventname)
# issue10940: temporary workaround to prevent hang with OS X Cocoa Tk 8.5
# if not keylist:
if (not keylist) or (macosx.isCocoaTk() and eventname in {
"<<open-module>>",
"<<goto-line>>",
"<<change-indentwidth>>"}):
return ""
s = keylist[0]
s = re.sub(r"-[a-z]\b", lambda m: m.group().upper(), s)
s = re.sub(r"\b\w+\b", lambda m: keynames.get(m.group(), m.group()), s)
s = re.sub("Key-", "", s)
s = re.sub("Cancel","Ctrl-Break",s) # [email protected]
s = re.sub("Control-", "Ctrl-", s)
s = re.sub("-", "+", s)
s = re.sub("><", " ", s)
s = re.sub("<", "", s)
s = re.sub(">", "", s)
return s
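# Rough trace of the substitutions above for a hypothetical binding
# '<Control-c>' (assuming a non-Cocoa Tk, so the workaround does not apply):
# '<Control-c>' -> '<Control-C>' -> '<Ctrl-C>' -> '<Ctrl+C>' -> 'Ctrl+C',
# i.e. get_accelerator({'<<copy>>': ['<Control-c>']}, '<<copy>>') == 'Ctrl+C'.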
def fixwordbreaks(root):
# On Windows, tcl/tk breaks 'words' only on spaces, as in Command Prompt.
# We want Motif style everywhere. See #21474, msg218992 and followup.
tk = root.tk
tk.call('tcl_wordBreakAfter', 'a b', 0) # make sure word.tcl is loaded
tk.call('set', 'tcl_wordchars', r'\w')
tk.call('set', 'tcl_nonwordchars', r'\W')
def _editor_window(parent): # htest #
# error if close master window first - timer event, after script
root = parent
fixwordbreaks(root)
if sys.argv[1:]:
filename = sys.argv[1]
else:
filename = None
macosx.setupApp(root, None)
edit = EditorWindow(root=root, filename=filename)
text = edit.text
text['height'] = 10
for i in range(20):
text.insert('insert', ' '*i + str(i) + '\n')
# text.bind("<<close-all-windows>>", edit.close_event)
# Does not stop error, neither does following
# edit.text.bind("<<close-window>>", edit.close_event)
if __name__ == '__main__':
from unittest import main
main('idlelib.idle_test.test_editor', verbosity=2, exit=False)
from idlelib.idle_test.htest import run
run(_editor_window)
|
the-stack_106_17587
|
from flask import Flask, render_template, request, url_for, redirect
import csv
app = Flask(__name__)
@app.route('/')
def hello_world():
return render_template('index.html')
@app.route('/<string:page_name>')
def page_name(page_name):
return render_template(page_name)
def write_to_file(data):
with open('database.txt', mode='a') as database:
email = data['email']
subject = data['subject']
message = data['message']
database.write(f'\n {email}, {subject}, {message}')
def write_to_csv(data):
with open('database.csv', mode='a', newline='') as database2:
email = data['email']
subject = data['subject']
message = data['message']
csv_writer = csv.writer(database2, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
csv_writer.writerow([email, subject, message])
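# Illustrative result (hypothetical form data): for
# {'email': 'user@example.com', 'subject': 'hi', 'message': 'hello'} the
# helpers above append "user@example.com,hi,hello" to database.csv and a
# comma-separated line to database.txt; QUOTE_MINIMAL only quotes fields
# that contain the delimiter or quote character.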
@app.route('/submit_form', methods=['POST', 'GET'])
def submit_form():
if request.method == 'POST':
data = request.form.to_dict()
write_to_csv(data)
write_to_file(data)
return redirect('/thank.html')
else:
return 'Something went wrong! Please try again.'
|
the-stack_106_17588
|
# Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import difflib
import re
import time
import token
from tokenize import generate_tokens, untokenize
from robot.utils import StringIO
from robot.api import logger
from robot.errors import (ContinueForLoop, DataError, ExecutionFailed,
ExecutionFailures, ExecutionPassed, ExitForLoop,
PassExecution, ReturnFromKeyword)
from robot.running import Keyword, RUN_KW_REGISTER
from robot.running.context import EXECUTION_CONTEXTS
from robot.running.usererrorhandler import UserErrorHandler
from robot.utils import (asserts, DotDict, escape, format_assign_message,
get_error_message, get_time, is_falsy, is_integer,
is_string, is_truthy, is_unicode, JYTHON, Matcher,
normalize, NormalizedDict, parse_time, prepr,
RERAISED_EXCEPTIONS, plural_or_not as s,
secs_to_timestr, seq2str, split_from_equals,
timestr_to_secs, type_name, unic)
from robot.variables import (is_list_var, is_var, DictVariableTableValue,
VariableTableValue, VariableSplitter,
variable_not_found)
from robot.version import get_version
if JYTHON:
from java.lang import String, Number
# TODO: The name of this decorator should be changed. It is used for avoiding
# arguments to be resolved by many other keywords than run keyword variants.
# Should also consider:
# - Exposing this functionality to external libraries. Would require doc
# enhancements and clean way to expose variables to make resolving them
# based on needs easier.
# - Removing the functionality that run keyword variants can be overridden
# by custom keywords without a warning.
def run_keyword_variant(resolve):
def decorator(method):
RUN_KW_REGISTER.register_run_keyword('BuiltIn', method.__name__, resolve)
return method
return decorator
class _BuiltInBase(object):
@property
def _context(self):
return self._get_context()
def _get_context(self, top=False):
ctx = EXECUTION_CONTEXTS.current if not top else EXECUTION_CONTEXTS.top
if ctx is None:
raise RobotNotRunningError('Cannot access execution context')
return ctx
@property
def _namespace(self):
return self._get_context().namespace
def _get_namespace(self, top=False):
return self._get_context(top).namespace
@property
def _variables(self):
return self._namespace.variables
def _matches(self, string, pattern):
# Must use this instead of fnmatch when string may contain newlines.
matcher = Matcher(pattern, caseless=False, spaceless=False)
return matcher.match(string)
def _is_true(self, condition):
if is_string(condition):
condition = self.evaluate(condition, modules='os,sys')
return bool(condition)
def _log_types(self, *args):
self._log_types_at_level('DEBUG', *args)
def _log_types_at_level(self, level, *args):
msg = ["Argument types are:"] + [self._get_type(a) for a in args]
self.log('\n'.join(msg), level)
def _get_type(self, arg):
# In IronPython type(u'x') is str. We want to report unicode anyway.
if is_unicode(arg):
return "<type 'unicode'>"
return str(type(arg))
class _Converter(_BuiltInBase):
def convert_to_integer(self, item, base=None):
"""Converts the given item to an integer number.
If the given item is a string, it is by default expected to be an
integer in base 10. There are two ways to convert from other bases:
- Give base explicitly to the keyword as ``base`` argument.
- Prefix the given string with the base so that ``0b`` means binary
(base 2), ``0o`` means octal (base 8), and ``0x`` means hex (base 16).
The prefix is considered only when ``base`` argument is not given and
may itself be prefixed with a plus or minus sign.
The syntax is case-insensitive and possible spaces are ignored.
Examples:
| ${result} = | Convert To Integer | 100 | | # Result is 100 |
| ${result} = | Convert To Integer | FF AA | 16 | # Result is 65450 |
| ${result} = | Convert To Integer | 100 | 8 | # Result is 64 |
| ${result} = | Convert To Integer | -100 | 2 | # Result is -4 |
| ${result} = | Convert To Integer | 0b100 | | # Result is 4 |
| ${result} = | Convert To Integer | -0x100 | | # Result is -256 |
See also `Convert To Number`, `Convert To Binary`, `Convert To Octal`,
`Convert To Hex`, and `Convert To Bytes`.
"""
self._log_types(item)
return self._convert_to_integer(item, base)
def _convert_to_integer(self, orig, base=None):
try:
item = self._handle_java_numbers(orig)
item, base = self._get_base(item, base)
if base:
return int(item, self._convert_to_integer(base))
return int(item)
except:
raise RuntimeError("'%s' cannot be converted to an integer: %s"
% (orig, get_error_message()))
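# Illustrative conversions (sketch; the prefix handling lives in _get_base
# below):
#   >>> _Converter()._convert_to_integer('0b100')
#   4
#   >>> _Converter()._convert_to_integer('100', base=2)
#   4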
def _handle_java_numbers(self, item):
if not JYTHON:
return item
if isinstance(item, String):
return unic(item)
if isinstance(item, Number):
return item.doubleValue()
return item
def _get_base(self, item, base):
if not is_string(item):
return item, base
item = normalize(item)
if item.startswith(('-', '+')):
sign = item[0]
item = item[1:]
else:
sign = ''
bases = {'0b': 2, '0o': 8, '0x': 16}
if base or not item.startswith(tuple(bases)):
return sign+item, base
return sign+item[2:], bases[item[:2]]
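# Rough behavior of the prefix detection above (note that normalize()
# lower-cases the string first):
#   >>> _Converter()._get_base('-0x100', None)
#   ('-100', 16)
#   >>> _Converter()._get_base('100', 8)
#   ('100', 8)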
def convert_to_binary(self, item, base=None, prefix=None, length=None):
"""Converts the given item to a binary string.
The ``item``, with an optional ``base``, is first converted to an
integer using `Convert To Integer` internally. After that it
is converted to a binary number (base 2) represented as a
string such as ``1011``.
The returned value can contain an optional ``prefix`` and can be
required to be of minimum ``length`` (excluding the prefix and a
possible minus sign). If the value is initially shorter than
the required length, it is padded with zeros.
Examples:
| ${result} = | Convert To Binary | 10 | | | # Result is 1010 |
| ${result} = | Convert To Binary | F | base=16 | prefix=0b | # Result is 0b1111 |
| ${result} = | Convert To Binary | -2 | prefix=B | length=4 | # Result is -B0010 |
See also `Convert To Integer`, `Convert To Octal` and `Convert To Hex`.
"""
return self._convert_to_bin_oct_hex(bin, item, base, prefix, length)
def convert_to_octal(self, item, base=None, prefix=None, length=None):
"""Converts the given item to an octal string.
The ``item``, with an optional ``base``, is first converted to an
integer using `Convert To Integer` internally. After that it
is converted to an octal number (base 8) represented as a
string such as ``775``.
The returned value can contain an optional ``prefix`` and can be
required to be of minimum ``length`` (excluding the prefix and a
possible minus sign). If the value is initially shorter than
the required length, it is padded with zeros.
Examples:
| ${result} = | Convert To Octal | 10 | | | # Result is 12 |
| ${result} = | Convert To Octal | -F | base=16 | prefix=0 | # Result is -017 |
| ${result} = | Convert To Octal | 16 | prefix=oct | length=4 | # Result is oct0020 |
See also `Convert To Integer`, `Convert To Binary` and `Convert To Hex`.
"""
return self._convert_to_bin_oct_hex(oct, item, base, prefix, length)
def convert_to_hex(self, item, base=None, prefix=None, length=None,
lowercase=False):
"""Converts the given item to a hexadecimal string.
The ``item``, with an optional ``base``, is first converted to an
integer using `Convert To Integer` internally. After that it
is converted to a hexadecimal number (base 16) represented as
a string such as ``FF0A``.
The returned value can contain an optional ``prefix`` and can be
required to be of minimum ``length`` (excluding the prefix and a
possible minus sign). If the value is initially shorter than
the required length, it is padded with zeros.
By default the value is returned as an upper case string, but giving
the ``lowercase`` argument a true value (see `Boolean arguments`) turns
the value (but not the given prefix) to lower case.
Examples:
| ${result} = | Convert To Hex | 255 | | | # Result is FF |
| ${result} = | Convert To Hex | -10 | prefix=0x | length=2 | # Result is -0x0A |
| ${result} = | Convert To Hex | 255 | prefix=X | lowercase=yes | # Result is Xff |
See also `Convert To Integer`, `Convert To Binary` and `Convert To Octal`.
"""
return self._convert_to_bin_oct_hex(hex, item, base, prefix, length,
lowercase)
def _convert_to_bin_oct_hex(self, method, item, base, prefix, length,
lowercase=False):
self._log_types(item)
ret = method(self._convert_to_integer(item, base)).upper().rstrip('L')
prefix = prefix or ''
if ret[0] == '-':
prefix = '-' + prefix
ret = ret[1:]
if len(ret) > 1: # oct(0) -> '0' (i.e. has no prefix)
prefix_length = {bin: 2, oct: 1, hex: 2}[method]
ret = ret[prefix_length:]
if length:
ret = ret.rjust(self._convert_to_integer(length), '0')
if is_truthy(lowercase):
ret = ret.lower()
return prefix + ret
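# Worked trace of the helper above, matching the Convert To Binary example
# with item=-2, prefix='B', length=4: bin(-2) gives '-0b10', upper-cased to
# '-0B10'; the sign moves into the prefix ('-B'), the bare value loses its
# '0B' prefix, and '10' is zero-padded to '0010', giving '-B0010'.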
def convert_to_number(self, item, precision=None):
"""Converts the given item to a floating point number.
If the optional ``precision`` is positive or zero, the returned number
is rounded to that number of decimal digits. Negative precision means
that the number is rounded to the closest multiple of 10 to the power
of the absolute precision.
Examples:
| ${result} = | Convert To Number | 42.512 | | # Result is 42.512 |
| ${result} = | Convert To Number | 42.512 | 1 | # Result is 42.5 |
| ${result} = | Convert To Number | 42.512 | 0 | # Result is 43.0 |
| ${result} = | Convert To Number | 42.512 | -1 | # Result is 40.0 |
Notice that machines generally cannot store floating point numbers
accurately. This may cause surprises with these numbers in general
and also when they are rounded. For more information see, for example,
these resources:
- http://docs.python.org/2/tutorial/floatingpoint.html
- http://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition
If you need an integer number, use `Convert To Integer` instead.
"""
self._log_types(item)
return self._convert_to_number(item, precision)
def _convert_to_number(self, item, precision=None):
number = self._convert_to_number_without_precision(item)
if precision:
number = round(number, self._convert_to_integer(precision))
return number
def _convert_to_number_without_precision(self, item):
try:
if JYTHON:
item = self._handle_java_numbers(item)
return float(item)
except:
error = get_error_message()
try:
return float(self._convert_to_integer(item))
except RuntimeError:
raise RuntimeError("'%s' cannot be converted to a floating "
"point number: %s" % (item, error))
def convert_to_string(self, item):
"""Converts the given item to a Unicode string.
Uses ``__unicode__`` or ``__str__`` method with Python objects and
``toString`` with Java objects.
Use `Encode String To Bytes` and `Decode Bytes To String` keywords
in ``String`` library if you need to convert between Unicode and byte
strings using different encodings. Use `Convert To Bytes` if you just
want to create byte strings.
"""
self._log_types(item)
return self._convert_to_string(item)
def _convert_to_string(self, item):
return unic(item)
def convert_to_boolean(self, item):
"""Converts the given item to Boolean true or false.
Handles strings ``True`` and ``False`` (case-insensitive) as expected,
otherwise returns item's
[http://docs.python.org/2/library/stdtypes.html#truth|truth value]
using Python's ``bool()`` method.
"""
self._log_types(item)
if is_string(item):
if item.upper() == 'TRUE':
return True
if item.upper() == 'FALSE':
return False
return bool(item)
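# Worth noting (sketch): only the strings 'True' and 'False' are special-
# cased above, so convert_to_boolean('no') is True simply because 'no' is a
# non-empty string, while convert_to_boolean('FALSE') is False.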
def convert_to_bytes(self, input, input_type='text'):
u"""Converts the given ``input`` to bytes according to the ``input_type``.
Valid input types are listed below:
- ``text:`` Converts text to bytes character by character. All
characters with ordinal below 256 can be used and are converted to
bytes with same values. Many characters are easiest to represent
using escapes like ``\\x00`` or ``\\xff``.
- ``int:`` Converts integers separated by spaces to bytes. Similarly as
with `Convert To Integer`, it is possible to use binary, octal, or
hex values by prefixing the values with ``0b``, ``0o``, or ``0x``,
respectively.
- ``hex:`` Converts hexadecimal values to bytes. Single byte is always
two characters long (e.g. ``01`` or ``FF``). Spaces are ignored and
can be used freely as a visual separator.
- ``bin:`` Converts binary values to bytes. Single byte is always eight
characters long (e.g. ``00001010``). Spaces are ignored and can be
used freely as a visual separator.
In addition to giving the input as a string, it is possible to use
lists or other iterables containing individual characters or numbers.
In that case numbers do not need to be padded to certain length and
they cannot contain extra spaces.
Examples (last column shows returned bytes):
| ${bytes} = | Convert To Bytes | hyv\xe4 | | # hyv\\xe4 |
| ${bytes} = | Convert To Bytes | \\xff\\x07 | | # \\xff\\x07 |
| ${bytes} = | Convert To Bytes | 82 70 | int | # RF |
| ${bytes} = | Convert To Bytes | 0b10 0x10 | int | # \\x02\\x10 |
| ${bytes} = | Convert To Bytes | ff 00 07 | hex | # \\xff\\x00\\x07 |
| ${bytes} = | Convert To Bytes | 5246212121 | hex | # RF!!! |
| ${bytes} = | Convert To Bytes | 0000 1000 | bin | # \\x08 |
| ${input} = | Create List | 1 | 2 | 12 |
| ${bytes} = | Convert To Bytes | ${input} | int | # \\x01\\x02\\x0c |
| ${bytes} = | Convert To Bytes | ${input} | hex | # \\x01\\x02\\x12 |
Use `Encode String To Bytes` in ``String`` library if you need to
convert text to bytes using a certain encoding.
New in Robot Framework 2.8.2.
"""
try:
try:
ordinals = getattr(self, '_get_ordinals_from_%s' % input_type)
except AttributeError:
raise RuntimeError("Invalid input type '%s'." % input_type)
return bytes(bytearray(o for o in ordinals(input)))
except:
raise RuntimeError("Creating bytes failed: %s" % get_error_message())
def _get_ordinals_from_text(self, input):
for char in input:
yield self._test_ordinal(ord(char), char, 'Character')
def _test_ordinal(self, ordinal, original, type):
if 0 <= ordinal <= 255:
return ordinal
raise RuntimeError("%s '%s' cannot be represented as a byte."
% (type, original))
def _get_ordinals_from_int(self, input):
if is_string(input):
input = input.split()
elif is_integer(input):
input = [input]
for integer in input:
ordinal = self._convert_to_integer(integer)
yield self._test_ordinal(ordinal, integer, 'Integer')
def _get_ordinals_from_hex(self, input):
for token in self._input_to_tokens(input, length=2):
ordinal = self._convert_to_integer(token, base=16)
yield self._test_ordinal(ordinal, token, 'Hex value')
def _get_ordinals_from_bin(self, input):
for token in self._input_to_tokens(input, length=8):
ordinal = self._convert_to_integer(token, base=2)
yield self._test_ordinal(ordinal, token, 'Binary value')
def _input_to_tokens(self, input, length):
if not is_string(input):
return input
input = ''.join(input.split())
if len(input) % length != 0:
raise RuntimeError('Expected input to be multiple of %d.' % length)
return (input[i:i+length] for i in range(0, len(input), length))
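# Illustrative token split used by the hex/bin conversions above:
#   >>> list(_Converter()._input_to_tokens('ff 00 07', length=2))
#   ['ff', '00', '07']
# which Convert To Bytes with input_type=hex then turns into b'\xff\x00\x07'.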
def create_list(self, *items):
"""Returns a list containing given items.
The returned list can be assigned both to ``${scalar}`` and ``@{list}``
variables.
Examples:
| @{list} = | Create List | a | b | c |
| ${scalar} = | Create List | a | b | c |
| ${ints} = | Create List | ${1} | ${2} | ${3} |
"""
return list(items)
@run_keyword_variant(resolve=0)
def create_dictionary(self, *items):
"""Creates and returns a dictionary based on given items.
Items are given using ``key=value`` syntax same way as ``&{dictionary}``
variables are created in the Variable table. Both keys and values
can contain variables, and possible equal sign in key can be escaped
with a backslash like ``escaped\\=key=value``. It is also possible to
get items from existing dictionaries by simply using them like
``&{dict}``.
If same key is used multiple times, the last value has precedence.
The returned dictionary is ordered, and values with strings as keys
can also be accessed using convenient dot-access syntax like
``${dict.key}``.
Examples:
| &{dict} = | Create Dictionary | key=value | foo=bar |
| Should Be True | ${dict} == {'key': 'value', 'foo': 'bar'} |
| &{dict} = | Create Dictionary | ${1}=${2} | &{dict} | foo=new |
| Should Be True | ${dict} == {1: 2, 'key': 'value', 'foo': 'new'} |
| Should Be Equal | ${dict.key} | value |
This keyword was changed in Robot Framework 2.9 in many ways:
- Moved from ``Collections`` library to ``BuiltIn``.
- Support also non-string keys in ``key=value`` syntax.
- Deprecated old syntax to give keys and values separately.
- Returned dictionary is ordered and dot-accessible.
"""
separate, combined = self._split_dict_items(items)
if separate:
self.log("Giving keys and values separately to 'Create Dictionary' "
"keyword is deprecated. Use 'key=value' syntax instead.",
level='WARN')
separate = self._format_separate_dict_items(separate)
combined = DictVariableTableValue(combined).resolve(self._variables)
result = DotDict(separate)
result.update(combined)
return result
def _split_dict_items(self, items):
separate = []
for item in items:
name, value = split_from_equals(item)
if value is not None or VariableSplitter(item).is_dict_variable():
break
separate.append(item)
return separate, items[len(separate):]
def _format_separate_dict_items(self, separate):
separate = self._variables.replace_list(separate)
if len(separate) % 2 != 0:
raise DataError('Expected even number of keys and values, got %d.'
% len(separate))
return [separate[i:i+2] for i in range(0, len(separate), 2)]
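# Sketch of how Create Dictionary arguments are partitioned by the helpers
# above: items ('a', '1', 'b=2') split into separate ['a', '1'] (deprecated
# key/value pairs given separately) and combined ('b=2',) (key=value items),
# because collecting stops at the first item containing an unescaped '='.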
class _Verify(_BuiltInBase):
def _set_and_remove_tags(self, tags):
set_tags = [tag for tag in tags if not tag.startswith('-')]
remove_tags = [tag[1:] for tag in tags if tag.startswith('-')]
if remove_tags:
self.remove_tags(*remove_tags)
if set_tags:
self.set_tags(*set_tags)
def fail(self, msg=None, *tags):
"""Fails the test with the given message and optionally alters its tags.
The error message is specified using the ``msg`` argument.
It is possible to use HTML in the given error message, similarly
as with any other keyword accepting an error message, by prefixing
the error with ``*HTML*``.
It is possible to modify tags of the current test case by passing tags
after the message. Tags starting with a hyphen (e.g. ``-regression``)
are removed and others added. Tags are modified using `Set Tags` and
`Remove Tags` internally, and the semantics setting and removing them
are the same as with these keywords.
Examples:
| Fail | Test not ready | | | # Fails with the given message. |
| Fail | *HTML*<b>Test not ready</b> | | | # Fails using HTML in the message. |
| Fail | Test not ready | not-ready | | # Fails and adds 'not-ready' tag. |
| Fail | OS not supported | -regression | | # Removes tag 'regression'. |
| Fail | My message | tag | -t* | # Removes all tags starting with 't' except the newly added 'tag'. |
See `Fatal Error` if you need to stop the whole test execution.
Support for modifying tags was added in Robot Framework 2.7.4 and
HTML message support in 2.8.
"""
self._set_and_remove_tags(tags)
raise AssertionError(msg) if msg else AssertionError()
def fatal_error(self, msg=None):
"""Stops the whole test execution.
The test or suite where this keyword is used fails with the provided
message, and subsequent tests fail with a canned message.
Possible teardowns will nevertheless be executed.
See `Fail` if you only want to stop one test case unconditionally.
"""
error = AssertionError(msg) if msg else AssertionError()
error.ROBOT_EXIT_ON_FAILURE = True
raise error
def should_not_be_true(self, condition, msg=None):
"""Fails if the given condition is true.
See `Should Be True` for details about how ``condition`` is evaluated
and how ``msg`` can be used to override the default error message.
"""
if not msg:
msg = "'%s' should not be true." % condition
asserts.fail_if(self._is_true(condition), msg)
def should_be_true(self, condition, msg=None):
"""Fails if the given condition is not true.
If ``condition`` is a string (e.g. ``${rc} < 10``), it is evaluated as
a Python expression as explained in `Evaluating expressions` and the
keyword status is decided based on the result. If a non-string item is
given, the status is got directly from its
[http://docs.python.org/2/library/stdtypes.html#truth|truth value].
The default error message (``<condition> should be true``) is not very
informative, but it can be overridden with the ``msg`` argument.
Examples:
| Should Be True | ${rc} < 10 |
| Should Be True | '${status}' == 'PASS' | # Strings must be quoted |
| Should Be True | ${number} | # Passes if ${number} is not zero |
| Should Be True | ${list} | # Passes if ${list} is not empty |
Variables used like ``${variable}``, as in the examples above, are
replaced in the expression before evaluation. Variables are also
available in the evaluation namespace and can be accessed using special
syntax ``$variable``. This is a new feature in Robot Framework 2.9
and it is explained more thoroughly in `Evaluating expressions`.
Examples:
| Should Be True | $rc < 10 |
| Should Be True | $status == 'PASS' | # Expected string must be quoted |
Starting from Robot Framework 2.8, `Should Be True` automatically
imports Python's [http://docs.python.org/2/library/os.html|os] and
[http://docs.python.org/2/library/sys.html|sys] modules that contain
several useful attributes:
| Should Be True | os.linesep == '\\n' | # Unixy |
| Should Be True | os.linesep == '\\r\\n' | # Windows |
| Should Be True | sys.platform == 'darwin' | # OS X |
| Should Be True | sys.platform.startswith('java') | # Jython |
"""
if not msg:
msg = "'%s' should be true." % condition
asserts.fail_unless(self._is_true(condition), msg)
def should_be_equal(self, first, second, msg=None, values=True):
"""Fails if the given objects are unequal.
Optional ``msg`` and ``values`` arguments specify how to construct
the error message if this keyword fails:
- If ``msg`` is not given, the error message is ``<first> != <second>``.
- If ``msg`` is given and ``values`` gets a true value, the error
message is ``<msg>: <first> != <second>``.
- If ``msg`` is given and ``values`` gets a false value, the error
message is simply ``<msg>``.
``values`` is true by default, but can be turned to false by using,
for example, string ``false`` or ``no values``. See `Boolean arguments`
section for more details.
If both arguments are multiline strings, the comparison is done using
`multiline string comparisons`.
"""
self._log_types_at_info_if_different(first, second)
self._should_be_equal(first, second, msg, values)
def _should_be_equal(self, first, second, msg, values):
if first == second:
return
include_values = self._include_values(values)
if include_values and is_string(first) and is_string(second):
self._raise_multi_diff(first, second)
asserts.fail_unless_equal(first, second, msg, include_values)
def _log_types_at_info_if_different(self, first, second):
level = 'DEBUG' if type(first) == type(second) else 'INFO'
self._log_types_at_level(level, first, second)
def _raise_multi_diff(self, first, second):
first_lines, second_lines = first.splitlines(), second.splitlines()
if len(first_lines) < 3 or len(second_lines) < 3:
return
self.log("%s\n!=\n%s" % (first, second))
err = 'Multiline strings are different:\n'
for line in difflib.unified_diff(first_lines, second_lines,
fromfile='first', tofile='second',
lineterm=''):
err += line + '\n'
raise AssertionError(err)
def _include_values(self, values):
return is_truthy(values) and str(values).upper() != 'NO VALUES'
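# Sketch of the values handling above: values=True or values='yes' keeps the
# "<first> != <second>" part in the error message, whereas a Robot false
# string such as 'False' or the special value 'no values' drops it.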
def should_not_be_equal(self, first, second, msg=None, values=True):
"""Fails if the given objects are equal.
See `Should Be Equal` for an explanation on how to override the default
error message with ``msg`` and ``values``.
"""
self._log_types_at_info_if_different(first, second)
self._should_not_be_equal(first, second, msg, values)
def _should_not_be_equal(self, first, second, msg, values):
asserts.fail_if_equal(first, second, msg, self._include_values(values))
def should_not_be_equal_as_integers(self, first, second, msg=None,
values=True, base=None):
"""Fails if objects are equal after converting them to integers.
See `Convert To Integer` for information how to convert integers from
other bases than 10 using ``base`` argument or ``0b/0o/0x`` prefixes.
See `Should Be Equal` for an explanation on how to override the default
error message with ``msg`` and ``values``.
See `Should Be Equal As Integers` for some usage examples.
"""
self._log_types_at_info_if_different(first, second)
self._should_not_be_equal(self._convert_to_integer(first, base),
self._convert_to_integer(second, base),
msg, values)
def should_be_equal_as_integers(self, first, second, msg=None, values=True,
base=None):
"""Fails if objects are unequal after converting them to integers.
See `Convert To Integer` for information how to convert integers from
other bases than 10 using ``base`` argument or ``0b/0o/0x`` prefixes.
See `Should Be Equal` for an explanation on how to override the default
error message with ``msg`` and ``values``.
Examples:
| Should Be Equal As Integers | 42 | ${42} | Error message |
| Should Be Equal As Integers | ABCD | abcd | base=16 |
| Should Be Equal As Integers | 0b1011 | 11 |
"""
self._log_types_at_info_if_different(first, second)
self._should_be_equal(self._convert_to_integer(first, base),
self._convert_to_integer(second, base),
msg, values)
def should_not_be_equal_as_numbers(self, first, second, msg=None,
values=True, precision=6):
"""Fails if objects are equal after converting them to real numbers.
The conversion is done with `Convert To Number` keyword using the
given ``precision``.
See `Should Be Equal As Numbers` for examples on how to use
``precision`` and why it does not always work as expected. See also
`Should Be Equal` for an explanation on how to override the default
error message with ``msg`` and ``values``.
"""
self._log_types_at_info_if_different(first, second)
first = self._convert_to_number(first, precision)
second = self._convert_to_number(second, precision)
self._should_not_be_equal(first, second, msg, values)
def should_be_equal_as_numbers(self, first, second, msg=None, values=True,
precision=6):
"""Fails if objects are unequal after converting them to real numbers.
The conversion is done with `Convert To Number` keyword using the
given ``precision``.
Examples:
| Should Be Equal As Numbers | ${x} | 1.1 | | # Passes if ${x} is 1.1 |
| Should Be Equal As Numbers | 1.123 | 1.1 | precision=1 | # Passes |
| Should Be Equal As Numbers | 1.123 | 1.4 | precision=0 | # Passes |
| Should Be Equal As Numbers | 112.3 | 75 | precision=-2 | # Passes |
As discussed in the documentation of `Convert To Number`, machines
generally cannot store floating point numbers accurately. Because of
this limitation, comparing floats for equality is problematic and
a correct approach to use depends on the context. This keyword uses
a very naive approach of rounding the numbers before comparing them,
which is both prone to rounding errors and does not work very well if
numbers are really big or small. For more information about comparing
floats, and ideas on how to implement your own context specific
comparison algorithm, see
http://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/.
See `Should Not Be Equal As Numbers` for a negative version of this
keyword and `Should Be Equal` for an explanation on how to override
the default error message with ``msg`` and ``values``.
"""
self._log_types_at_info_if_different(first, second)
first = self._convert_to_number(first, precision)
second = self._convert_to_number(second, precision)
self._should_be_equal(first, second, msg, values)
def should_not_be_equal_as_strings(self, first, second, msg=None, values=True):
"""Fails if objects are equal after converting them to strings.
See `Should Be Equal` for an explanation on how to override the default
error message with ``msg`` and ``values``.
"""
self._log_types_at_info_if_different(first, second)
first, second = [self._convert_to_string(i) for i in (first, second)]
self._should_not_be_equal(first, second, msg, values)
def should_be_equal_as_strings(self, first, second, msg=None, values=True):
"""Fails if objects are unequal after converting them to strings.
See `Should Be Equal` for an explanation on how to override the default
error message with ``msg`` and ``values``.
If both arguments are multiline strings, the comparison is done using
`multiline string comparisons`.
"""
self._log_types_at_info_if_different(first, second)
first, second = [self._convert_to_string(i) for i in (first, second)]
self._should_be_equal(first, second, msg, values)
def should_not_start_with(self, str1, str2, msg=None, values=True):
"""Fails if the string ``str1`` starts with the string ``str2``.
See `Should Be Equal` for an explanation on how to override the default
error message with ``msg`` and ``values``.
"""
msg = self._get_string_msg(str1, str2, msg, values, 'starts with')
asserts.fail_if(str1.startswith(str2), msg)
def should_start_with(self, str1, str2, msg=None, values=True):
"""Fails if the string ``str1`` does not start with the string ``str2``.
See `Should Be Equal` for an explanation on how to override the default
error message with ``msg`` and ``values``.
"""
msg = self._get_string_msg(str1, str2, msg, values, 'does not start with')
asserts.fail_unless(str1.startswith(str2), msg)
def should_not_end_with(self, str1, str2, msg=None, values=True):
"""Fails if the string ``str1`` ends with the string ``str2``.
See `Should Be Equal` for an explanation on how to override the default
error message with ``msg`` and ``values``.
"""
msg = self._get_string_msg(str1, str2, msg, values, 'ends with')
asserts.fail_if(str1.endswith(str2), msg)
def should_end_with(self, str1, str2, msg=None, values=True):
"""Fails if the string ``str1`` does not end with the string ``str2``.
See `Should Be Equal` for an explanation on how to override the default
error message with ``msg`` and ``values``.
"""
msg = self._get_string_msg(str1, str2, msg, values, 'does not end with')
asserts.fail_unless(str1.endswith(str2), msg)
def should_not_contain(self, item1, item2, msg=None, values=True):
"""Fails if ``item1`` contains ``item2`` one or more times.
Works with strings, lists, and anything that supports Python's ``in``
operator. See `Should Be Equal` for an explanation on how to override
the default error message with ``msg`` and ``values``.
Examples:
| Should Not Contain | ${output} | FAILED |
| Should Not Contain | ${some_list} | value |
"""
msg = self._get_string_msg(item1, item2, msg, values, 'contains')
asserts.fail_if(item2 in item1, msg)
def should_contain(self, item1, item2, msg=None, values=True):
"""Fails if ``item1`` does not contain ``item2`` one or more times.
Works with strings, lists, and anything that supports Python's ``in``
operator. See `Should Be Equal` for an explanation on how to override
the default error message with ``msg`` and ``values``.
Examples:
| Should Contain | ${output} | PASS |
| Should Contain | ${some_list} | value |
"""
msg = self._get_string_msg(item1, item2, msg, values, 'does not contain')
asserts.fail_unless(item2 in item1, msg)
def should_contain_x_times(self, item1, item2, count, msg=None):
"""Fails if ``item1`` does not contain ``item2`` ``count`` times.
Works with strings, lists and all objects that `Get Count` works
with. The default error message can be overridden with ``msg`` and
the actual count is always logged.
Examples:
| Should Contain X Times | ${output} | hello | 2 |
| Should Contain X Times | ${some list} | value | 3 |
"""
count = self._convert_to_integer(count)
x = self.get_count(item1, item2)
if not msg:
msg = "'%s' contains '%s' %d time%s, not %d time%s." \
% (unic(item1), unic(item2), x, s(x), count, s(count))
self.should_be_equal_as_integers(x, count, msg, values=False)
def get_count(self, item1, item2):
"""Returns and logs how many times ``item2`` is found from ``item1``.
This keyword works with Python strings and lists and all objects
that either have ``count`` method or can be converted to Python lists.
Example:
| ${count} = | Get Count | ${some item} | interesting value |
| Should Be True | 5 < ${count} < 10 |
"""
if not hasattr(item1, 'count'):
try:
item1 = list(item1)
except:
raise RuntimeError("Converting '%s' to list failed: %s"
% (item1, get_error_message()))
count = item1.count(item2)
self.log('Item found from the first item %d time%s' % (count, s(count)))
return count
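# Illustrative counts (sketch): for item1='hello world', item2='o' the result
# is 2 via str.count; strings, lists and tuples already have a count method,
# while other iterables (e.g. a generator) are first converted with list().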
def should_not_match(self, string, pattern, msg=None, values=True):
"""Fails if the given ``string`` matches the given ``pattern``.
Pattern matching is similar to matching files in a shell, and it is
always case-sensitive. In the pattern, ``*`` matches anything and
``?`` matches any single character.
See `Should Be Equal` for an explanation on how to override the default
error message with ``msg`` and ``values``.
"""
msg = self._get_string_msg(string, pattern, msg, values, 'matches')
asserts.fail_if(self._matches(string, pattern), msg)
def should_match(self, string, pattern, msg=None, values=True):
"""Fails unless the given ``string`` matches the given ``pattern``.
Pattern matching is similar to matching files in a shell, and it is
always case-sensitive. In the pattern, ``*`` matches anything and
``?`` matches any single character.
See `Should Be Equal` for an explanation on how to override the default
error message with ``msg`` and ``values``.
"""
msg = self._get_string_msg(string, pattern, msg, values,
'does not match')
asserts.fail_unless(self._matches(string, pattern), msg)
def should_match_regexp(self, string, pattern, msg=None, values=True):
"""Fails if ``string`` does not match ``pattern`` as a regular expression.
Regular expression check is implemented using the Python
[http://docs.python.org/2/library/re.html|re module]. Python's regular
expression syntax is derived from Perl, and it is thus also very
similar to the syntax used, for example, in Java, Ruby and .NET.
Things to note about the regexp syntax in Robot Framework test data:
1) Backslash is an escape character in the test data, and possible
backslashes in the pattern must thus be escaped with another backslash
(e.g. ``\\\\d\\\\w+``).
2) Strings that may contain special characters, but should be handled
as literal strings, can be escaped with the `Regexp Escape` keyword.
3) The given pattern does not need to match the whole string. For
example, the pattern ``ello`` matches the string ``Hello world!``. If
a full match is needed, the ``^`` and ``$`` characters can be used to
denote the beginning and end of the string, respectively. For example,
``^ello$`` only matches the exact string ``ello``.
4) Possible flags altering how the expression is parsed (e.g.
``re.IGNORECASE``, ``re.MULTILINE``) can be set by prefixing the
pattern with the ``(?iLmsux)`` group like ``(?im)pattern``. The
available flags are ``i`` (case-insensitive), ``m`` (multiline mode),
``s`` (dotall mode), ``x`` (verbose), ``u`` (Unicode dependent) and
``L`` (locale dependent).
If this keyword passes, it returns the portion of the string that
matched the pattern. Additionally, the possible captured groups are
returned.
See the `Should Be Equal` keyword for an explanation on how to override
the default error message with the ``msg`` and ``values`` arguments.
Examples:
| Should Match Regexp | ${output} | \\\\d{6} | # Output contains six numbers |
| Should Match Regexp | ${output} | ^\\\\d{6}$ | # Six numbers and nothing more |
| ${ret} = | Should Match Regexp | Foo: 42 | (?i)foo: \\\\d+ |
| ${match} | ${group1} | ${group2} = |
| ... | Should Match Regexp | Bar: 43 | (Foo|Bar): (\\\\d+) |
=>
| ${ret} = 'Foo: 42'
| ${match} = 'Bar: 43'
| ${group1} = 'Bar'
| ${group2} = '43'
"""
msg = self._get_string_msg(string, pattern, msg, values, 'does not match')
res = re.search(pattern, string)
asserts.fail_if_none(res, msg, values=False)
match = res.group(0)
groups = res.groups()
if groups:
return [match] + list(groups)
return match
def should_not_match_regexp(self, string, pattern, msg=None, values=True):
"""Fails if ``string`` matches ``pattern`` as a regular expression.
See `Should Match Regexp` for more information about arguments.
"""
msg = self._get_string_msg(string, pattern, msg, values, 'matches')
asserts.fail_unless_none(re.search(pattern, string), msg, values=False)
def get_length(self, item):
"""Returns and logs the length of the given item as an integer.
The item can be anything that has a length, for example, a string,
a list, or a mapping. The keyword first tries to get the length with
the Python function ``len``, which calls the item's ``__len__`` method
internally. If that fails, the keyword tries to call the item's
possible ``length`` and ``size`` methods directly. The final attempt is
trying to get the value of the item's ``length`` attribute. If all
these attempts are unsuccessful, the keyword fails.
Examples:
| ${length} = | Get Length | Hello, world! | |
| Should Be Equal As Integers | ${length} | 13 |
| @{list} = | Create List | Hello, | world! |
| ${length} = | Get Length | ${list} | |
| Should Be Equal As Integers | ${length} | 2 |
See also `Length Should Be`, `Should Be Empty` and `Should Not Be
Empty`.
"""
length = self._get_length(item)
self.log('Length is %d' % length)
return length
def _get_length(self, item):
try:
return len(item)
except RERAISED_EXCEPTIONS:
raise
except:
try:
return item.length()
except RERAISED_EXCEPTIONS:
raise
except:
try:
return item.size()
except RERAISED_EXCEPTIONS:
raise
except:
try:
return item.length
except RERAISED_EXCEPTIONS:
raise
except:
raise RuntimeError("Could not get length of '%s'." % item)
def length_should_be(self, item, length, msg=None):
"""Verifies that the length of the given item is correct.
The length of the item is got using the `Get Length` keyword. The
default error message can be overridden with the ``msg`` argument.
"""
length = self._convert_to_integer(length)
actual = self.get_length(item)
if actual != length:
raise AssertionError(msg or "Length of '%s' should be %d but is %d."
% (item, length, actual))
def should_be_empty(self, item, msg=None):
"""Verifies that the given item is empty.
The length of the item is got using the `Get Length` keyword. The
default error message can be overridden with the ``msg`` argument.
"""
if self.get_length(item) > 0:
raise AssertionError(msg or "'%s' should be empty." % item)
def should_not_be_empty(self, item, msg=None):
"""Verifies that the given item is not empty.
The length of the item is got using the `Get Length` keyword. The
default error message can be overridden with the ``msg`` argument.
"""
if self.get_length(item) == 0:
raise AssertionError(msg or "'%s' should not be empty." % item)
def _get_string_msg(self, str1, str2, msg, values, delim):
default = "'%s' %s '%s'" % (unic(str1), delim, unic(str2))
if not msg:
msg = default
elif self._include_values(values):
msg = '%s: %s' % (msg, default)
return msg
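# Sketch of the messages built above: with str1='robot', str2='x' and
# delim='does not contain', the default message is
# "'robot' does not contain 'x'"; a custom msg with values left true becomes
# "custom: 'robot' does not contain 'x'", and with values turned off the
# custom msg is used as-is.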
class _Variables(_BuiltInBase):
def get_variables(self, no_decoration=False):
"""Returns a dictionary containing all variables in the current scope.
Variables are returned as a special dictionary that allows accessing
variables in a space-, case-, and underscore-insensitive manner, similarly
to accessing variables in the test data. This dictionary supports all the
same operations as normal Python dictionaries and, for example, the
Collections library can be used to access or modify it. Modifying the
returned dictionary has no effect on the variables available in the
current scope.
By default variables are returned with ``${}``, ``@{}`` or ``&{}``
decoration based on variable types. Giving a true value (see `Boolean
arguments`) to the optional argument ``no_decoration`` will return
the variables without the decoration. This option is new in Robot
Framework 2.9.
Example:
| ${example_variable} = | Set Variable | example value |
| ${variables} = | Get Variables | |
| Dictionary Should Contain Key | ${variables} | \\${example_variable} |
| Dictionary Should Contain Key | ${variables} | \\${ExampleVariable} |
| Set To Dictionary | ${variables} | \\${name} | value |
| Variable Should Not Exist | \\${name} | | |
| ${no decoration} = | Get Variables | no_decoration=Yes |
| Dictionary Should Contain Key | ${no decoration} | example_variable |
Note: Prior to Robot Framework 2.7.4 variables were returned as
a custom object that did not support all dictionary methods.
"""
return self._variables.as_dict(decoration=is_falsy(no_decoration))
@run_keyword_variant(resolve=0)
def get_variable_value(self, name, default=None):
"""Returns variable value or ``default`` if the variable does not exist.
The name of the variable can be given either as a normal variable name
(e.g. ``${NAME}``) or in escaped format (e.g. ``\\${NAME}``). Notice
that the former has some limitations explained in `Set Suite Variable`.
Examples:
| ${x} = | Get Variable Value | ${a} | default |
| ${y} = | Get Variable Value | ${a} | ${b} |
| ${z} = | Get Variable Value | ${z} | |
=>
| ${x} gets value of ${a} if ${a} exists and string 'default' otherwise
| ${y} gets value of ${a} if ${a} exists and value of ${b} otherwise
| ${z} is set to Python None if it does not exist previously
See `Set Variable If` for another keyword to set variables dynamically.
"""
try:
return self._variables[self._get_var_name(name)]
except DataError:
return self._variables.replace_scalar(default)
def log_variables(self, level='INFO'):
"""Logs all variables in the current scope with given log level."""
variables = self.get_variables()
for name in sorted(variables, key=lambda s: s[2:-1].lower()):
msg = format_assign_message(name, variables[name], cut_long=False)
self.log(msg, level)
@run_keyword_variant(resolve=0)
def variable_should_exist(self, name, msg=None):
"""Fails unless the given variable exists within the current scope.
The name of the variable can be given either as a normal variable name
(e.g. ``${NAME}``) or in escaped format (e.g. ``\\${NAME}``). Notice
that the former has some limitations explained in `Set Suite Variable`.
The default error message can be overridden with the ``msg`` argument.
See also `Variable Should Not Exist` and `Keyword Should Exist`.
"""
name = self._get_var_name(name)
msg = self._variables.replace_string(msg) if msg \
else "Variable %s does not exist." % name
try:
self._variables[name]
except DataError:
raise AssertionError(msg)
@run_keyword_variant(resolve=0)
def variable_should_not_exist(self, name, msg=None):
"""Fails if the given variable exists within the current scope.
The name of the variable can be given either as a normal variable name
(e.g. ``${NAME}``) or in escaped format (e.g. ``\\${NAME}``). Notice
that the former has some limitations explained in `Set Suite Variable`.
The default error message can be overridden with the ``msg`` argument.
See also `Variable Should Exist` and `Keyword Should Exist`.
"""
name = self._get_var_name(name)
msg = self._variables.replace_string(msg) if msg \
else "Variable %s exists." % name
try:
self._variables[name]
except DataError:
pass
else:
raise AssertionError(msg)
def replace_variables(self, text):
"""Replaces variables in the given text with their current values.
If the text contains undefined variables, this keyword fails.
If the given ``text`` contains only a single variable, its value is
returned as-is and it can be any object. Otherwise this keyword
always returns a string.
Example:
The file ``template.txt`` contains ``Hello ${NAME}!`` and variable
``${NAME}`` has the value ``Robot``.
| ${template} = | Get File | ${CURDIR}/template.txt |
| ${message} = | Replace Variables | ${template} |
| Should Be Equal | ${message} | Hello Robot! |
"""
return self._variables.replace_scalar(text)
def set_variable(self, *values):
"""Returns the given values which can then be assigned to a variables.
This keyword is mainly used for setting scalar variables.
Additionally it can be used for converting a scalar variable
containing a list to a list variable or to multiple scalar variables.
It is recommended to use `Create List` when creating new lists.
Examples:
| ${hi} = | Set Variable | Hello, world! |
| ${hi2} = | Set Variable | I said: ${hi} |
| ${var1} | ${var2} = | Set Variable | Hello | world |
| @{list} = | Set Variable | ${list with some items} |
| ${item1} | ${item2} = | Set Variable | ${list with 2 items} |
Variables created with this keyword are available only in the
scope where they are created. See `Set Global Variable`,
`Set Test Variable` and `Set Suite Variable` for information on how to
set variables so that they are available also in a larger scope.
"""
if len(values) == 0:
return ''
elif len(values) == 1:
return values[0]
else:
return list(values)
@run_keyword_variant(resolve=0)
def set_test_variable(self, name, *values):
"""Makes a variable available everywhere within the scope of the current test.
Variables set with this keyword are available everywhere within the
scope of the currently executed test case. For example, if you set a
variable in a user keyword, it is available both in the test case level
and also in all other user keywords used in the current test. Other
test cases will not see variables set with this keyword.
See `Set Suite Variable` for more information and examples.
"""
name = self._get_var_name(name)
value = self._get_var_value(name, values)
self._variables.set_test(name, value)
self._log_set_variable(name, value)
@run_keyword_variant(resolve=0)
def set_suite_variable(self, name, *values):
"""Makes a variable available everywhere within the scope of the current suite.
Variables set with this keyword are available everywhere within the
scope of the currently executed test suite. Setting variables with this
keyword thus has the same effect as creating them using the Variable
table in the test data file or importing them from variable files.
Possible child test suites do not see variables set with this keyword
by default. Starting from Robot Framework 2.9, that can be controlled
by using ``children=<option>`` as the last argument. If the specified
``<option>`` is a non-empty string or any other value considered true
in Python, the variable is set also to the child suites. Parent and
sibling suites will never see variables set with this keyword.
The name of the variable can be given either as a normal variable name
(e.g. ``${NAME}``) or in escaped format as ``\\${NAME}`` or ``$NAME``.
Variable value can be given using the same syntax as when variables
are created in the Variable table.
If a variable already exists within the new scope, its value will be
overwritten. Otherwise a new variable is created. If a variable already
exists within the current scope, the value can be left empty and the
variable within the new scope gets the value within the current scope.
Examples:
| Set Suite Variable | ${SCALAR} | Hello, world! |
| Set Suite Variable | ${SCALAR} | Hello, world! | children=true |
| Set Suite Variable | @{LIST} | First item | Second item |
| Set Suite Variable | &{DICT} | key=value | foo=bar |
| ${ID} = | Get ID |
| Set Suite Variable | ${ID} |
To override an existing value with an empty value, use built-in
variables ``${EMPTY}``, ``@{EMPTY}`` or ``&{EMPTY}``:
| Set Suite Variable | ${SCALAR} | ${EMPTY} |
| Set Suite Variable | @{LIST} | @{EMPTY} | # New in RF 2.7.4 |
| Set Suite Variable | &{DICT} | &{EMPTY} | # New in RF 2.9 |
*NOTE:* If the variable has value which itself is a variable (escaped
or not), you must always use the escaped format to set the variable:
Example:
| ${NAME} = | Set Variable | \\${var} |
| Set Suite Variable | ${NAME} | value | # Sets variable ${var} |
| Set Suite Variable | \\${NAME} | value | # Sets variable ${NAME} |
This limitation applies also to `Set Test Variable`, `Set Global
Variable`, `Variable Should Exist`, `Variable Should Not Exist` and
`Get Variable Value` keywords.
"""
name = self._get_var_name(name)
if (values and is_string(values[-1]) and
values[-1].startswith('children=')):
children = self._variables.replace_scalar(values[-1][9:])
children = is_truthy(children)
values = values[:-1]
else:
children = False
value = self._get_var_value(name, values)
self._variables.set_suite(name, value, children=children)
self._log_set_variable(name, value)
@run_keyword_variant(resolve=0)
def set_global_variable(self, name, *values):
"""Makes a variable available globally in all tests and suites.
Variables set with this keyword are globally available in all test
cases and suites executed after setting them. Setting variables with
this keyword thus has the same effect as creating them from the command line
using the options ``--variable`` or ``--variablefile``. Because this
keyword can change variables everywhere, it should be used with care.
See `Set Suite Variable` for more information and examples.
"""
name = self._get_var_name(name)
value = self._get_var_value(name, values)
self._variables.set_global(name, value)
self._log_set_variable(name, value)
# Helpers
def _get_var_name(self, orig):
name = self._resolve_possible_variable(orig)
try:
return self._unescape_variable_if_needed(name)
except ValueError:
raise RuntimeError("Invalid variable syntax '%s'." % orig)
def _resolve_possible_variable(self, name):
try:
resolved = self._variables.replace_string(name)
return self._unescape_variable_if_needed(resolved)
except (KeyError, ValueError, DataError):
return name
def _unescape_variable_if_needed(self, name):
if name.startswith('\\'):
name = name[1:]
if len(name) < 2:
raise ValueError
if name[0] in '$@&' and name[1] != '{':
name = '%s{%s}' % (name[0], name[1:])
if is_var(name):
return name
# Support for possible internal variables (issue 397)
name = '%s{%s}' % (name[0], self.replace_variables(name[2:-1]))
if is_var(name):
return name
raise ValueError
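# Rough examples of the name normalization above: '\${NAME}' has its escape
# stripped and comes back as '${NAME}', and the bare form '$NAME' is
# rewritten to '${NAME}'; anything still not a valid variable name after the
# internal-variable fallback raises ValueError.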
def _get_var_value(self, name, values):
if not values:
return self._variables[name]
# TODO: In RF 2.10/3.0 the if branch below can be removed and
# VariableTableValue used with all variables. See issue #1919.
if name[0] == '$':
if len(values) != 1 or VariableSplitter(values[0]).is_list_variable():
raise DataError("Setting list value to scalar variable '%s' "
"is not supported anymore. Create list "
"variable '@%s' instead." % (name, name[1:]))
return self._variables.replace_scalar(values[0])
return VariableTableValue(values, name).resolve(self._variables)
def _log_set_variable(self, name, value):
self.log(format_assign_message(name, value))
class _RunKeyword(_BuiltInBase):
# If you use any of these run keyword variants from another library, you
# should register those keywords with 'register_run_keyword' method. See
# the documentation of that method at the end of this file. There are also
# other run keyword variant keywords in BuiltIn which can also be seen
# at the end of this file.
@run_keyword_variant(resolve=1)
def run_keyword(self, name, *args):
"""Executes the given keyword with the given arguments.
Because the name of the keyword to execute is given as an argument, it
can be a variable and thus set dynamically, e.g. from a return value of
another keyword or from the command line.
"""
if not is_string(name):
raise RuntimeError('Keyword name must be a string.')
kw = Keyword(name, args=args)
return kw.run(self._context)
@run_keyword_variant(resolve=0)
def run_keywords(self, *keywords):
"""Executes all the given keywords in a sequence.
This keyword is mainly useful in setups and teardowns when they need
to take care of multiple actions and creating a new higher level user
keyword would be an overkill.
By default all arguments are expected to be keywords to be executed.
Examples:
| Run Keywords | Initialize database | Start servers | Clear logs |
| Run Keywords | ${KW 1} | ${KW 2} |
| Run Keywords | @{KEYWORDS} |
Starting from Robot Framework 2.7.6, keywords can also be run with
arguments using upper case ``AND`` as a separator between keywords.
The keywords are executed so that the first argument is the first
keyword and proceeding arguments until the first ``AND`` are arguments
to it. First argument after the first ``AND`` is the second keyword and
proceeding arguments until the next ``AND`` are its arguments. And so on.
Examples:
| Run Keywords | Initialize database | db1 | AND | Start servers | server1 | server2 |
| Run Keywords | Initialize database | ${DB NAME} | AND | Start servers | @{SERVERS} | AND | Clear logs |
| Run Keywords | ${KW} | AND | @{KW WITH ARGS} |
Notice that the ``AND`` control argument must be used explicitly and
cannot itself come from a variable. If you need to use literal ``AND``
string as argument, you can either use variables or escape it with
a backslash like ``\\AND``.
"""
self._run_keywords(self._split_run_keywords(list(keywords)))
def _run_keywords(self, iterable):
errors = []
for kw, args in iterable:
try:
self.run_keyword(kw, *args)
except ExecutionPassed as err:
err.set_earlier_failures(errors)
raise err
except ExecutionFailed as err:
errors.extend(err.get_errors())
if not err.can_continue(self._context.in_teardown):
break
if errors:
raise ExecutionFailures(errors)
def _split_run_keywords(self, keywords):
if 'AND' not in keywords:
for name in self._variables.replace_list(keywords):
yield name, ()
else:
for name, args in self._split_run_keywords_from_and(keywords):
yield name, args
def _split_run_keywords_from_and(self, keywords):
while 'AND' in keywords:
index = keywords.index('AND')
yield self._resolve_run_keywords_name_and_args(keywords[:index])
keywords = keywords[index+1:]
yield self._resolve_run_keywords_name_and_args(keywords)
def _resolve_run_keywords_name_and_args(self, kw_call):
kw_call = self._variables.replace_list(kw_call, replace_until=1)
if not kw_call:
raise DataError('Incorrect use of AND')
return kw_call[0], kw_call[1:]
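# Sketch of the AND splitting above: the call
#   Run Keywords | Init DB | db1 | AND | Start Servers
# yields 'Init DB' with arguments ['db1'] and then 'Start Servers' with no
# arguments, keyword names being resolved via replace_list(..., replace_until=1).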
@run_keyword_variant(resolve=2)
def run_keyword_if(self, condition, name, *args):
"""Runs the given keyword with the given arguments, if ``condition`` is true.
The given ``condition`` is evaluated in Python as explained in
`Evaluating expressions`, and ``name`` and ``*args`` have same
semantics as with `Run Keyword`.
Example, a simple if/else construct:
| ${status} | ${value} = | `Run Keyword And Ignore Error` | `My Keyword` |
| `Run Keyword If` | '${status}' == 'PASS' | `Some Action` | arg |
| `Run Keyword Unless` | '${status}' == 'PASS' | `Another Action` |
In this example, only either `Some Action` or `Another Action` is
executed, based on the status of `My Keyword`. Instead of `Run Keyword
And Ignore Error` you can also use `Run Keyword And Return Status`.
Variables used like ``${variable}``, as in the examples above, are
replaced in the expression before evaluation. Variables are also
available in the evaluation namespace and can be accessed using special
syntax ``$variable``. This is a new feature in Robot Framework 2.9
and it is explained more thoroughly in `Evaluating expressions`.
Example:
| `Run Keyword If` | $result is None or $result == 'FAIL' | `Keyword` |
Starting from Robot Framework 2.7.4, this keyword supports also optional
ELSE and ELSE IF branches. Both of these are defined in ``*args`` and
must be exactly ``ELSE`` or ``ELSE IF``, respectively. ELSE
branches must contain first the name of the keyword to execute and then
its possible arguments. ELSE IF branches must first contain a condition,
like the first argument to this keyword, and then the keyword to execute
and its possible arguments. It is possible to have ELSE branch after
ELSE IF and to have multiple ELSE IF branches.
Given previous example, if/else construct can also be created like this:
| ${status} | ${value} = | `Run Keyword And Ignore Error` | My Keyword |
| `Run Keyword If` | '${status}' == 'PASS' | `Some Action` | arg | ELSE | `Another Action` |
The return value is the one of the keyword that was executed or None if
no keyword was executed (i.e. if ``condition`` was false). Hence, it is
recommended to use ELSE and/or ELSE IF branches to conditionally assign
return values from a keyword to variables (to conditionally assign fixed
values to variables, see `Set Variable If`). This is illustrated by the
example below:
| ${var1} = | `Run Keyword If` | ${rc} == 0 | `Some keyword returning a value` |
| ... | ELSE IF | 0 < ${rc} < 42 | `Another keyword` |
| ... | ELSE IF | ${rc} < 0 | `Another keyword with args` | ${rc} | arg2 |
| ... | ELSE | `Final keyword to handle abnormal cases` | ${rc} |
| ${var2} = | `Run Keyword If` | ${condition} | `Some keyword` |
In this example, ${var2} will be set to None if ${condition} is false.
Notice that ``ELSE`` and ``ELSE IF`` control words must be used
explicitly and thus cannot come from variables. If you need to use
literal ``ELSE`` and ``ELSE IF`` strings as arguments, you can escape
them with a backslash like ``\\ELSE`` and ``\\ELSE IF``.
Starting from Robot Framework 2.8, Python's
[http://docs.python.org/2/library/os.html|os] and
[http://docs.python.org/2/library/sys.html|sys] modules are
automatically imported when evaluating the ``condition``.
Attributes they contain can thus be used in the condition:
| `Run Keyword If` | os.sep == '/' | `Unix Keyword` |
| ... | ELSE IF | sys.platform.startswith('java') | `Jython Keyword` |
| ... | ELSE | `Windows Keyword` |
"""
args, branch = self._split_elif_or_else_branch(args)
if self._is_true(condition):
return self.run_keyword(name, *args)
return branch()
def _split_elif_or_else_branch(self, args):
if 'ELSE IF' in args:
args, branch = self._split_branch(args, 'ELSE IF', 2,
'condition and keyword')
return args, lambda: self.run_keyword_if(*branch)
if 'ELSE' in args:
args, branch = self._split_branch(args, 'ELSE', 1, 'keyword')
return args, lambda: self.run_keyword(*branch)
return args, lambda: None
def _split_branch(self, args, control_word, required, required_error):
index = list(args).index(control_word)
branch = self._variables.replace_list(args[index+1:], required)
if len(branch) < required:
raise DataError('%s requires %s.' % (control_word, required_error))
return args[:index], branch
@run_keyword_variant(resolve=2)
def run_keyword_unless(self, condition, name, *args):
"""Runs the given keyword with the given arguments, if ``condition`` is false.
See `Run Keyword If` for more information and an example.
"""
if not self._is_true(condition):
return self.run_keyword(name, *args)
@run_keyword_variant(resolve=1)
def run_keyword_and_ignore_error(self, name, *args):
"""Runs the given keyword with the given arguments and ignores possible error.
This keyword returns two values, so that the first is either string
``PASS`` or ``FAIL``, depending on the status of the executed keyword.
The second value is either the return value of the keyword or the
received error message. See `Run Keyword And Return Status` if you are
only interested in the execution status.
The keyword name and arguments work as in `Run Keyword`. See
`Run Keyword If` for a usage example.
Errors caused by invalid syntax, timeouts, or fatal exceptions are not
caught by this keyword. Otherwise this keyword itself never fails.
Since Robot Framework 2.9, variable errors are caught by this keyword.
"""
try:
return 'PASS', self.run_keyword(name, *args)
except ExecutionFailed as err:
if err.dont_continue:
raise
return 'FAIL', unic(err)
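# Sketch of calling this keyword from a test library ('My Keyword' is a
# hypothetical keyword name):
#
#     status, result = BuiltIn().run_keyword_and_ignore_error('My Keyword', 'arg')
#     if status == 'FAIL':
#         print('Keyword failed with: %s' % result)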
@run_keyword_variant(resolve=1)
def run_keyword_and_return_status(self, name, *args):
"""Runs the given keyword with given arguments and returns the status as a Boolean value.
This keyword returns Boolean ``True`` if the keyword that is executed
succeeds and ``False`` if it fails. This is useful, for example, in
combination with `Run Keyword If`. If you are interested in the error
message or return value, use `Run Keyword And Ignore Error` instead.
The keyword name and arguments work as in `Run Keyword`.
Example:
| ${passed} = | `Run Keyword And Return Status` | Keyword | args |
| `Run Keyword If` | ${passed} | Another keyword |
Errors caused by invalid syntax, timeouts, or fatal exceptions are not
caught by this keyword. Otherwise this keyword itself never fails.
New in Robot Framework 2.7.6.
"""
status, _ = self.run_keyword_and_ignore_error(name, *args)
return status == 'PASS'
@run_keyword_variant(resolve=1)
def run_keyword_and_continue_on_failure(self, name, *args):
"""Runs the keyword and continues execution even if a failure occurs.
The keyword name and arguments work as with `Run Keyword`.
Example:
| Run Keyword And Continue On Failure | Fail | This is a stupid example |
| Log | This keyword is executed |
The execution is not continued if the failure is caused by invalid syntax,
timeout, or fatal exception.
Since Robot Framework 2.9, variable errors are caught by this keyword.
"""
try:
return self.run_keyword(name, *args)
except ExecutionFailed as err:
if not err.dont_continue:
err.continue_on_failure = True
raise err
@run_keyword_variant(resolve=2)
def run_keyword_and_expect_error(self, expected_error, name, *args):
"""Runs the keyword and checks that the expected error occurred.
The expected error must be given in the same format as in
Robot Framework reports. It can be a pattern containing
characters ``?``, which matches any single character, and
``*``, which matches any number of any characters. ``name`` and
``*args`` have same semantics as with `Run Keyword`.
If the expected error occurs, the error message is returned and it can
be further processed/tested, if needed. If there is no error, or the
error does not match the expected error, this keyword fails.
Examples:
| Run Keyword And Expect Error | My error | Some Keyword | arg1 | arg2 |
| ${msg} = | Run Keyword And Expect Error | * | My KW |
| Should Start With | ${msg} | Once upon a time in |
Errors caused by invalid syntax, timeouts, or fatal exceptions are not
caught by this keyword.
Since Robot Framework 2.9, variable errors are caught by this keyword.
"""
try:
self.run_keyword(name, *args)
except ExecutionFailed as err:
if err.dont_continue:
raise
error = err
else:
raise AssertionError("Expected error '%s' did not occur."
% expected_error)
if not self._matches(unic(error), expected_error):
raise AssertionError("Expected error '%s' but got '%s'."
% (expected_error, error))
return unic(error)
@run_keyword_variant(resolve=2)
def repeat_keyword(self, times, name, *args):
"""Executes the specified keyword multiple times.
``name`` and ``args`` define the keyword that is executed
similarly as with `Run Keyword`, and ``times`` specifies how many times
the keyword should be executed. ``times`` can be given as an
integer or as a string that can be converted to an integer. If it is
a string, it can have postfix ``times`` or ``x`` (case and space
insensitive) to make the expression more explicit.
If ``times`` is zero or negative, the keyword is not executed at
all. This keyword fails immediately if any of the execution
rounds fails.
Examples:
| Repeat Keyword | 5 times | Go to Previous Page |
| Repeat Keyword | ${var} | Some Keyword | arg1 | arg2 |
"""
times = self._get_times_to_repeat(times)
self._run_keywords(self._yield_repeated_keywords(times, name, args))
def _get_times_to_repeat(self, times, require_postfix=False):
times = normalize(str(times))
if times.endswith('times'):
times = times[:-5]
elif times.endswith('x'):
times = times[:-1]
elif require_postfix:
raise ValueError
return self._convert_to_integer(times)
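# Accepted ``times`` formats handled above (illustrative): normalize() lowercases
# and removes spaces, so '5', '5 Times' and '10x' all end up as plain integer
# strings '5', '5' and '10' before _convert_to_integer is called.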
def _yield_repeated_keywords(self, times, name, args):
if times <= 0:
self.log("Keyword '%s' repeated zero times." % name)
for i in range(times):
self.log("Repeating keyword, round %d/%d." % (i+1, times))
yield name, args
@run_keyword_variant(resolve=3)
def wait_until_keyword_succeeds(self, retry, retry_interval, name, *args):
"""Runs the specified keyword and retries if it fails.
``name`` and ``args`` define the keyword that is executed similarly
as with `Run Keyword`. How long to retry running the keyword is
defined using ``retry`` argument either as timeout or count.
``retry_interval`` is the time to wait before trying to run the
keyword again after the previous run has failed.
If ``retry`` is given as timeout, it must be in Robot Framework's
time format (e.g. ``1 minute``, ``2 min 3 s``, ``4.5``) that is
explained in an appendix of Robot Framework User Guide. If it is
given as count, it must have ``times`` or ``x`` postfix (e.g.
``5 times``, ``10 x``). ``retry_interval`` must always be given in
Robot Framework's time format.
If the keyword does not succeed regardless of retries, this keyword
fails. If the executed keyword passes, its return value is returned.
Examples:
| Wait Until Keyword Succeeds | 2 min | 5 sec | My keyword | argument |
| ${result} = | Wait Until Keyword Succeeds | 3x | 200ms | My keyword |
All normal failures are caught by this keyword. Errors caused by
invalid syntax, test or keyword timeouts, or fatal exceptions (caused
e.g. by `Fatal Error`) are not caught.
Running the same keyword multiple times inside this keyword can create
lots of output and considerably increase the size of the generated
output files. Starting from Robot Framework 2.7, it is possible to
remove unnecessary keywords from the outputs using
``--RemoveKeywords WUKS`` command line option.
Support for specifying ``retry`` as a number of times to retry is
a new feature in Robot Framework 2.9.
Since Robot Framework 2.9, variable errors are caught by this keyword.
"""
maxtime = count = -1
try:
count = self._get_times_to_repeat(retry, require_postfix=True)
except ValueError:
timeout = timestr_to_secs(retry)
maxtime = time.time() + timeout
message = 'for %s' % secs_to_timestr(timeout)
else:
if count <= 0:
raise ValueError('Retry count %d is not positive.' % count)
message = '%d time%s' % (count, s(count))
retry_interval = timestr_to_secs(retry_interval)
while True:
try:
return self.run_keyword(name, *args)
except ExecutionFailed as err:
if err.dont_continue:
raise
count -= 1
if time.time() > maxtime > 0 or count == 0:
raise AssertionError("Keyword '%s' failed after retrying "
"%s. The last error was: %s"
% (name, message, err))
self._sleep_in_parts(retry_interval)
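# Programmatic sketch ('Check Status' is a hypothetical keyword): retry for up
# to 2 minutes with 5 seconds between attempts, or at most 3 attempts:
#
#     BuiltIn().wait_until_keyword_succeeds('2 min', '5 sec', 'Check Status')
#     BuiltIn().wait_until_keyword_succeeds('3x', '200 ms', 'Check Status')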
@run_keyword_variant(resolve=1)
def set_variable_if(self, condition, *values):
"""Sets variable based on the given condition.
The basic usage is giving a condition and two values. The
given condition is first evaluated the same way as with the
`Should Be True` keyword. If the condition is true, then the
first value is returned, and otherwise the second value is
returned. The second value can also be omitted, in which case
it has a default value None. This usage is illustrated in the
examples below, where ``${rc}`` is assumed to be zero.
| ${var1} = | Set Variable If | ${rc} == 0 | zero | nonzero |
| ${var2} = | Set Variable If | ${rc} > 0 | value1 | value2 |
| ${var3} = | Set Variable If | ${rc} > 0 | whatever | |
=>
| ${var1} = 'zero'
| ${var2} = 'value2'
| ${var3} = None
It is also possible to have 'else if' support by replacing the
second value with another condition, and having two new values
after it. If the first condition is not true, the second is
evaluated and one of the values after it is returned based on
its truth value. This can be continued by adding more
conditions without a limit.
| ${var} = | Set Variable If | ${rc} == 0 | zero |
| ... | ${rc} > 0 | greater than zero | less than zero |
| |
| ${var} = | Set Variable If |
| ... | ${rc} == 0 | zero |
| ... | ${rc} == 1 | one |
| ... | ${rc} == 2 | two |
| ... | ${rc} > 2 | greater than two |
| ... | ${rc} < 0 | less than zero |
Use `Get Variable Value` if you need to set variables
dynamically based on whether a variable exists or not.
"""
values = self._verify_values_for_set_variable_if(list(values))
if self._is_true(condition):
return self._variables.replace_scalar(values[0])
values = self._verify_values_for_set_variable_if(values[1:], True)
if len(values) == 1:
return self._variables.replace_scalar(values[0])
return self.run_keyword('BuiltIn.Set Variable If', *values[0:])
def _verify_values_for_set_variable_if(self, values, default=False):
if not values:
if default:
return [None]
raise RuntimeError('At least one value is required')
if is_list_var(values[0]):
values[:1] = [escape(item) for item in self._variables[values[0]]]
return self._verify_values_for_set_variable_if(values)
return values
@run_keyword_variant(resolve=1)
def run_keyword_if_test_failed(self, name, *args):
"""Runs the given keyword with the given arguments, if the test failed.
This keyword can only be used in a test teardown. Trying to use it
anywhere else results in an error.
Otherwise, this keyword works exactly like `Run Keyword`, see its
documentation for more details.
Prior to Robot Framework 2.9 failures in test teardown itself were
not detected by this keyword.
"""
test = self._get_test_in_teardown('Run Keyword If Test Failed')
if not test.passed:
return self.run_keyword(name, *args)
@run_keyword_variant(resolve=1)
def run_keyword_if_test_passed(self, name, *args):
"""Runs the given keyword with the given arguments, if the test passed.
This keyword can only be used in a test teardown. Trying to use it
anywhere else results in an error.
Otherwise, this keyword works exactly like `Run Keyword`, see its
documentation for more details.
Prior to Robot Framework 2.9 failures in test teardown itself were
not detected by this keyword.
"""
test = self._get_test_in_teardown('Run Keyword If Test Passed')
if test.passed:
return self.run_keyword(name, *args)
@run_keyword_variant(resolve=1)
def run_keyword_if_timeout_occurred(self, name, *args):
"""Runs the given keyword if either a test or a keyword timeout has occurred.
This keyword can only be used in a test teardown. Trying to use it
anywhere else results in an error.
Otherwise, this keyword works exactly like `Run Keyword`, see its
documentation for more details.
"""
self._get_test_in_teardown('Run Keyword If Timeout Occurred')
if self._context.timeout_occurred:
return self.run_keyword(name, *args)
def _get_test_in_teardown(self, kwname):
ctx = self._context
if ctx.test and ctx.in_test_teardown:
return ctx.test
raise RuntimeError("Keyword '%s' can only be used in test teardown."
% kwname)
@run_keyword_variant(resolve=1)
def run_keyword_if_all_critical_tests_passed(self, name, *args):
"""Runs the given keyword with the given arguments, if all critical tests passed.
This keyword can only be used in suite teardown. Trying to use it in
any other place will result in an error.
Otherwise, this keyword works exactly like `Run Keyword`, see its
documentation for more details.
"""
suite = self._get_suite_in_teardown('Run Keyword If '
'All Critical Tests Passed')
if suite.statistics.critical.failed == 0:
return self.run_keyword(name, *args)
@run_keyword_variant(resolve=1)
def run_keyword_if_any_critical_tests_failed(self, name, *args):
"""Runs the given keyword with the given arguments, if any critical tests failed.
This keyword can only be used in a suite teardown. Trying to use it
anywhere else results in an error.
Otherwise, this keyword works exactly like `Run Keyword`, see its
documentation for more details.
"""
suite = self._get_suite_in_teardown('Run Keyword If '
'Any Critical Tests Failed')
if suite.statistics.critical.failed > 0:
return self.run_keyword(name, *args)
@run_keyword_variant(resolve=1)
def run_keyword_if_all_tests_passed(self, name, *args):
"""Runs the given keyword with the given arguments, if all tests passed.
This keyword can only be used in a suite teardown. Trying to use it
anywhere else results in an error.
Otherwise, this keyword works exactly like `Run Keyword`, see its
documentation for more details.
"""
suite = self._get_suite_in_teardown('Run Keyword If All Tests Passed')
if suite.statistics.all.failed == 0:
return self.run_keyword(name, *args)
@run_keyword_variant(resolve=1)
def run_keyword_if_any_tests_failed(self, name, *args):
"""Runs the given keyword with the given arguments, if one or more tests failed.
This keyword can only be used in a suite teardown. Trying to use it
anywhere else results in an error.
Otherwise, this keyword works exactly like `Run Keyword`, see its
documentation for more details.
"""
suite = self._get_suite_in_teardown('Run Keyword If Any Tests Failed')
if suite.statistics.all.failed > 0:
return self.run_keyword(name, *args)
def _get_suite_in_teardown(self, kwname):
if not self._context.in_suite_teardown:
raise RuntimeError("Keyword '%s' can only be used in suite teardown."
% kwname)
return self._context.suite
class _Control(_BuiltInBase):
def continue_for_loop(self):
"""Skips the current for loop iteration and continues from the next.
Skips the remaining keywords in the current for loop iteration and
continues from the next one. Can be used directly in a for loop or
in a keyword that the loop uses.
Example:
| :FOR | ${var} | IN | @{VALUES} |
| | Run Keyword If | '${var}' == 'CONTINUE' | Continue For Loop |
| | Do Something | ${var} |
See `Continue For Loop If` to conditionally continue a for loop without
using `Run Keyword If` or other wrapper keywords.
New in Robot Framework 2.8.
"""
self.log("Continuing for loop from the next iteration.")
raise ContinueForLoop()
def continue_for_loop_if(self, condition):
"""Skips the current for loop iteration if the ``condition`` is true.
A wrapper for `Continue For Loop` to continue a for loop based on
the given condition. The condition is evaluated using the same
semantics as with `Should Be True` keyword.
Example:
| :FOR | ${var} | IN | @{VALUES} |
| | Continue For Loop If | '${var}' == 'CONTINUE' |
| | Do Something | ${var} |
New in Robot Framework 2.8.
"""
if self._is_true(condition):
self.continue_for_loop()
def exit_for_loop(self):
"""Stops executing the enclosing for loop.
Exits the enclosing for loop and continues execution after it.
Can be used directly in a for loop or in a keyword that the loop uses.
Example:
| :FOR | ${var} | IN | @{VALUES} |
| | Run Keyword If | '${var}' == 'EXIT' | Exit For Loop |
| | Do Something | ${var} |
See `Exit For Loop If` to conditionally exit a for loop without
using `Run Keyword If` or other wrapper keywords.
"""
self.log("Exiting for loop altogether.")
raise ExitForLoop()
def exit_for_loop_if(self, condition):
"""Stops executing the enclosing for loop if the ``condition`` is true.
A wrapper for `Exit For Loop` to exit a for loop based on
the given condition. The condition is evaluated using the same
semantics as with `Should Be True` keyword.
Example:
| :FOR | ${var} | IN | @{VALUES} |
| | Exit For Loop If | '${var}' == 'EXIT' |
| | Do Something | ${var} |
New in Robot Framework 2.8.
"""
if self._is_true(condition):
self.exit_for_loop()
@run_keyword_variant(resolve=0)
def return_from_keyword(self, *return_values):
"""Returns from the enclosing user keyword.
This keyword can be used to return from a user keyword with PASS status
without executing it fully. It is also possible to return values
similarly as with the ``[Return]`` setting. For more detailed information
about working with the return values, see the User Guide.
This keyword is typically wrapped to some other keyword, such as
`Run Keyword If` or `Run Keyword If Test Passed`, to return based
on a condition:
| Run Keyword If | ${rc} < 0 | Return From Keyword |
| Run Keyword If Test Passed | Return From Keyword |
It is possible to use this keyword to return from a keyword also inside
a for loop. That, as well as returning values, is demonstrated by the
`Find Index` keyword in the following somewhat advanced example.
Notice that it is often a good idea to move this kind of complicated
logic into a test library.
| ***** Variables *****
| @{LIST} = foo baz
|
| ***** Test Cases *****
| Example
| ${index} = Find Index baz @{LIST}
| Should Be Equal ${index} ${1}
| ${index} = Find Index non existing @{LIST}
| Should Be Equal ${index} ${-1}
|
| ***** Keywords *****
| Find Index
| [Arguments] ${element} @{items}
| ${index} = Set Variable ${0}
| :FOR ${item} IN @{items}
| \\ Run Keyword If '${item}' == '${element}' Return From Keyword ${index}
| \\ ${index} = Set Variable ${index + 1}
| Return From Keyword ${-1} # Also [Return] would work here.
The most common use case, returning based on an expression, can be
accomplished directly with `Return From Keyword If`. Both of these
keywords are new in Robot Framework 2.8.
See also `Run Keyword And Return` and `Run Keyword And Return If`.
"""
self.log('Returning from the enclosing user keyword.')
raise ReturnFromKeyword(return_values)
@run_keyword_variant(resolve=1)
def return_from_keyword_if(self, condition, *return_values):
"""Returns from the enclosing user keyword if ``condition`` is true.
A wrapper for `Return From Keyword` to return based on the given
condition. The condition is evaluated using the same semantics as
with `Should Be True` keyword.
Given the same example as in `Return From Keyword`, we can rewrite the
`Find Index` keyword as follows:
| ***** Keywords *****
| Find Index
| [Arguments] ${element} @{items}
| ${index} = Set Variable ${0}
| :FOR ${item} IN @{items}
| \\ Return From Keyword If '${item}' == '${element}' ${index}
| \\ ${index} = Set Variable ${index + 1}
| Return From Keyword ${-1} # Also [Return] would work here.
See also `Run Keyword And Return` and `Run Keyword And Return If`.
New in Robot Framework 2.8.
"""
if self._is_true(condition):
self.return_from_keyword(*return_values)
@run_keyword_variant(resolve=1)
def run_keyword_and_return(self, name, *args):
"""Runs the specified keyword and returns from the enclosing user keyword.
The keyword to execute is defined with ``name`` and ``*args`` exactly
like with `Run Keyword`. After running the keyword, returns from the
enclosing user keyword and passes possible return value from the
executed keyword further. Returning from a keyword has exactly same
semantics as with `Return From Keyword`.
Example:
| `Run Keyword And Return` | `My Keyword` | arg1 | arg2 |
| # Above is equivalent to: |
| ${result} = | `My Keyword` | arg1 | arg2 |
| `Return From Keyword` | ${result} | | |
Use `Run Keyword And Return If` if you want to run keyword and return
based on a condition.
New in Robot Framework 2.8.2.
"""
ret = self.run_keyword(name, *args)
self.return_from_keyword(escape(ret))
@run_keyword_variant(resolve=2)
def run_keyword_and_return_if(self, condition, name, *args):
"""Runs the specified keyword and returns from the enclosing user keyword.
A wrapper for `Run Keyword And Return` to run and return based on
the given ``condition``. The condition is evaluated using the same
semantics as with `Should Be True` keyword.
Example:
| `Run Keyword And Return If` | ${rc} > 0 | `My Keyword` | arg1 | arg2 |
| # Above is equivalent to: |
| `Run Keyword If` | ${rc} > 0 | `Run Keyword And Return` | `My Keyword ` | arg1 | arg2 |
Use `Return From Keyword If` if you want to return a certain value
based on a condition.
New in Robot Framework 2.8.2.
"""
if self._is_true(condition):
self.run_keyword_and_return(name, *args)
def pass_execution(self, message, *tags):
"""Skips rest of the current test, setup, or teardown with PASS status.
This keyword can be used anywhere in the test data, but the place where
it is used affects the behavior:
- When used in any setup or teardown (suite, test or keyword), passes
that setup or teardown. Possible keyword teardowns of the started
keywords are executed. Does not affect execution or statuses
otherwise.
- When used in a test outside setup or teardown, passes that particular
test case. Possible test and keyword teardowns are executed.
Possible continuable failures before this keyword is used, as well as
failures in executed teardowns, will fail the execution.
It is mandatory to give a message explaining why execution was passed.
By default the message is considered plain text, but starting it with
``*HTML*`` allows using HTML formatting.
It is also possible to modify test tags by passing tags after the message
similarly as with `Fail` keyword. Tags starting with a hyphen
(e.g. ``-regression``) are removed and others added. Tags are modified
using `Set Tags` and `Remove Tags` internally, and the semantics of
setting and removing them are the same as with these keywords.
Examples:
| Pass Execution | All features available in this version tested. |
| Pass Execution | Deprecated test. | deprecated | -regression |
This keyword is typically wrapped to some other keyword, such as
`Run Keyword If`, to pass based on a condition. The most common case
can be handled also with `Pass Execution If`:
| Run Keyword If | ${rc} < 0 | Pass Execution | Negative values are cool. |
| Pass Execution If | ${rc} < 0 | Negative values are cool. |
Passing execution in the middle of a test, setup or teardown should be
used with care. In the worst case it leads to tests that skip all the
parts that could actually uncover problems in the tested application.
In cases where execution cannot continue due to external factors,
it is often safer to fail the test case and make it non-critical.
New in Robot Framework 2.8.
"""
message = message.strip()
if not message:
raise RuntimeError('Message cannot be empty.')
self._set_and_remove_tags(tags)
log_message, level = self._get_logged_test_message_and_level(message)
self.log('Execution passed with message:\n%s' % log_message, level)
raise PassExecution(message)
@run_keyword_variant(resolve=1)
def pass_execution_if(self, condition, message, *tags):
"""Conditionally skips rest of the current test, setup, or teardown with PASS status.
A wrapper for `Pass Execution` to skip rest of the current test,
setup or teardown based on the given ``condition``. The condition is
evaluated similarly as with `Should Be True` keyword, and ``message``
and ``*tags`` have same semantics as with `Pass Execution`.
Example:
| :FOR | ${var} | IN | @{VALUES} |
| | Pass Execution If | '${var}' == 'EXPECTED' | Correct value was found |
| | Do Something | ${var} |
New in Robot Framework 2.8.
"""
if self._is_true(condition):
message = self._variables.replace_string(message)
tags = [self._variables.replace_string(tag) for tag in tags]
self.pass_execution(message, *tags)
class _Misc(_BuiltInBase):
def no_operation(self):
"""Does absolutely nothing."""
def sleep(self, time_, reason=None):
"""Pauses the test executed for the given time.
``time`` may be either a number or a time string. Time strings are in
a format such as ``1 day 2 hours 3 minutes 4 seconds 5 milliseconds`` or
``1d 2h 3m 4s 5ms``, and they are fully explained in an appendix of
Robot Framework User Guide. Optional ``reason`` can be used to explain why
sleeping is necessary. Both the time slept and the reason are logged.
Examples:
| Sleep | 42 |
| Sleep | 1.5 |
| Sleep | 2 minutes 10 seconds |
| Sleep | 10s | Wait for a reply |
"""
seconds = timestr_to_secs(time_)
# Python hangs with negative values
if seconds < 0:
seconds = 0
self._sleep_in_parts(seconds)
self.log('Slept %s' % secs_to_timestr(seconds))
if reason:
self.log(reason)
def _sleep_in_parts(self, seconds):
# time.sleep can't be stopped in windows
# to ensure that we can signal stop (with timeout)
# split sleeping to small pieces
endtime = time.time() + float(seconds)
while True:
remaining = endtime - time.time()
if remaining <= 0:
break
time.sleep(min(remaining, 0.5))
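# Example of the chunking above: a 1.2 second sleep is executed as roughly
# 0.5 s + 0.5 s + 0.2 s pieces, so a stop signal is noticed within about
# half a second instead of only after the full sleep.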
def catenate(self, *items):
"""Catenates the given items together and returns the resulted string.
By default, items are catenated with spaces, but if the first item
contains the string ``SEPARATOR=<sep>``, the separator ``<sep>`` is
used instead. Items are converted into strings when necessary.
Examples:
| ${str1} = | Catenate | Hello | world | |
| ${str2} = | Catenate | SEPARATOR=--- | Hello | world |
| ${str3} = | Catenate | SEPARATOR= | Hello | world |
=>
| ${str1} = 'Hello world'
| ${str2} = 'Hello---world'
| ${str3} = 'Helloworld'
"""
if not items:
return ''
items = [unic(item) for item in items]
if items[0].startswith('SEPARATOR='):
sep = items[0][len('SEPARATOR='):]
items = items[1:]
else:
sep = ' '
return sep.join(items)
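# Python-level illustration of the separator handling above:
#
#     BuiltIn().catenate('Hello', 'world')                  # 'Hello world'
#     BuiltIn().catenate('SEPARATOR=--', 'Hello', 'world')  # 'Hello--world'
#     BuiltIn().catenate('SEPARATOR=', 'Hello', 'world')    # 'Helloworld'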
def log(self, message, level='INFO', html=False, console=False, repr=False):
u"""Logs the given message with the given level.
Valid levels are TRACE, DEBUG, INFO (default), HTML, WARN, and ERROR.
Messages below the current active log level are ignored. See
`Set Log Level` keyword and ``--loglevel`` command line option
for more details about setting the level.
Messages logged with the WARN or ERROR levels will be automatically
visible also in the console and in the Test Execution Errors section
in the log file.
Logging can be configured using optional ``html``, ``console`` and
``repr`` arguments. They are off by default, but can be enabled
by giving them a true value. See `Boolean arguments` section for more
information about true and false values.
If the ``html`` argument is given a true value, the message will be
considered HTML and special characters such as ``<`` in it are not
escaped. For example, logging ``<img src="image.png">`` creates an
image when ``html`` is true, but otherwise the message is that exact
string. An alternative to using the ``html`` argument is using the HTML
pseudo log level. It logs the message as HTML using the INFO level.
If the ``console`` argument is true, the message will be written to
the console where test execution was started from in addition to
the log file. This keyword always uses the standard output stream
and adds a newline after the written message. Use `Log To Console`
instead if either of these is undesirable.
If the ``repr`` argument is true, the given item will be passed through
a custom version of Python's ``pprint.pformat()`` function before
logging it. This is useful, for example, when working with strings or
bytes containing invisible characters, or when working with nested data
structures. The custom version differs from the standard one so that it
omits the ``u`` prefix from Unicode strings and adds ``b`` prefix to
byte strings.
Examples:
| Log | Hello, world! | | | # Normal INFO message. |
| Log | Warning, world! | WARN | | # Warning. |
| Log | <b>Hello</b>, world! | html=yes | | # INFO message as HTML. |
| Log | <b>Hello</b>, world! | HTML | | # Same as above. |
| Log | <b>Hello</b>, world! | DEBUG | html=true | # DEBUG as HTML. |
| Log | Hello, console! | console=yes | | # Log also to the console. |
| Log | Hyv\xe4 \\x00 | repr=yes | | # Log ``'Hyv\\xe4 \\x00'``. |
See `Log Many` if you want to log multiple messages in one go, and
`Log To Console` if you only want to write to the console.
Arguments ``html``, ``console``, and ``repr`` are new in Robot Framework
2.8.2.
Pprint support when ``repr`` is used is new in Robot Framework 2.8.6,
and it was changed to drop the ``u`` prefix and add the ``b`` prefix
in Robot Framework 2.9.
"""
if is_truthy(repr):
message = prepr(message, width=80)
logger.write(message, level, is_truthy(html))
if is_truthy(console):
logger.console(message)
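# Sketch of using this keyword from library code (messages are illustrative):
#
#     BuiltIn().log('<b>Done</b>', level='INFO', html=True)
#     BuiltIn().log('Progress update', console=True)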
@run_keyword_variant(resolve=0)
def log_many(self, *messages):
"""Logs the given messages as separate entries using the INFO level.
Supports also logging list and dictionary variable items individually.
Examples:
| Log Many | Hello | ${var} |
| Log Many | @{list} | &{dict} |
See `Log` and `Log To Console` keywords if you want to use alternative
log levels, use HTML, or log to the console.
"""
for msg in self._yield_logged_messages(messages):
self.log(msg)
def _yield_logged_messages(self, messages):
for msg in messages:
var = VariableSplitter(msg)
value = self._variables.replace_scalar(msg)
if var.is_list_variable():
for item in value:
yield item
elif var.is_dict_variable():
for name, value in value.items():
yield '%s=%s' % (name, value)
else:
yield value
def log_to_console(self, message, stream='STDOUT', no_newline=False):
"""Logs the given message to the console.
By default uses the standard output stream. Using the standard error
stream is possible by giving the ``stream`` argument the value ``STDERR``
(case-insensitive).
By default appends a newline to the logged message. This can be
disabled by giving the ``no_newline`` argument a true value (see
`Boolean arguments`).
Examples:
| Log To Console | Hello, console! | |
| Log To Console | Hello, stderr! | STDERR |
| Log To Console | Message starts here and is | no_newline=true |
| Log To Console | continued without newline. | |
This keyword does not log the message to the normal log file. Use
`Log` keyword, possibly with argument ``console``, if that is desired.
New in Robot Framework 2.8.2.
"""
logger.console(message, newline=is_falsy(no_newline), stream=stream)
@run_keyword_variant(resolve=0)
def comment(self, *messages):
"""Displays the given messages in the log file as keyword arguments.
This keyword does nothing with the arguments it receives, but as they
are visible in the log, this keyword can be used to display simple
messages. Given arguments are ignored so thoroughly that they can even
contain non-existing variables. If you are interested in variable
values, you can use the `Log` or `Log Many` keywords.
"""
pass
def set_log_level(self, level):
"""Sets the log threshold to the specified level and returns the old level.
Messages below the level will not be logged. The default logging level is
INFO, but it can be overridden with the command line option
``--loglevel``.
The available levels: TRACE, DEBUG, INFO (default), WARN, ERROR and NONE (no
logging).
"""
try:
old = self._context.output.set_log_level(level)
except DataError as err:
raise RuntimeError(unic(err))
self._namespace.variables.set_global('${LOG_LEVEL}', level.upper())
self.log('Log level changed from %s to %s' % (old, level.upper()))
return old
def reload_library(self, name_or_instance):
"""Rechecks what keywords the specified library provides.
Can be called explicitly in the test data or by a library itself
when keywords it provides have changed.
The library can be specified by its name or as the active instance of
the library. The latter is especially useful if the library itself
calls this keyword as a method.
New in Robot Framework 2.9.
"""
library = self._namespace.reload_library(name_or_instance)
self.log('Reloaded library %s with %s keywords.' % (library.name,
len(library)))
@run_keyword_variant(resolve=0)
def import_library(self, name, *args):
"""Imports a library with the given name and optional arguments.
This functionality allows dynamic importing of libraries while tests
are running. That may be necessary, if the library itself is dynamic
and not yet available when test data is processed. In a normal case,
libraries should be imported using the Library setting in the Setting
table.
This keyword supports importing libraries both using library
names and physical paths. When paths are used, they must be
given in absolute format. Forward slashes can be used as path
separators in all operating systems.
It is possible to pass arguments to the imported library and also
named argument syntax works if the library supports it. ``WITH NAME``
syntax can be used to give a custom name to the imported library.
Examples:
| Import Library | MyLibrary |
| Import Library | ${CURDIR}/../Library.py | arg1 | named=arg2 |
| Import Library | ${LIBRARIES}/Lib.java | arg | WITH NAME | JavaLib |
"""
try:
self._namespace.import_library(name, list(args))
except DataError as err:
raise RuntimeError(unic(err))
@run_keyword_variant(resolve=0)
def import_variables(self, path, *args):
"""Imports a variable file with the given path and optional arguments.
Variables imported with this keyword are set into the test suite scope
similarly as when importing them in the Setting table using the Variables
setting. These variables override possible existing variables with
the same names. This functionality can thus be used to import new
variables, for example, for each test in a test suite.
The given path must be absolute. Forward slashes can be used as the path
separator regardless of the operating system.
Examples:
| Import Variables | ${CURDIR}/variables.py | | |
| Import Variables | ${CURDIR}/../vars/env.py | arg1 | arg2 |
"""
try:
self._namespace.import_variables(path, list(args), overwrite=True)
except DataError as err:
raise RuntimeError(unic(err))
@run_keyword_variant(resolve=0)
def import_resource(self, path):
"""Imports a resource file with the given path.
Resources imported with this keyword are set into the test suite scope
similarly as when importing them in the Setting table using the Resource
setting.
The given path must be absolute. Forward slashes can be used as the path
separator regardless of the operating system.
Examples:
| Import Resource | ${CURDIR}/resource.txt |
| Import Resource | ${CURDIR}/../resources/resource.html |
"""
try:
self._namespace.import_resource(path)
except DataError as err:
raise RuntimeError(unic(err))
def set_library_search_order(self, *search_order):
"""Sets the resolution order to use when a name matches multiple keywords.
The library search order is used to resolve conflicts when a keyword
name in the test data matches multiple keywords. The first library
(or resource, see below) containing the keyword is selected and that
keyword implementation used. If the keyword is not found from any library
(or resource), test execution fails the same way as when the search
order is not set.
When this keyword is used, there is no need to use the long
``LibraryName.Keyword Name`` notation. For example, instead of
having
| MyLibrary.Keyword | arg |
| MyLibrary.Another Keyword |
| MyLibrary.Keyword | xxx |
you can have
| Set Library Search Order | MyLibrary |
| Keyword | arg |
| Another Keyword |
| Keyword | xxx |
This keyword can be used also to set the order of keywords in different
resource files. In this case resource names must be given without paths
or extensions like:
| Set Library Search Order | resource | another_resource |
*NOTE:*
- The search order is valid only in the suite where this keyword is used.
- Keywords in resources always have higher priority than
keywords in libraries regardless the search order.
- The old order is returned and can be used to reset the search order later.
- Library and resource names in the search order are both case and space
insensitive.
"""
return self._namespace.set_search_order(search_order)
def keyword_should_exist(self, name, msg=None):
"""Fails unless the given keyword exists in the current scope.
Fails also if there is more than one keyword with the same name.
Works both with the short name (e.g. ``Log``) and the full name
(e.g. ``BuiltIn.Log``).
The default error message can be overridden with the ``msg`` argument.
See also `Variable Should Exist`.
"""
try:
handler = self._namespace.get_handler(name)
if isinstance(handler, UserErrorHandler):
handler.run()
except DataError as err:
raise AssertionError(msg or unic(err))
def get_time(self, format='timestamp', time_='NOW'):
"""Returns the given time in the requested format.
*NOTE:* DateTime library added in Robot Framework 2.8.5 contains
much more flexible keywords for getting the current date and time
and for date and time handling in general.
How time is returned is determined based on the given ``format``
string as follows. Note that all checks are case-insensitive.
1) If ``format`` contains the word ``epoch``, the time is returned
in seconds after the UNIX epoch (1970-01-01 00:00:00 UTC).
The return value is always an integer.
2) If ``format`` contains any of the words ``year``, ``month``,
``day``, ``hour``, ``min``, or ``sec``, only the selected parts are
returned. The order of the returned parts is always the one
in the previous sentence and the order of words in ``format``
is not significant. The parts are returned as zero-padded
strings (e.g. May -> ``05``).
3) Otherwise (and by default) the time is returned as a
timestamp string in the format ``2006-02-24 15:08:31``.
By default this keyword returns the current local time, but
that can be altered using ``time`` argument as explained below.
Note that all checks involving strings are case-insensitive.
1) If ``time`` is a number, or a string that can be converted to
a number, it is interpreted as seconds since the UNIX epoch.
This documentation was originally written about 1177654467
seconds after the epoch.
2) If ``time`` is a timestamp, that time will be used. Valid
timestamp formats are ``YYYY-MM-DD hh:mm:ss`` and
``YYYYMMDD hhmmss``.
3) If ``time`` is equal to ``NOW`` (default), the current local
time is used. This time is obtained using Python's ``time.time()``
function.
4) If ``time`` is equal to ``UTC``, the current time in
[http://en.wikipedia.org/wiki/Coordinated_Universal_Time|UTC]
is used. This time is obtained using ``time.time() + time.altzone``
in Python.
5) If ``time`` is in the format like ``NOW - 1 day`` or ``UTC + 1 hour
30 min``, the current local/UTC time plus/minus the time
specified with the time string is used. The time string format
is described in an appendix of Robot Framework User Guide.
Examples (expecting the current local time is 2006-03-29 15:06:21):
| ${time} = | Get Time | | | |
| ${secs} = | Get Time | epoch | | |
| ${year} = | Get Time | return year | | |
| ${yyyy} | ${mm} | ${dd} = | Get Time | year,month,day |
| @{time} = | Get Time | year month day hour min sec | | |
| ${y} | ${s} = | Get Time | seconds and year | |
=>
| ${time} = '2006-03-29 15:06:21'
| ${secs} = 1143637581
| ${year} = '2006'
| ${yyyy} = '2006', ${mm} = '03', ${dd} = '29'
| @{time} = ['2006', '03', '29', '15', '06', '21']
| ${y} = '2006'
| ${s} = '21'
Examples (expecting the current local time is 2006-03-29 15:06:21 and
UTC time is 2006-03-29 12:06:21):
| ${time} = | Get Time | | 1177654467 | # Time given as epoch seconds |
| ${secs} = | Get Time | sec | 2007-04-27 09:14:27 | # Time given as a timestamp |
| ${year} = | Get Time | year | NOW | # The local time of execution |
| @{time} = | Get Time | hour min sec | NOW + 1h 2min 3s | # 1h 2min 3s added to the local time |
| @{utc} = | Get Time | hour min sec | UTC | # The UTC time of execution |
| ${hour} = | Get Time | hour | UTC - 1 hour | # 1h subtracted from the UTC time |
=>
| ${time} = '2007-04-27 09:14:27'
| ${secs} = 27
| ${year} = '2006'
| @{time} = ['16', '08', '24']
| @{utc} = ['12', '06', '21']
| ${hour} = '11'
Support for UTC time was added in Robot Framework 2.7.5 but it did not
work correctly until 2.7.7.
"""
return get_time(format, parse_time(time_))
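# Programmatic sketch (returned values are illustrative):
#
#     BuiltIn().get_time()                  # e.g. '2006-03-29 15:06:21'
#     BuiltIn().get_time('epoch')           # e.g. 1143637581 (an integer)
#     BuiltIn().get_time('year month day')  # e.g. ['2006', '03', '29']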
def evaluate(self, expression, modules=None, namespace=None):
"""Evaluates the given expression in Python and returns the results.
``expression`` is evaluated in Python as explained in `Evaluating
expressions`.
``modules`` argument can be used to specify a comma separated
list of Python modules to be imported and added to the evaluation
namespace.
``namespace`` argument can be used to pass a custom evaluation
namespace as a dictionary. Possible ``modules`` are added to this
namespace. This is a new feature in Robot Framework 2.8.4.
Variables used like ``${variable}`` are replaced in the expression
before evaluation. Variables are also available in the evaluation
namespace and can be accessed using special syntax ``$variable``.
This is a new feature in Robot Framework 2.9 and it is explained more
thoroughly in `Evaluating expressions`.
Examples (expecting ``${result}`` is 3.14):
| ${status} = | Evaluate | 0 < ${result} < 10 | # Would also work with string '3.14' |
| ${status} = | Evaluate | 0 < $result < 10 | # Using variable itself, not string representation |
| ${random} = | Evaluate | random.randint(0, sys.maxint) | modules=random, sys |
| ${ns} = | Create Dictionary | x=${4} | y=${2} |
| ${result} = | Evaluate | x*10 + y | namespace=${ns} |
=>
| ${status} = True
| ${random} = <random integer>
| ${result} = 42
"""
variables = self._variables.as_dict(decoration=False)
expression = self._handle_variables_in_expression(expression, variables)
namespace = self._create_evaluation_namespace(namespace, modules)
variables = self._decorate_variables_for_evaluation(variables)
try:
if not is_string(expression):
raise TypeError("Expression must be string, got %s."
% type_name(expression))
if not expression:
raise ValueError("Expression cannot be empty.")
return eval(expression, namespace, variables)
except:
raise RuntimeError("Evaluating expression '%s' failed: %s"
% (expression, get_error_message()))
def _handle_variables_in_expression(self, expression, variables):
if not is_string(expression):
return expression
tokens = []
variable_started = seen_variable = False
generated = generate_tokens(StringIO(expression).readline)
for toknum, tokval, _, _, _ in generated:
if variable_started:
if toknum == token.NAME:
if tokval not in variables:
variable_not_found('$%s' % tokval, variables,
deco_braces=False)
tokval = 'RF_VAR_' + tokval
seen_variable = True
else:
tokens.append((token.ERRORTOKEN, '$'))
variable_started = False
if toknum == token.ERRORTOKEN and tokval == '$':
variable_started = True
else:
tokens.append((toknum, tokval))
if seen_variable:
return untokenize(tokens).strip()
return expression
def _create_evaluation_namespace(self, namespace, modules):
namespace = dict(namespace or {})
modules = modules.replace(' ', '').split(',') if modules else []
namespace.update((m, __import__(m)) for m in modules if m)
return namespace
def _decorate_variables_for_evaluation(self, variables):
decorated = [('RF_VAR_' + name, value)
for name, value in variables.items()]
return NormalizedDict(decorated, ignore='_')
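# Illustration of the decoration above: with a variable ${x} holding 10, the
# expression '$x > 5' is tokenized so that '$x' becomes the name 'RF_VAR_x',
# which is then resolved from the NormalizedDict built here (the value 10 is
# hypothetical).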
def call_method(self, object, method_name, *args, **kwargs):
"""Calls the named method of the given object with the provided arguments.
The possible return value from the method is returned and can be
assigned to a variable. Keyword fails both if the object does not have
a method with the given name or if executing the method raises an
exception.
Support for ``**kwargs`` is new in Robot Framework 2.9. Since then,
possible equal signs in other arguments must be escaped with a
backslash like ``\\=``.
Examples:
| Call Method | ${hashtable} | put | myname | myvalue |
| ${isempty} = | Call Method | ${hashtable} | isEmpty | |
| Should Not Be True | ${isempty} | | | |
| ${value} = | Call Method | ${hashtable} | get | myname |
| Should Be Equal | ${value} | myvalue | | |
| Call Method | ${object} | kwargs | name=value | foo=bar |
| Call Method | ${object} | positional | escaped\\=equals |
"""
try:
method = getattr(object, method_name)
except AttributeError:
raise RuntimeError("Object '%s' does not have method '%s'."
% (object, method_name))
try:
return method(*args, **kwargs)
except:
raise RuntimeError("Calling method '%s' failed: %s"
% (method_name, get_error_message()))
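# Programmatic sketch (the object and values are hypothetical):
#
#     data = {'name': 'value'}
#     BuiltIn().call_method(data, 'get', 'name')        # 'value'
#     BuiltIn().call_method(data, 'get', 'missing', 0)  # 0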
def regexp_escape(self, *patterns):
"""Returns each argument string escaped for use as a regular expression.
This keyword can be used to escape strings to be used with
`Should Match Regexp` and `Should Not Match Regexp` keywords.
Escaping is done with Python's ``re.escape()`` function.
Examples:
| ${escaped} = | Regexp Escape | ${original} |
| @{strings} = | Regexp Escape | @{strings} |
"""
if len(patterns) == 0:
return ''
if len(patterns) == 1:
return re.escape(patterns[0])
return [re.escape(p) for p in patterns]
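# Illustration (escaping is delegated to Python's re.escape):
#
#     BuiltIn().regexp_escape('a.b*c')       # 'a\\.b\\*c'
#     BuiltIn().regexp_escape('a.b', 'c?d')  # ['a\\.b', 'c\\?d']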
def set_test_message(self, message, append=False):
"""Sets message for the current test case.
If the optional ``append`` argument is given a true value (see `Boolean
arguments`), the given ``message`` is added after the possible earlier
message by joining the messages with a space.
In test teardown this keyword can alter the possible failure message,
but otherwise failures override messages set by this keyword. Notice
that in teardown the initial message is available as a built-in variable
``${TEST MESSAGE}``.
It is possible to use HTML format in the message by starting the message
with ``*HTML*``.
Examples:
| Set Test Message | My message | |
| Set Test Message | is continued. | append=yes |
| Should Be Equal | ${TEST MESSAGE} | My message is continued. |
| Set Test Message | `*`HTML`*` <b>Hello!</b> | |
This keyword can not be used in suite setup or suite teardown.
Support for ``append`` was added in Robot Framework 2.7.7 and support
for HTML format in 2.8.
"""
test = self._namespace.test
if not test:
raise RuntimeError("'Set Test Message' keyword cannot be used in "
"suite setup or teardown.")
test.message = self._get_possibly_appended_value(test.message, message,
append)
message, level = self._get_logged_test_message_and_level(test.message)
self.log('Set test message to:\n%s' % message, level)
def _get_possibly_appended_value(self, initial, new, append):
if not is_unicode(new):
new = unic(new)
if is_truthy(append) and initial:
return '%s %s' % (initial, new)
return new
def _get_logged_test_message_and_level(self, message):
if message.startswith('*HTML*'):
return message[6:].lstrip(), 'HTML'
return message, 'INFO'
def set_test_documentation(self, doc, append=False):
"""Sets documentation for the current test case.
By default the possible existing documentation is overwritten, but
this can be changed using the optional ``append`` argument similarly
as with `Set Test Message` keyword.
The current test documentation is available as a built-in variable
``${TEST DOCUMENTATION}``. This keyword can not be used in suite
setup or suite teardown.
New in Robot Framework 2.7. Support for ``append`` was added in 2.7.7.
"""
test = self._namespace.test
if not test:
raise RuntimeError("'Set Test Documentation' keyword cannot be "
"used in suite setup or teardown.")
test.doc = self._get_possibly_appended_value(test.doc, doc, append)
self._variables.set_test('${TEST_DOCUMENTATION}', test.doc)
self.log('Set test documentation to:\n%s' % test.doc)
def set_suite_documentation(self, doc, append=False, top=False):
"""Sets documentation for the current test suite.
By default the possible existing documentation is overwritten, but
this can be changed using the optional ``append`` argument similarly
as with `Set Test Message` keyword.
This keyword sets the documentation of the current suite by default.
If the optional ``top`` argument is given a true value (see `Boolean
arguments`), the documentation of the top level suite is altered
instead.
The documentation of the current suite is available as a built-in
variable ``${SUITE DOCUMENTATION}``.
New in Robot Framework 2.7. Support for ``append`` and ``top`` were
added in 2.7.7.
"""
top = is_truthy(top)
suite = self._get_namespace(top).suite
suite.doc = self._get_possibly_appended_value(suite.doc, doc, append)
self._variables.set_suite('${SUITE_DOCUMENTATION}', suite.doc, top)
self.log('Set suite documentation to:\n%s' % suite.doc)
def set_suite_metadata(self, name, value, append=False, top=False):
"""Sets metadata for the current test suite.
By default possible existing metadata values are overwritten, but
this can be changed using the optional ``append`` argument similarly
as with `Set Test Message` keyword.
This keyword sets the metadata of the current suite by default.
If the optional ``top`` argument is given a true value (see `Boolean
arguments`), the metadata of the top level suite is altered instead.
The metadata of the current suite is available as a built-in variable
``${SUITE METADATA}`` in a Python dictionary. Notice that modifying this
variable directly has no effect on the actual metadata the suite has.
New in Robot Framework 2.7.4. Support for ``append`` and ``top`` were
added in 2.7.7.
"""
top = is_truthy(top)
if not is_unicode(name):
name = unic(name)
metadata = self._get_namespace(top).suite.metadata
original = metadata.get(name, '')
metadata[name] = self._get_possibly_appended_value(original, value, append)
self._variables.set_suite('${SUITE_METADATA}', metadata.copy(), top)
self.log("Set suite metadata '%s' to value '%s'." % (name, metadata[name]))
def set_tags(self, *tags):
"""Adds given ``tags`` for the current test or all tests in a suite.
When this keyword is used inside a test case, that test gets
the specified tags and other tests are not affected.
If this keyword is used in a suite setup, all test cases in
that suite, recursively, get the given tags. It is a failure
to use this keyword in a suite teardown.
The current tags are available as a built-in variable ``@{TEST TAGS}``.
See `Remove Tags` if you want to remove certain tags and `Fail` if
you want to fail the test case after setting and/or removing tags.
"""
ctx = self._context
if ctx.test:
ctx.test.tags.add(tags)
ctx.variables.set_test('@{TEST_TAGS}', list(ctx.test.tags))
elif not ctx.in_suite_teardown:
ctx.suite.set_tags(tags, persist=True)
else:
raise RuntimeError("'Set Tags' cannot be used in suite teardown.")
self.log('Set tag%s %s.' % (s(tags), seq2str(tags)))
def remove_tags(self, *tags):
"""Removes given ``tags`` from the current test or all tests in a suite.
Tags can be given exactly or using a pattern where ``*`` matches
anything and ``?`` matches one character.
This keyword can affect either one test case or all test cases in a
test suite similarly as `Set Tags` keyword.
The current tags are available as a built-in variable ``@{TEST TAGS}``.
Example:
| Remove Tags | mytag | something-* | ?ython |
See `Set Tags` if you want to add certain tags and `Fail` if you want
to fail the test case after setting and/or removing tags.
"""
ctx = self._context
if ctx.test:
ctx.test.tags.remove(tags)
ctx.variables.set_test('@{TEST_TAGS}', list(ctx.test.tags))
elif not ctx.in_suite_teardown:
ctx.suite.set_tags(remove=tags, persist=True)
else:
raise RuntimeError("'Remove Tags' cannot be used in suite teardown.")
self.log('Removed tag%s %s.' % (s(tags), seq2str(tags)))
def get_library_instance(self, name=None, all=False):
"""Returns the currently active instance of the specified test library.
This keyword makes it easy for test libraries to interact with
other test libraries that have state. This is illustrated by
the Python example below:
| from robot.libraries.BuiltIn import BuiltIn
|
| def title_should_start_with(expected):
| seleniumlib = BuiltIn().get_library_instance('SeleniumLibrary')
| title = seleniumlib.get_title()
| if not title.startswith(expected):
| raise AssertionError("Title '%s' did not start with '%s'"
| % (title, expected))
It is also possible to use this keyword in the test data and
pass the returned library instance to another keyword. If a
library is imported with a custom name, the ``name`` used to get
the instance must be that name and not the original library name.
If the optional argument ``all`` is given a true value, then a
dictionary mapping all library names to instances will be returned.
This feature is new in Robot Framework 2.9.2.
Example:
| &{all libs} = | Get library instance | all=True |
"""
if is_truthy(all):
return self._namespace.get_library_instances()
try:
return self._namespace.get_library_instance(name)
except DataError as err:
raise RuntimeError(unic(err))
class BuiltIn(_Verify, _Converter, _Variables, _RunKeyword, _Control, _Misc):
"""An always available standard library with often needed keywords.
``BuiltIn`` is Robot Framework's standard library that provides a set
of generic keywords needed often. It is imported automatically and
thus always available. The provided keywords can be used, for example,
for verifications (e.g. `Should Be Equal`, `Should Contain`),
conversions (e.g. `Convert To Integer`) and for various other purposes
(e.g. `Log`, `Sleep`, `Run Keyword If`, `Set Global Variable`).
== Table of contents ==
- `HTML error messages`
- `Evaluating expressions`
- `Boolean arguments`
- `Multiline string comparisons`
- `Shortcuts`
- `Keywords`
= HTML error messages =
Many of the keywords accept an optional error message to use if the keyword
fails. Starting from Robot Framework 2.8, it is possible to use HTML in
these messages by prefixing them with ``*HTML*``. See `Fail` keyword for
a usage example. Notice that using HTML in messages is not limited to
BuiltIn library but works with any error message.
= Evaluating expressions =
Many keywords, such as `Evaluate`, `Run Keyword If` and `Should Be True`,
accept an expression that is evaluated in Python. These expressions are
evaluated using Python's
[https://docs.python.org/2/library/functions.html#eval|eval] function so
that all Python built-ins like ``len()`` and ``int()`` are available.
`Evaluate` allows configuring the execution namespace with custom modules,
and other keywords have [https://docs.python.org/2/library/os.html|os]
and [https://docs.python.org/2/library/sys.html|sys] modules available
automatically.
Examples:
| `Run Keyword If` | os.sep == '/' | Log | Not on Windows |
| ${random int} = | `Evaluate` | random.randint(0, 5) | modules=random |
When a variable is used in the expression using the normal ``${variable}``
syntax, its value is replaced before the expression is evaluated. This
means that the value used in the expression will be the string
representation of the variable value, not the variable value itself.
This is not a problem with numbers and other objects that have a string
representation that can be evaluated directly, but with other objects
the behavior depends on the string representation. Most importantly,
strings must always be quoted, and if they can contain newlines, they must
be triple quoted.
Examples:
| `Should Be True` | ${rc} < 10 | Return code greater than 10 |
| `Run Keyword If` | '${status}' == 'PASS' | Log | Passed |
| `Run Keyword If` | 'FAIL' in '''${output}''' | Log | Output contains FAIL |
Starting from Robot Framework 2.9, variables themselves are automatically
available in the evaluation namespace. They can be accessed using special
variable syntax without the curly braces like ``$variable``. These
variables should never be quoted, and in fact they are not even replaced
inside strings.
Examples:
| `Should Be True` | $rc < 10 | Return code greater than 10 |
| `Run Keyword If` | $status == 'PASS' | `Log` | Passed |
| `Run Keyword If` | 'FAIL' in $output | `Log` | Output contains FAIL |
| `Should Be True` | len($result) > 1 and $result[1] == 'OK' |
Notice that instead of creating complicated expressions, it is often better
to move the logic into a test library.
= Boolean arguments =
Some keywords accept arguments that are handled as Boolean values true or
false. If such an argument is given as a string, it is considered false if
it is either empty or case-insensitively equal to ``false`` or ``no``.
Keywords verifying something that allow dropping actual and expected values
from the possible error message also consider string ``no values`` as false.
Other strings are considered true regardless of their value, and other
argument types are tested using the same
[http://docs.python.org/2/library/stdtypes.html#truth-value-testing|rules
as in Python].
True examples:
| `Should Be Equal` | ${x} | ${y} | Custom error | values=True | # Strings are generally true. |
| `Should Be Equal` | ${x} | ${y} | Custom error | values=yes | # Same as the above. |
| `Should Be Equal` | ${x} | ${y} | Custom error | values=${TRUE} | # Python ``True`` is true. |
| `Should Be Equal` | ${x} | ${y} | Custom error | values=${42} | # Numbers other than 0 are true. |
False examples:
| `Should Be Equal` | ${x} | ${y} | Custom error | values=False | # String ``false`` is false. |
| `Should Be Equal` | ${x} | ${y} | Custom error | values=no | # Also string ``no`` is false. |
| `Should Be Equal` | ${x} | ${y} | Custom error | values=${EMPTY} | # Empty string is false. |
| `Should Be Equal` | ${x} | ${y} | Custom error | values=${FALSE} | # Python ``False`` is false. |
| `Should Be Equal` | ${x} | ${y} | Custom error | values=no values | # ``no values`` works with ``values`` argument |
Note that prior to Robot Framework 2.9 some keywords considered all
non-empty strings, including ``false`` and ``no``, to be true.
= Multiline string comparisons =
`Should Be Equal` and `Should Be Equal As Strings` report the failures using
[https://en.wikipedia.org/wiki/Diff_utility#Unified_format|unified diff
format] if both strings have more than two lines. New in Robot Framework
2.9.1.
Example:
| ${first} = | `Catenate` | SEPARATOR=\\n | Not in second | Same | Differs | Same |
| ${second} = | `Catenate` | SEPARATOR=\\n | Same | Differs2 | Same | Not in first |
| `Should Be Equal` | ${first} | ${second} |
Results in the following error message:
| Multiline strings are different:
| --- first
| +++ second
| @@ -1,4 +1,4 @@
| -Not in second
| Same
| -Differs
| +Differs2
| Same
| +Not in first
"""
ROBOT_LIBRARY_SCOPE = 'GLOBAL'
ROBOT_LIBRARY_VERSION = get_version()
class RobotNotRunningError(AttributeError):
"""Used when something cannot be done because Robot is not running.
Based on AttributeError to be backwards compatible with RF < 2.8.5.
May later be based directly on Exception, so new code should except
this exception explicitly.
"""
pass
def register_run_keyword(library, keyword, args_to_process=None):
"""Registers 'run keyword' so that its arguments can be handled correctly.
1) Why is this method needed
Keywords running other keywords internally (normally using `Run Keyword`
or some variants of it in BuiltIn) must have the arguments meant for the
internally executed keyword handled specially to prevent processing them
twice. This is done ONLY for keywords registered using this method.
If the registered keyword has the same name as any keyword from Robot Framework
standard libraries, it can be used without getting warnings. Normally
there is a warning in such cases unless the keyword is used in long
format (e.g. MyLib.Keyword).
Keywords executed by registered run keywords can be tested in dry-run mode
if they have 'name' argument which takes the name of the executed keyword.
2) How to use this method
`library` is the name of the library where the registered keyword is
implemented.
`keyword` can be either a function or method implementing the
keyword, or name of the implemented keyword as a string.
`args_to_process` is needed when `keyword` is given as a string, and it
defines how many of the arguments to the registered keyword must be
processed normally. When `keyword` is a method or function, this
information is obtained directly from it so that varargs (those specified with
syntax '*args') are not processed but others are.
3) Examples
from robot.libraries.BuiltIn import BuiltIn, register_run_keyword
def my_run_keyword(name, *args):
# do something
return BuiltIn().run_keyword(name, *args)
# Either one of these works
register_run_keyword(__name__, my_run_keyword)
register_run_keyword(__name__, 'My Run Keyword', 1)
-------------
from robot.libraries.BuiltIn import BuiltIn, register_run_keyword
class MyLibrary:
def my_run_keyword_if(self, expression, name, *args):
# do something
return BuiltIn().run_keyword_if(expression, name, *args)
# Either one of these works
register_run_keyword('MyLibrary', MyLibrary.my_run_keyword_if)
register_run_keyword('MyLibrary', 'my_run_keyword_if', 2)
"""
RUN_KW_REGISTER.register_run_keyword(library, keyword, args_to_process)
|
the-stack_106_17589
|
r"""
.. _ref_deflection_of_a_hinged_support:
Deflection of a Hinged Support
------------------------------
Problem Description:
- A structure consisting of two equal steel bars, each of length :math:`l`
and cross-sectional area :math:`A`, with hinged ends is subjected to
the action of a load :math:`F`. Determine the stress, :math:`\sigma`,
in the bars and the deflection, :math:`\delta`, of point 2. Neglect
the weight of the bars as a small quantity in comparison with the load
:math:`F`.
Reference:
- S. Timoshenko, Strength of Materials, Part I, Elementary Theory and
Problems, 3rd Edition, D. Van Nostrand Co., Inc., New York, NY, 1955,
pg. 10, problem 2.
Analysis Type(s):
- Static Analysis ``ANTYPE=0``
Element Type(s):
- 3-D Spar (or Truss) Elements (LINK180)
.. image:: ../../../_static/vm4_setup.png
:width: 400
:alt: VM4 Problem Sketch
Material Properties
- :math:`E = 30 \cdot 10^6 psi`
Geometric Properties:
- :math:`l = 15 ft`
- :math:`A = 0.5 in^2`
- :math:`\Theta = 30 ^\circ`
Loading:
- :math:`F = 5000 lb`
Analytical Equations:
- The tensile force in the bars is :math:`S`
- :math:`S = \frac{P}{2 sin \Theta}`
- The necessary cross-sectional area :math:`A` is
- :math:`A = \frac{S}{\sigma}`
- The elongation of the bar :math:`AB` is
- :math:`B_1 D = \frac{\sigma l}{E}`
- The deflection :math:`BB_1` is
- :math:`BB_1 = \frac{B_1 D}{sin \Theta}`
Notes:
- Consistent length units are used. The dimensions :math:`a` and :math:`b` are
calculated parametrically in the input as follows:
- :math:`a = 2 l cos \Theta`,
- :math:`b = l sin \Theta`.
"""
# sphinx_gallery_thumbnail_path = '_static/vm4_setup.png'
###############################################################################
# Start MAPDL
# ~~~~~~~~~~~
from math import cos, pi, sin
from ansys.mapdl.core import launch_mapdl
# start mapdl and clear it
mapdl = launch_mapdl()
mapdl.clear() # optional as MAPDL just started
# enter verification example mode and the pre-processing routine
mapdl.verify()
mapdl.prep7()
###############################################################################
# Define Material
# ~~~~~~~~~~~~~~~
# Create a simple hinge geometry.
# We use the `LINK180` element type to model this and an elastic modulus
# of 30e6.
# We store the x-coordinate of node 3 and the y-coordinate of node 2 for
# ease of use later on.
length_bar = 15 * 12
theta = 30
theta_rad = theta * pi / 180.0
node3_x = 2 * length_bar * cos(theta_rad)
node2_y = length_bar * sin(theta_rad)
mapdl.et(1, "LINK180")
mapdl.sectype(1, "LINK")
mapdl.secdata(0.5)
mapdl.mp("EX", 1, 30e6)
###############################################################################
# Define Geometry
# ~~~~~~~~~~~~~~~
# We create three nodes in an isosceles triangle shape, with elements
# along the equal sides, forming a hinge.
n1 = mapdl.n(1, 0, 0, 0)
n2 = mapdl.n(2, node3_x * 0.5, -node2_y, 0)
n3 = mapdl.n(3, node3_x, 0, 0)
mapdl.e(n1, n2)
mapdl.e(n2, n3)
mapdl.eplot(show_node_numbering=True, line_width=5, cpos="xy")
###############################################################################
# Define Boundary Conditions
# ~~~~~~~~~~~~~~~~~~~~~~~~~~
# - Fix nodes 1 and 3 in place
# - Apply a force of -5000 in the negative y-direction to node 2
# - Then finish the prep7 section
mapdl.d(1, "ALL", "", "", 3, 2)
mapdl.f(2, "FY", -5000)
mapdl.finish()
###############################################################################
# Solve
# ~~~~~
# Enter solution mode and solve the system.
mapdl.run("/SOLU")
out = mapdl.solve()
mapdl.finish()
###############################################################################
# Post-processing
# ~~~~~~~~~~~~~~~
# Enter post-processing, get the results and view the nodal displacement
# as well as the equivalent stress on the nodes.
#
# We make the line width larger for ease of visualization as well as
# using two perceptually linear colormaps to enhance display of the
# data.
mapdl.post1()
mapdl.post_processing.plot_nodal_displacement(
"Y",
cmap="magma",
line_width=5,
cpos="xy",
scalar_bar_args={"title": "Displacement", "vertical": False},
)
###############################################################################
# Principal nodal stress
# ~~~~~~~~~~~~~~~~~~~~~~
# Use the ``post_processing`` attribute to get the principal nodal
# stress as an array.
#
# .. note::
# This returns the same data as :func:`prnsol
# <ansys.mapdl.core.Mapdl.prnsol>`, except instead of returning
# text, it returns a numpy array.
seqv = mapdl.post_processing.nodal_eqv_stress()
# print out the nodes
for i, nnum in enumerate(mapdl.mesh.nnum):
print(f"Node {nnum} : {seqv[i]} psi")
# Which is identical to:
# print(mapdl.prnsol('S', 'PRIN'))
###############################################################################
# Check Results
# ~~~~~~~~~~~~~
# Now that we have the results we can compare the nodal displacement and
# stress experienced by node 2 to the known quantities 10000 psi and
# -0.12 inches. To do this we:
#
# - Find the mid-node from the coordinates using the :class:`Query
# <ansys.mapdl.core.inline_functions.Query>` class
# - Get the y-displacement from node 2
# - Get the element nearest to node 2
# - Get the stress on this element
# - Compare
q = mapdl.queries
mid_node = q.node(node3_x * 0.5, -node2_y, 0)
displacement = mapdl.get_value("NODE", mid_node, "U", "Y")
left_element = q.enearn(mid_node)
mapdl.etable("STRS", "LS", 1)
stress = mapdl.get_value("ELEM", left_element, "ETAB", "STRS")
results = f"""
--------------------- RESULTS COMPARISON -----------------------
| QUANTITY | TARGET | Mechanical APDL | RATIO
------------------------------------------------------------------
Stress [psi] 10000 {stress} {stress/10000:.2f}
Displacement [in] -0.12 {displacement:.2f} {abs(displacement) / 0.12:.2f}
------------------------------------------------------------------
"""
print(results)
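###############################################################################
# Analytical check
# ~~~~~~~~~~~~~~~~
# A small illustrative sketch: re-derive the target values directly from the
# analytical equations listed in the header, independently of the MAPDL
# solution. The variable names below are local to this sketch.
force = 5000.0          # F [lb]
area = 0.5              # A [in^2]
elastic_modulus = 30e6  # E [psi]
tensile_force = force / (2 * sin(theta_rad))   # S = F / (2 sin(theta))
sigma_analytic = tensile_force / area          # expected ~10000 psi
delta_analytic = (sigma_analytic * length_bar / elastic_modulus) / sin(theta_rad)  # ~0.12 in
print(f"Analytical stress: {sigma_analytic:.1f} psi, deflection: {delta_analytic:.3f} in")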
###############################################################################
# stop mapdl
mapdl.exit()
|
the-stack_106_17591
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
"""
This module implements an EnergyModel abstract class and some basic
implementations. Basically, an EnergyModel is any model that returns an
"energy" for any given structure.
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "[email protected]"
__date__ = "11/19/13"
import abc
import six
from monty.json import MSONable
from pymatgen.analysis.ewald import EwaldSummation
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
class EnergyModel(six.with_metaclass(abc.ABCMeta, MSONable)):
"""
Abstract energy model class.
"""
@abc.abstractmethod
def get_energy(self, structure):
"""
Returns the energy of the given structure, as defined by the model.
"""
return
@classmethod
def from_dict(cls, d):
return cls(**d['init_args'])
class EwaldElectrostaticModel(EnergyModel):
"""
Wrapper around EwaldSum to calculate the electrostatic energy.
"""
def __init__(self, real_space_cut=None, recip_space_cut=None,
eta=None, acc_factor=8.0):
"""
Initializes the model. Args have the same definitions as in
:class:`pymatgen.analysis.ewald.EwaldSummation`.
Args:
real_space_cut (float): Real space cutoff radius dictating how
many terms are used in the real space sum. Defaults to None,
which means determine automagically using the formula given
in gulp 3.1 documentation.
recip_space_cut (float): Reciprocal space cutoff radius.
Defaults to None, which means determine automagically using
the formula given in gulp 3.1 documentation.
eta (float): Screening parameter. Defaults to None, which means
determine automatically.
acc_factor (float): No. of significant figures each sum is
converged to.
"""
self.real_space_cut = real_space_cut
self.recip_space_cut = recip_space_cut
self.eta = eta
self.acc_factor = acc_factor
def get_energy(self, structure):
e = EwaldSummation(structure, real_space_cut=self.real_space_cut,
recip_space_cut=self.recip_space_cut,
eta=self.eta,
acc_factor=self.acc_factor)
return e.total_energy
def as_dict(self):
return {"version": __version__,
"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"init_args": {"real_space_cut": self.real_space_cut,
"recip_space_cut": self.recip_space_cut,
"eta": self.eta,
"acc_factor": self.acc_factor}}
class SymmetryModel(EnergyModel):
"""
Sets the energy to the negative of the spacegroup number. Higher symmetry =>
lower "energy".
Args have same meaning as in
:class:`pymatgen.symmetry.finder.SpacegroupAnalyzer`.
Args:
symprec (float): Symmetry tolerance. Defaults to 0.1.
angle_tolerance (float): Tolerance for angles. Defaults to 5 degrees.
"""
def __init__(self, symprec=0.1, angle_tolerance=5):
self.symprec = symprec
self.angle_tolerance = angle_tolerance
def get_energy(self, structure):
f = SpacegroupAnalyzer(structure, symprec=self.symprec,
angle_tolerance=self.angle_tolerance)
return -f.get_space_group_number()
def as_dict(self):
return {"version": __version__,
"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"init_args": {"symprec": self.symprec,
"angle_tolerance": self.angle_tolerance}}
class IsingModel(EnergyModel):
"""
A very simple Ising model, with r^2 decay.
Args:
j (float): The interaction parameter. E = J * spin1 * spin2.
radius (float): max_radius for the interaction.
"""
def __init__(self, j, max_radius):
self.j = j
self.max_radius = max_radius
def get_energy(self, structure):
all_nn = structure.get_all_neighbors(r=self.max_radius)
energy = 0
for i, nn in enumerate(all_nn):
s1 = getattr(structure[i].specie, "spin", 0)
for site, dist in nn:
energy += self.j * s1 * getattr(site.specie, "spin",
0) / (dist ** 2)
return energy
def as_dict(self):
return {"version": __version__,
"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"init_args": {"j": self.j, "max_radius": self.max_radius}}
class NsitesModel(EnergyModel):
"""
Sets the energy to the number of sites. More sites => higher "energy".
Used to rank structures from smallest number of sites to largest number
of sites after enumeration.
"""
def get_energy(self, structure):
return len(structure)
def as_dict(self):
return {"version": __version__,
"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"init_args": {}}
|
the-stack_106_17593
|
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 16 08:59:34 2020
@author: wantysal
"""
# Standard library imports
import numpy as np
from scipy.io import wavfile, loadmat
from scipy.signal import resample
# Optional package import
try:
import pyuff
except ImportError:
pyuff = None
def load(file, calib=1, mat_signal="", mat_fs=""):
"""Extract the signal and its time axis from .wav or .uff file,
resample the signal to 48 kHz, and affects its sampling frequency
and time signal values.
Parameters
----------
file : string
string path to the signal file
calib : float, optional
Amplification factor. Shall be equal to 1 if the imported signal
is already in Pascal [pa]. In some cases (while working with .wav
files, for instance) the original signal in Pa, of amplitude A, is
scaled to A/factor before being exported. When using the present
function with such file, the factor shall be defined as input parameter
(calib=factor) to be able to scale the signal back to Pascal.
mat_signal : string
in case of a .mat file, name of the signal variable
mat_fs : string
in case of a .mat file, name of the sampling frequency variable
Outputs
-------
signal : numpy.array
time signal values
fs : integer
sampling frequency
"""
# load the .wav file content
if file[-3:] == "wav" or file[-3:] == "WAV":
fs, signal = wavfile.read(file)
# manage multichannel files
if signal.ndim > 1:
signal = signal[:, 0]
print("[Info] Multichannel signal loaded. Keeping only first channel")
# calibration factor for the signal to be in Pa
if isinstance(signal[0], np.int16):
signal = calib * signal / (2 ** 15 - 1)
elif isinstance(signal[0], np.int32):
signal = calib * signal / (2 ** 31 - 1)
elif isinstance(signal[0], np.floating):
signal = calib * signal
# load the .uff file content
elif file[-3:].lower() == "uff" or file[-3:].lower() == "unv":
data = uff_load(file)
# extract the signal values
signal = data["data"]
# calculate the sampling frequency
fs = int(1 / data["abscissa_inc"])
# load the .mat file content
elif file[-3:] == "mat":
matfile = loadmat(file)
# extract the signal values and sampling frequency
signal = matfile[mat_signal][:, 0]
fs = matfile[mat_fs]
fs = fs[:, 0]
# load the .txt file content
elif file[-3:] == "txt":
# extract the values
data = np.loadtxt(file)
signal = data[:,1]
time = data[:,0]
# calibration factor for the signal to be in Pa
signal = signal * calib
# calculate sampling frequency
fs = 1/(time[1]-time[0])
else:
raise ValueError("""ERROR: only .wav .mat .uff or .txt files are supported""")
# resample to 48kHz to allow calculation
if fs != 48000:
signal = resample(signal, int(48000 * len(signal) / fs))
fs = 48000
print("[Info] Signal resampled to 48 kHz to allow calculation.")
return signal, fs
# def load2oct3(is_stationary, file, calib=1):
# """Load .wav signal and output its third-octave band spectrum
# Parameters
# ----------
# is_stationary: boolean
# True if the signal is stationary, False if it is time-varying
# file : string
# full path to the signal file
# calib : float
# calibration factor for the signal to be in [pa]
# Outputs
# -------
# spec : numpy.ndarray
# Third octave band spectrum of signal sig [dB re.2e-5 Pa]
# fpref : numpy.ndarray
# Corresponding preferred third octave band center frequencies
# """
# # Load the signal from its file
# signal, fs = load(is_stationary, file, calib)
# # Compute third-octave spectrum
# output = comp_third_spec(is_stationary, signal, fs)
# return output
# def load2wav(
# is_stationary, file, sampling_freq, calib=1, encodage=16, mat_signal="", mat_fs=""
# ):
# """Load .uff or .mat file and create the corresponding .wav audio file
# Parameters
# ----------
# is_stationary: boolean
# True if the signal is stationary, False if it is time-varying
# file : string
# full path to the signal file
# sampling_freq : integer
# sampling frequency of the created .wav file
# calib : float
# calibration factor for the signal to be in [pa]
# encodage : integer
# encodage of the signal, 16 for np.int16, 32 for np.int32
# mat_signal : string
# in case of a .mat file, name of the signal variable
# mat_fs : string
# in case of a .mat file, name of the sampling frequency variable
# Output
# ------
# None
# """
# # Load the .uff file content
# if file[-3:].lower() == "uff" or file[-3:].lower() == "unv":
# data = uff_load(file)
# # extract the signal values
# signal = data["data"]
# # calculate the sampling frequency
# fs = int(1 / data["abscissa_inc"])
# # Load the .mat file content
# elif file[-3:] == "mat":
# matfile = loadmat(file)
# # extract the signal values and sampling frequency
# signal = matfile[mat_signal][:, 0]
# fs = matfile[mat_fs]
# fs = fs[:, 0]
# else:
# raise ValueError("""ERROR: only .mat or .uff file are supported""")
# # Resample
# if fs != sampling_freq:
# signal = resample(signal, sampling_freq * int(len(signal) / fs))
# # calibration factor for the signal to be in Pa
# if encodage == 16:
# signal = signal * (2 ** 15 - 1) / calib
# signal = signal.astype(np.int16)
# elif encodage == 32:
# signal = signal * (2 ** 31 - 1) / calib
# signal = signal.astype(np.int32)
# # create the .wav file
# newfile = file[:-3] + "wav"
# wavfile.write(newfile, sampling_freq, signal)
def uff_load(file):
if pyuff is None:
raise RuntimeError(
"In order to load UFF files you need the 'pyuff' " "package."
)
uff_file = pyuff.UFF(file)
data = uff_file.read_sets()
return data
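if __name__ == "__main__":
    # Illustrative usage sketch (not part of the original module).
    # "example.wav" is a placeholder path and calib=2 a placeholder scaling
    # factor; replace them with your own recording and calibration value.
    sig, fs = load("example.wav", calib=2)
    print(f"Loaded {sig.shape[0]} samples at {fs} Hz")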
|
the-stack_106_17594
|
"""
LeetCode 1525 (medium): Number of Good Ways to Split a String
"""
class Solution:
def numSplits(self, s: str) -> int:
from collections import Counter
left = Counter()
right = Counter(s)
total = 0
for c in s:
left[c] += 1
right[c] -= 1
if right[c] == 0:
del right[c]
if len(left) == len(right):
total += 1
return total
s = "aacaba"
sol = Solution()
print(sol.numSplits(s))
|
the-stack_106_17595
|
from django.core import mail
from django.test import TestCase
from bluebottle.initiatives.tests.factories import InitiativeFactory
from bluebottle.time_based.tests.factories import DateActivityFactory
from bluebottle.funding.tests.factories import FundingFactory
from bluebottle.test.factory_models.accounts import BlueBottleUserFactory
from bluebottle.test.factory_models.wallposts import MediaWallpostFactory, ReactionFactory
from bluebottle.follow.tests.factories import (
DateActivityFollowFactory, FundingFollowFactory
)
class InitiativeWallpostTestCase(TestCase):
def setUp(self):
self.initiative = InitiativeFactory.create()
self.follower = BlueBottleUserFactory.create()
DateActivityFollowFactory.create(
instance=DateActivityFactory.create(
status='open',
initiative=self.initiative
),
user=self.follower
)
FundingFollowFactory.create(
instance=FundingFactory.create(
status='open',
initiative=self.initiative
),
user=self.follower
)
DateActivityFollowFactory.create(
instance=DateActivityFactory.create(
status='open',
initiative=self.initiative
),
user=BlueBottleUserFactory(campaign_notifications=False),
)
DateActivityFollowFactory.create(
instance=DateActivityFactory.create(
status='open',
initiative=self.initiative
),
user=self.initiative.owner
)
def test_wallpost(self):
wallpost_user = BlueBottleUserFactory.create()
MediaWallpostFactory.create(
content_object=self.initiative,
author=wallpost_user,
email_followers=False
)
self.assertEqual(len(mail.outbox), 1)
owner_mail = mail.outbox[0]
self.assertEqual(
owner_mail.subject,
"You have a new post on '{}'".format(self.initiative.title)
)
def test_wallpost_owner(self):
MediaWallpostFactory.create(
content_object=self.initiative,
author=self.initiative.owner,
email_followers=True
)
self.assertEqual(len(mail.outbox), 1)
follow_mail = mail.outbox[0]
self.assertEqual(
follow_mail.subject,
"Update from '{}'".format(self.initiative.title)
)
self.assertTrue(
'{} posted an update to {}'.format(
self.initiative.owner.first_name,
self.initiative.title)
in follow_mail.body
)
def test_reaction(self):
reaction_user = BlueBottleUserFactory.create()
wallpost_user = BlueBottleUserFactory.create()
wallpost = MediaWallpostFactory.create(
content_object=self.initiative,
author=wallpost_user,
email_followers=True
)
mail.outbox = []
ReactionFactory.create(
wallpost=wallpost, author=reaction_user
)
self.assertEqual(len(mail.outbox), 2)
wallpost_owner_mail = mail.outbox[0]
self.assertEqual(
wallpost_owner_mail.subject,
"You have a new post on '{}'".format(self.initiative.title)
)
owner_mail = mail.outbox[1]
self.assertEqual(
owner_mail.subject,
"You have a new post on '{}'".format(self.initiative.title)
)
|
the-stack_106_17597
|
import struct
import numpy as np
dtypes = {
1: np.uint8,
2: np.int8,
3: np.int16,
4: np.int32,
5: np.int64,
6: np.float32,  # 4-byte float; the deprecated np.float alias was 8 bytes
7: np.double,
}
def write_longs(f, a):
f.write(np.array(a, dtype=np.int64))
def code(dtype):
for k in dtypes.keys():
if dtypes[k] == dtype:
return k
class IndexedDatasetBuilder(object):
element_sizes = {
np.uint8: 1,
np.int8: 1,
np.int16: 2,
np.int32: 4,
np.int64: 8,
np.float32: 4,
np.double: 8,
}
def __init__(self, out_file, dtype=np.int32):
self.out_file = open(out_file, "wb")
self.dtype = dtype
self.data_offsets = [0]
self.dim_offsets = [0]
self.sizes = []
self.element_size = self.element_sizes[self.dtype]
def add_item(self, tensor):
bytes = self.out_file.write(np.array(tensor.numpy(), dtype=self.dtype))
self.data_offsets.append(self.data_offsets[-1] + bytes / self.element_size)
for s in tensor.size():
self.sizes.append(s)
self.dim_offsets.append(self.dim_offsets[-1] + len(tensor.size()))
def finalize(self, index_file):
self.out_file.close()
index = open(index_file, "wb")
index.write(b"TNTIDX\x00\x00")
index.write(struct.pack("<Q", 1))
index.write(struct.pack("<QQ", code(self.dtype), self.element_size))
index.write(struct.pack("<QQ", len(self.data_offsets) - 1, len(self.sizes)))
write_longs(index, self.dim_offsets)
write_longs(index, self.data_offsets)
write_longs(index, self.sizes)
index.close()
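if __name__ == "__main__":
    # Illustrative usage sketch (not part of the original module). Assumes
    # PyTorch is installed; "data.bin" and "data.idx" are placeholder paths.
    import torch

    builder = IndexedDatasetBuilder("data.bin", dtype=np.int32)
    builder.add_item(torch.tensor([1, 2, 3], dtype=torch.int32))
    builder.add_item(torch.tensor([4, 5], dtype=torch.int32))
    builder.finalize("data.idx")  # writes the TNTIDX index alongside the data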
|
the-stack_106_17598
|
from __future__ import absolute_import, division, print_function, unicode_literals
import torch_glow
import unittest
class TestSetGlowBackend(unittest.TestCase):
def test_set_glow_backend(self):
"""Test setting the Glow backend type"""
backend_name_before = torch_glow.getGlowBackendName()
backend_num_devices_before = torch_glow.getGlowBackendNumDevices()
torch_glow.setGlowBackend("CPU")
torch_glow.setGlowBackendNumDevices(4)
assert(torch_glow.getGlowBackendName() == "CPU")
assert(torch_glow.getGlowBackendNumDevices() == 4)
# reset everything
torch_glow.setGlowBackend(backend_name_before)
torch_glow.setGlowBackendNumDevices(backend_num_devices_before)
|
the-stack_106_17599
|
# --- Day 23: Crab Cups ---
# The small crab challenges you to a game! The crab is going to mix up some cups, and you have to predict where they'll end up.
# The cups will be arranged in a circle and labeled clockwise (your puzzle input). For example, if your labeling were 32415, there would be five cups in the circle; going clockwise around the circle from the first cup, the cups would be labeled 3, 2, 4, 1, 5, and then back to 3 again.
# Before the crab starts, it will designate the first cup in your list as the current cup. The crab is then going to do 100 moves.
# Each move, the crab does the following actions:
# The crab picks up the three cups that are immediately clockwise of the current cup. They are removed from the circle; cup spacing is adjusted as necessary to maintain the circle.
# The crab selects a destination cup: the cup with a label equal to the current cup's label minus one. If this would select one of the cups that was just picked up, the crab will keep subtracting one until it finds a cup that wasn't just picked up. If at any point in this process the value goes below the lowest value on any cup's label, it wraps around to the highest value on any cup's label instead.
# The crab places the cups it just picked up so that they are immediately clockwise of the destination cup. They keep the same order as when they were picked up.
# The crab selects a new current cup: the cup which is immediately clockwise of the current cup.
# For example, suppose your cup labeling were 389125467. If the crab were to do merely 10 moves, the following changes would occur:
# -- move 1 --
# cups: (3) 8 9 1 2 5 4 6 7
# pick up: 8, 9, 1
# destination: 2
# -- move 2 --
# cups: 3 (2) 8 9 1 5 4 6 7
# pick up: 8, 9, 1
# destination: 7
# -- move 3 --
# cups: 3 2 (5) 4 6 7 8 9 1
# pick up: 4, 6, 7
# destination: 3
# -- move 4 --
# cups: 7 2 5 (8) 9 1 3 4 6
# pick up: 9, 1, 3
# destination: 7
# -- move 5 --
# cups: 3 2 5 8 (4) 6 7 9 1
# pick up: 6, 7, 9
# destination: 3
# -- move 6 --
# cups: 9 2 5 8 4 (1) 3 6 7
# pick up: 3, 6, 7
# destination: 9
# -- move 7 --
# cups: 7 2 5 8 4 1 (9) 3 6
# pick up: 3, 6, 7
# destination: 8
# -- move 8 --
# cups: 8 3 6 7 4 1 9 (2) 5
# pick up: 5, 8, 3
# destination: 1
# -- move 9 --
# cups: 7 4 1 5 8 3 9 2 (6)
# pick up: 7, 4, 1
# destination: 5
# -- move 10 --
# cups: (5) 7 4 1 8 3 9 2 6
# pick up: 7, 4, 1
# destination: 3
# -- final --
# cups: 5 (8) 3 7 4 1 9 2 6
# In the above example, the cups' values are the labels as they appear moving clockwise around the circle; the current cup is marked with ( ).
# After the crab is done, what order will the cups be in? Starting after the cup labeled 1, collect the other cups' labels clockwise into a single string with no extra characters; each number except 1 should appear exactly once. In the above example, after 10 moves, the cups clockwise from 1 are labeled 9, 2, 6, 5, and so on, producing 92658374. If the crab were to complete all 100 moves, the order after cup 1 would be 67384529.
# Using your labeling, simulate 100 moves. What are the labels on the cups after cup 1?
# Your puzzle input is 362981754.
from collections import deque
data = [int(i) for i in "362981754"]
queue = deque(data)
for _ in range(100):
cup = queue[0]
dest = queue[0] - 1
if dest < 1:
dest += 9
queue.rotate(-1)
values = (queue.popleft(), queue.popleft(), queue.popleft())
while dest in values:
dest = dest - 1 if dest > 1 else dest + 8
while queue[0] != dest:
queue.rotate(-1)
queue.rotate(-1)
queue.append(values[0])
queue.append(values[1])
queue.append(values[2])
while queue[0] != cup:
queue.rotate(-1)
queue.rotate(-1)
while queue[0] != 1:
queue.rotate(-1)
queue.popleft()
ans = ''.join([str(i) for i in queue])
print("Part 1:", ans)
# --- Part Two ---
# Due to what you can only assume is a mistranslation (you're not exactly fluent in Crab), you are quite surprised when the crab starts arranging many cups in a circle on your raft - one million (1000000) in total.
# Your labeling is still correct for the first few cups; after that, the remaining cups are just numbered in an increasing fashion starting from the number after the highest number in your list and proceeding one by one until one million is reached. (For example, if your labeling were 54321, the cups would be numbered 5, 4, 3, 2, 1, and then start counting up from 6 until one million is reached.) In this way, every number from one through one million is used exactly once.
# After discovering where you made the mistake in translating Crab Numbers, you realize the small crab isn't going to do merely 100 moves; the crab is going to do ten million (10000000) moves!
# The crab is going to hide your stars - one each - under the two cups that will end up immediately clockwise of cup 1. You can have them if you predict what the labels on those cups will be when the crab is finished.
# In the above example (389125467), this would be 934001 and then 159792; multiplying these together produces 149245887792.
# Determine which two cups will end up immediately clockwise of cup 1. What do you get if you multiply their labels together?
ONE_MILLON = 1000000
TEN_MILLON = 10000000
class Node:
def __init__(self, val):
self.val = val
self.left = None
self.right = None
nodes = {}
prev = None
for i in data:
node = Node(i)
nodes[i] = node
if prev is not None:
prev.right = node
node.left = prev
prev = node
for i in range(len(data)+1, ONE_MILLON+1):
node = Node(i)
nodes[i] = node
if prev is not None:
prev.right = node
node.left = prev
prev = node
pointer = nodes[data[0]]
prev.right = pointer
pointer.left = prev
for i in range(TEN_MILLON):
cup = pointer.val
c1 = pointer.right
c2 = c1.right
c3 = c2.right
pointer.right, pointer.right.left = c3.right, pointer
dest = cup - 1 or ONE_MILLON
while dest in (c1.val, c2.val, c3.val):
dest = dest - 1 or ONE_MILLON
dest_node = nodes[dest]
c3.right, c3.right.left = dest_node.right, c3
dest_node.right, c1.left = c1, dest_node
pointer = pointer.right
while pointer.val != 1:
pointer = pointer.right
ans =pointer.right.val * pointer.right.right.val
print("Part 2:", ans)
|
the-stack_106_17602
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from .._version import VERSION
class AutoRestSwaggerBATHeaderServiceConfiguration(Configuration): # pylint: disable=too-many-instance-attributes
"""Configuration for AutoRestSwaggerBATHeaderService.
Note that all parameters used to create this instance are saved as instance
attributes.
"""
def __init__(self, **kwargs: Any) -> None:
super(AutoRestSwaggerBATHeaderServiceConfiguration, self).__init__(**kwargs)
kwargs.setdefault("sdk_moniker", "autorestswaggerbatheaderservice/{}".format(VERSION))
self._configure(**kwargs)
def _configure(self, **kwargs: Any) -> None:
self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get("http_logging_policy") or policies.HttpLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get("retry_policy") or policies.AsyncRetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get("redirect_policy") or policies.AsyncRedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get("authentication_policy")
|
the-stack_106_17603
|
#!/usr/bin/env python
import sys
sys.path.append("..")
from models import iresnet
from collections import OrderedDict
from termcolor import cprint
from torch.nn import Parameter
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import numpy as np
import math
import torch
import torch.nn as nn
import os
def builder(args):
model = SoftmaxBuilder(args)
return model
def load_features(args):
if args.arch == 'iresnet18':
features = iresnet.iresnet18(
pretrained=True,
num_classes=args.embedding_size)
elif args.arch == 'iresnet34':
features = iresnet.iresnet34(
pretrained=True,
num_classes=args.embedding_size)
elif args.arch == 'iresnet50':
features = iresnet.iresnet50(
pretrained=True,
num_classes=args.embedding_size)
elif args.arch == 'iresnet100':
features = iresnet.iresnet100(
pretrained=True,
num_classes=args.embedding_size)
else:
raise ValueError()
return features
class SoftmaxBuilder(nn.Module):
def __init__(self, args):
super(SoftmaxBuilder, self).__init__()
self.features = load_features(args)
self.fc = MagLinear(args.embedding_size,
args.last_fc_size,
scale=args.arc_scale)
self.l_margin = args.l_margin
self.u_margin = args.u_margin
self.l_a = args.l_a
self.u_a = args.u_a
def _margin(self, x):
"""generate adaptive margin
"""
margin = (self.u_margin-self.l_margin) / \
(self.u_a-self.l_a)*(x-self.l_a) + self.l_margin
return margin
def forward(self, x, target):
x = self.features(x)
logits, x_norm = self.fc(x, self._margin, self.l_a, self.u_a)
return logits, x_norm
class MagLinear(torch.nn.Module):
"""
Parallel fc for Mag loss
"""
def __init__(self, in_features, out_features, scale=64.0, easy_margin=True):
super(MagLinear, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.weight = Parameter(torch.Tensor(in_features, out_features))
self.weight.data.uniform_(-1, 1).renorm_(2, 1, 1e-5).mul_(1e5)
self.scale = scale
self.easy_margin = easy_margin
def forward(self, x, m, l_a, u_a):
"""
Here m is a function which generate adaptive margin
"""
x_norm = torch.norm(x, dim=1, keepdim=True).clamp(l_a, u_a)
ada_margin = m(x_norm)
cos_m, sin_m = torch.cos(ada_margin), torch.sin(ada_margin)
# norm the weight
weight_norm = F.normalize(self.weight, dim=0)
cos_theta = torch.mm(F.normalize(x), weight_norm)
cos_theta = cos_theta.clamp(-1, 1)
sin_theta = torch.sqrt(1.0 - torch.pow(cos_theta, 2))
cos_theta_m = cos_theta * cos_m - sin_theta * sin_m
if self.easy_margin:
cos_theta_m = torch.where(cos_theta > 0, cos_theta_m, cos_theta)
else:
mm = torch.sin(math.pi - ada_margin) * ada_margin
threshold = torch.cos(math.pi - ada_margin)
cos_theta_m = torch.where(
cos_theta > threshold, cos_theta_m, cos_theta - mm)
# multiply the scale in advance
cos_theta_m = self.scale * cos_theta_m
cos_theta = self.scale * cos_theta
return [cos_theta, cos_theta_m], x_norm
class MagLoss(torch.nn.Module):
"""
MagFace Loss.
"""
def __init__(self, l_a, u_a, l_margin, u_margin, scale=64.0):
super(MagLoss, self).__init__()
self.l_a = l_a
self.u_a = u_a
self.scale = scale
self.cut_off = np.cos(np.pi/2-l_margin)
self.large_value = 1 << 10
def calc_loss_G(self, x_norm):
g = 1/(self.u_a**2) * x_norm + 1/(x_norm)
return torch.mean(g)
def forward(self, input, target, x_norm):
loss_g = self.calc_loss_G(x_norm)
cos_theta, cos_theta_m = input
one_hot = torch.zeros_like(cos_theta)
one_hot.scatter_(1, target.view(-1, 1), 1.0)
output = one_hot * cos_theta_m + (1.0 - one_hot) * cos_theta
loss = F.cross_entropy(output, target, reduction='mean')
return loss.mean(), loss_g, one_hot
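if __name__ == "__main__":
    # Illustrative smoke test (not part of the original module). Assumes
    # PyTorch is installed; the embedding size, class count and margin/norm
    # bounds below are placeholder values chosen only for demonstration.
    torch.manual_seed(0)
    l_a, u_a, l_margin, u_margin = 10.0, 110.0, 0.45, 0.8

    def adaptive_margin(x_norm):
        # Same linear interpolation of the margin as SoftmaxBuilder._margin
        return (u_margin - l_margin) / (u_a - l_a) * (x_norm - l_a) + l_margin

    fc = MagLinear(in_features=512, out_features=10, scale=64.0)
    criterion = MagLoss(l_a, u_a, l_margin, u_margin, scale=64.0)
    embeddings = torch.randn(4, 512)        # fake feature embeddings
    target = torch.randint(0, 10, (4,))     # fake class labels
    logits, x_norm = fc(embeddings, adaptive_margin, l_a, u_a)
    loss, loss_g, _ = criterion(logits, target, x_norm)
    print(loss.item(), loss_g.item())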
|
the-stack_106_17604
|
import pytest
import numpy as np
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_allclose)
from sklearn.datasets import load_linnerud
from sklearn.cross_decomposition._pls import (
_center_scale_xy,
_get_first_singular_vectors_power_method,
_get_first_singular_vectors_svd,
_svd_flip_1d
)
from sklearn.cross_decomposition import CCA
from sklearn.cross_decomposition import PLSSVD, PLSRegression, PLSCanonical
from sklearn.datasets import make_regression
from sklearn.utils import check_random_state
from sklearn.utils.extmath import svd_flip
from sklearn.exceptions import ConvergenceWarning
def assert_matrix_orthogonal(M):
K = np.dot(M.T, M)
assert_array_almost_equal(K, np.diag(np.diag(K)))
def test_pls_canonical_basics():
# Basic checks for PLSCanonical
d = load_linnerud()
X = d.data
Y = d.target
pls = PLSCanonical(n_components=X.shape[1])
pls.fit(X, Y)
assert_matrix_orthogonal(pls.x_weights_)
assert_matrix_orthogonal(pls.y_weights_)
assert_matrix_orthogonal(pls._x_scores)
assert_matrix_orthogonal(pls._y_scores)
# Check X = TP' and Y = UQ'
T = pls._x_scores
P = pls.x_loadings_
U = pls._y_scores
Q = pls.y_loadings_
# Need to scale first
Xc, Yc, x_mean, y_mean, x_std, y_std = _center_scale_xy(
X.copy(), Y.copy(), scale=True)
assert_array_almost_equal(Xc, np.dot(T, P.T))
assert_array_almost_equal(Yc, np.dot(U, Q.T))
# Check that rotations on training data lead to scores
Xt = pls.transform(X)
assert_array_almost_equal(Xt, pls._x_scores)
Xt, Yt = pls.transform(X, Y)
assert_array_almost_equal(Xt, pls._x_scores)
assert_array_almost_equal(Yt, pls._y_scores)
# Check that inverse_transform works
X_back = pls.inverse_transform(Xt)
assert_array_almost_equal(X_back, X)
def test_sanity_check_pls_regression():
# Sanity check for PLSRegression
# The results were checked against the R-packages plspm, misOmics and pls
d = load_linnerud()
X = d.data
Y = d.target
pls = PLSRegression(n_components=X.shape[1])
pls.fit(X, Y)
expected_x_weights = np.array(
[[-0.61330704, -0.00443647, 0.78983213],
[-0.74697144, -0.32172099, -0.58183269],
[-0.25668686, 0.94682413, -0.19399983]])
expected_x_loadings = np.array(
[[-0.61470416, -0.24574278, 0.78983213],
[-0.65625755, -0.14396183, -0.58183269],
[-0.51733059, 1.00609417, -0.19399983]])
expected_y_weights = np.array(
[[+0.32456184, 0.29892183, 0.20316322],
[+0.42439636, 0.61970543, 0.19320542],
[-0.13143144, -0.26348971, -0.17092916]])
expected_y_loadings = np.array(
[[+0.32456184, 0.29892183, 0.20316322],
[+0.42439636, 0.61970543, 0.19320542],
[-0.13143144, -0.26348971, -0.17092916]])
assert_array_almost_equal(np.abs(pls.x_loadings_),
np.abs(expected_x_loadings))
assert_array_almost_equal(np.abs(pls.x_weights_),
np.abs(expected_x_weights))
assert_array_almost_equal(np.abs(pls.y_loadings_),
np.abs(expected_y_loadings))
assert_array_almost_equal(np.abs(pls.y_weights_),
np.abs(expected_y_weights))
# The R / Python difference in the signs should be consistent across
# loadings, weights, etc.
x_loadings_sign_flip = np.sign(pls.x_loadings_ / expected_x_loadings)
x_weights_sign_flip = np.sign(pls.x_weights_ / expected_x_weights)
y_weights_sign_flip = np.sign(pls.y_weights_ / expected_y_weights)
y_loadings_sign_flip = np.sign(pls.y_loadings_ / expected_y_loadings)
assert_array_almost_equal(x_loadings_sign_flip, x_weights_sign_flip)
assert_array_almost_equal(y_loadings_sign_flip, y_weights_sign_flip)
def test_sanity_check_pls_regression_constant_column_Y():
# Check behavior when the first column of Y is constant
# The results are checked against a modified version of plsreg2
# from the R-package plsdepot
d = load_linnerud()
X = d.data
Y = d.target
Y[:, 0] = 1
pls = PLSRegression(n_components=X.shape[1])
pls.fit(X, Y)
expected_x_weights = np.array(
[[-0.6273573, 0.007081799, 0.7786994],
[-0.7493417, -0.277612681, -0.6011807],
[-0.2119194, 0.960666981, -0.1794690]])
expected_x_loadings = np.array(
[[-0.6273512, -0.22464538, 0.7786994],
[-0.6643156, -0.09871193, -0.6011807],
[-0.5125877, 1.01407380, -0.1794690]])
expected_y_loadings = np.array(
[[0.0000000, 0.0000000, 0.0000000],
[0.4357300, 0.5828479, 0.2174802],
[-0.1353739, -0.2486423, -0.1810386]])
assert_array_almost_equal(np.abs(expected_x_weights),
np.abs(pls.x_weights_))
assert_array_almost_equal(np.abs(expected_x_loadings),
np.abs(pls.x_loadings_))
# For the PLSRegression with default parameters, y_loadings == y_weights
assert_array_almost_equal(np.abs(pls.y_loadings_),
np.abs(expected_y_loadings))
assert_array_almost_equal(np.abs(pls.y_weights_),
np.abs(expected_y_loadings))
x_loadings_sign_flip = np.sign(expected_x_loadings / pls.x_loadings_)
x_weights_sign_flip = np.sign(expected_x_weights / pls.x_weights_)
# we ignore the first full-zeros row for y
y_loadings_sign_flip = np.sign(expected_y_loadings[1:] /
pls.y_loadings_[1:])
assert_array_equal(x_loadings_sign_flip, x_weights_sign_flip)
assert_array_equal(x_loadings_sign_flip[1:], y_loadings_sign_flip)
def test_sanity_check_pls_canonical():
# Sanity check for PLSCanonical
# The results were checked against the R-package plspm
d = load_linnerud()
X = d.data
Y = d.target
pls = PLSCanonical(n_components=X.shape[1])
pls.fit(X, Y)
expected_x_weights = np.array(
[[-0.61330704, 0.25616119, -0.74715187],
[-0.74697144, 0.11930791, 0.65406368],
[-0.25668686, -0.95924297, -0.11817271]])
expected_x_rotations = np.array(
[[-0.61330704, 0.41591889, -0.62297525],
[-0.74697144, 0.31388326, 0.77368233],
[-0.25668686, -0.89237972, -0.24121788]])
expected_y_weights = np.array(
[[+0.58989127, 0.7890047, 0.1717553],
[+0.77134053, -0.61351791, 0.16920272],
[-0.23887670, -0.03267062, 0.97050016]])
expected_y_rotations = np.array(
[[+0.58989127, 0.7168115, 0.30665872],
[+0.77134053, -0.70791757, 0.19786539],
[-0.23887670, -0.00343595, 0.94162826]])
assert_array_almost_equal(np.abs(pls.x_rotations_),
np.abs(expected_x_rotations))
assert_array_almost_equal(np.abs(pls.x_weights_),
np.abs(expected_x_weights))
assert_array_almost_equal(np.abs(pls.y_rotations_),
np.abs(expected_y_rotations))
assert_array_almost_equal(np.abs(pls.y_weights_),
np.abs(expected_y_weights))
x_rotations_sign_flip = np.sign(pls.x_rotations_ / expected_x_rotations)
x_weights_sign_flip = np.sign(pls.x_weights_ / expected_x_weights)
y_rotations_sign_flip = np.sign(pls.y_rotations_ / expected_y_rotations)
y_weights_sign_flip = np.sign(pls.y_weights_ / expected_y_weights)
assert_array_almost_equal(x_rotations_sign_flip, x_weights_sign_flip)
assert_array_almost_equal(y_rotations_sign_flip, y_weights_sign_flip)
assert_matrix_orthogonal(pls.x_weights_)
assert_matrix_orthogonal(pls.y_weights_)
assert_matrix_orthogonal(pls._x_scores)
assert_matrix_orthogonal(pls._y_scores)
def test_sanity_check_pls_canonical_random():
# Sanity check for PLSCanonical on random data
# The results were checked against the R-package plspm
n = 500
p_noise = 10
q_noise = 5
# 2 latents vars:
rng = check_random_state(11)
l1 = rng.normal(size=n)
l2 = rng.normal(size=n)
latents = np.array([l1, l1, l2, l2]).T
X = latents + rng.normal(size=4 * n).reshape((n, 4))
Y = latents + rng.normal(size=4 * n).reshape((n, 4))
X = np.concatenate(
(X, rng.normal(size=p_noise * n).reshape(n, p_noise)), axis=1)
Y = np.concatenate(
(Y, rng.normal(size=q_noise * n).reshape(n, q_noise)), axis=1)
pls = PLSCanonical(n_components=3)
pls.fit(X, Y)
expected_x_weights = np.array(
[[0.65803719, 0.19197924, 0.21769083],
[0.7009113, 0.13303969, -0.15376699],
[0.13528197, -0.68636408, 0.13856546],
[0.16854574, -0.66788088, -0.12485304],
[-0.03232333, -0.04189855, 0.40690153],
[0.1148816, -0.09643158, 0.1613305],
[0.04792138, -0.02384992, 0.17175319],
[-0.06781, -0.01666137, -0.18556747],
[-0.00266945, -0.00160224, 0.11893098],
[-0.00849528, -0.07706095, 0.1570547],
[-0.00949471, -0.02964127, 0.34657036],
[-0.03572177, 0.0945091, 0.3414855],
[0.05584937, -0.02028961, -0.57682568],
[0.05744254, -0.01482333, -0.17431274]])
expected_x_loadings = np.array(
[[0.65649254, 0.1847647, 0.15270699],
[0.67554234, 0.15237508, -0.09182247],
[0.19219925, -0.67750975, 0.08673128],
[0.2133631, -0.67034809, -0.08835483],
[-0.03178912, -0.06668336, 0.43395268],
[0.15684588, -0.13350241, 0.20578984],
[0.03337736, -0.03807306, 0.09871553],
[-0.06199844, 0.01559854, -0.1881785],
[0.00406146, -0.00587025, 0.16413253],
[-0.00374239, -0.05848466, 0.19140336],
[0.00139214, -0.01033161, 0.32239136],
[-0.05292828, 0.0953533, 0.31916881],
[0.04031924, -0.01961045, -0.65174036],
[0.06172484, -0.06597366, -0.1244497]])
expected_y_weights = np.array(
[[0.66101097, 0.18672553, 0.22826092],
[0.69347861, 0.18463471, -0.23995597],
[0.14462724, -0.66504085, 0.17082434],
[0.22247955, -0.6932605, -0.09832993],
[0.07035859, 0.00714283, 0.67810124],
[0.07765351, -0.0105204, -0.44108074],
[-0.00917056, 0.04322147, 0.10062478],
[-0.01909512, 0.06182718, 0.28830475],
[0.01756709, 0.04797666, 0.32225745]])
expected_y_loadings = np.array(
[[0.68568625, 0.1674376, 0.0969508],
[0.68782064, 0.20375837, -0.1164448],
[0.11712173, -0.68046903, 0.12001505],
[0.17860457, -0.6798319, -0.05089681],
[0.06265739, -0.0277703, 0.74729584],
[0.0914178, 0.00403751, -0.5135078],
[-0.02196918, -0.01377169, 0.09564505],
[-0.03288952, 0.09039729, 0.31858973],
[0.04287624, 0.05254676, 0.27836841]])
assert_array_almost_equal(np.abs(pls.x_loadings_),
np.abs(expected_x_loadings))
assert_array_almost_equal(np.abs(pls.x_weights_),
np.abs(expected_x_weights))
assert_array_almost_equal(np.abs(pls.y_loadings_),
np.abs(expected_y_loadings))
assert_array_almost_equal(np.abs(pls.y_weights_),
np.abs(expected_y_weights))
x_loadings_sign_flip = np.sign(pls.x_loadings_ / expected_x_loadings)
x_weights_sign_flip = np.sign(pls.x_weights_ / expected_x_weights)
y_weights_sign_flip = np.sign(pls.y_weights_ / expected_y_weights)
y_loadings_sign_flip = np.sign(pls.y_loadings_ / expected_y_loadings)
assert_array_almost_equal(x_loadings_sign_flip, x_weights_sign_flip)
assert_array_almost_equal(y_loadings_sign_flip, y_weights_sign_flip)
assert_matrix_orthogonal(pls.x_weights_)
assert_matrix_orthogonal(pls.y_weights_)
assert_matrix_orthogonal(pls._x_scores)
assert_matrix_orthogonal(pls._y_scores)
def test_convergence_fail():
# Make sure ConvergenceWarning is raised if max_iter is too small
d = load_linnerud()
X = d.data
Y = d.target
pls_nipals = PLSCanonical(n_components=X.shape[1], max_iter=2)
with pytest.warns(ConvergenceWarning):
pls_nipals.fit(X, Y)
@pytest.mark.filterwarnings('ignore:.*scores_ was deprecated') # 1.1
@pytest.mark.parametrize('Est', (PLSSVD, PLSRegression, PLSCanonical))
def test_attibutes_shapes(Est):
# Make sure attributes are of the correct shape depending on n_components
d = load_linnerud()
X = d.data
Y = d.target
n_components = 2
pls = Est(n_components=n_components)
pls.fit(X, Y)
assert all(attr.shape[1] == n_components
for attr in (pls.x_scores_, pls.y_scores_, pls.x_weights_,
pls.y_weights_))
@pytest.mark.parametrize('Est', (PLSRegression, PLSCanonical, CCA))
def test_univariate_equivalence(Est):
# Ensure 2D Y with 1 column is equivalent to 1D Y
d = load_linnerud()
X = d.data
Y = d.target
est = Est(n_components=1)
one_d_coeff = est.fit(X, Y[:, 0]).coef_
two_d_coeff = est.fit(X, Y[:, :1]).coef_
assert one_d_coeff.shape == two_d_coeff.shape
assert_array_almost_equal(one_d_coeff, two_d_coeff)
@pytest.mark.parametrize('Est', (PLSRegression, PLSCanonical, CCA, PLSSVD))
def test_copy(Est):
# check that the "copy" keyword works
d = load_linnerud()
X = d.data
Y = d.target
X_orig = X.copy()
# copy=True won't modify inplace
pls = Est(copy=True).fit(X, Y)
assert_array_equal(X, X_orig)
# copy=False will modify inplace
with pytest.raises(AssertionError):
Est(copy=False).fit(X, Y)
assert_array_almost_equal(X, X_orig)
if Est is PLSSVD:
return # PLSSVD does not support copy param in predict or transform
X_orig = X.copy()
with pytest.raises(AssertionError):
pls.transform(X, Y, copy=False),
assert_array_almost_equal(X, X_orig)
X_orig = X.copy()
with pytest.raises(AssertionError):
pls.predict(X, copy=False),
assert_array_almost_equal(X, X_orig)
# Make sure copy=True gives same transform and predictions as predict=False
assert_array_almost_equal(pls.transform(X, Y, copy=True),
pls.transform(X.copy(), Y.copy(), copy=False))
assert_array_almost_equal(pls.predict(X, copy=True),
pls.predict(X.copy(), copy=False))
def _generate_test_scale_and_stability_datasets():
"""Generate dataset for test_scale_and_stability"""
# dataset for non-regression 7818
rng = np.random.RandomState(0)
n_samples = 1000
n_targets = 5
n_features = 10
Q = rng.randn(n_targets, n_features)
Y = rng.randn(n_samples, n_targets)
X = np.dot(Y, Q) + 2 * rng.randn(n_samples, n_features) + 1
X *= 1000
yield X, Y
# Data set where one of the features is constant
X, Y = load_linnerud(return_X_y=True)
# causes X[:, -1].std() to be zero
X[:, -1] = 1.0
yield X, Y
X = np.array([[0., 0., 1.],
[1., 0., 0.],
[2., 2., 2.],
[3., 5., 4.]])
Y = np.array([[0.1, -0.2],
[0.9, 1.1],
[6.2, 5.9],
[11.9, 12.3]])
yield X, Y
# Seeds that provide a non-regression test for #18746, where CCA fails
seeds = [530, 741]
for seed in seeds:
rng = np.random.RandomState(seed)
X = rng.randn(4, 3)
Y = rng.randn(4, 2)
yield X, Y
@pytest.mark.parametrize('Est', (CCA, PLSCanonical, PLSRegression, PLSSVD))
@pytest.mark.parametrize('X, Y', _generate_test_scale_and_stability_datasets())
def test_scale_and_stability(Est, X, Y):
"""scale=True is equivalent to scale=False on centered/scaled data
This allows to check numerical stability over platforms as well"""
X_s, Y_s, *_ = _center_scale_xy(X, Y)
X_score, Y_score = Est(scale=True).fit_transform(X, Y)
X_s_score, Y_s_score = Est(scale=False).fit_transform(X_s, Y_s)
assert_allclose(X_s_score, X_score, atol=1e-4)
assert_allclose(Y_s_score, Y_score, atol=1e-4)
@pytest.mark.parametrize('Est', (PLSSVD, PLSCanonical, CCA))
@pytest.mark.parametrize('n_components', (0, 4))
def test_n_components_bounds(Est, n_components):
# n_components should be in [1, min(n_samples, n_features, n_targets)]
# TODO: catch error instead of warning in 1.1
rng = np.random.RandomState(0)
X = rng.randn(10, 5)
Y = rng.randn(10, 3)
est = Est(n_components=n_components)
with pytest.warns(FutureWarning,
match="n_components=3 will be used instead"):
est.fit(X, Y)
# make sure upper bound of rank is used as a fallback
assert est.transform(X).shape[1] == 3
@pytest.mark.parametrize('n_components', (0, 6))
def test_n_components_bounds_pls_regression(n_components):
# For PLSRegression, the upper bound for n_components is n_features
# TODO: catch error instead of warning in 1.1
rng = np.random.RandomState(0)
X = rng.randn(10, 5)
Y = rng.randn(10, 3)
est = PLSRegression(n_components=n_components)
with pytest.warns(FutureWarning,
match="n_components=5 will be used instead"):
est.fit(X, Y)
# make sure upper bound of rank is used as a fallback
assert est.transform(X).shape[1] == 5
@pytest.mark.parametrize('Est', (PLSSVD, CCA, PLSCanonical))
def test_scores_deprecations(Est):
# Make sure x_scores_ and y_scores_ are deprecated.
# It's not deprecated for PLSRegression because y_score_ is different from
# transform(Y_train)
# TODO: remove attributes and test in 1.1
rng = np.random.RandomState(0)
X = rng.randn(10, 5)
Y = rng.randn(10, 3)
est = Est().fit(X, Y)
with pytest.warns(FutureWarning, match="x_scores_ was deprecated"):
assert_allclose(est.x_scores_, est.transform(X))
with pytest.warns(FutureWarning, match="y_scores_ was deprecated"):
assert_allclose(est.y_scores_, est.transform(X, Y)[1])
@pytest.mark.parametrize('Est', (PLSRegression, PLSCanonical, CCA))
def test_norm_y_weights_deprecation(Est):
rng = np.random.RandomState(0)
X = rng.randn(10, 5)
Y = rng.randn(10, 3)
est = Est().fit(X, Y)
with pytest.warns(FutureWarning, match="norm_y_weights was deprecated"):
est.norm_y_weights
# TODO: Remove test in 1.1
@pytest.mark.parametrize('Estimator',
(PLSRegression, PLSCanonical, CCA, PLSSVD))
@pytest.mark.parametrize('attribute',
("x_mean_", "y_mean_", "x_std_", "y_std_"))
def test_mean_and_std_deprecation(Estimator, attribute):
rng = np.random.RandomState(0)
X = rng.randn(10, 5)
Y = rng.randn(10, 3)
estimator = Estimator().fit(X, Y)
with pytest.warns(FutureWarning, match=f"{attribute} was deprecated"):
getattr(estimator, attribute)
@pytest.mark.parametrize('n_samples, n_features', [(100, 10), (100, 200)])
@pytest.mark.parametrize('seed', range(10))
def test_singular_value_helpers(n_samples, n_features, seed):
# Make sure SVD and power method give approximately the same results
X, Y = make_regression(n_samples, n_features, n_targets=5,
random_state=seed)
u1, v1, _ = _get_first_singular_vectors_power_method(X, Y,
norm_y_weights=True)
u2, v2 = _get_first_singular_vectors_svd(X, Y)
_svd_flip_1d(u1, v1)
_svd_flip_1d(u2, v2)
rtol = 1e-1
assert_allclose(u1, u2, rtol=rtol)
assert_allclose(v1, v2, rtol=rtol)
def test_one_component_equivalence():
# PLSSVD, PLSRegression and PLSCanonical should all be equivalent when
# n_components is 1
X, Y = make_regression(100, 10, n_targets=5, random_state=0)
svd = PLSSVD(n_components=1).fit(X, Y).transform(X)
reg = PLSRegression(n_components=1).fit(X, Y).transform(X)
canonical = PLSCanonical(n_components=1).fit(X, Y).transform(X)
assert_allclose(svd, reg, rtol=1e-2)
assert_allclose(svd, canonical, rtol=1e-2)
def test_svd_flip_1d():
# Make sure svd_flip_1d is equivalent to svd_flip
u = np.array([1, -4, 2])
v = np.array([1, 2, 3])
u_expected, v_expected = svd_flip(u.reshape(-1, 1), v.reshape(1, -1))
_svd_flip_1d(u, v) # inplace
assert_allclose(u, u_expected.ravel())
assert_allclose(u, [-1, 4, -2])
assert_allclose(v, v_expected.ravel())
assert_allclose(v, [-1, -2, -3])
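# Illustrative sketch: the one-component equivalence asserted in
# test_one_component_equivalence can be reproduced by hand. Shapes are arbitrary.
if __name__ == "__main__":
    import numpy as np
    from sklearn.cross_decomposition import PLSCanonical, PLSRegression, PLSSVD
    from sklearn.datasets import make_regression
    X, Y = make_regression(100, 10, n_targets=5, random_state=0)
    t_svd = PLSSVD(n_components=1).fit(X, Y).transform(X)
    t_reg = PLSRegression(n_components=1).fit(X, Y).transform(X)
    t_can = PLSCanonical(n_components=1).fit(X, Y).transform(X)
    # With a single component all three estimators project X onto (nearly) the same direction.
    print(np.corrcoef(t_svd.ravel(), t_reg.ravel())[0, 1])
    print(np.corrcoef(t_svd.ravel(), t_can.ravel())[0, 1])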
|
the-stack_106_17605
|
# coding=utf8
"""
translate.py - Willie Translation Module
Copyright 2008, Sean B. Palmer, inamidst.com
Copyright © 2013-2014, Elad Alfassa <[email protected]>
Licensed under the Eiffel Forum License 2.
http://willie.dftba.net
"""
from __future__ import unicode_literals
from willie import web
from willie.module import rule, commands, priority, example
import json
import sys
import random
import os
mangle_lines = {}
if sys.version_info.major >= 3:
    unicode = str
    from urllib.parse import quote as url_quote
else:
    from urllib import quote as url_quote
def translate(text, in_lang='auto', out_lang='en'):
raw = False
if unicode(out_lang).endswith('-raw'):
out_lang = out_lang[:-4]
raw = True
headers = {
'User-Agent': 'Mozilla/5.0' +
'(X11; U; Linux i686)' +
'Gecko/20071127 Firefox/2.0.0.11'
}
url_query = {
"client": "gtx",
"sl": in_lang,
"tl": out_lang,
"dt": "t",
"q": text,
}
    # URL-encode the query values so spaces and non-ASCII text don't break the request
    query_string = "&".join(
        "{key}={value}".format(key=key, value=url_quote(unicode(value).encode('utf-8')))
        for key, value in url_query.items()
    )
url = "http://translate.googleapis.com/translate_a/single?{query}".format(query=query_string)
result = web.get(url, timeout=40, headers=headers)
while ',,' in result:
result = result.replace(',,', ',null,')
result = result.replace('[,', '[null,')
data = json.loads(result)
if raw:
return str(data), 'en-raw'
try:
language = data[2] # -2][0][0]
except:
language = '?'
return ''.join(x[0] for x in data[0]), language
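# Illustrative usage sketch: translate() returns a (translated_text, detected_language)
# tuple and performs a live HTTP request against the Google endpoint above, so it only
# works with network access, e.g.
#
#     text, lang = translate('mon chien', in_lang='auto', out_lang='en')
#     # text would be roughly 'my dog', lang the detected source code such as 'fr'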
@rule(u'$nickname[,:]\s+(?:([a-z]{2}) +)?(?:([a-z]{2}|en-raw) +)?["“](.+?)["”]\? *$')
@example('$nickname: "mon chien"? or $nickname: fr "mon chien"?')
@priority('low')
def tr(bot, trigger):
"""Translates a phrase, with an optional language hint."""
in_lang, out_lang, phrase = trigger.groups()
if (len(phrase) > 350) and (not trigger.admin):
return bot.reply('Phrase must be under 350 characters.')
in_lang = in_lang or 'auto'
out_lang = out_lang or 'en'
if in_lang != out_lang:
msg, in_lang = translate(phrase, in_lang, out_lang)
if sys.version_info.major < 3 and isinstance(msg, str):
msg = msg.decode('utf-8')
if msg:
            msg = web.decode(msg)  # msg.replace('&#39;', "'")
msg = '"%s" (%s to %s, translate.google.com)' % (msg, in_lang, out_lang)
else:
msg = 'The %s to %s translation failed, sorry!' % (in_lang, out_lang)
bot.reply(msg)
else:
bot.reply('Language guessing failed, so try suggesting one!')
@commands('translate', 'tr')
@example('.tr :en :fr my dog', '"mon chien" (en to fr, translate.google.com)')
@example('.tr היי', '"Hi" (iw to en, translate.google.com)')
@example('.tr mon chien', '"my dog" (fr to en, translate.google.com)')
def tr2(bot, trigger):
"""Translates a phrase, with an optional language hint."""
command = trigger.group(2)
if not command:
return bot.reply('You did not give me anything to translate')
def langcode(p):
return p.startswith(':') and (2 < len(p) < 10) and p[1:].isalpha()
args = ['auto', 'en']
for i in range(2):
if ' ' not in command:
break
prefix, cmd = command.split(' ', 1)
if langcode(prefix):
args[i] = prefix[1:]
command = cmd
phrase = command
if (len(phrase) > 350) and (not trigger.admin):
return bot.reply('Phrase must be under 350 characters.')
src, dest = args
if src != dest:
msg, src = translate(phrase, src, dest)
if sys.version_info.major < 3 and isinstance(msg, str):
msg = msg.decode('utf-8')
if msg:
            msg = web.decode(msg)  # msg.replace('&#39;', "'")
msg = '"%s" (%s to %s, translate.google.com)' % (msg, src, dest)
else:
msg = 'The %s to %s translation failed, sorry!' % (src, dest)
bot.reply(msg)
else:
bot.reply('Language guessing failed, so try suggesting one!')
def get_random_lang(long_list, short_list):
random_index = random.randint(0, len(long_list) - 1)
random_lang = long_list[random_index]
if random_lang not in short_list:
short_list.append(random_lang)
else:
return get_random_lang(long_list, short_list)
return short_list
@commands('mangle', 'mangle2')
def mangle(bot, trigger):
"""Repeatedly translate the input until it makes absolutely no sense."""
global mangle_lines
long_lang_list = ['fr', 'de', 'es', 'it', 'no', 'he', 'la', 'ja', 'cy', 'ar', 'yi', 'zh', 'nl', 'ru', 'fi', 'hi', 'af', 'jw', 'mr', 'ceb', 'cs', 'ga', 'sv', 'eo', 'el', 'ms', 'lv']
lang_list = []
for __ in range(0, 8):
lang_list = get_random_lang(long_lang_list, lang_list)
random.shuffle(lang_list)
if trigger.group(2) is None:
try:
phrase = (mangle_lines[trigger.sender.lower()], '')
except:
bot.reply("What do you want me to mangle?")
return
else:
phrase = (trigger.group(2).strip(), '')
if phrase[0] == '':
bot.reply("What do you want me to mangle?")
return
for lang in lang_list:
backup = phrase
try:
phrase = translate(phrase[0], 'en', lang)
except:
phrase = False
if not phrase:
phrase = backup
break
try:
phrase = translate(phrase[0], lang, 'en')
except:
phrase = backup
continue
if not phrase:
phrase = backup
break
bot.reply(phrase[0])
@rule('(.*)')
@priority('low')
def collect_mangle_lines(bot, trigger):
global mangle_lines
mangle_lines[trigger.sender.lower()] = "%s said '%s'" % (trigger.nick, (trigger.group(0).strip()))
if __name__ == "__main__":
from willie.test_tools import run_example_tests
run_example_tests(__file__)
|
the-stack_106_17606
|
import os
import subprocess as sp
import random as RNG
import numpy as np
from turbojpeg import TurboJPEG as JPEG
reader = JPEG()
def read(fname):
with open(fname, 'rb') as f:
return reader.decode(f.read(), pixel_format=0)
min_diff = 1e8
def remove_static_and_sample(frames, num_frames):
global min_diff
DUPLICATES_THRESHOLD = 0.01
filtered_frames = []
next_img = read(frames[0])
static = 0
for i in range(len(frames) - 1):
img = next_img
next_img = read(frames[i + 1])
assert img.shape == next_img.shape
        # cast to a signed type so uint8 subtraction does not wrap around
        diff = np.abs(img.astype(np.int16) - next_img.astype(np.int16)).mean()
if diff > DUPLICATES_THRESHOLD * 255:
filtered_frames.append(frames[i])
else:
static += 1
if len(filtered_frames) + 1 >= num_frames:
filtered_frames.append(frames[i + 1])
break
min_diff = min(diff, min_diff)
del img
if static > 0:
print("Removed static frames: ", static)
if len(filtered_frames) < num_frames:
raise Exception(
"Cannot sumsample {} frames. Too many static frames".format(num_frames))
return filtered_frames
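def _demo_static_filter():
    """Illustrative sketch, not used by the pipeline: shows the mean-absolute-difference
    test that remove_static_and_sample applies between consecutive frames, on synthetic
    arrays instead of decoded JPEGs. The 0.01 threshold mirrors DUPLICATES_THRESHOLD."""
    rng = np.random.RandomState(0)
    frame_a = rng.randint(0, 256, size=(4, 4), dtype=np.uint8)
    frame_b = frame_a.copy()  # identical frame -> counted as static
    frame_c = rng.randint(0, 256, size=(4, 4), dtype=np.uint8)
    for prev, nxt in [(frame_a, frame_b), (frame_a, frame_c)]:
        diff = np.abs(prev.astype(np.int16) - nxt.astype(np.int16)).mean()
        print("kept" if diff > 0.01 * 255 else "dropped as static", diff)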
def generate_splits(data_dir, out_dir, train_prop=0.9,
subsample=1.0, name=None):
    '''Splits the training set into training, validation and test sets.
    The training and validation sets are obtained from waymo's training folder.
    We assume that sequences in the training folder don't cover the same scenes.
    '''
tr_dir = os.path.join(data_dir, 'training')
val_dir = os.path.join(data_dir, 'validation')
    # list training sequences
tr_seqs = sp.run(
"find {} -mindepth 2 -maxdepth 2 -type d".format(tr_dir),
shell=True,
stdout=sp.PIPE)
tr_seqs = tr_seqs.stdout.decode('utf-8').split('\n')
tr_seqs = sorted(tr_seqs)
tmp = []
for i in range(len(tr_seqs)):
if len(tr_seqs[i]) > 0:
tmp.append(tr_seqs[i])
tr_seqs = tmp
RNG.seed(10000003)
RNG.shuffle(tr_seqs)
# split in tr and val
if name is None:
train_fname = 'train.txt'
val_fname = 'val.txt'
test_fname = 'test.txt'
else:
train_fname = 'train_{}.txt'.format(name)
val_fname = 'val_{}.txt'.format(name)
test_fname = 'test_{}.txt'.format(name)
with open(os.path.join(out_dir, train_fname), 'w') as trf:
with open(os.path.join(out_dir, val_fname), 'w') as valf:
num_train = int(train_prop * len(tr_seqs))
num_val = len(tr_seqs) - num_train
for i in range(0, num_train):
print("({}/{}) {}".format(i, num_train, tr_seqs[i]))
frames = sp.run(
"find {}/*.jpg".format(tr_seqs[i]), shell=True, stdout=sp.PIPE)
frames = frames.stdout.decode('utf-8').strip().split('\n')
frames = sorted(frames)
num_frames = int(len(frames) * subsample)
frames = remove_static_and_sample(frames, num_frames)
                for j in range(num_frames):
                    if len(frames[j]) > 0:
                        frame = frames[j][len(data_dir):].strip(os.sep)
trf.write(frame + '\n')
print('min_diff', min_diff)
val_offset = int(train_prop * len(tr_seqs))
for i in range(val_offset, val_offset + num_val):
frames = sp.run(
"find {}/*.jpg".format(tr_seqs[i]), shell=True, stdout=sp.PIPE)
frames = frames.stdout.decode('utf-8').strip().split('\n')
frames = sorted(frames)
num_frames = int(len(frames) * subsample)
frames = remove_static_and_sample(frames, num_frames)
                for j in range(num_frames):
                    if len(frames[j]) > 0:
                        frame = frames[j][len(data_dir):].strip(os.sep)
valf.write(frame + '\n')
print('min_diff', min_diff)
# list test sequences
te_seqs = sp.run(
"find {} -mindepth 2 -maxdepth 2 -type d".format(val_dir),
shell=True,
stdout=sp.PIPE)
te_seqs = te_seqs.stdout.decode('utf-8').split('\n')
te_seqs = sorted(te_seqs)
tmp = []
for i in range(len(te_seqs)):
if len(te_seqs[i]) > 0:
tmp.append(te_seqs[i])
te_seqs = tmp
num_test = len(te_seqs)
with open(os.path.join(out_dir, test_fname), 'w') as tef:
for i in range(num_test):
frames = sp.run(
"find {}/*.jpg".format(te_seqs[i]), shell=True, stdout=sp.PIPE)
frames = frames.stdout.decode('utf-8').split('\n')
frames = sorted(frames)
num_frames = int(len(frames) * subsample)
            for j in range(num_frames):
                if len(frames[j]) > 0:
                    frame = frames[j][len(data_dir):].strip(os.sep)
tef.write(frame + '\n')
if __name__ == '__main__':
data_dir = '/data/ra153646/datasets/waymo/waymo76k'
#data_dir = '/data/ra153646/datasets/waymo/waymo_test_processed'
out_dir = '/home/phd/ra153646/robustness/robustdepthflow/data/waymo'
generate_splits(data_dir, out_dir, subsample=0.2, name='sub0.2-rmstatic')
|
the-stack_106_17608
|
""" This file contains functions to be used in miscellaneous tasks like comparing simulated to estimated results, etc
"""
# base
import math
import numpy as np
import statsmodels.api as sm
from copy import deepcopy
import pandas as pd
# viz
import matplotlib.pyplot as plt
from matplotlib import gridspec
import seaborn as sns
# feems
from .utils import prepare_graph_inputs
from .sim import setup_graph, setup_graph_long_range, simulate_genotypes
from .spatial_graph import SpatialGraph, query_node_attributes
from .cross_validation import comp_mats, run_cv
from .objective import Objective
from .viz import Viz
# change matplotlib fonts
plt.rcParams["font.family"] = "Arial"
plt.rcParams["font.sans-serif"] = "Arial"
def cov_to_dist(S):
"""Convert a covariance matrix to a distance matrix
"""
s2 = np.diag(S).reshape(-1, 1)
ones = np.ones((s2.shape[0], 1))
D = s2 @ ones.T + ones @ s2.T - 2 * S
return(D)
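# Illustrative check: cov_to_dist implements D_ij = S_ii + S_jj - 2 * S_ij, the (squared)
# distance induced by a covariance matrix, e.g.
#
#     S = np.array([[2.0, 1.0],
#                   [1.0, 3.0]])
#     cov_to_dist(S)  # -> [[0., 3.], [3., 0.]] because 2 + 3 - 2*1 = 3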
def plot_default_vs_long_range(
sp_Graph_def,
sp_Graph,
max_res_nodes=None,
lamb=np.array((1.0,1.0))
):
"""Function to plot default graph with NO long range edges next to full graph with long range edges
(useful for comparison of feems default fit with extra parameters)
"""
assert all(lamb>=0.0), "lamb must be non-negative"
assert type(max_res_nodes) == list, "max_res_nodes must be a list of int 2-tuples"
fig = plt.figure(dpi=100)
#sp_Graph_def.fit(lamb = float(lamb[0]))
ax = fig.add_subplot(1, 2, 1)
v = Viz(ax, sp_Graph_def, edge_width=2,
edge_alpha=1, edge_zorder=100, sample_pt_size=20,
obs_node_size=7.5, sample_pt_color="black",
cbar_font_size=10)
v.draw_edges(use_weights=True)
v.draw_obs_nodes(use_ids=False)
#sp_Graph.fit(lamb = float(lamb[1]))
ax = fig.add_subplot(1, 2, 2)
v = Viz(ax, sp_Graph, edge_width=2.0,
edge_alpha=1, edge_zorder=100, sample_pt_size=20,
obs_node_size=7.5, sample_pt_color="black",
cbar_font_size=10)
v.draw_edges(use_weights=True)
v.draw_obs_nodes(use_ids=False)
lre_idx = [list(sp_Graph.edges).index(val) for val in max_res_nodes]
# paste correlation between the two weights
ax.text(0.5, 1.0, "cor={:.2f}".format(np.corrcoef(sp_Graph.w[~np.in1d(np.arange(len(sp_Graph.w)), lre_idx)],sp_Graph_def.w)[0,1]), transform=ax.transAxes)
return(None)
def comp_genetic_vs_fitted_distance(
sp_Graph_def,
lrn=None,
lamb=None,
n_lre=3,
plotFig=True
):
"""Function to plot genetic vs fitted distance to visualize outliers in residual calculations,
passes back 3 pairs of nodes (default) with largest residuals if plotFig=False
"""
if lamb is not None:
assert lamb >= 0.0, "lambda must be non-negative"
assert type(lamb) == float, "lambda must be float"
assert type(n_lre) == int, "n_lre must be int"
tril_idx = np.tril_indices(sp_Graph_def.n_observed_nodes, k=-1)
if lamb is None:
lamb_grid = np.geomspace(1e-6, 1e2, 20)[::-1]
cv_err = run_cv(sp_Graph_def, lamb_grid, n_folds=sp_Graph_def.n_observed_nodes, factr=1e10)
# average over folds
mean_cv_err = np.mean(cv_err, axis=0)
# argmin of cv error
lamb = float(lamb_grid[np.argmin(mean_cv_err)])
sp_Graph_def.fit(lamb=lamb,
lb=math.log(1e-6),
ub=math.log(1e+6))
sp_Graph_def.comp_graph_laplacian(sp_Graph_def.w)
obj = Objective(sp_Graph_def)
fit_cov, _, emp_cov = comp_mats(obj)
fit_dist = cov_to_dist(fit_cov)[tril_idx]
emp_dist = cov_to_dist(emp_cov)[tril_idx]
# using code from supp fig 6 of feems-analysis
X = sm.add_constant(fit_dist)
mod = sm.OLS(emp_dist, X)
res = mod.fit()
muhat, betahat = res.params
if(plotFig):
if lrn is not None:
# computing the vector index for lower triangular matrix of long range nodes (i+j(j+1)/2-j for lower triangle)
            lrn_idx = [int(val[0] + 0.5*val[1]*(val[1]+1) - val[1]) if val[0] < val[1] else int(val[1] + 0.5*val[0]*(val[0]+1) - val[0]) for val in lrn]
fig = plt.figure(dpi=100)
ax = fig.add_subplot()
ax.scatter(fit_dist, emp_dist,
marker=".", alpha=1, zorder=0, color="grey", s=3)
if lrn is not None:
ax.scatter(fit_dist[lrn_idx], emp_dist[lrn_idx],
marker=".", alpha=1, zorder=0, color="black", s=10)
x_ = np.linspace(np.min(fit_dist), np.max(fit_dist), 20)
ax.plot(x_, muhat + betahat * x_, zorder=2, color="orange", linestyle='--', linewidth=1)
ax.text(0.8, 0.15, "$\lambda$={:.3}".format(lamb), transform=ax.transAxes)
ax.text(0.8, 0.05, "R²={:.4f}".format(res.rsquared), transform=ax.transAxes)
ax.set_ylabel("genetic distance")
ax.set_xlabel("fitted distance")
# TODO: include a scaling here to downweight residuals from small pops - sqrt(n1*n2)?
# TODO: find a way to exclude edges already in the graph (super unlikely in empirical analyses tho...)
# extract indices with most negative residuals
max_idx = np.argpartition(res.resid, -n_lre)
# no need to reorder here, since we are looking for highest negative residuals
max_idx = max_idx[np.argsort(res.resid[max_idx])]
# getting the labels for pairs of nodes from the array index
max_res_node = []
for k in max_idx:
x = np.floor(np.sqrt(2*k+0.25)-0.5).astype('int')+1
            y = int(k - 0.5*x*(x-1))
max_res_node.append(tuple(sorted((x,y))))
return(max_res_node)
else:
# extract indices with maximum absolute residuals
max_idx = np.argpartition(res.resid, -n_lre)
# np.argpartition does not return indices in order of max to min, so another round of ordering
max_idx = max_idx[np.argsort(res.resid[max_idx])]
# can also choose outliers based on z-score
#max_idx = np.where(np.abs((res.resid-np.mean(res.resid))/np.std(res.resid))>3)[0]
# getting the labels for pairs of nodes from the array index
max_res_node = []
for k in max_idx:
x = np.floor(np.sqrt(2*k+0.25)-0.5).astype('int')+1
            y = int(k - 0.5*x*(x-1))
max_res_node.append(tuple(sorted((x,y))))
return(max_res_node)
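# Illustrative round-trip check: the code above flattens a node pair (i, j) with i > j to
# the np.tril_indices(k=-1) position k = i*(i-1)/2 + j and inverts it with
# x = floor(sqrt(2k + 0.25) - 0.5) + 1, y = k - x*(x-1)/2, e.g.
#
#     rows, cols = np.tril_indices(5, k=-1)
#     for k, (i, j) in enumerate(zip(rows, cols)):
#         assert k == i * (i - 1) // 2 + j
#         x = int(np.floor(np.sqrt(2 * k + 0.25) - 0.5)) + 1
#         y = int(k - 0.5 * x * (x - 1))
#         assert (x, y) == (i, j)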
def plot_estimated_vs_simulated_edges(
graph,
sp_Graph,
lrn=None,
max_res_nodes=None,
lamb=1.0
):
"""Function to plot estimated vs simulated edge weights to look for significant deviations
"""
assert lamb >= 0.0, "lambda must be non-negative"
assert type(lamb) == float, "lambda must be float"
# both variables below are long range nodes but lrn is from the simulated and max_res_nodes is from the empirical
assert type(lrn) == list, "lrn must be a list of int 2-tuples"
assert type(max_res_nodes) == list, "max_res_nodes must be a list of int 2-tuples"
# getting edges from the simulated graph
idx = [list(graph.edges).index(val) for val in lrn]
sim_edges = np.append(np.array([graph[val[0]][val[1]]["w"] for i, val in enumerate(graph.edges) if i not in idx]),
np.array([graph[val[0]][val[1]]["w"] for i, val in enumerate(graph.edges) if i in idx]))
idx = [list(sp_Graph.edges).index(val) for val in max_res_nodes]
w_plot = np.append(sp_Graph.w[[i for i in range(len(sp_Graph.w)) if i not in idx]], sp_Graph.w[idx])
X = sm.add_constant(sim_edges)
mod = sm.OLS(w_plot[range(len(graph.edges))], X)
res = mod.fit()
muhat, betahat = res.params
# getting index of long range edges
lre_idx = [list(graph.edges).index(val) for val in lrn]
fig = plt.figure(dpi=100)
ax = fig.add_subplot()
ax.scatter(sim_edges, w_plot[range(len(sim_edges))],
marker=".", alpha=1, zorder=0, color="grey", s=3)
ax.scatter(sim_edges[-len(lrn)::], w_plot[-len(lrn)::],
marker=".", alpha=1, zorder=0, color="black", s=10)
x_ = np.linspace(np.min(sim_edges), np.max(sim_edges), 20)
ax.plot(x_, muhat + betahat * x_, zorder=2, color="orange", linestyle='--', linewidth=1)
ax.text(0.8, 0.05, "R²={:.4f}".format(res.rsquared), transform=ax.transAxes)
ax.text(0.8, 0.15, "$\lambda$={:.3}".format(lamb), transform=ax.transAxes)
ax.set_xlabel("simulated edge weights")
ax.set_ylabel("estimated edge weights")
return(None)
def plot_residual_matrix(
sp_Graph,
lamb_cv,
pop_labs_file=None
):
"""Function to plot the residual matrix of the pairs of populations
"""
# TODO: finalize way to map samples to pops and pops to nodes
# reading in file with sample and pop labels
#pop_labs_file = pd.read_csv()
permuted_idx = query_node_attributes(sp_Graph, "permuted_idx")
obs_perm_ids = permuted_idx[: sp_Graph.n_observed_nodes]
# code for mapping nodes back to populations (since multiple pops can be assigned to the same nodes)
node_to_pop = pd.DataFrame(index = np.arange(sp_Graph.n_observed_nodes), columns = ['nodes', 'pops'])
node_to_pop['nodes'] = obs_perm_ids
    # NOTE: sample_data (a table mapping samples to population IDs) is expected to come
    # from pop_labs_file above but is not defined in this module yet (see TODO above).
    node_to_pop['pops'] = [np.unique(sample_data['popId'][query_node_attributes(sp_Graph,"sample_idx")[x]]) for x in obs_perm_ids]
tril_idx = np.tril_indices(sp_Graph.n_observed_nodes, k=-1)
sp_Graph.fit(lamb=lamb_cv)
obj = Objective(sp_Graph)
fit_cov, _, emp_cov = comp_mats(obj)
fit_dist = cov_to_dist(fit_cov)[tril_idx]
emp_dist = cov_to_dist(emp_cov)[tril_idx]
X = sm.add_constant(fit_dist)
mod = sm.OLS(emp_dist, X)
res = mod.fit()
resnode = np.zeros((sp_Graph.n_observed_nodes,sp_Graph.n_observed_nodes))
    resnode[np.tril_indices_from(resnode, k=-1)] = np.abs(res.resid)
mask = np.zeros_like(resnode)
mask[np.triu_indices_from(mask)] = True
with sns.axes_style("white"):
fig = plt.figure(dpi=100)
# try clustermap(col_cluster=False)
ax = sns.heatmap(resnode, mask=mask, square=True, cmap=sns.color_palette("crest", as_cmap=True), xticklabels=node_to_pop['pops'])
plt.show()
return(None)
|
the-stack_106_17610
|
"""DVXplorer Test.
Author: Yuhuang Hu
Email : [email protected]
"""
from __future__ import print_function, absolute_import
import numpy as np
import cv2
from pyaer.dvxplorer import DVXPLORER
from timer import Timer
device = DVXPLORER()
print("Device ID:", device.device_id)
print("Device Serial Number:", device.device_serial_number)
print("Device USB bus Number:", device.device_usb_bus_number)
print("Device USB device address:", device.device_usb_device_address)
print("Device String:", device.device_string)
print("Device Firmware Version:", device.firmware_version)
print("Logic Version:", device.logic_version)
print("Device Chip ID:", device.chip_id)
if device.device_is_master:
print("Device is master.")
else:
print("Device is slave.")
print("MUX has statistics:", device.mux_has_statistics)
print("Device size X:", device.dvs_size_X)
print("Device size Y:", device.dvs_size_Y)
print("DVS has statistics:", device.dvs_has_statistics)
print("IMU Type:", device.imu_type)
print("EXT input has generator:", device.ext_input_has_generator)
clip_value = 3
histrange = [(0, v) for v in (device.dvs_size_Y, device.dvs_size_X)]
# load new config
device.set_bias_from_json("./configs/dvxplorer_config.json")
print(device.get_bias())
device.start_data_stream()
while True:
try:
with Timer("time to fetch data"):
(pol_events, num_pol_event,
special_events, num_special_event,
imu_events, num_imu_event) = \
device.get_event("events_hist")
print("Number of events:", num_pol_event)
if num_pol_event != 0:
with Timer("time to prepare plotting"):
img = pol_events[..., 1]-pol_events[..., 0]
img = np.clip(img, -clip_value, clip_value)
img = (img+clip_value)/float(clip_value*2)
# print("I'm here")
# pol_on = (pol_events[:, 3] == 1)
# pol_off = np.logical_not(pol_on)
# img_on, _, _ = np.histogram2d(
# pol_events[pol_on, 2], pol_events[pol_on, 1],
# bins=(device.dvs_size_Y, device.dvs_size_X),
# range=histrange)
# img_off, _, _ = np.histogram2d(
# pol_events[pol_off, 2], pol_events[pol_off, 1],
# bins=(device.dvs_size_Y, device.dvs_size_X),
# range=histrange)
# if clip_value is not None:
# integrated_img = np.clip(
# (img_on-img_off), -clip_value, clip_value)
# else:
# integrated_img = (img_on-img_off)
# img = integrated_img+clip_value
with Timer("Time to plot"):
cv2.imshow("image", img)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
except KeyboardInterrupt:
device.shutdown()
break
|
the-stack_106_17611
|
import logging
class Logger(object):
"""
Class to setup and utilize basic logging
Args:
name: Name of class utilizing logger
"""
def __init__(self, name):
logging.basicConfig(
filename=None,
level=logging.INFO,
format='[%(asctime)s] {%(pathname)s:%(lineno)d} %(levelname)s - %(message)s',
datefmt='%H:%M:%S'
)
name = name.replace('.log', '')
logger = logging.getLogger('log_namespace.%s' % name)
self._logger = logger
def get(self):
"""
Method to return an instance of the logger
"""
return self._logger
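# Illustrative usage: instantiate with a module or class name and log through the
# returned logging.Logger instance.
if __name__ == "__main__":
    log = Logger("example").get()
    log.info("logger initialised")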
|
the-stack_106_17612
|
#!/usr/bin/env python
#
# Electrum - lightweight UraniumX client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# A QR scanner that uses zbar (via ctypes)
# - to access the camera,
# - and to find and decode QR codes (visible in the live feed).
import os
import sys
import ctypes
from typing import Optional, Mapping
from .util import UserFacingException
from .i18n import _
from .logging import get_logger
_logger = get_logger(__name__)
if sys.platform == 'darwin':
name = 'libzbar.0.dylib'
elif sys.platform in ('windows', 'win32'):
name = 'libzbar-0.dll'
else:
name = 'libzbar.so.0'
try:
libzbar = ctypes.cdll.LoadLibrary(os.path.join(os.path.dirname(__file__), name))
except BaseException as e1:
try:
libzbar = ctypes.cdll.LoadLibrary(name)
except BaseException as e2:
libzbar = None
_logger.error(f"failed to load zbar. exceptions: {[e1,e2]!r}")
def scan_barcode(device='', timeout=-1, display=True, threaded=False) -> Optional[str]:
if libzbar is None:
raise UserFacingException("Cannot start QR scanner: zbar not available.")
libzbar.zbar_symbol_get_data.restype = ctypes.c_char_p
libzbar.zbar_processor_create.restype = ctypes.POINTER(ctypes.c_int)
libzbar.zbar_processor_get_results.restype = ctypes.POINTER(ctypes.c_int)
libzbar.zbar_symbol_set_first_symbol.restype = ctypes.POINTER(ctypes.c_int)
# libzbar.zbar_set_verbosity(100) # verbose logs for debugging
proc = libzbar.zbar_processor_create(threaded)
libzbar.zbar_processor_request_size(proc, 640, 480)
if libzbar.zbar_processor_init(proc, device.encode('utf-8'), display) != 0:
raise UserFacingException(
_("Cannot start QR scanner: initialization failed.") + "\n" +
_("Make sure you have a camera connected and enabled."))
libzbar.zbar_processor_set_visible(proc)
if libzbar.zbar_process_one(proc, timeout):
symbols = libzbar.zbar_processor_get_results(proc)
else:
symbols = None
libzbar.zbar_processor_destroy(proc)
if symbols is None:
return
if not libzbar.zbar_symbol_set_get_size(symbols):
return
symbol = libzbar.zbar_symbol_set_first_symbol(symbols)
data = libzbar.zbar_symbol_get_data(symbol)
return data.decode('utf8')
def find_system_cameras() -> Mapping[str, str]:
device_root = "/sys/class/video4linux"
devices = {} # Name -> device
if os.path.exists(device_root):
for device in os.listdir(device_root):
path = os.path.join(device_root, device, 'name')
try:
with open(path, encoding='utf-8') as f:
name = f.read()
except Exception:
continue
name = name.strip('\n')
devices[name] = os.path.join("/dev", device)
return devices
if __name__ == "__main__":
print(scan_barcode())
|
the-stack_106_17613
|
import os
from enum import Enum
from parallelm.mlops.mlops_exception import MLOpsException
from parallelm.mlops.models.mlobject import MLObject
from parallelm.mlops.models.mlobject import MLObjectType
from parallelm.mlops.stats.stats_helper import StatsHelper
from parallelm.mlops.stats_category import StatCategory
class ModelFormat(str, Enum):
JSON = "Json"
SAVEDMODEL = "Savedmodel"
SPARKML = "SparkML"
BINARY = "Binary"
TEXT = "Text"
SCIKIT_LEARN_2 = "ScikitLearn_2"
SCIKIT_LEARN_3 = "ScikitLearn_3"
H2O_3 = "H2O_3"
H2O_DRIVERLESS_AI = "H2O_Driverless_AI"
UNKNOWN = "Unknown"
@classmethod
def from_str(cls, name):
for e in cls:
if e.name.lower() == name.lower():
return e
class ModelMetadata(object):
"""
Model related metadata
"""
def __init__(self, modelId, name="", model_format=ModelFormat.UNKNOWN, description="", user_defined="", size=0):
if model_format and not isinstance(model_format, ModelFormat):
raise MLOpsException("model_format object must be an instance of ModelFormat class! provided: "
"{}, type: {}".format(model_format, type(model_format)))
if model_format == ModelFormat.UNKNOWN:
raise MLOpsException(
"model_format can not be {}. Did you forget to set a format for model?".format(model_format.value))
self.modelId = modelId
self.name = name
self.modelFormat = model_format
self.description = description
self.user_defined = user_defined
self.size = size
# these fields are set by MCenter server
self.source = ""
self.user = ""
self.state = ""
self.createdTimestamp = ""
self.workflowRunId = ""
def __str__(self):
return "MODEL METADATA - name: {}; modelId: {}; source: {}; user: {}; status: {}; modelFormat: {}; createdTimestamp: {}; size: {}; " \
"description: {}; user_defined {}; workflowRunId {};" \
.format(self.name, self.modelId, self.source, self.user, self.state, self.modelFormat,
self.createdTimestamp, self.size, self.description, self.user_defined, self.workflowRunId)
def __eq__(self, other):
"""
Implements a naive equal comparison. Yet to be improved.
:param other: a model metadata
:return: True if model id are equal else False
"""
if isinstance(other, ModelMetadata):
return self.modelId == other.modelId
return NotImplemented
def __ne__(self, other):
result = self.__eq__(other)
if result is NotImplemented:
return result
return not result
def to_dict(self):
return {"description": self.description, "workflowRunId": self.workflowRunId,
"modelFormat": self.modelFormat.value, "modelId": self.modelId,
"name": self.name, "createdTimestamp": self.createdTimestamp, "source": self.source,
"state": self.state, "user": self.user, "user_defined": self.user_defined, "size": self.size}
class Model(MLObject):
"""
This class provides APIs to access Model related data, publish model, attach statistics to model.
"""
def __init__(self, stats_helper, rest_helper, name, model_format, description, user_defined, id=None):
super(Model, self).__init__(rest_helper, id)
self.model_path = None
self.metadata = ModelMetadata(self.get_id(), name, model_format, description, user_defined, 0)
if stats_helper and not isinstance(stats_helper, StatsHelper):
raise MLOpsException("stats_helper object must be an instance of StatsHelper class")
self._stats_helper = stats_helper
def __eq__(self, other):
"""
Models are regarded as equal if their metadata are equal
:param other: another model
:return: True if model's metadata are equal else False
"""
return self.metadata == other.metadata
def __ne__(self, other):
"""
Models are regarded as not equal if their metadata are not equal
:param other: another model
:return: True if model's metadata are not equal else False
"""
return self.metadata != other.metadata
def _get_object_type(self):
return MLObjectType.MODEL
def __str__(self):
return self.metadata.__str__()
def _validate_stats_helper(self):
if not self._stats_helper:
raise MLOpsException("stats_helper object was not set or is None")
def set_model_path(self, path):
self.model_path = path
self.metadata.size = os.stat(path).st_size
def get_model_path(self):
return self.model_path
def set_stat(self, name, data=None, category=StatCategory.TIME_SERIES, timestamp=None):
"""
Report this statistic.
Statistic is attached to the current model and can be fetched later for this model.
:param name: name to use in the export
:param data: data object to export
:param category: category of the statistic. One of :class:`StatsCategory`
:param timestamp: optional timestamp
:raises: MLOpsException
"""
self._validate_stats_helper()
self._stats_helper.set_stat(name, data, self.get_id(), category, timestamp)
def set_data_distribution_stat(self, data, model=None, timestamp=None):
"""
Exports distribution statistics which will be shown in Health View.
Statistic is attached to the current model and can be fetched later for this model.
:param data: The data that represents distribution. Data must have specific type according to engine.
                     For PySpark engine: RDD or DataFrame.
For Python engine: Numpy ND array or Pandas DataFrame
Currently the only expected data type is a line graph, which consists of
discrete numeric values
:param model: For PySpark engine: model is used to classify categorical and continuous features.
:param timestamp: The timestamp is a given units (Optional). If not provided, the current time is assumed
:raises: MLOpsException
"""
self._validate_stats_helper()
self._stats_helper.set_data_distribution_stat(data, self.get_id(), model, timestamp)
def set_kpi(self, name, data, timestamp=None, units=None):
"""
Exports KPI data to the PM service. Users may supply a timestamp, which allows older data to be loaded.
Statistic is attached to the current model and can be fetched later for this model.
:param name: KPI name, which will be displayed in the UI. It can be used to fetch the stored data
at a later time.
:param data: The data to store. Currently the only expected data type is a line graph, which consists of
discrete numeric values
:param timestamp: The timestamp is a given units (Optional). If not provided, the current time is assumed
:param units: The timestamp units. One of: KpiValue.TIME_SEC, KpiValue.TIME_MSEC, KpiValue.TIME_NSEC
:return: The current PM instance for further calls
:raises: MLOpsException
"""
self._validate_stats_helper()
self._stats_helper.set_kpi(name, data, self.get_id(), timestamp, units)
def feature_importance(self, feature_importance_vector=None, feature_names=None, model=None, df=None,
num_significant_features=8):
"""
        Present feature importance, either according to the provided vector or generated from
        the provided model if available.
        The feature importance bar graph is attached to the current model and can be fetched
        later for this model.
        This function implements:
        1) use feature_importance_vector if it exists
        2) use feature_names from the model if available
        3) get the feature names vector if it exists
        4) extract feature names from the pipeline model or dataframe if they exist
        (code differs between pyspark and sklearn)
        5) sort the vector
        6) take the first k elements
        7) create a bar graph for feature importance
:param feature_importance_vector: feature importance vector optional
:param feature_names: feature names vector optional
:param model: optional pipeline model for pyspark, sklearn model for python
:param df: optional dataframe for analysis
:raises: MLOpsException
"""
self._validate_stats_helper()
self._stats_helper.feature_importance(self, feature_importance_vector, feature_names, model, df,
num_significant_features)
def download(self, filepath):
"""
Download the model content specified by this model metadata and save it on the local file system.
Note model size might be big, check the expected model size before downloading it.
:param: filepath the file path in the local file system to save the model's content
"""
content = self._rest_helper.download_model(self.get_id())
# In case the model was created from a json response of get model REST API
if self.metadata.size and self.metadata.size != len(content):
raise MLOpsException("Unexpected downloaded model size! model id: {}, expected size: {},"
" downloaded size: {}".format(self.get_id(), self.metadata.size, len(content)))
with open(filepath, 'w') as f:
f.write(content)
self.set_model_path(filepath)
|
the-stack_106_17614
|
# app/recommend.py
# Imports
from os import path
import pandas as pd
import pickle
import json
# Load pickled vectorizer and model
with open('pickles/tfidf.pkl', 'rb') as tfidf_pkl:
tfidf = pickle.load(tfidf_pkl)
with open('pickles/nn_model.pkl', 'rb') as nn_pkl:
nn_model = pickle.load(nn_pkl)
#with open('./pickles/min_data.pkl', 'rb') as data_pkl:
# data = pickle.load(data_pkl)
"""Pickle data to use here, or try loading from DB"""
# Import pop_rent_crime_bins csv
#file_name = path.join(path.dirname(__file__), "cityspire-c-ds/app/data/pop_rent_crime_walk_cost_livability_bins.csv")
prcb = pd.read_csv("data/pop_rent_crime_walk_cost_livability_bins.csv")
# Recommend Function
def recommend(user_input):
    # Query the nearest-neighbour model with the TF-IDF vector of the user input;
    # kneighbors()[1] holds the indices of the closest rows in the prcb table.
    temp_df = nn_model.kneighbors(tfidf.transform([user_input]).todense())[1]
    # NOTE: each pass of this loop overwrites the variables below, so only the last
    # of the 4 neighbours ends up in the values returned at the bottom.
    for i in range(4):
info = prcb.iloc[temp_df[0][i]]['Location']
info_pop = prcb.iloc[temp_df[0][i]]['2019 Population']
info_town_or_city = prcb.iloc[temp_df[0][i]]['Town or City']
info_rent = prcb.iloc[temp_df[0][i]]['2019 Rental Rates']
info_state = prcb.iloc[temp_df[0][i]]['State']
info_city = prcb.iloc[temp_df[0][i]]['City']
info_population = prcb.iloc[temp_df[0][i]]['Population']
info_violent_crime = prcb.iloc[temp_df[0][i]]['Violent crime']
info_murder = prcb.iloc[temp_df[0][i]]['Murder and nonnegligent manslaughter']
info_vehicle_theft = prcb.iloc[temp_df[0][i]]['Motor vehicle theft']
info_arson = prcb.iloc[temp_df[0][i]]['Arson']
info_crime_rate = prcb.iloc[temp_df[0][i]]['Crime Rate']
info_urb_pop_cat = prcb.iloc[temp_df[0][i]]['Urban Population by City Size Categories']
info_urb_pop_rang = prcb.iloc[temp_df[0][i]]['Urban Population by City Size Ranges']
info_rent_cat = prcb.iloc[temp_df[0][i]]['Rental Rate Categories']
info_rent_rang = prcb.iloc[temp_df[0][i]]['Rental Rate Ranges']
info_crime_cat = prcb.iloc[temp_df[0][i]]['Crime Rate Categories']
info_crime_rang = prcb.iloc[temp_df[0][i]]['Crime Rate Categories']
# Possible Outputs
location = json.dumps(info)
pop = json.dumps(int(info_pop))
town_or_city = json.dumps(info_town_or_city)
rent = json.dumps(int(info_rent))
state = json.dumps(info_state)
city = json.dumps(info_city)
population = json.dumps(int(info_population))
violent_crime = json.dumps(int(info_violent_crime))
murder = json.dumps(int(info_murder))
vehicle_theft = json.dumps(int(info_vehicle_theft))
arson = json.dumps(int(info_arson))
crime_rate = json.dumps(int(info_crime_rate))
urb_pop_cat = json.dumps(info_urb_pop_cat)
urb_pop_rang = json.dumps(info_urb_pop_rang)
rent_cat = json.dumps(info_rent_cat)
rent_rang = json.dumps(info_rent_rang)
crime_cat = json.dumps(info_crime_cat)
crime_rang = json.dumps(info_crime_rang)
# Add all future column names
return [location, pop, town_or_city, rent, state, city, population, violent_crime, murder, vehicle_theft,
arson, crime_rate, urb_pop_cat, urb_pop_rang, rent_cat, rent_rang, crime_cat, crime_rang]
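# Illustrative usage sketch: the pickled vectorizer and model plus the CSV above must
# exist on disk, and the query string is arbitrary, e.g.
#
#     result = recommend("walkable city with low rent and low crime")
#     location, population = result[0], result[1]   # JSON-encoded strings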
|
the-stack_106_17616
|
import json
import arcade
from .base import Base
from .util import arcade_int_to_string
import queue
import multiprocessing
OFFSET = 320
COLOURS = [(200, 100, 100), (100, 200, 100), (100, 100, 200)]
class Lobby(Base):
def __init__(self, display):
self.display = display
self.spritelist = arcade.SpriteList()
self.spritedict = dict()
self.sceneTime = 0
self.name = ""
self.cursor_index = -1
self.focus = None
self.network_thread = None
self.receive_queue = None
self.send_queue = None
self.players = [{}, {}, {}]
self.host = False
self.player_id = None
self.sprite_setup()
def reset(self, network_thread: multiprocessing.Process, receive: multiprocessing.Queue, send: multiprocessing.Queue, player_id, host=False) -> None:
self.network_thread = network_thread
self.receive_queue = receive
self.send_queue = send
self.host = host
self.sceneTime = 0
self.name = ""
self.cursor_index = -1
self.focus = None
self.player_id = player_id
self.players = [{}, {}, {}]
if self.host is True:
self.spritedict["start"] = arcade.Sprite(
"./assets/start_button.png",
scale=0.25,
center_x=160,
center_y=557.5
)
self.spritelist.append(self.spritedict["start"])
else:
try:
self.spritedict.pop("start")
except KeyError:
pass
def sprite_setup(self):
self.spritedict = {
"back": arcade.Sprite(
"./assets/back_button.png",
scale=0.25,
center_x=160,
center_y=687.5
),
"name": arcade.Sprite(
"./assets/simple_button.png",
scale=0.25,
center_x=160,
center_y=622.5
)
}
self.spritelist.extend(list(self.spritedict.values()))
def update(self, delta_time: float) -> None:
self.sceneTime += delta_time
try:
data = self.receive_queue.get(block=False)
except queue.Empty:
pass
else:
if data["type"] == "playersUpdate":
self.players = data["data"]
elif data["type"] == "startGame":
self.start_game()
def draw(self):
self.spritelist.draw()
if len(self.name) < 25:
buffer = " " * (25 - len(self.name))
else:
buffer = ""
arcade.draw_text(buffer + self.name[-25:], 15, 600, color=(255, 255, 255), font_size=35, width=560)
for c, player in enumerate(self.players):
if len(player) == 0:
continue
arcade.draw_text(player["name"], OFFSET * (c + 1), 640, color=(0, 0, 100), font_size=25)
arcade.draw_rectangle_filled(OFFSET * (c + 1.5), 700, 320, 40, COLOURS[c])
def mouse_release(self, x: float, y: float, button: int, modifiers: int):
print(self.host)
if self.spritedict["back"].collides_with_point((x, y)) is True:
self.display.change_scenes("mainMenu")
elif self.spritedict["name"].collides_with_point((x, y)) is True:
self.focus = "name"
elif self.host is True and self.spritedict["start"].collides_with_point((x, y)) is True:
print("starting game")
self.send_queue.put({"type": "startGame"})
else:
self.focus = None
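    # Text-cursor convention used by key_press below: cursor_index counts from the end of
    # self.name, so -1 means "after the last character", -2 "before the last character",
    # and -(len(self.name) + 1) is the very start of the string; LEFT/RIGHT wrap around.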
def key_press(self, key, modifiers):
if self.focus == "name":
if key == arcade.key.BACKSPACE:
if self.cursor_index == -1:
self.name = self.name[:-1]
else:
self.name = self.name[:self.cursor_index] + self.name[self.cursor_index + 1:]
elif key == arcade.key.DELETE:
if self.cursor_index == - (len(self.name) + 1):
self.name = self.name[1:]
self.cursor_index += 1
elif self.cursor_index < -2:
self.name = self.name[:self.cursor_index + 1] + self.name[self.cursor_index + 2:]
self.cursor_index += 1
elif self.cursor_index == -2:
self.name = self.name[:-1]
self.cursor_index += 1
elif key == arcade.key.LEFT:
self.cursor_index -= 1
if self.cursor_index <= - (len(self.name) + 2):
self.cursor_index = -1
elif key == arcade.key.RIGHT:
self.cursor_index += 1
if self.cursor_index >= 0:
self.cursor_index = - (len(self.name) + 1)
else:
key = arcade_int_to_string(key, modifiers)
if key != "":
if self.cursor_index == -1:
self.name = self.name + key
else:
self.name = self.name[:self.cursor_index + 1] + key + self.name[self.cursor_index + 1:]
self.send_queue.put({"type": "nameChange", "newName": self.name})
def start_game(self):
self.display.change_scenes("game", self.network_thread, self.receive_queue, self.send_queue, self.players, self.player_id)
|
the-stack_106_17619
|
"""Functions to make 3D plots with M/EEG data
"""
from __future__ import print_function
# Authors: Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
# Martin Luessi <[email protected]>
# Eric Larson <[email protected]>
# Mainak Jas <[email protected]>
#
# License: Simplified BSD
from ..externals.six import string_types, advance_iterator
from distutils.version import LooseVersion
import os
import inspect
import warnings
from itertools import cycle
import numpy as np
from scipy import linalg
from ..io.pick import pick_types
from ..surface import get_head_surf, get_meg_helmet_surf, read_surface
from ..transforms import read_trans, _find_trans, apply_trans
from ..utils import get_subjects_dir, logger, _check_subject
from .utils import mne_analyze_colormap, _prepare_trellis, COLORS
def plot_evoked_field(evoked, surf_maps, time=None, time_label='t = %0.0f ms',
n_jobs=1):
"""Plot MEG/EEG fields on head surface and helmet in 3D
Parameters
----------
evoked : instance of mne.Evoked
The evoked object.
surf_maps : list
The surface mapping information obtained with make_field_map.
time : float | None
The time point at which the field map shall be displayed. If None,
the average peak latency (across sensor types) is used.
time_label : str
How to print info about the time instant visualized.
n_jobs : int
        Number of jobs to run in parallel.
Returns
-------
fig : instance of mlab.Figure
The mayavi figure.
"""
types = [t for t in ['eeg', 'grad', 'mag'] if t in evoked]
time_idx = None
if time is None:
time = np.mean([evoked.get_peak(ch_type=t)[1] for t in types])
if not evoked.times[0] <= time <= evoked.times[-1]:
raise ValueError('`time` (%0.3f) must be inside `evoked.times`' % time)
time_idx = np.argmin(np.abs(evoked.times - time))
types = [sm['kind'] for sm in surf_maps]
# Plot them
from mayavi import mlab
alphas = [1.0, 0.5]
colors = [(0.6, 0.6, 0.6), (1.0, 1.0, 1.0)]
colormap = mne_analyze_colormap(format='mayavi')
colormap_lines = np.concatenate([np.tile([0., 0., 255., 255.], (127, 1)),
np.tile([0., 0., 0., 255.], (2, 1)),
np.tile([255., 0., 0., 255.], (127, 1))])
fig = mlab.figure(bgcolor=(0.0, 0.0, 0.0), size=(600, 600))
for ii, this_map in enumerate(surf_maps):
surf = this_map['surf']
map_data = this_map['data']
map_type = this_map['kind']
map_ch_names = this_map['ch_names']
if map_type == 'eeg':
pick = pick_types(evoked.info, meg=False, eeg=True)
else:
pick = pick_types(evoked.info, meg=True, eeg=False, ref_meg=False)
ch_names = [evoked.ch_names[k] for k in pick]
set_ch_names = set(ch_names)
set_map_ch_names = set(map_ch_names)
if set_ch_names != set_map_ch_names:
message = ['Channels in map and data do not match.']
diff = set_map_ch_names - set_ch_names
if len(diff):
message += ['%s not in data file. ' % list(diff)]
diff = set_ch_names - set_map_ch_names
if len(diff):
message += ['%s not in map file.' % list(diff)]
raise RuntimeError(' '.join(message))
data = np.dot(map_data, evoked.data[pick, time_idx])
x, y, z = surf['rr'].T
nn = surf['nn']
# make absolutely sure these are normalized for Mayavi
nn = nn / np.sum(nn * nn, axis=1)[:, np.newaxis]
# Make a solid surface
vlim = np.max(np.abs(data))
alpha = alphas[ii]
with warnings.catch_warnings(record=True): # traits
mesh = mlab.pipeline.triangular_mesh_source(x, y, z, surf['tris'])
mesh.data.point_data.normals = nn
mesh.data.cell_data.normals = None
mlab.pipeline.surface(mesh, color=colors[ii], opacity=alpha)
# Now show our field pattern
with warnings.catch_warnings(record=True): # traits
mesh = mlab.pipeline.triangular_mesh_source(x, y, z, surf['tris'],
scalars=data)
mesh.data.point_data.normals = nn
mesh.data.cell_data.normals = None
with warnings.catch_warnings(record=True): # traits
fsurf = mlab.pipeline.surface(mesh, vmin=-vlim, vmax=vlim)
fsurf.module_manager.scalar_lut_manager.lut.table = colormap
# And the field lines on top
with warnings.catch_warnings(record=True): # traits
mesh = mlab.pipeline.triangular_mesh_source(x, y, z, surf['tris'],
scalars=data)
mesh.data.point_data.normals = nn
mesh.data.cell_data.normals = None
with warnings.catch_warnings(record=True): # traits
cont = mlab.pipeline.contour_surface(mesh, contours=21,
line_width=1.0,
vmin=-vlim, vmax=vlim,
opacity=alpha)
cont.module_manager.scalar_lut_manager.lut.table = colormap_lines
if '%' in time_label:
time_label %= (1e3 * evoked.times[time_idx])
mlab.text(0.01, 0.01, time_label, width=0.4)
mlab.view(10, 60)
return fig
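# Illustrative usage sketch; the file names are placeholders and the make_field_map
# argument names follow recent MNE releases, so they may differ in this version:
#
#     evoked = mne.read_evokeds('sample_audvis-ave.fif', condition=0)
#     maps = mne.make_field_map(evoked, trans='sample-trans.fif',
#                               subject='sample', subjects_dir=subjects_dir)
#     plot_evoked_field(evoked, maps, time=0.1)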
def _plot_mri_contours(mri_fname, surf_fnames, orientation='coronal',
slices=None, show=True):
"""Plot BEM contours on anatomical slices.
Parameters
----------
mri_fname : str
The name of the file containing anatomical data.
surf_fnames : list of str
The filenames for the BEM surfaces in the format
['inner_skull.surf', 'outer_skull.surf', 'outer_skin.surf'].
orientation : str
'coronal' or 'transverse' or 'sagittal'
slices : list of int
Slice indices.
show : bool
Call pyplot.show() at the end.
Returns
-------
fig : Instance of matplotlib.figure.Figure
The figure.
"""
import matplotlib.pyplot as plt
import nibabel as nib
if orientation not in ['coronal', 'axial', 'sagittal']:
raise ValueError("Orientation must be 'coronal', 'axial' or "
"'sagittal'. Got %s." % orientation)
# Load the T1 data
nim = nib.load(mri_fname)
data = nim.get_data()
affine = nim.get_affine()
n_sag, n_axi, n_cor = data.shape
orientation_name2axis = dict(sagittal=0, axial=1, coronal=2)
orientation_axis = orientation_name2axis[orientation]
if slices is None:
n_slices = data.shape[orientation_axis]
        slices = np.linspace(0, n_slices, 12, endpoint=False).astype(int)
# create of list of surfaces
surfs = list()
trans = linalg.inv(affine)
# XXX : next line is a hack don't ask why
trans[:3, -1] = [n_sag // 2, n_axi // 2, n_cor // 2]
for surf_fname in surf_fnames:
surf = dict()
surf['rr'], surf['tris'] = read_surface(surf_fname)
# move back surface to MRI coordinate system
surf['rr'] = nib.affines.apply_affine(trans, surf['rr'])
surfs.append(surf)
fig, axs = _prepare_trellis(len(slices), 4)
for ax, sl in zip(axs, slices):
# adjust the orientations for good view
if orientation == 'coronal':
dat = data[:, :, sl].transpose()
elif orientation == 'axial':
dat = data[:, sl, :]
elif orientation == 'sagittal':
dat = data[sl, :, :]
# First plot the anatomical data
ax.imshow(dat, cmap=plt.cm.gray)
ax.axis('off')
# and then plot the contours on top
for surf in surfs:
if orientation == 'coronal':
ax.tricontour(surf['rr'][:, 0], surf['rr'][:, 1],
surf['tris'], surf['rr'][:, 2],
levels=[sl], colors='yellow', linewidths=2.0)
elif orientation == 'axial':
ax.tricontour(surf['rr'][:, 2], surf['rr'][:, 0],
surf['tris'], surf['rr'][:, 1],
levels=[sl], colors='yellow', linewidths=2.0)
elif orientation == 'sagittal':
ax.tricontour(surf['rr'][:, 2], surf['rr'][:, 1],
surf['tris'], surf['rr'][:, 0],
levels=[sl], colors='yellow', linewidths=2.0)
if show:
plt.subplots_adjust(left=0., bottom=0., right=1., top=1., wspace=0.,
hspace=0.)
plt.show()
return fig
def plot_trans(info, trans_fname='auto', subject=None, subjects_dir=None,
ch_type=None, source='bem'):
"""Plot MEG/EEG head surface and helmet in 3D.
Parameters
----------
info : dict
The measurement info.
trans_fname : str | 'auto'
The full path to the `*-trans.fif` file produced during
coregistration.
subject : str | None
The subject name corresponding to FreeSurfer environment
variable SUBJECT.
subjects_dir : str
The path to the freesurfer subjects reconstructions.
It corresponds to Freesurfer environment variable SUBJECTS_DIR.
ch_type : None | 'eeg' | 'meg'
If None, both the MEG helmet and EEG electrodes will be shown.
If 'meg', only the MEG helmet will be shown. If 'eeg', only the
EEG electrodes will be shown.
source : str
Type to load. Common choices would be `'bem'` or `'head'`. We first
try loading `'$SUBJECTS_DIR/$SUBJECT/bem/$SUBJECT-$SOURCE.fif'`, and
then look for `'$SUBJECT*$SOURCE.fif'` in the same directory. Defaults
to 'bem'. Note. For single layer bems it is recommended to use 'head'.
Returns
-------
fig : instance of mlab.Figure
The mayavi figure.
"""
if ch_type not in [None, 'eeg', 'meg']:
raise ValueError('Argument ch_type must be None | eeg | meg. Got %s.'
% ch_type)
if trans_fname == 'auto':
# let's try to do this in MRI coordinates so they're easy to plot
trans_fname = _find_trans(subject, subjects_dir)
trans = read_trans(trans_fname)
surfs = [get_head_surf(subject, source=source, subjects_dir=subjects_dir)]
if ch_type is None or ch_type == 'meg':
surfs.append(get_meg_helmet_surf(info, trans))
# Plot them
from mayavi import mlab
alphas = [1.0, 0.5]
colors = [(0.6, 0.6, 0.6), (0.0, 0.0, 0.6)]
fig = mlab.figure(bgcolor=(0.0, 0.0, 0.0), size=(600, 600))
for ii, surf in enumerate(surfs):
x, y, z = surf['rr'].T
nn = surf['nn']
# make absolutely sure these are normalized for Mayavi
nn = nn / np.sum(nn * nn, axis=1)[:, np.newaxis]
# Make a solid surface
alpha = alphas[ii]
with warnings.catch_warnings(record=True): # traits
mesh = mlab.pipeline.triangular_mesh_source(x, y, z, surf['tris'])
mesh.data.point_data.normals = nn
mesh.data.cell_data.normals = None
mlab.pipeline.surface(mesh, color=colors[ii], opacity=alpha)
if ch_type is None or ch_type == 'eeg':
eeg_locs = [l['eeg_loc'][:, 0] for l in info['chs']
if l['eeg_loc'] is not None]
if len(eeg_locs) > 0:
eeg_loc = np.array(eeg_locs)
# Transform EEG electrodes to MRI coordinates
eeg_loc = apply_trans(trans['trans'], eeg_loc)
with warnings.catch_warnings(record=True): # traits
mlab.points3d(eeg_loc[:, 0], eeg_loc[:, 1], eeg_loc[:, 2],
color=(1.0, 0.0, 0.0), scale_factor=0.005)
else:
warnings.warn('EEG electrode locations not found. '
'Cannot plot EEG electrodes.')
mlab.view(90, 90)
return fig
def plot_source_estimates(stc, subject=None, surface='inflated', hemi='lh',
colormap='hot', time_label='time=%0.2f ms',
smoothing_steps=10, fmin=5., fmid=10., fmax=15.,
transparent=True, alpha=1.0, time_viewer=False,
config_opts={}, subjects_dir=None, figure=None,
views='lat', colorbar=True):
"""Plot SourceEstimates with PySurfer
Note: PySurfer currently needs the SUBJECTS_DIR environment variable,
which will automatically be set by this function. Plotting multiple
SourceEstimates with different values for subjects_dir will cause
PySurfer to use the wrong FreeSurfer surfaces when using methods of
the returned Brain object. It is therefore recommended to set the
SUBJECTS_DIR environment variable or always use the same value for
subjects_dir (within the same Python session).
Parameters
----------
stc : SourceEstimates
The source estimates to plot.
subject : str | None
The subject name corresponding to FreeSurfer environment
variable SUBJECT. If None stc.subject will be used. If that
is None, the environment will be used.
surface : str
The type of surface (inflated, white etc.).
hemi : str, 'lh' | 'rh' | 'split' | 'both'
The hemisphere to display. Using 'both' or 'split' requires
PySurfer version 0.4 or above.
colormap : str
The type of colormap to use.
time_label : str
How to print info about the time instant visualized.
smoothing_steps : int
The amount of smoothing
fmin : float
The minimum value to display.
fmid : float
The middle value on the colormap.
fmax : float
The maximum value for the colormap.
transparent : bool
If True, use a linear transparency between fmin and fmid.
alpha : float
Alpha value to apply globally to the overlay.
time_viewer : bool
Display time viewer GUI.
config_opts : dict
Keyword arguments for Brain initialization.
See pysurfer.viz.Brain.
subjects_dir : str
The path to the freesurfer subjects reconstructions.
It corresponds to Freesurfer environment variable SUBJECTS_DIR.
figure : instance of mayavi.core.scene.Scene | list | int | None
If None, a new figure will be created. If multiple views or a
split view is requested, this must be a list of the appropriate
length. If int is provided it will be used to identify the Mayavi
figure by it's id or create a new figure with the given id.
views : str | list
View to use. See surfer.Brain().
colorbar : bool
If True, display colorbar on scene.
Returns
-------
brain : Brain
A instance of surfer.viz.Brain from PySurfer.
"""
import surfer
from surfer import Brain, TimeViewer
if hemi in ['split', 'both'] and LooseVersion(surfer.__version__) < '0.4':
raise NotImplementedError('hemi type "%s" not supported with your '
'version of pysurfer. Please upgrade to '
'version 0.4 or higher.' % hemi)
try:
import mayavi
from mayavi import mlab
except ImportError:
from enthought import mayavi
from enthought.mayavi import mlab
# import here to avoid circular import problem
from ..source_estimate import SourceEstimate
if not isinstance(stc, SourceEstimate):
raise ValueError('stc has to be a surface source estimate')
if hemi not in ['lh', 'rh', 'split', 'both']:
raise ValueError('hemi has to be either "lh", "rh", "split", '
'or "both"')
n_split = 2 if hemi == 'split' else 1
n_views = 1 if isinstance(views, string_types) else len(views)
if figure is not None:
# use figure with specified id or create new figure
if isinstance(figure, int):
figure = mlab.figure(figure, size=(600, 600))
# make sure it is of the correct type
if not isinstance(figure, list):
figure = [figure]
if not all([isinstance(f, mayavi.core.scene.Scene) for f in figure]):
raise TypeError('figure must be a mayavi scene or list of scenes')
# make sure we have the right number of figures
n_fig = len(figure)
if not n_fig == n_split * n_views:
raise RuntimeError('`figure` must be a list with the same '
'number of elements as PySurfer plots that '
                               'will be created (%s)' % (n_split * n_views))
subjects_dir = get_subjects_dir(subjects_dir=subjects_dir)
subject = _check_subject(stc.subject, subject, False)
if subject is None:
if 'SUBJECT' in os.environ:
subject = os.environ['SUBJECT']
else:
raise ValueError('SUBJECT environment variable not set')
if hemi in ['both', 'split']:
hemis = ['lh', 'rh']
else:
hemis = [hemi]
title = subject if len(hemis) > 1 else '%s - %s' % (subject, hemis[0])
args = inspect.getargspec(Brain.__init__)[0]
kwargs = dict(title=title, figure=figure, config_opts=config_opts,
subjects_dir=subjects_dir)
if 'views' in args:
kwargs['views'] = views
else:
logger.info('PySurfer does not support "views" argument, please '
'consider updating to a newer version (0.4 or later)')
with warnings.catch_warnings(record=True): # traits warnings
brain = Brain(subject, hemi, surface, **kwargs)
for hemi in hemis:
hemi_idx = 0 if hemi == 'lh' else 1
if hemi_idx == 0:
data = stc.data[:len(stc.vertno[0])]
else:
data = stc.data[len(stc.vertno[0]):]
vertices = stc.vertno[hemi_idx]
time = 1e3 * stc.times
with warnings.catch_warnings(record=True): # traits warnings
brain.add_data(data, colormap=colormap, vertices=vertices,
smoothing_steps=smoothing_steps, time=time,
time_label=time_label, alpha=alpha, hemi=hemi,
colorbar=colorbar)
# scale colormap and set time (index) to display
brain.scale_data_colormap(fmin=fmin, fmid=fmid, fmax=fmax,
transparent=transparent)
if time_viewer:
TimeViewer(brain)
return brain
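# Illustrative usage sketch; the file, subject and subjects_dir values are placeholders:
#
#     stc = mne.read_source_estimate('sample_audvis-meg-lh.stc')
#     brain = plot_source_estimates(stc, subject='sample', hemi='both',
#                                   subjects_dir=subjects_dir, time_viewer=True)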
def plot_sparse_source_estimates(src, stcs, colors=None, linewidth=2,
fontsize=18, bgcolor=(.05, 0, .1),
opacity=0.2, brain_color=(0.7,) * 3,
show=True, high_resolution=False,
fig_name=None, fig_number=None, labels=None,
modes=['cone', 'sphere'],
scale_factors=[1, 0.6],
verbose=None, **kwargs):
"""Plot source estimates obtained with sparse solver
Active dipoles are represented in a "Glass" brain.
If the same source is active in multiple source estimates it is
displayed with a sphere otherwise with a cone in 3D.
Parameters
----------
src : dict
The source space.
stcs : instance of SourceEstimate or list of instances of SourceEstimate
The source estimates (up to 3).
colors : list
List of colors
linewidth : int
Line width in 2D plot.
fontsize : int
Font size.
bgcolor : tuple of length 3
Background color in 3D.
opacity : float in [0, 1]
Opacity of brain mesh.
brain_color : tuple of length 3
Brain color.
show : bool
Show figures if True.
fig_name :
Mayavi figure name.
fig_number :
Matplotlib figure number.
labels : ndarray or list of ndarrays
Labels to show sources in clusters. Sources with the same
label and the waveforms within each cluster are presented in
the same color. labels should be a list of ndarrays when
stcs is a list ie. one label for each stc.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
kwargs : kwargs
Keyword arguments to pass to mlab.triangular_mesh.
"""
if not isinstance(stcs, list):
stcs = [stcs]
if labels is not None and not isinstance(labels, list):
labels = [labels]
if colors is None:
colors = COLORS
linestyles = ['-', '--', ':']
# Show 3D
lh_points = src[0]['rr']
rh_points = src[1]['rr']
points = np.r_[lh_points, rh_points]
lh_normals = src[0]['nn']
rh_normals = src[1]['nn']
normals = np.r_[lh_normals, rh_normals]
if high_resolution:
use_lh_faces = src[0]['tris']
use_rh_faces = src[1]['tris']
else:
use_lh_faces = src[0]['use_tris']
use_rh_faces = src[1]['use_tris']
use_faces = np.r_[use_lh_faces, lh_points.shape[0] + use_rh_faces]
points *= 170
vertnos = [np.r_[stc.lh_vertno, lh_points.shape[0] + stc.rh_vertno]
for stc in stcs]
unique_vertnos = np.unique(np.concatenate(vertnos).ravel())
try:
from mayavi import mlab
except ImportError:
from enthought.mayavi import mlab
from matplotlib.colors import ColorConverter
color_converter = ColorConverter()
f = mlab.figure(figure=fig_name, bgcolor=bgcolor, size=(600, 600))
mlab.clf()
if mlab.options.backend != 'test':
f.scene.disable_render = True
with warnings.catch_warnings(record=True): # traits warnings
surface = mlab.triangular_mesh(points[:, 0], points[:, 1],
points[:, 2], use_faces,
color=brain_color,
opacity=opacity, **kwargs)
import matplotlib.pyplot as plt
# Show time courses
plt.figure(fig_number)
plt.clf()
colors = cycle(colors)
logger.info("Total number of active sources: %d" % len(unique_vertnos))
if labels is not None:
colors = [advance_iterator(colors) for _ in
range(np.unique(np.concatenate(labels).ravel()).size)]
for idx, v in enumerate(unique_vertnos):
# get indices of stcs it belongs to
ind = [k for k, vertno in enumerate(vertnos) if v in vertno]
is_common = len(ind) > 1
if labels is None:
c = advance_iterator(colors)
else:
# if vertex is in different stcs than take label from first one
c = colors[labels[ind[0]][vertnos[ind[0]] == v]]
mode = modes[1] if is_common else modes[0]
scale_factor = scale_factors[1] if is_common else scale_factors[0]
if (isinstance(scale_factor, (np.ndarray, list, tuple))
and len(unique_vertnos) == len(scale_factor)):
scale_factor = scale_factor[idx]
x, y, z = points[v]
nx, ny, nz = normals[v]
with warnings.catch_warnings(record=True): # traits
mlab.quiver3d(x, y, z, nx, ny, nz, color=color_converter.to_rgb(c),
mode=mode, scale_factor=scale_factor)
for k in ind:
vertno = vertnos[k]
mask = (vertno == v)
assert np.sum(mask) == 1
linestyle = linestyles[k]
            plt.plot(1e3 * stcs[k].times, 1e9 * stcs[k].data[mask].ravel(), c=c,
linewidth=linewidth, linestyle=linestyle)
plt.xlabel('Time (ms)', fontsize=18)
plt.ylabel('Source amplitude (nAm)', fontsize=18)
if fig_name is not None:
plt.title(fig_name)
if show:
plt.show()
surface.actor.property.backface_culling = True
surface.actor.property.shading = True
return surface
|
the-stack_106_17620
|
import os.path
from os.path import basename, exists
from os.path import join as pjoin
from pathlib import Path
from subprocess import CalledProcessError, check_output
from tqdm import tqdm
from skelshop.io import ShotSegmentedWriter
from skelshop.shotseg.base import SHOT_CHANGE
def fulldir(path):
return os.path.dirname(os.path.realpath(path))
FMT_VERSION = 1
CUR_DIR = fulldir(__file__)
def add_basic_metadata(h5f, video, num_frames):
h5f.attrs["video"] = basename(video)
h5f.attrs["num_frames"] = num_frames
def add_metadata(h5f, video, num_frames, mode, limbs):
add_basic_metadata(h5f, video, num_frames)
h5f.attrs["mode"] = mode
h5f.attrs["limbs"] = limbs
def git_describe_safe(cwd):
git_describe = pjoin(cwd, ".git-describe")
if exists(git_describe):
with open(git_describe) as describe_f:
return describe_f.read()
else:
try:
return (
check_output(["git", "describe", "--always"], cwd=cwd).decode().strip()
)
except CalledProcessError:
return "unknown"
def extract_cmake_flags(cwd, flags):
build_path = Path(cwd)
flag_results = {flag: "unknown" for flag in flags}
path_success = None
while build_path != build_path.parent:
cmake_path = build_path / "CMakeCache.txt"
if cmake_path.exists():
path_success = cmake_path
break
build_path = build_path.parent
if path_success is not None:
with open(cmake_path) as cmake_cache:
for line in cmake_cache:
for flag in flags:
if line.startswith(flag + ":STRING="):
flag_results[flag] = line.split("=", 1)[1]
return flag_results
def add_fmt_metadata(h5f, fmt_type, running_op=False):
h5f.attrs["fmt_type"] = fmt_type
h5f.attrs["fmt_ver"] = FMT_VERSION
h5f.attrs["legacy_skels"] = "LEGACY_SKELS" in os.environ
skeldump_ver = "{}={}".format(fmt_type, git_describe_safe(CUR_DIR))
if "skeldump_ver" in h5f.attrs:
h5f.attrs["skeldump_ver"] = skeldump_ver + ";" + h5f.attrs["skeldump_ver"]
else:
h5f.attrs["skeldump_ver"] = skeldump_ver
if running_op:
import openpose
op_py_dir = fulldir(openpose.__file__)
h5f.attrs["op_ver"] = git_describe_safe(op_py_dir)
for name, val in extract_cmake_flags(
op_py_dir, ("GPU_MODE", "DL_FRAMEWORK")
).items():
h5f.attrs["op_" + name] = val
def write_shots(
h5f,
num_kps,
frame_iter,
writer_cls=ShotSegmentedWriter,
start_frame=0,
**create_kwargs
):
writer = writer_cls(h5f, num_kps=num_kps, **create_kwargs)
writer.start_shot(start_frame)
frame_num = start_frame
for frame in tqdm(frame_iter, total=frame_iter.total_frames):
if frame is SHOT_CHANGE:
writer.end_shot()
writer.start_shot()
else:
writer.register_frame(frame_num)
for pose_id, pose in frame:
writer.add_pose(frame_num, pose_id, pose.all())
frame_num += 1
writer.end_shot()
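# A minimal usage sketch (not part of the original module; `frames` is a
# hypothetical iterable with a `total_frames` attribute that yields either
# SHOT_CHANGE or an iterable of (pose_id, pose) pairs per frame, and the
# fmt_type string is a placeholder):
#
#   import h5py
#   with h5py.File("pose_dump.h5", "w") as h5f:
#       add_basic_metadata(h5f, "video.mp4", num_frames=frames.total_frames)
#       add_fmt_metadata(h5f, "some_fmt_type")
#       write_shots(h5f, num_kps=25, frame_iter=frames)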
|
the-stack_106_17622
|
# assigning a value to a variable
nama = "John Doe"
# printing the variable
print(nama)
# the value and data type stored in a variable can be changed
umur = 20  # initial value
print(umur)  # print the value of umur
type(umur)  # check the data type of umur
umur = "dua puluh satu"  # value after the change
print(umur)  # print the value of umur
type(umur)  # check the data type of umur
namaDepan = "Budi"
namaBelakang = "Susanto"
nama = namaDepan + " " + namaBelakang
umur = 22
hobi = "Berenang"
print("Biodata\n", nama, "\n", umur, "\n", hobi)
# more examples of valid variable names
inivariabel = "Halo"
ini_juga_variabel = "Hai"
_inivariabeljuga = "Hi"
inivariabel222 = "Bye"
panjang = 10
lebar = 5
luas = panjang * lebar
print(luas)
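# A small additional sketch (not part of the original tutorial): the variables
# defined above can also be combined inside a single f-string.
# print(f"{nama} is {umur} years old and likes {hobi}")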
|
the-stack_106_17627
|
# -*- test-case-name: twisted.python.test.test_util -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
from __future__ import division, absolute_import, print_function
import os, sys, errno, warnings
try:
import pwd, grp
except ImportError:
pwd = grp = None
try:
from os import setgroups, getgroups
except ImportError:
setgroups = getgroups = None
from functools import wraps
from twisted.python.compat import _PY3, unicode
from twisted.python.versions import Version
from twisted.python.deprecate import deprecatedModuleAttribute
# For backwards compatibility, some things import this, so just link it
from collections import OrderedDict
deprecatedModuleAttribute(
Version("Twisted", 15, 5, 0),
"Use collections.OrderedDict instead.",
"twisted.python.util",
"OrderedDict")
class InsensitiveDict:
"""Dictionary, that has case-insensitive keys.
Normally keys are retained in their original form when queried with
.keys() or .items(). If initialized with preserveCase=0, keys are both
looked up in lowercase and returned in lowercase by .keys() and .items().
"""
"""
Modified recipe at
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66315 originally
contributed by Sami Hangaslammi.
"""
def __init__(self, dict=None, preserve=1):
"""Create an empty dictionary, or update from 'dict'."""
self.data = {}
self.preserve=preserve
if dict:
self.update(dict)
def __delitem__(self, key):
k=self._lowerOrReturn(key)
del self.data[k]
def _lowerOrReturn(self, key):
if isinstance(key, bytes) or isinstance(key, unicode):
return key.lower()
else:
return key
def __getitem__(self, key):
"""Retrieve the value associated with 'key' (in any case)."""
k = self._lowerOrReturn(key)
return self.data[k][1]
def __setitem__(self, key, value):
"""Associate 'value' with 'key'. If 'key' already exists, but
in different case, it will be replaced."""
k = self._lowerOrReturn(key)
self.data[k] = (key, value)
def has_key(self, key):
"""Case insensitive test whether 'key' exists."""
k = self._lowerOrReturn(key)
return k in self.data
__contains__ = has_key
def _doPreserve(self, key):
if not self.preserve and (isinstance(key, bytes)
or isinstance(key, unicode)):
return key.lower()
else:
return key
def keys(self):
"""List of keys in their original case."""
return list(self.iterkeys())
def values(self):
"""List of values."""
return list(self.itervalues())
def items(self):
"""List of (key,value) pairs."""
return list(self.iteritems())
def get(self, key, default=None):
"""Retrieve value associated with 'key' or return default value
if 'key' doesn't exist."""
try:
return self[key]
except KeyError:
return default
def setdefault(self, key, default):
"""If 'key' doesn't exist, associate it with the 'default' value.
Return value associated with 'key'."""
if not self.has_key(key):
self[key] = default
return self[key]
def update(self, dict):
"""Copy (key,value) pairs from 'dict'."""
for k,v in dict.items():
self[k] = v
def __repr__(self):
"""String representation of the dictionary."""
items = ", ".join([("%r: %r" % (k,v)) for k,v in self.items()])
return "InsensitiveDict({%s})" % items
def iterkeys(self):
for v in self.data.values():
yield self._doPreserve(v[0])
def itervalues(self):
for v in self.data.values():
yield v[1]
def iteritems(self):
for (k, v) in self.data.values():
yield self._doPreserve(k), v
def popitem(self):
i=self.items()[0]
del self[i[0]]
return i
def clear(self):
for k in self.keys():
del self[k]
def copy(self):
return InsensitiveDict(self, self.preserve)
def __len__(self):
return len(self.data)
def __eq__(self, other):
for k,v in self.items():
if not (k in other) or not (other[k]==v):
return 0
return len(self)==len(other)
def uniquify(lst):
"""Make the elements of a list unique by inserting them into a dictionary.
This must not change the order of the input lst.
"""
dct = {}
result = []
for k in lst:
if k not in dct:
result.append(k)
dct[k] = 1
return result
def padTo(n, seq, default=None):
"""
Pads a sequence out to n elements,
filling in with a default value if it is not long enough.
If the input sequence is longer than n, raises ValueError.
Details, details:
This returns a new list; it does not extend the original sequence.
The new list contains the values of the original sequence, not copies.
"""
if len(seq) > n:
raise ValueError("%d elements is more than %d." % (len(seq), n))
blank = [default] * n
blank[:len(seq)] = list(seq)
return blank
def getPluginDirs():
warnings.warn(
"twisted.python.util.getPluginDirs is deprecated since Twisted 12.2.",
DeprecationWarning, stacklevel=2)
import twisted
systemPlugins = os.path.join(os.path.dirname(os.path.dirname(
os.path.abspath(twisted.__file__))), 'plugins')
userPlugins = os.path.expanduser("~/TwistedPlugins")
confPlugins = os.path.expanduser("~/.twisted")
allPlugins = filter(os.path.isdir, [systemPlugins, userPlugins, confPlugins])
return allPlugins
def addPluginDir():
warnings.warn(
"twisted.python.util.addPluginDir is deprecated since Twisted 12.2.",
DeprecationWarning, stacklevel=2)
sys.path.extend(getPluginDirs())
def sibpath(path, sibling):
"""
Return the path to a sibling of a file in the filesystem.
This is useful in conjunction with the special C{__file__} attribute
that Python provides for modules, so modules can load associated
resource files.
"""
return os.path.join(os.path.dirname(os.path.abspath(path)), sibling)
def _getpass(prompt):
"""
Helper to turn IOErrors into KeyboardInterrupts.
"""
import getpass
try:
return getpass.getpass(prompt)
except IOError as e:
if e.errno == errno.EINTR:
raise KeyboardInterrupt
raise
except EOFError:
raise KeyboardInterrupt
def getPassword(prompt = 'Password: ', confirm = 0, forceTTY = 0,
confirmPrompt = 'Confirm password: ',
mismatchMessage = "Passwords don't match."):
"""Obtain a password by prompting or from stdin.
If stdin is a terminal, prompt for a new password, and confirm (if
C{confirm} is true) by asking again to make sure the user typed the same
thing, as keystrokes will not be echoed.
If stdin is not a terminal, and C{forceTTY} is not true, read in a line
and use it as the password, less the trailing newline, if any. If
C{forceTTY} is true, attempt to open a tty and prompt for the password
using it. Raise a RuntimeError if this is not possible.
@returns: C{str}
"""
isaTTY = hasattr(sys.stdin, 'isatty') and sys.stdin.isatty()
old = None
try:
if not isaTTY:
if forceTTY:
try:
old = sys.stdin, sys.stdout
sys.stdin = sys.stdout = open('/dev/tty', 'r+')
except:
raise RuntimeError("Cannot obtain a TTY")
else:
password = sys.stdin.readline()
if password[-1] == '\n':
password = password[:-1]
return password
while 1:
try1 = _getpass(prompt)
if not confirm:
return try1
try2 = _getpass(confirmPrompt)
if try1 == try2:
return try1
else:
sys.stderr.write(mismatchMessage + "\n")
finally:
if old:
sys.stdin.close()
sys.stdin, sys.stdout = old
def println(*a):
sys.stdout.write(' '.join(map(str, a))+'\n')
# XXX
# This does not belong here
# But where does it belong?
def str_xor(s, b):
return ''.join([chr(ord(c) ^ b) for c in s])
def makeStatBar(width, maxPosition, doneChar = '=', undoneChar = '-', currentChar = '>'):
"""
Creates a function that will return a string representing a progress bar.
"""
aValue = width / float(maxPosition)
def statBar(position, force = 0, last = ['']):
assert len(last) == 1, "Don't mess with the last parameter."
done = int(aValue * position)
toDo = width - done - 2
result = "[%s%s%s]" % (doneChar * done, currentChar, undoneChar * toDo)
if force:
last[0] = result
return result
if result == last[0]:
return ''
last[0] = result
return result
statBar.__doc__ = """statBar(position, force = 0) -> '[%s%s%s]'-style progress bar
returned string is %d characters long, and the range goes from 0..%d.
The 'position' argument is where the '%s' will be drawn. If force is false,
'' will be returned instead if the resulting progress bar is identical to the
previously returned progress bar.
""" % (doneChar * 3, currentChar, undoneChar * 3, width, maxPosition, currentChar)
return statBar
def spewer(frame, s, ignored):
"""
A trace function for sys.settrace that prints every function or method call.
"""
from twisted.python import reflect
if 'self' in frame.f_locals:
se = frame.f_locals['self']
if hasattr(se, '__class__'):
k = reflect.qual(se.__class__)
else:
k = reflect.qual(type(se))
print('method %s of %s at %s' % (
frame.f_code.co_name, k, id(se)))
else:
print('function %s in %s, line %s' % (
frame.f_code.co_name,
frame.f_code.co_filename,
frame.f_lineno))
def searchupwards(start, files=[], dirs=[]):
"""
Walk upwards from start, looking for a directory containing
all files and directories given as arguments::
>>> searchupwards('.', ['foo.txt'], ['bar', 'bam'])
If not found, return None
"""
start=os.path.abspath(start)
parents=start.split(os.sep)
exists=os.path.exists; join=os.sep.join; isdir=os.path.isdir
while len(parents):
candidate=join(parents)+os.sep
allpresent=1
for f in files:
if not exists("%s%s" % (candidate, f)):
allpresent=0
break
if allpresent:
for d in dirs:
if not isdir("%s%s" % (candidate, d)):
allpresent=0
break
if allpresent: return candidate
parents.pop(-1)
return None
class LineLog:
"""
A limited-size line-based log, useful for logging line-based
protocols such as SMTP.
When the log fills up, old entries drop off the end.
"""
def __init__(self, size=10):
"""
Create a new log, with size lines of storage (default 10).
A log size of 0 (or less) means an infinite log.
"""
if size < 0:
size = 0
self.log = [None]*size
self.size = size
def append(self,line):
if self.size:
self.log[:-1] = self.log[1:]
self.log[-1] = line
else:
self.log.append(line)
def str(self):
return '\n'.join(filter(None,self.log))
def __getitem__(self, item):
return filter(None,self.log)[item]
def clear(self):
"""Empty the log"""
self.log = [None]*self.size
def raises(exception, f, *args, **kwargs):
"""
Determine whether the given call raises the given exception.
"""
try:
f(*args, **kwargs)
except exception:
return 1
return 0
class IntervalDifferential(object):
"""
Given a list of intervals, generate the amount of time to sleep between
"instants".
For example, given 7, 11 and 13, the three (infinite) sequences::
7 14 21 28 35 ...
11 22 33 44 ...
13 26 39 52 ...
will be generated, merged, and used to produce::
(7, 0) (4, 1) (2, 2) (1, 0) (7, 0) (1, 1) (4, 2) (2, 0) (5, 1) (2, 0)
New intervals may be added or removed as iteration proceeds using the
proper methods.
"""
def __init__(self, intervals, default=60):
"""
@type intervals: C{list} of C{int}, C{long}, or C{float} param
@param intervals: The intervals between instants.
@type default: C{int}, C{long}, or C{float}
@param default: The duration to generate if the intervals list
becomes empty.
"""
self.intervals = intervals[:]
self.default = default
def __iter__(self):
return _IntervalDifferentialIterator(self.intervals, self.default)
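# A minimal usage sketch (not part of the original module), reproducing the
# sequence described in the IntervalDifferential docstring above:
#   it = iter(IntervalDifferential([7, 11, 13]))
#   [next(it) for _ in range(5)]  # -> [(7, 0), (4, 1), (2, 2), (1, 0), (7, 0)]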
class _IntervalDifferentialIterator(object):
def __init__(self, i, d):
self.intervals = [[e, e, n] for (e, n) in zip(i, range(len(i)))]
self.default = d
self.last = 0
def __next__(self):
if not self.intervals:
return (self.default, None)
last, index = self.intervals[0][0], self.intervals[0][2]
self.intervals[0][0] += self.intervals[0][1]
self.intervals.sort()
result = last - self.last
self.last = last
return result, index
# Iterators on Python 2 use next(), not __next__()
next = __next__
def addInterval(self, i):
if self.intervals:
delay = self.intervals[0][0] - self.intervals[0][1]
self.intervals.append([delay + i, i, len(self.intervals)])
self.intervals.sort()
else:
self.intervals.append([i, i, 0])
def removeInterval(self, interval):
for i in range(len(self.intervals)):
if self.intervals[i][1] == interval:
index = self.intervals[i][2]
del self.intervals[i]
for i in self.intervals:
if i[2] > index:
i[2] -= 1
return
raise ValueError("Specified interval not in IntervalDifferential")
class FancyStrMixin:
"""
Mixin providing a flexible implementation of C{__str__}.
C{__str__} output will begin with the name of the class, or the contents
of the attribute C{fancybasename} if it is set.
The body of C{__str__} can be controlled by overriding C{showAttributes} in
a subclass. Set C{showAttributes} to a sequence of strings naming
attributes, or sequences of C{(attributeName, callable)}, or sequences of
C{(attributeName, displayName, formatCharacter)}. In the second case, the
callable is passed the value of the attribute and its return value used in
the output of C{__str__}. In the final case, the attribute is looked up
using C{attributeName}, but the output uses C{displayName} instead, and
renders the value of the attribute using C{formatCharacter}, e.g. C{"%.3f"}
might be used for a float.
"""
# Override in subclasses:
showAttributes = ()
def __str__(self):
r = ['<', (hasattr(self, 'fancybasename') and self.fancybasename)
or self.__class__.__name__]
for attr in self.showAttributes:
if isinstance(attr, str):
r.append(' %s=%r' % (attr, getattr(self, attr)))
elif len(attr) == 2:
r.append((' %s=' % (attr[0],)) + attr[1](getattr(self, attr[0])))
else:
r.append((' %s=' + attr[2]) % (attr[1], getattr(self, attr[0])))
r.append('>')
return ''.join(r)
__repr__ = __str__
class FancyEqMixin:
"""
Mixin that implements C{__eq__} and C{__ne__}.
Comparison is done using the list of attributes defined in
C{compareAttributes}.
"""
compareAttributes = ()
def __eq__(self, other):
if not self.compareAttributes:
return self is other
if isinstance(self, other.__class__):
return (
[getattr(self, name) for name in self.compareAttributes] ==
[getattr(other, name) for name in self.compareAttributes])
return NotImplemented
def __ne__(self, other):
result = self.__eq__(other)
if result is NotImplemented:
return result
return not result
try:
# initgroups is available in Python 2.7+ on UNIX-likes
from os import initgroups as _initgroups
except ImportError:
_initgroups = None
if _initgroups is None:
def initgroups(uid, primaryGid):
"""
Do nothing.
        Underlying platform support required to manipulate groups is missing.
"""
else:
def initgroups(uid, primaryGid):
"""
Initializes the group access list.
This uses the stdlib support which calls initgroups(3) under the hood.
If the given user is a member of more than C{NGROUPS}, arbitrary
groups will be silently discarded to bring the number below that
limit.
@type uid: C{int}
@param uid: The UID for which to look up group information.
@type primaryGid: C{int} or L{None}
@param primaryGid: If provided, an additional GID to include when
setting the groups.
"""
return _initgroups(pwd.getpwuid(uid)[0], primaryGid)
def switchUID(uid, gid, euid=False):
"""
Attempts to switch the uid/euid and gid/egid for the current process.
If C{uid} is the same value as L{os.getuid} (or L{os.geteuid}),
this function will issue a L{UserWarning} and not raise an exception.
@type uid: C{int} or L{None}
@param uid: the UID (or EUID) to switch the current process to. This
parameter will be ignored if the value is L{None}.
@type gid: C{int} or L{None}
@param gid: the GID (or EGID) to switch the current process to. This
parameter will be ignored if the value is L{None}.
@type euid: C{bool}
@param euid: if True, set only effective user-id rather than real user-id.
(This option has no effect unless the process is running
as root, in which case it means not to shed all
privileges, retaining the option to regain privileges
in cases such as spawning processes. Use with caution.)
"""
if euid:
setuid = os.seteuid
setgid = os.setegid
getuid = os.geteuid
else:
setuid = os.setuid
setgid = os.setgid
getuid = os.getuid
if gid is not None:
setgid(gid)
if uid is not None:
if uid == getuid():
uidText = (euid and "euid" or "uid")
actionText = "tried to drop privileges and set%s %s" % (uidText, uid)
problemText = "%s is already %s" % (uidText, getuid())
warnings.warn("%s but %s; should we be root? Continuing."
% (actionText, problemText))
else:
initgroups(uid, gid)
setuid(uid)
class SubclassableCStringIO(object):
"""
A wrapper around cStringIO to allow for subclassing.
"""
__csio = None
def __init__(self, *a, **kw):
from cStringIO import StringIO
self.__csio = StringIO(*a, **kw)
def __iter__(self):
return self.__csio.__iter__()
def next(self):
return self.__csio.next()
def close(self):
return self.__csio.close()
def isatty(self):
return self.__csio.isatty()
def seek(self, pos, mode=0):
return self.__csio.seek(pos, mode)
def tell(self):
return self.__csio.tell()
def read(self, n=-1):
return self.__csio.read(n)
def readline(self, length=None):
return self.__csio.readline(length)
def readlines(self, sizehint=0):
return self.__csio.readlines(sizehint)
def truncate(self, size=None):
return self.__csio.truncate(size)
def write(self, s):
return self.__csio.write(s)
def writelines(self, list):
return self.__csio.writelines(list)
def flush(self):
return self.__csio.flush()
def getvalue(self):
return self.__csio.getvalue()
def untilConcludes(f, *a, **kw):
"""
Call C{f} with the given arguments, handling C{EINTR} by retrying.
@param f: A function to call.
@param *a: Positional arguments to pass to C{f}.
@param **kw: Keyword arguments to pass to C{f}.
@return: Whatever C{f} returns.
@raise: Whatever C{f} raises, except for C{IOError} or C{OSError} with
C{errno} set to C{EINTR}.
"""
while True:
try:
return f(*a, **kw)
except (IOError, OSError) as e:
if e.args[0] == errno.EINTR:
continue
raise
def mergeFunctionMetadata(f, g):
"""
Overwrite C{g}'s name and docstring with values from C{f}. Update
C{g}'s instance dictionary with C{f}'s.
@return: A function that has C{g}'s behavior and metadata merged from
C{f}.
"""
try:
g.__name__ = f.__name__
except TypeError:
pass
try:
g.__doc__ = f.__doc__
except (TypeError, AttributeError):
pass
try:
g.__dict__.update(f.__dict__)
except (TypeError, AttributeError):
pass
try:
g.__module__ = f.__module__
except TypeError:
pass
return g
def nameToLabel(mname):
"""
Convert a string like a variable name into a slightly more human-friendly
string with spaces and capitalized letters.
@type mname: C{str}
@param mname: The name to convert to a label. This must be a string
which could be used as a Python identifier. Strings which do not take
this form will result in unpredictable behavior.
@rtype: C{str}
"""
labelList = []
word = ''
lastWasUpper = False
for letter in mname:
if letter.isupper() == lastWasUpper:
# Continuing a word.
word += letter
else:
# breaking a word OR beginning a word
if lastWasUpper:
# could be either
if len(word) == 1:
# keep going
word += letter
else:
# acronym
# we're processing the lowercase letter after the acronym-then-capital
lastWord = word[:-1]
firstLetter = word[-1]
labelList.append(lastWord)
word = firstLetter + letter
else:
# definitely breaking: lower to upper
labelList.append(word)
word = letter
lastWasUpper = letter.isupper()
if labelList:
labelList[0] = labelList[0].capitalize()
else:
return mname.capitalize()
labelList.append(word)
return ' '.join(labelList)
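# A minimal usage sketch (not part of the original module):
#   nameToLabel('fooBarBaz')  # -> 'Foo Bar Baz'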
def uidFromString(uidString):
"""
Convert a user identifier, as a string, into an integer UID.
    @type uidString: C{str}
    @param uidString: A string giving the base-ten representation of a UID or the
name of a user which can be converted to a UID via L{pwd.getpwnam}.
@rtype: C{int}
@return: The integer UID corresponding to the given string.
@raise ValueError: If the user name is supplied and L{pwd} is not
available.
"""
try:
return int(uidString)
except ValueError:
if pwd is None:
raise
return pwd.getpwnam(uidString)[2]
def gidFromString(gidString):
"""
Convert a group identifier, as a string, into an integer GID.
    @type gidString: C{str}
    @param gidString: A string giving the base-ten representation of a GID or the
name of a group which can be converted to a GID via L{grp.getgrnam}.
@rtype: C{int}
@return: The integer GID corresponding to the given string.
@raise ValueError: If the group name is supplied and L{grp} is not
available.
"""
try:
return int(gidString)
except ValueError:
if grp is None:
raise
return grp.getgrnam(gidString)[2]
def runAsEffectiveUser(euid, egid, function, *args, **kwargs):
"""
Run the given function wrapped with seteuid/setegid calls.
This will try to minimize the number of seteuid/setegid calls, comparing
current and wanted permissions
@param euid: effective UID used to call the function.
@type euid: C{int}
    @param egid: effective GID used to call the function.
    @type egid: C{int}
@param function: the function run with the specific permission.
@type function: any callable
@param *args: arguments passed to C{function}
@param **kwargs: keyword arguments passed to C{function}
"""
uid, gid = os.geteuid(), os.getegid()
if uid == euid and gid == egid:
return function(*args, **kwargs)
else:
if uid != 0 and (uid != euid or gid != egid):
os.seteuid(0)
if gid != egid:
os.setegid(egid)
if euid != 0 and (euid != uid or gid != egid):
os.seteuid(euid)
try:
return function(*args, **kwargs)
finally:
if euid != 0 and (uid != euid or gid != egid):
os.seteuid(0)
if gid != egid:
os.setegid(gid)
if uid != 0 and (uid != euid or gid != egid):
os.seteuid(uid)
def runWithWarningsSuppressed(suppressedWarnings, f, *args, **kwargs):
"""
Run C{f(*args, **kwargs)}, but with some warnings suppressed.
Unlike L{twisted.internet.utils.runWithWarningsSuppressed}, it has no
special support for L{twisted.internet.defer.Deferred}.
@param suppressedWarnings: A list of arguments to pass to filterwarnings.
Must be a sequence of 2-tuples (args, kwargs).
@param f: A callable.
@param args: Arguments for C{f}.
@param kwargs: Keyword arguments for C{f}
@return: The result of C{f(*args, **kwargs)}.
"""
with warnings.catch_warnings():
for a, kw in suppressedWarnings:
warnings.filterwarnings(*a, **kw)
return f(*args, **kwargs)
def _replaceIf(condition, alternative):
"""
If C{condition}, replace this function with C{alternative}.
@param condition: A L{bool} which says whether this should be replaced.
@param alternative: An alternative function that will be swapped in instead
of the original, if C{condition} is truthy.
@return: A decorator.
"""
def decorator(func):
if condition is True:
call = alternative
elif condition is False:
call = func
else:
raise ValueError(("condition argument to _replaceIf requires a "
"bool, not {}").format(repr(condition)))
@wraps(func)
def wrapped(*args, **kwargs):
return call(*args, **kwargs)
return wrapped
return decorator
__all__ = [
"uniquify", "padTo", "getPluginDirs", "addPluginDir", "sibpath",
"getPassword", "println", "makeStatBar", "OrderedDict",
"InsensitiveDict", "spewer", "searchupwards", "LineLog",
"raises", "IntervalDifferential", "FancyStrMixin", "FancyEqMixin",
"switchUID", "SubclassableCStringIO", "mergeFunctionMetadata",
"nameToLabel", "uidFromString", "gidFromString", "runAsEffectiveUser",
"untilConcludes", "runWithWarningsSuppressed",
]
if _PY3:
__notported__ = ["SubclassableCStringIO", "LineLog", "makeStatBar"]
for name in __all__[:]:
if name in __notported__:
__all__.remove(name)
del globals()[name]
del name, __notported__
|
the-stack_106_17628
|
import math
from typing import Any, Dict, Iterator, List, Optional, Tuple, Union
import numpy as np
import tensorflow as tf
import determined as det
from determined import keras
from determined_common import check
ArrayLike = Union[np.ndarray, List[np.ndarray], Dict[str, np.ndarray]]
def _is_list_of_numpy_array(x: Any) -> bool:
return isinstance(x, (list, tuple)) and all(isinstance(v, np.ndarray) for v in x)
def _is_dict_of_numpy_array(x: Any) -> bool:
return isinstance(x, dict) and all(isinstance(x[k], np.ndarray) for k in x)
def _length_of_multi_arraylike(data: ArrayLike) -> int:
if isinstance(data, np.ndarray):
return len(data)
elif isinstance(data, (list, tuple)):
return len(data[0])
elif isinstance(data, dict):
return len(list(data.values())[0])
else:
raise det.errors.InternalException(f"Unsupported data type: {type(data)}.")
def _get_elements_in_multi_arraylike(data: ArrayLike, start: int, end: int) -> Any:
if isinstance(data, np.ndarray):
return data[start:end]
elif isinstance(data, (list, tuple)):
return [arraylike[start:end] for arraylike in data]
elif isinstance(data, dict):
return {name: data[name][start:end] for name in data}
else:
raise det.errors.InternalException(f"Unsupported data type: {type(data)}.")
class _ArrayLikeAdapter(tf.keras.utils.Sequence): # type: ignore
"""This adapter adapts np.ndarray, a list of np.ndarray, and a dict of
np.ndarray into a tf.keras.utils.Sequence instance.
"""
def __init__(
self,
x: ArrayLike,
y: ArrayLike,
batch_size: int,
sample_weights: Optional[np.ndarray] = None,
drop_leftovers: bool = False,
):
"""
If converting numpy array data to Sequence to optimize performance, consider
using ArrayLikeAdapter.
Args:
x: Input data. It could be:
1) A Numpy array (or array-like), or a list of arrays (in case the model
has multiple inputs).
2) A dict mapping input names to the corresponding array, if the model
has named inputs.
y: Target data. Like the input data x, it could be either Numpy array(s).
batch_size: Number of samples per batch.
sample_weights: Numpy array of weights for the samples.
drop_leftovers: If True, drop the data that cannot complete the last batch. This
argument is ignored if x is a Sequence or a Dataset.
"""
self._x_length = _length_of_multi_arraylike(x)
self._y_length = _length_of_multi_arraylike(y)
check.eq(self._x_length, self._y_length, "Length of x and y do not match.")
check.check_gt_eq(self._x_length, batch_size, "Batch size is too large for the input data.")
if sample_weights is not None:
check.eq(
self._x_length,
len(sample_weights),
"Lengths of input data and sample weights do not match.",
)
self.x = x
self.y = y
self.sample_weight = sample_weights
self.batch_size = batch_size
self.drop_leftovers = drop_leftovers
def __len__(self) -> int:
# Returns number of batches (keeps last partial batch).
if self.drop_leftovers:
return math.floor(self._x_length / self.batch_size)
else:
return math.ceil(self._x_length / self.batch_size)
def __getitem__(
self, index: int
) -> Union[Tuple[ArrayLike, ArrayLike], Tuple[ArrayLike, ArrayLike, np.ndarray]]:
# Gets batch at position index.
start = index * self.batch_size
# The end is not `(index + 1) * self.batch_size` if the
# last batch is not a full `self.batch_size`
end = min((index + 1) * self.batch_size, self._x_length)
if self.sample_weight is None:
return (
_get_elements_in_multi_arraylike(self.x, start, end),
_get_elements_in_multi_arraylike(self.y, start, end),
)
else:
return (
_get_elements_in_multi_arraylike(self.x, start, end),
_get_elements_in_multi_arraylike(self.y, start, end),
self.sample_weight[start:end],
)
class _SequenceWithOffset(tf.keras.utils.Sequence): # type: ignore
def __init__(self, sequence: tf.keras.utils.Sequence, batch_offset: int = 0):
self._sequence = sequence
self._batch_offset = batch_offset
def __len__(self): # type: ignore
return len(self._sequence)
def __getitem__(self, index): # type: ignore
index = (index + self._batch_offset) % len(self)
return self._sequence[index]
class SequenceAdapter:
"""
A class to assist to optimize the performance of loading data with
``tf.keras.utils.Sequence`` and help with restoring and saving iterators for
a dataset.
"""
def __init__(
self,
data: tf.keras.utils.Sequence,
use_multiprocessing: bool = False,
workers: int = 1,
max_queue_size: int = 10,
):
"""
Multiprocessing or multithreading for native Python generators is not supported.
If you want these performance accelerations, please consider using a Sequence.
Args:
            data: A ``tf.keras.utils.Sequence`` that holds the data.
use_multiprocessing: If True, use process-based threading. If unspecified,
`use_multiprocessing` will default to False. Note that because this implementation
relies on multiprocessing, you should not pass non-picklable arguments for the
data loaders as they can't be passed easily to children processes.
workers: Maximum number of processes to spin up when using process-based threading.
If unspecified, workers will default to 1. If 0, will execute the data loading on
the main thread.
max_queue_size: Maximum size for the generator queue. If unspecified, `max_queue_size`
will default to 10.
"""
self._max_queue_size = max_queue_size
if not len(data):
raise ValueError("tf.keras.utils.Sequence objects should have a non-zero length.")
self._sequence = _SequenceWithOffset(data)
self._use_multiprocessing = use_multiprocessing
self._workers = workers
def __len__(self) -> int:
return len(self._sequence)
def start(self, batch_offset: int = 0, is_validation: bool = False) -> None:
"""
Sets a batch offset and starts the pre-processing of data.
Pre-processing of data only happens if workers >0. If the underlying data type is an
iterator, we are unable to set a batch_offset.
Args:
batch_offset: Batch number to start at.
is_validation: Whether this iterator will be used for validation. This is necessary
because `get_iterator` usually returns an infinite iterator. When `is_validation`
is True, the iterator stops at the end of the epoch.
"""
self._is_validation = is_validation
self._sequence._batch_offset = batch_offset
if self._workers > 0:
self._enqueuer = tf.keras.utils.OrderedEnqueuer(
self._sequence, use_multiprocessing=self._use_multiprocessing
)
self._enqueuer.start(workers=self._workers, max_queue_size=self._max_queue_size)
def get_iterator(self) -> Iterator:
"""
Gets an Iterator over the data.
        `start` must be called prior to calling this function.
"""
def _make_finite(iterator: Iterator, num_steps: int) -> Iterator:
for _ in range(num_steps):
yield next(iterator)
def _iter_sequence_infinite(sequence: tf.keras.utils.Sequence) -> Iterator:
while True:
yield from sequence
if self._is_validation:
if self._workers > 0:
iterator = self._enqueuer.get()
return _make_finite(iterator, len(self._sequence))
return iter(self._sequence)
if self._workers > 0:
return self._enqueuer.get() # type: ignore
return _iter_sequence_infinite(self._sequence)
def stop(self, timeout: Optional[int] = None) -> None:
"""
Stops processing the data.
If workers is >0, this will stop any related threads and processes.
Otherwise this is a no-op.
Args:
timeout: Maximum time to wait.
"""
if self._workers > 0:
self._enqueuer.stop(timeout=timeout)
InputData = Union[tf.keras.utils.Sequence, tf.data.Dataset, SequenceAdapter, tuple]
def _get_x_y_and_sample_weight(
input_data: Union[tf.keras.utils.Sequence, tf.data.Dataset, SequenceAdapter, tuple]
) -> Tuple[Any, Any, Any]:
if isinstance(input_data, (tf.keras.utils.Sequence, tf.data.Dataset, SequenceAdapter)):
return input_data, None, None
elif isinstance(input_data, tuple) and len(input_data) == 2:
return input_data[0], input_data[1], None
elif isinstance(input_data, tuple) and len(input_data) == 3:
return input_data[0], input_data[1], input_data[2]
else:
raise det.errors.InvalidDataTypeException(
type(input_data),
"input_data is invalid type. See the instruction below for details: \n"
f"{keras.TFKerasTrial.build_training_data_loader.__doc__}",
)
def _adapt_keras_data(
x: Any,
y: Any = None,
sample_weight: Optional[np.ndarray] = None,
batch_size: Optional[int] = None,
use_multiprocessing: bool = False,
workers: int = 1,
max_queue_size: int = 10,
drop_leftovers: bool = False,
) -> Union[SequenceAdapter, tf.data.Dataset]:
"""_adapt_keras_data adapts input and target data to a SequenceAdapter or leaves
it as a tf.data.Dataset.
Args:
x: Input data. It could be:
1) A Numpy array (or array-like), or a list of arrays (in case the model
has multiple inputs).
2) A dict mapping input names to the corresponding array, if the model
has named inputs.
3) A tf.data dataset. Should return a tuple of either (inputs, targets) or
(inputs, targets, sample_weights).
4) A keras.utils.Sequence returning (inputs, targets) or (inputs, targets,
sample weights).
5) a det.keras.SequenceAdapter returning (inputs, targets) or (inputs, targets,
sample weights).
y: Target data. Like the input data x, it could be either Numpy array(s).
If x is a dataset or keras.utils.Sequence instance, y should not be specified
(since targets will be obtained from x).
use_multiprocessing: If True, use process-based threading. If unspecified,
`use_multiprocessing` will default to False. Note that because this implementation
relies on multiprocessing, you should not pass non-picklable arguments for the
data loaders as they can't be passed easily to children processes. This argument is
ignored if x is a tf.data.Dataset or SequenceAdapter.
sample_weight: Optional Numpy array of weights for the training samples. This argument is
ignored if x is a tf.data.Dataset, SequenceAdapter, or tf.keras.Sequence.
batch_size: Number of samples per gradient update. This argument is ignored if x is a
tf.data.Dataset, SequenceAdapter, or tf.keras.Sequence.
workers: Maximum number of processes to spin up when using process-based threading.
If unspecified, workers will default to 1. If 0, will execute the data loading on
the main thread. This argument is ignored if x is a tf.data.Dataset or SequenceAdapter.
max_queue_size: Maximum size for the generator queue. If unspecified, `max_queue_size`
will default to 10. This argument is ignored if x is a tf.data.Dataset or
SequenceAdapter.
        drop_leftovers: If True, drop the data that cannot complete the last batch. This
            argument is ignored if x is a tf.data.Dataset, SequenceAdapter, or
            tf.keras.utils.Sequence.
"""
def check_y_is_none(y_data: Any) -> None:
if y is not None:
raise det.errors.InvalidDataTypeException(
type(y_data),
"If x is a keras.utils.Sequence or a tf.data.Dataset, "
"y should not be specified (since targets will be obtained from x)."
"See the instruction below for details: "
f"\n{keras.TFKerasTrial.build_training_data_loader.__doc__}",
)
if isinstance(x, np.ndarray) or _is_list_of_numpy_array(x) or _is_dict_of_numpy_array(x):
if not (
(isinstance(y, np.ndarray) or _is_list_of_numpy_array(y))
and isinstance(batch_size, int)
):
raise det.errors.InvalidDataTypeException(
type(y),
"If x is a numpy array or list/dict of numpy arrays, "
"y must also be a numpy array. "
"See the instruction below for details: "
f"\n{keras.TFKerasTrial.build_training_data_loader.__doc__}",
)
return SequenceAdapter(
_ArrayLikeAdapter(x, y, batch_size, sample_weight, drop_leftovers),
use_multiprocessing,
workers,
max_queue_size,
)
elif isinstance(x, tf.keras.utils.Sequence):
check_y_is_none(y)
return SequenceAdapter(x, use_multiprocessing, workers, max_queue_size)
elif isinstance(x, tf.data.Dataset):
return x
elif isinstance(x, SequenceAdapter):
check_y_is_none(y)
return x
else:
raise det.errors.InvalidDataTypeException(
type(x),
f"x is invalid type. x={x}\n"
f"See the instruction below for details: \n"
f"\n{keras.TFKerasTrial.build_training_data_loader.__doc__}",
)
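# A minimal usage sketch (not part of the original module; `x_train` and
# `y_train` stand for hypothetical NumPy arrays of equal length):
#
#   adapter = _adapt_keras_data(x_train, y_train, batch_size=32, workers=2)
#   adapter.start(batch_offset=0)
#   batch_x, batch_y = next(adapter.get_iterator())
#   adapter.stop()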
|
the-stack_106_17629
|
# Copyright 2020 Soda
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from sodasql.scan.db import sql_update
from tests.common.warehouse_fixture import WarehouseFixture
class RedshiftFixture(WarehouseFixture):
original_dialect = None
original_connection = None
def create_database(self):
self.database = self.create_unique_database_name()
self.original_connection = self.warehouse.connection
self.original_dialect = self.warehouse.dialect
self.original_connection.set_isolation_level(0)
sql_update(self.original_connection, f'CREATE DATABASE {self.database}')
self.warehouse.dialect = self.warehouse.dialect.with_database(self.database)
self.warehouse.connection = self.warehouse.dialect.create_connection()
def test_warehouse_connection(self):
assert(self.warehouse.dialect.sql_test_connection())
def drop_database(self):
try:
self.warehouse.connection.close()
except Exception as e:
logging.debug(f'Closing connection failed: {str(e)}')
sql_update(self.original_connection, f'DROP DATABASE {self.database}')
|
the-stack_106_17631
|
"""
Mastermind Game Play: Code Breaker
"""
import sys
from argparse import ArgumentParser
from collections import Counter
from itertools import product, permutations
from random import choice
from typing import List, Tuple
# variants
STANDARD = 0
NO_REPEATS = 1
# number of colors for code
BASIC = 6
SUPER = 8
# number of code pins
NORMAL = 4
# feedback
RIGHT_COLOR = 'o'
RIGHT_COLOR_AND_POSITION = '+'
def run(code, *, variant: int = STANDARD, colors: int = BASIC, pins: int = NORMAL) -> None:
def init() -> List[Tuple[int, ...]]:
possible_codes = {
STANDARD: lambda c, p: list(product(range(c), repeat=p)),
NO_REPEATS: lambda c, p: list(permutations(range(c), p)),
}[variant](colors, pins)
return possible_codes
def compare_codes(a: Tuple[int, ...], b: Tuple[int, ...]) -> str:
if len(a) != len(b):
raise ValueError('Can not compare iterables of different length.')
result = RIGHT_COLOR * sum((Counter(a) & Counter(b)).values())
for x, y in zip(a, b):
if x == y:
result = result[1:] + RIGHT_COLOR_AND_POSITION
return result
def reduce_choices() -> List[Tuple]:
return [
code
for code in remaining_codes
if compare_codes(code, guess) == feedback and code != guess
]
secret_code = code
possible_codes = init()
print(f'The secret code is one of {len(possible_codes)} possible combinations.')
rounds = 0
feedback = ''
remaining_codes = possible_codes[:]
while feedback != '+' * pins:
guess = choice(remaining_codes)
if len(guess) != pins:
print(f'Please give {pins} digits, separated by blanks.')
continue
rounds += 1
feedback = compare_codes(secret_code, guess)
remaining_codes = reduce_choices()
print(f'{rounds}: {guess} -> {feedback:4} | remaining choices: {len(remaining_codes)}')
print(f'You cracked the secret code {secret_code} with {rounds} tries.')
def parse_args():
parser = ArgumentParser(description='Break the code like a mastermind!', usage='%(prog)s [options]')
parser.add_argument('digits', type=int, nargs='+', help='your code digits')
parser.add_argument('--colors', dest='num_colors', default=6, type=int,
help='set the number of different colors (default = 6)')
parser.add_argument('--pins', dest='num_pins', default=4, type=int,
help='set the number of code pins (default = 4)')
parser.add_argument('--no_repeats', action='store_true', help='do not repeat colors in code')
return parser.parse_args()
def check_secret_code(args):
digits = args.digits
pins = args.num_pins
if len(digits) != args.num_pins:
print(f'Bad code: {digits}.')
print(f'Please give {pins} digits, separated by blanks.')
sys.exit(4)
for d in digits:
if not 0 <= d < 10:
print(f'Bad color: {d}.')
print(f'All color codes must be single digits 0 <= d < 10.')
sys.exit(4)
return args
if __name__ == '__main__':
args = check_secret_code(parse_args())
run(
args.digits,
colors=args.num_colors,
pins=args.num_pins,
variant=NO_REPEATS if args.no_repeats else STANDARD
)
# last line of code
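# Example invocation (a sketch; the filename mastermind.py is an assumption):
#   python mastermind.py 1 2 3 4 --colors 6 --pins 4
# The solver repeatedly guesses one of the remaining candidate codes, scores it
# against the secret code, and prunes the candidates until the feedback is '++++'.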
|
the-stack_106_17632
|
# -*- coding: utf-8 -*-
"""
ppstore.dbconnection
~~~~~~~~~~~~~~~~~
(Deprecated) A module containing a class to connect to database, query it, request
updates and commit to database.
:author: Muzammil Abdul Rehman
:copyright: Northeastern University © 2018.
:license: Custom BSD, see LICENSE for more details.
:email: [email protected]
"""
import configs.system
import MySQLdb
import traceback
from ConfigParser import SafeConfigParser
# Notes: Please add SSL to this connection.
##############################################################################
# A basic class to handle database connections, queries, inserts, updates,
# commits, rollbacks, etc
##############################################################################
class DatabaseConnection(object):
def __init__(self):
config_parser = SafeConfigParser()
config_parser.read(configs.system.CONFIG_FILE_PATH)
if configs.system.HOSTED_LOCALLY:
section = "Local"
else:
section = "Remote"
username = config_parser.get(section, 'username')
password = config_parser.get(section, 'password')
hostname = config_parser.get(section, 'hostname')
database = config_parser.get(section, 'database')
self.__db_con = MySQLdb.connect(host=hostname,
user=username,
passwd=password,
db=database)
self.cursor = None
# self.cursor = self.__db_con.cursor()
def query(self, query, params):
self.new_cursor()
self.cursor.query(query, params)
def execute(self, query, params, commit_to_db):
try:
self.new_cursor()
self.cursor.execute(query, params)
if commit_to_db:
self.__db_con.commit()
self.close_cursor()
return True
except:
traceback.print_exc()
self.rollback()
return False
def commit(self):
self.__db_con.commit()
def rollback(self):
try:
self.__db_con.rollback()
except:
traceback.print_exc()
def new_cursor(self):
self.close_cursor()
self.cursor = self.__db_con.cursor()
def close_cursor(self):
if self.cursor is not None:
self.cursor.close()
self.cursor = None
def close_connection(self):
self.close_cursor()
self.__db_con.close()
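# A minimal usage sketch (not part of the original module; the table and column
# names are hypothetical and a valid config file plus database are assumed):
#
#   db = DatabaseConnection()
#   db.execute("INSERT INTO apps (name) VALUES (%s)", ("demo",), commit_to_db=True)
#   db.close_connection()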
|
the-stack_106_17633
|
# -*- coding: utf-8 -*-
from numpy import cos as npCos
from numpy import exp as npExp
from numpy import pi as npPi
from numpy import sqrt as npSqrt
from pandas_ta.utils import get_offset, verify_series
def ssf(close, length=None, poles=None, offset=None, **kwargs):
"""Indicator: Ehler's Super Smoother Filter (SSF)"""
# Validate Arguments
length = int(length) if length and length > 0 else 10
poles = int(poles) if poles in [2, 3] else 2
close = verify_series(close, length)
offset = get_offset(offset)
if close is None: return
# Calculate Result
m = close.size
ssf = close.copy()
if poles == 3:
x = npPi / length # x = PI / n
a0 = npExp(-x) # e^(-x)
b0 = 2 * a0 * npCos(npSqrt(3) * x) # 2e^(-x)*cos(3^(.5) * x)
c0 = a0 * a0 # e^(-2x)
c4 = c0 * c0 # e^(-4x)
c3 = -c0 * (1 + b0) # -e^(-2x) * (1 + 2e^(-x)*cos(3^(.5) * x))
c2 = c0 + b0 # e^(-2x) + 2e^(-x)*cos(3^(.5) * x)
c1 = 1 - c2 - c3 - c4
for i in range(0, m):
ssf.iloc[i] = c1 * close.iloc[i] + c2 * ssf.iloc[i - 1] + c3 * ssf.iloc[i - 2] + c4 * ssf.iloc[i - 3]
else: # poles == 2
x = npPi * npSqrt(2) / length # x = PI * 2^(.5) / n
a0 = npExp(-x) # e^(-x)
a1 = -a0 * a0 # -e^(-2x)
b1 = 2 * a0 * npCos(x) # 2e^(-x)*cos(x)
c1 = 1 - a1 - b1 # e^(-2x) - 2e^(-x)*cos(x) + 1
for i in range(0, m):
ssf.iloc[i] = c1 * close.iloc[i] + b1 * ssf.iloc[i - 1] + a1 * ssf.iloc[i - 2]
# Offset
if offset != 0:
ssf = ssf.shift(offset)
# Name & Category
ssf.name = f"SSF_{length}_{poles}"
ssf.category = "overlap"
return ssf
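# A minimal usage sketch (not part of the original module; `close_series` is a
# hypothetical pandas Series of closing prices, at least `length` values long):
#
#   import pandas as pd
#   close_series = pd.Series([10.0, 10.4, 10.1, 10.7, 11.0, 11.2,
#                             11.1, 11.5, 11.8, 12.0, 12.3, 12.1])
#   smoothed = ssf(close_series, length=10, poles=2)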
ssf.__doc__ = \
"""Ehler's Super Smoother Filter (SSF) © 2013
John F. Ehlers's solution to reduce lag and remove aliasing noise with his
research in aerospace analog filter design. This indicator comes with two
versions determined by the keyword poles. By default, it uses two poles but
there is an option for three poles. Since SSF is a (Resursive) Digital Filter,
the number of poles determine how many prior recursive SSF bars to include in
the design of the filter. So two poles uses two prior SSF bars and three poles
uses three prior SSF bars for their filter calculations.
Sources:
http://www.stockspotter.com/files/PredictiveIndicators.pdf
https://www.tradingview.com/script/VdJy0yBJ-Ehlers-Super-Smoother-Filter/
https://www.mql5.com/en/code/588
https://www.mql5.com/en/code/589
Calculation:
Default Inputs:
length=10, poles=[2, 3]
See the source code or Sources listed above.
Args:
close (pd.Series): Series of 'close's
length (int): It's period. Default: 10
poles (int): The number of poles to use, either 2 or 3. Default: 2
offset (int): How many periods to offset the result. Default: 0
Kwargs:
fillna (value, optional): pd.DataFrame.fillna(value)
fill_method (value, optional): Type of fill method
Returns:
pd.Series: New feature generated.
"""
|
the-stack_106_17634
|
# python3
from gooey import *
import synbiopython
import synbiopython.genbabel as stdgen
# imput parameters
@Gooey(required_cols=2, program_name='genbank to sbol', header_bg_color= '#DCDCDC', terminal_font_color= '#DCDCDC', terminal_panel_color= '#DCDCDC')
def main():
ap = GooeyParser()
ap.add_argument("-gb", "--genbank", required=True, widget='FileChooser', help="input genbank file")
ap.add_argument("-sbol", "--sbol", required=True, widget='FileSaver', help="outpul sbol file")
args = vars(ap.parse_args())
# main
stdconv = stdgen.GenSBOLconv()
uri_Prefix_igb = 'http://synbiohub.org/public/igem'
stdconv.run_sbolvalidator(args['genbank'],'SBOL2', uri_Prefix_igb, outputfile = args['sbol'])
if __name__ == '__main__':
main()
|
the-stack_106_17635
|
import os
import os.path as osp
import numpy as np
import pickle
from PIL import Image
import glob
import yaml
import torch
from torch.utils.data import Dataset
from torch.utils.data.dataloader import DataLoader
import open3d as o3d
import xmuda.data.semantic_kitti.io_data as SemanticKittiIO
from xmuda.data.semantic_kitti import splits
# prevent "RuntimeError: received 0 items of ancdata"
torch.multiprocessing.set_sharing_strategy('file_system')
class DummyDataset(Dataset):
"""Use torch dataloader for multiprocessing"""
def __init__(self, root_dir, scenes):
self.root_dir = root_dir
self.data = []
self.glob_frames(scenes)
yaml_path, _ = os.path.split(os.path.realpath(__file__))
self.dataset_config = yaml.safe_load(
open(os.path.join(yaml_path, 'semantic-kitti.yaml'), 'r'))
self.nbr_classes = self.dataset_config['nbr_classes']
self.grid_dimensions = self.dataset_config['grid_dims'] # [W, H, D]
self.remap_lut = self.get_remap_lut()
# self.VOXEL_DIMS = (256, 256, 32)
# self.img_size = (610, 185)
# self.downsample = 2
# self.img_grid = self._create_img_grid()
# self.scene_lower_bound = np.array([0, -25.6, -2]).reshape(1, -1)
# self.output_voxel_size = 0.2
# self.num_voxelized_depth_classes = num_voxelized_depth_classes
# self.depth_voxel_size = 51.2 / self.num_voxelized_depth_classes
def get_remap_lut(self):
'''
remap_lut to remap classes of semantic kitti for training...
:return:
'''
# make lookup table for mapping
maxkey = max(self.dataset_config['learning_map'].keys())
# +100 hack making lut bigger just in case there are unknown labels
remap_lut = np.zeros((maxkey + 100), dtype=np.int32)
remap_lut[list(self.dataset_config['learning_map'].keys())] = list(
self.dataset_config['learning_map'].values())
# in completion we have to distinguish empty and invalid voxels.
# Important: For voxels 0 corresponds to "empty" and not "unlabeled".
remap_lut[remap_lut == 0] = 255 # map 0 to 'invalid'
remap_lut[0] = 0 # only 'empty' stays 'empty'.
return remap_lut
def glob_frames(self, scenes):
for scene in scenes:
# glob_path = osp.join(self.root_dir, 'dataset', 'sequences', scene, 'image_2', '*.png')
# cam_paths = sorted(glob.glob(glob_path))
glob_path = osp.join(self.root_dir, 'dataset',
'sequences', scene, 'voxels', '*.label')
voxel_paths = sorted(glob.glob(glob_path))
# load calibration
calib = self.read_calib(
osp.join(self.root_dir, 'dataset', 'sequences', scene, 'calib.txt'))
P = calib['P2']
T_velo_2_cam = calib['Tr']
proj_matrix = P @ T_velo_2_cam
proj_matrix = proj_matrix.astype(np.float32)
# print(proj_matrix)
K_intrinsic = P[0:3, 0:3]
for voxel_path in voxel_paths:
basename = osp.basename(voxel_path)
frame_id = osp.splitext(basename)[0]
assert frame_id.isdigit()
data = {
'scene': scene,
'frame_id': frame_id,
'camera_path': osp.join(self.root_dir, 'dataset', 'sequences', scene, 'image_2',
frame_id + '.png'),
'edge_path': osp.join(self.root_dir, 'dataset', 'sequences', scene, 'image_2',
frame_id + '_edge.png'),
'lidar_path': osp.join(self.root_dir, 'dataset', 'sequences', scene, 'velodyne',
frame_id + '.bin'),
'label_path': osp.join(self.root_dir, 'dataset', 'sequences', scene, 'labels',
frame_id + '.label'),
'voxel_label_path_1_1': osp.join(self.root_dir, 'dataset', 'sequences', scene, 'voxels',
frame_id + '.label'),
'voxel_label_path_1_4': osp.join(self.root_dir, 'dataset', 'sequences', scene, 'voxels',
frame_id + '.label_1_4'),
'voxel_label_path_1_16': osp.join(self.root_dir, 'dataset', 'sequences', scene, 'voxels',
frame_id + '.label_1_16'),
'voxel_invalid_path_1_1': osp.join(self.root_dir, 'dataset', 'sequences', scene, 'voxels',
frame_id + '.invalid'),
'voxel_invalid_path_1_4': osp.join(self.root_dir, 'dataset', 'sequences', scene, 'voxels',
frame_id + '.invalid_1_4'),
'voxel_invalid_path_1_16': osp.join(self.root_dir, 'dataset', 'sequences', scene, 'voxels',
frame_id + '.invalid_1_16'),
'voxel_occupancy': osp.join(self.root_dir, 'dataset', 'sequences', scene, 'voxels',
frame_id + '.bin'),
'proj_matrix': proj_matrix,
'K_intrinsic': K_intrinsic,
'T_velo_2_cam': T_velo_2_cam
}
for k, v in data.items():
if isinstance(v, str) and k != "scene" and k != "frame_id":
if not osp.exists(v):
raise IOError('File not found {}'.format(v))
self.data.append(data)
@staticmethod
def read_calib(calib_path):
"""
:param calib_path: Path to a calibration text file.
:return: dict with calibration matrices.
"""
calib_all = {}
with open(calib_path, 'r') as f:
for line in f.readlines():
if line == '\n':
break
key, value = line.split(':', 1)
calib_all[key] = np.array([float(x) for x in value.split()])
# reshape matrices
calib_out = {}
# 3x4 projection matrix for left camera
calib_out['P2'] = calib_all['P2'].reshape(3, 4)
calib_out['Tr'] = np.identity(4) # 4x4 matrix
calib_out['Tr'][:3, :4] = calib_all['Tr'].reshape(3, 4)
return calib_out
@staticmethod
def select_points_in_frustum(points_2d, x1, y1, x2, y2):
"""
Select points in a 2D frustum parametrized by x1, y1, x2, y2 in image coordinates
:param points_2d: point cloud projected into 2D
:param points_3d: point cloud
:param x1: left bound
:param y1: upper bound
:param x2: right bound
:param y2: lower bound
:return: points (2D and 3D) that are in the frustum
"""
keep_ind = (points_2d[:, 0] > x1) * \
(points_2d[:, 1] > y1) * \
(points_2d[:, 0] < x2) * \
(points_2d[:, 1] < y2)
return keep_ind
def get_label_at_scale(self, scale, idx):
scale_divide = int(scale[-1])
INVALID = SemanticKittiIO._read_invalid_SemKITTI(
self.data[idx]["voxel_invalid_path_" + scale])
LABEL = SemanticKittiIO._read_label_SemKITTI(
self.data[idx]["voxel_label_path_" + scale])
if scale == '1_1':
LABEL = self.remap_lut[LABEL.astype(np.uint16)].astype(
np.float32) # Remap 20 classes semanticKITTI SSC
# unique, counts = np.unique(LABEL, return_counts=True)
# Setting to unknown all voxels marked on invalid mask...
LABEL[np.isclose(INVALID, 1)] = 255
LABEL = np.moveaxis(LABEL.reshape([int(self.grid_dimensions[0] / scale_divide),
int(self.grid_dimensions[2] /
scale_divide),
int(self.grid_dimensions[1] / scale_divide)]), [0, 1, 2], [0, 2, 1])
# LABEL = LABEL.reshape(self.VOXEL_DIMS)
return LABEL
def save_points(path, xyz, colors=None):
"""
xyz: nx3
"""
# print(xyz)
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(xyz)
if colors is not None:
pcd.colors = o3d.utility.Vector3dVector(colors / 255.0)
# print(xyz.shape, colors.shape)
o3d.io.write_point_cloud(path, pcd)
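    # Debugging sketch (assumes open3d is available; the path below is made up):
    #   DummyDataset.save_points('/tmp/frustum.ply', points_3d,
    #                            colors=np.full_like(points_3d, 128))
    # writes the filtered cloud to disk so it can be inspected in a viewer.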
def __getitem__(self, index):
data_dict = self.data[index].copy()
scan = np.fromfile(data_dict['lidar_path'], dtype=np.float32)
scan = scan.reshape((-1, 4))
points = scan[:, :3]
label = np.fromfile(data_dict['label_path'], dtype=np.uint32)
label = label.reshape((-1))
label = label & 0xFFFF # get lower half for semantics
# Keep points inside the completion area
# min extent: [0, -25.6, -2]
# max extent: [51.2, 25.6, 4.4]
# voxel size: 0.2
keep_idx = (points[:, 0] < 51.2) * \
(points[:, 1] < 25.6) * (points[:, 1] > -25.6) * \
(points[:, 2] < 4.4) * (points[:, 2] > -2)
points = points[keep_idx, :]
label = label[keep_idx]
# load image
# image = Image.open(data_dict['camera_path'])
# image_size = image.size
image_size = (1220, 370)
keep_idx = points[:, 0] > 0
points_3d = points[keep_idx]
label_3d = label[keep_idx].astype(np.int16)
label_3d = self.remap_lut[label_3d].astype(np.float32)
# print(points_3d.min(0))
# Extract points_2d by projecting points into image
points_hcoords = np.concatenate(
[points_3d, np.ones([keep_idx.sum(), 1], dtype=np.float32)], axis=1)
# T_velo_2_cam = data_dict['T_velo_2_cam']
# points_3d_cam = (T_velo_2_cam @ points_hcoords.T).T
img_points = (data_dict['proj_matrix'] @ points_hcoords.T).T
depth = img_points[:, 2]
# print(np.sum(depth < 0) / np.sum(depth))
img_points = img_points[:, :2] / \
np.expand_dims(depth, axis=1) # scale 2D points
keep_idx_img_pts = self.select_points_in_frustum(
img_points, 0, 0, *image_size)
keep_idx[keep_idx] = keep_idx_img_pts
# fliplr so that indexing is row, col and not col, row
img_points = np.fliplr(img_points)
points_2d = points[keep_idx]
label_2d = label[keep_idx].astype(np.int16)
label_2d = self.remap_lut[label_2d].astype(np.float32)
data_dict['seg_label_3d'] = label_3d
data_dict['seg_label_2d'] = label_2d
# points 3d are points in front of the vehicle
data_dict['points_3d'] = points_3d
        # points_2d are the points that project inside the camera frustum (visible in the image)
data_dict['points_2d'] = points_2d
points_img = img_points[keep_idx_img_pts]
data_dict['points_img'] = points_img
# print("dataset", points_img.shape)
data_dict['image_size'] = np.array(image_size)
data_dict['ssc_label_1_1'] = self.get_label_at_scale('1_1', index)
data_dict['ssc_label_1_4'] = self.get_label_at_scale('1_4', index)
data_dict['ssc_label_1_16'] = self.get_label_at_scale('1_16', index)
OCCUPANCY = SemanticKittiIO._read_occupancy_SemKITTI(
data_dict['voxel_occupancy'])
OCCUPANCY = np.moveaxis(OCCUPANCY.reshape([self.grid_dimensions[0],
self.grid_dimensions[2],
self.grid_dimensions[1]]), [0, 1, 2], [0, 2, 1])
data_dict['voxel_occupancy'] = OCCUPANCY
return data_dict
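    # The returned data_dict extends the per-frame metadata with:
    #   seg_label_3d / seg_label_2d   remapped point-wise labels
    #   points_3d                     points in front of the vehicle
    #   points_2d                     points inside the camera frustum
    #   points_img                    (row, col) pixel coordinates of those points
    #   image_size, ssc_label_1_1 / 1_4 / 1_16, voxel_occupancy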
def __len__(self):
return len(self.data)
def preprocess(scene, root_dir, out_dir):
pkl_data = []
#split = getattr(splits, split_name)
scenes = [scene]
dataloader = DataLoader(DummyDataset(
root_dir, scenes), num_workers=10)
num_skips = 0
for i, data_dict in enumerate(dataloader):
# data error leads to returning empty dict
if not data_dict:
print('empty dict, continue')
num_skips += 1
continue
for k, v in data_dict.items():
data_dict[k] = v[0]
# print(data_dict['scene'])
print('{}/{} {}'.format(i, len(dataloader), data_dict['lidar_path']))
# convert to relative path
# lidar_path = data_dict['lidar_path'].replace(root_dir + '/', '')
cam_path = data_dict['camera_path'].replace(root_dir + '/', '')
edge_path = data_dict['edge_path'].replace(root_dir + '/', '')
# print(data_dict['voxel_indices'].shape)
# append data
out_dict = {
'scene': data_dict['scene'],
'frame_id': data_dict['frame_id'],
# 'points_2d': data_dict['points_2d'].numpy(),
#'points_3d': data_dict['points_3d'].numpy(),
# 'seg_label_3d': data_dict['seg_label_3d'].numpy(),
# 'seg_label_2d': data_dict['seg_label_2d'].numpy(),
# row, col format, shape: (num_points, 2)
# 'points_img': data_dict['points_img'].numpy(),
# 'lidar_path': lidar_path,
'camera_path': cam_path,
# 'edge_path': edge_path,
'image_size': tuple(data_dict['image_size'].numpy()),
# 'ssc_label_1_1': data_dict['ssc_label_1_1'].numpy(),
#'ssc_label_1_2': data_dict['ssc_label_1_2'].numpy(),
'proj_matrix': data_dict['proj_matrix'].numpy(),
'K_intrinsic': data_dict['K_intrinsic'].numpy(),
            'T_velo_2_cam': data_dict['T_velo_2_cam'].numpy(),
'ssc_label_1_4': data_dict['ssc_label_1_4'].numpy(),
'ssc_label_1_16': data_dict['ssc_label_1_16'].numpy(),
# 'voxel_occupancy': data_dict['voxel_occupancy'].numpy()
}
# pkl_data.append(out_dict)
# print('Skipped {} files'.format(num_skips))
# save to pickle file
save_dir = osp.join(out_dir, 'preprocess', data_dict['scene'])
os.makedirs(save_dir, exist_ok=True)
save_path = osp.join(save_dir, '{}.pkl'.format(data_dict['frame_id']))
with open(save_path, 'wb') as f:
pickle.dump(out_dict, f, pickle.HIGHEST_PROTOCOL)
print('Wrote preprocessed data to ' + save_path)
if __name__ == '__main__':
# root_dir = '/datasets_master/semantic_kitti'
# out_dir = '/datasets_local/datasets_acao/semantic_kitti_preprocess'
root_dir = '/gpfswork/rech/xqt/uyl37fq/data/semantic_kitti'
out_dir = "/gpfsscratch/rech/xqt/uyl37fq/kitti_preprocess"
scenes = getattr(splits, "train")
scenes += getattr(splits, "val")
scenes += getattr(splits, "test")
for scene in scenes:
preprocess(scene, root_dir, out_dir)
# compute_2d_depths_to_3d_voxel_indices(
# root_dir, out_dir, num_voxelized_depth_classes=num_voxelized_depth_classes)
|
the-stack_106_17636
|
import numpy as np
import random, os
import argparse
import copy
import tensorflow as tf
#from datetime import datetime
from Load_Controllers import Load_BBB, Load_Demonstrator
from tqdm import tqdm
from multiple_tasks import get_task_on_MUJOCO_environment
from Dataset import getDemonstrationsFromTask
import _pickle as pickle
import sys
sys.path.insert(0, './../')
from Housekeeping import *
from BBBNNRegression import BBBNNRegression
from Detector import Detector
def train_BBB(data_x, data_y, configuration_identity, epochs, number_mini_batches, activation_unit, learning_rate, hidden_units, number_samples_variance_reduction, precision_alpha, weights_prior_mean_1, weights_prior_mean_2, weights_prior_deviation_1, weights_prior_deviation_2, mixture_pie, rho_mean, extra_likelihood_emphasis):
directory_to_save_tensorboard_data = configuration_identity + TENSORBOARD_DIRECTORY
saved_models_during_iterations_bbb = configuration_identity + SAVED_MODELS_DURING_ITERATIONS_DIRECTORY
saved_final_model_bbb = configuration_identity + SAVED_FINAL_MODEL_DIRECTORY
if not os.path.exists(directory_to_save_tensorboard_data):
os.makedirs(directory_to_save_tensorboard_data)
if not os.path.exists(saved_models_during_iterations_bbb):
os.makedirs(saved_models_during_iterations_bbb)
if not os.path.exists(saved_final_model_bbb):
os.makedirs(saved_final_model_bbb)
controller_graph = tf.Graph()
with controller_graph.as_default():
BBB_Regressor=BBBNNRegression(number_mini_batches=number_mini_batches, number_features=data_x.shape[1], number_output_units=data_y.shape[1], activation_unit=activation_unit, learning_rate=learning_rate,
hidden_units=hidden_units, number_samples_variance_reduction=number_samples_variance_reduction, precision_alpha=precision_alpha,
weights_prior_mean_1=weights_prior_mean_1, weights_prior_mean_2=weights_prior_mean_2, weights_prior_deviation_1=weights_prior_deviation_1,
weights_prior_deviation_2=weights_prior_deviation_2, mixture_pie=mixture_pie, rho_mean=rho_mean, extra_likelihood_emphasis=extra_likelihood_emphasis)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
sess.run(tf.global_variables_initializer())
writer = tf.summary.FileWriter(directory_to_save_tensorboard_data, sess.graph)
saver = tf.train.Saver(max_to_keep=3, keep_checkpoint_every_n_hours=2)
previous_minimum_loss = sys.float_info.max
mini_batch_size = int(data_x.shape[0]/number_mini_batches)
for epoch_iterator in tqdm(range(epochs)):
data_x, data_y = randomize(data_x, data_y)
ptr = 0
for mini_batch_iterator in range(number_mini_batches):
x_batch = data_x[ptr:ptr+mini_batch_size, :]
y_batch = data_y[ptr:ptr+mini_batch_size, :]
_, loss, summary = sess.run([BBB_Regressor.train(), BBB_Regressor.getMeanSquaredError(), BBB_Regressor.summarize()], feed_dict={BBB_Regressor.X_input:x_batch, BBB_Regressor.Y_input:y_batch})
sess.run(BBB_Regressor.update_mini_batch_index())
if loss < previous_minimum_loss:
saver.save(sess, saved_models_during_iterations_bbb + 'iteration', global_step=epoch_iterator, write_meta_graph=False)
previous_minimum_loss = loss
ptr += mini_batch_size
writer.add_summary(summary, global_step=tf.train.global_step(sess, BBB_Regressor.global_step))
#if epoch_iterator % 2 == 0:
# print(BLUE('Training progress: ' + str(epoch_iterator) + '/' + str(epochs)))
writer.close()
saver.save(sess, saved_final_model_bbb + 'final', write_state=False)
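# Minimal usage sketch for train_BBB (comments only; the toy shapes, epochs and
# number_mini_batches below are made up, while the remaining hyper-parameters
# mirror the values used in __main__):
#   x = np.random.randn(200, 8).astype(np.float32)
#   y = np.random.randn(200, 2).astype(np.float32)
#   train_BBB(data_x=x, data_y=y, configuration_identity='logs/demo/',
#             epochs=100, number_mini_batches=10, activation_unit='RELU',
#             learning_rate=0.001, hidden_units=[90, 30, 10],
#             number_samples_variance_reduction=25, precision_alpha=0.01,
#             weights_prior_mean_1=0., weights_prior_mean_2=0.,
#             weights_prior_deviation_1=0.4, weights_prior_deviation_2=0.4,
#             mixture_pie=0.7, rho_mean=-3., extra_likelihood_emphasis=1e16)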
def validate_BBB(domain_name, controller_identity, configuration_identity):
#tf.reset_default_graph()
copycat_graph = tf.Graph()
with copycat_graph.as_default():
copycat_controller = Load_BBB(controller_identity=controller_identity)
file_to_save_logs = configuration_identity + 'validation_logs.pkl'
logs_for_all_tasks = {}
for task_to_validate in ALL_MUJOCO_TASK_IDENTITIES:
logs_for_a_task = {}
demonstrator_graph = tf.Graph()
with demonstrator_graph.as_default():
demonstrator_controller = Load_Demonstrator(domain_name=domain_name, task_identity=str(task_to_validate))
for validation_trial in range(NUMBER_VALIDATION_TRIALS):
all_observations = []
all_behavior_control_means = []
all_behavior_control_deviations = []
all_behavior_rewards = []
all_demonstrator_controls = []
#all_target_control_means, all_target_control_deviations = [], []
env = get_task_on_MUJOCO_environment(env_name=domain_name, task_identity=str(task_to_validate))
total_cost = total_variance = 0.
observation = env.reset()
finish = False
time_step = 0.0
observation = np.append(observation, time_step) # add time step feature
moving_window_x = np.zeros((1, copycat_controller.moving_windows_x_size))
moving_window_x[0, -observation.shape[0]:] = observation
behavior_mean_control, behavior_dev_control, maximum_this_time_step, minimum_this_time_step = copycat_controller.sess.run([copycat_controller.mean_of_predictions, copycat_controller.deviation_of_predictions, copycat_controller.maximum_of_predictions, copycat_controller.minimum_of_predictions], feed_dict={copycat_controller.x_input:NORMALIZE(copy.deepcopy(moving_window_x), copycat_controller.mean_x, copycat_controller.deviation_x)})
behavior_mean_control = REVERSE_NORMALIZE(behavior_mean_control, copycat_controller.mean_y, copycat_controller.deviation_y)
maximum_this_time_step = REVERSE_NORMALIZE(maximum_this_time_step, copycat_controller.mean_y, copycat_controller.deviation_y)
minimum_this_time_step = REVERSE_NORMALIZE(minimum_this_time_step, copycat_controller.mean_y, copycat_controller.deviation_y)
behavior_dev_control = behavior_dev_control * copycat_controller.deviation_y
demonstrator_control = demonstrator_controller.sess.run(demonstrator_controller.output_action_node, feed_dict={demonstrator_controller.scaled_observation_node: (observation.reshape(1,-1) - demonstrator_controller.offset) * demonstrator_controller.scale})
while not finish:
all_observations.append(observation)
all_behavior_control_means.append(behavior_mean_control)
all_behavior_control_deviations.append(behavior_dev_control)
all_demonstrator_controls.append(demonstrator_control)
#all_target_control_means.append(target_mean_control)
#all_target_control_deviations.append(target_var_control)
observation, reward, finish, info = env.step(behavior_mean_control)
all_behavior_rewards.append(reward)
time_step += 1e-3
observation = np.append(observation, time_step)
#target_mean_control, target_var_control = -1. * np.dot(K, observation), np.array([[0.]])
if not copycat_controller.window_size == 1:
moving_window_x[0, :-copycat_controller.drift_per_time_step] = moving_window_x[0, copycat_controller.drift_per_time_step:]
moving_window_x[0, -copycat_controller.drift_per_time_step:-(copycat_controller.drift_per_time_step-behavior_mean_control.shape[1])] = behavior_mean_control[0]
moving_window_x[0, -(copycat_controller.drift_per_time_step-behavior_mean_control.shape[1])] = reward
moving_window_x[0, -observation.shape[0]:] = observation
behavior_mean_control, behavior_dev_control, maximum_this_time_step, minimum_this_time_step = copycat_controller.sess.run([copycat_controller.mean_of_predictions, copycat_controller.deviation_of_predictions, copycat_controller.maximum_of_predictions, copycat_controller.minimum_of_predictions], feed_dict={copycat_controller.x_input:NORMALIZE(copy.deepcopy(moving_window_x), copycat_controller.mean_x, copycat_controller.deviation_x)})
behavior_mean_control = REVERSE_NORMALIZE(behavior_mean_control, copycat_controller.mean_y, copycat_controller.deviation_y)
maximum_this_time_step = REVERSE_NORMALIZE(maximum_this_time_step, copycat_controller.mean_y, copycat_controller.deviation_y)
minimum_this_time_step = REVERSE_NORMALIZE(minimum_this_time_step, copycat_controller.mean_y, copycat_controller.deviation_y)
behavior_dev_control = behavior_dev_control * copycat_controller.deviation_y
demonstrator_control = demonstrator_controller.sess.run(demonstrator_controller.output_action_node, feed_dict={demonstrator_controller.scaled_observation_node: (observation.reshape(1,-1) - demonstrator_controller.offset) * demonstrator_controller.scale})
all_observations = np.array(all_observations)
all_behavior_control_means = np.concatenate(all_behavior_control_means, axis=0)
all_behavior_rewards = np.array(all_behavior_rewards)
all_behavior_control_deviations = np.concatenate(all_behavior_control_deviations, axis=0)
all_demonstrator_controls = np.array(all_demonstrator_controls)
logs_for_a_task[str(validation_trial)] = {OBSERVATIONS_LOG_KEY: all_observations, BEHAVIORAL_CONTROL_MEANS_LOG_KEY: all_behavior_control_means,
BEHAVIORAL_CONTROL_REWARDS_LOG_KEY: all_behavior_rewards, BEHAVIORAL_CONTROL_DEVIATIONS_LOG_KEY: all_behavior_control_deviations,
TARGET_CONTROL_MEANS_LOG_KEY: all_demonstrator_controls}
logs_for_all_tasks[str(task_to_validate)] = logs_for_a_task
with open(file_to_save_logs, 'wb') as f:
pickle.dump(logs_for_all_tasks, f, protocol=-1)
def run_on_itself(domain_name, task_identities, controller_identity, detector=None):
#tf.reset_default_graph()
copycat_graph = tf.Graph()
with copycat_graph.as_default():
copycat_controller = Load_BBB(controller_identity=controller_identity)
stats = {}
behavior_deviations_across_tasks = []
for task_identity in task_identities:
#isSafe = True
all_behavior_deviation = []
env = get_task_on_MUJOCO_environment(env_name=domain_name, task_identity=str(task_identity))
total_cost = total_variance = 0.
observation = env.reset()
finish = False
time_step = 0.0
observation = np.append(observation, time_step) # add time step feature
moving_window_x = np.zeros((1, copycat_controller.moving_windows_x_size))
moving_window_x[0, -observation.shape[0]:] = observation
behavior_mean_control, behavior_dev_control, maximum_this_time_step, minimum_this_time_step = copycat_controller.sess.run([copycat_controller.mean_of_predictions, copycat_controller.deviation_of_predictions, copycat_controller.maximum_of_predictions, copycat_controller.minimum_of_predictions], feed_dict={copycat_controller.x_input:NORMALIZE(copy.deepcopy(moving_window_x), copycat_controller.mean_x, copycat_controller.deviation_x)})
behavior_mean_control = REVERSE_NORMALIZE(behavior_mean_control, copycat_controller.mean_y, copycat_controller.deviation_y)
maximum_this_time_step = REVERSE_NORMALIZE(maximum_this_time_step, copycat_controller.mean_y, copycat_controller.deviation_y)
minimum_this_time_step = REVERSE_NORMALIZE(minimum_this_time_step, copycat_controller.mean_y, copycat_controller.deviation_y)
behavior_dev_control = behavior_dev_control * copycat_controller.deviation_y
while not finish:
all_behavior_deviation.append(behavior_dev_control)
            if detector is not None:
isSafe, stats = detector.isSafeToContinue(behavior_dev_control)
if not str_to_bool(isSafe):
return 'False', np.mean(all_behavior_deviation), stats
observation, reward, finish, info = env.step(behavior_mean_control)
time_step += 1e-3
observation = np.append(observation, time_step)
#target_mean_control, target_var_control = -1. * np.dot(K, observation), np.array([[0.]])
if not copycat_controller.window_size == 1:
moving_window_x[0, :-copycat_controller.drift_per_time_step] = moving_window_x[0, copycat_controller.drift_per_time_step:]
moving_window_x[0, -copycat_controller.drift_per_time_step:-(copycat_controller.drift_per_time_step-behavior_mean_control.shape[1])] = behavior_mean_control[0]
moving_window_x[0, -(copycat_controller.drift_per_time_step-behavior_mean_control.shape[1])] = reward
moving_window_x[0, -observation.shape[0]:] = observation
behavior_mean_control, behavior_dev_control, maximum_this_time_step, minimum_this_time_step = copycat_controller.sess.run([copycat_controller.mean_of_predictions, copycat_controller.deviation_of_predictions, copycat_controller.maximum_of_predictions, copycat_controller.minimum_of_predictions], feed_dict={copycat_controller.x_input:NORMALIZE(copy.deepcopy(moving_window_x), copycat_controller.mean_x, copycat_controller.deviation_x)})
behavior_mean_control = REVERSE_NORMALIZE(behavior_mean_control, copycat_controller.mean_y, copycat_controller.deviation_y)
maximum_this_time_step = REVERSE_NORMALIZE(maximum_this_time_step, copycat_controller.mean_y, copycat_controller.deviation_y)
minimum_this_time_step = REVERSE_NORMALIZE(minimum_this_time_step, copycat_controller.mean_y, copycat_controller.deviation_y)
behavior_dev_control = behavior_dev_control * copycat_controller.deviation_y
behavior_deviations_across_tasks.append(np.mean(all_behavior_deviation))
return 'True', np.max(behavior_deviations_across_tasks), stats
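# Usage sketch (comments only): without a detector, run_on_itself returns
# ('True', <max of the per-task mean predictive deviations>, {}) and is used to
# ground the detector threshold; with a detector it may return early with
# ('False', ...) as soon as Detector.isSafeToContinue flags the rollout, e.g.
#   did_succeed, avg_uncertainty, stats = run_on_itself(
#       domain_name, [current_task_identity], current_controllers_identity, detector)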
def data_efficient_imitation_across_multiple_tasks(experiment_type, controller, domain_name, window_size, number_demonstrations, adapt_detector_threshold, start_monitoring_at_time_step, detector_c, detector_m, initial_detector_threshold, epochs, number_mini_batches, activation_unit, learning_rate, hidden_units, number_samples_variance_reduction, precision_alpha, weights_prior_mean_1, weights_prior_mean_2, weights_prior_deviation_1, weights_prior_deviation_2, mixture_pie, rho_mean, extra_likelihood_emphasis, simulation_iteration_onset, total_simulation_runs):
COPY_OF_ALL_MUJOCO_TASK_IDENTITIES = copy.deepcopy(ALL_MUJOCO_TASK_IDENTITIES)
if experiment_type == 'active_learning_proof_of_concept': simulation_runs = 1
else: simulation_runs = total_simulation_runs
for simulation_iterator in range(simulation_iteration_onset, simulation_runs):
if experiment_type == 'active_learning_proof_of_concept':
if domain_name == 'HalfCheetah':
COPY_OF_ALL_MUJOCO_TASK_IDENTITIES = np.array([8, 3, 7])
elif domain_name == 'Swimmer':
COPY_OF_ALL_MUJOCO_TASK_IDENTITIES = np.array([3, 4, 0])
else:
random.seed(simulation_iterator)
random.shuffle(COPY_OF_ALL_MUJOCO_TASK_IDENTITIES)
###### Naive Controller ######
if controller == 'NAIVE':
print(RED('NAIVE controller invoked'))
all_gathered_x, all_gathered_y = None, None
tasks_trained_on, task_iterator_trained_on = [], []
print(GREEN('Starting runs for the naive controller'))
for task_iterator, current_task_identity in enumerate(COPY_OF_ALL_MUJOCO_TASK_IDENTITIES):
print(RED('Simulation iteration is ' + str(simulation_iterator) + ' and task iterator is ' + str(task_iterator)))
tasks_trained_on.append(current_task_identity)
task_iterator_trained_on.append(task_iterator)
moving_windows_x, moving_windows_y, drift_per_time_step, moving_windows_x_size = getDemonstrationsFromTask(domain_name=domain_name, task_identity=current_task_identity, window_size=window_size, number_demonstrations=number_demonstrations)
if all_gathered_x is None:
all_gathered_x, all_gathered_y = copy.deepcopy(moving_windows_x), copy.deepcopy(moving_windows_y)
else:
all_gathered_x, all_gathered_y = np.append(all_gathered_x, moving_windows_x, axis=0), np.append(all_gathered_y, moving_windows_y, axis=0)
disposible_training_x, disposible_training_y = copy.deepcopy(all_gathered_x), copy.deepcopy(all_gathered_y)
mean_x, deviation_x = get_mean_and_deviation(data = disposible_training_x)
disposible_training_x = NORMALIZE(disposible_training_x, mean_x, deviation_x)
mean_y, deviation_y = get_mean_and_deviation(data = disposible_training_y)
disposible_training_y = NORMALIZE(disposible_training_y, mean_y, deviation_y)
configuration_identity = 'logs/' + domain_name + '/' + str(number_demonstrations) + '/naive_controller/' + experiment_type + '/' + str(simulation_iterator) + '/' + str(task_iterator) + '/'
training_logs_configuration_identity = configuration_identity + 'training/'
if not os.path.exists(training_logs_configuration_identity):
os.makedirs(training_logs_configuration_identity)
file_name_to_save_meta_data = training_logs_configuration_identity + 'training_meta_data.pkl'
meta_data_to_store = {MEAN_KEY_X: mean_x, DEVIATION_KEY_X: deviation_x, MEAN_KEY_Y:mean_y, DEVIATION_KEY_Y:deviation_y,
DRIFT_PER_TIME_STEP_KEY: drift_per_time_step, MOVING_WINDOWS_X_SIZE_KEY: moving_windows_x_size,
WINDOW_SIZE_KEY: window_size}
with open(file_name_to_save_meta_data, 'wb') as f:
pickle.dump(meta_data_to_store, f)
print(BLUE('Training phase'))
train_BBB(data_x=disposible_training_x, data_y=disposible_training_y, configuration_identity=training_logs_configuration_identity, epochs=epochs, number_mini_batches=number_mini_batches, activation_unit=activation_unit,
learning_rate=learning_rate, hidden_units=hidden_units, number_samples_variance_reduction=number_samples_variance_reduction, precision_alpha=precision_alpha, weights_prior_mean_1=weights_prior_mean_1,
weights_prior_mean_2=weights_prior_mean_2, weights_prior_deviation_1=weights_prior_deviation_1, weights_prior_deviation_2=weights_prior_deviation_2, mixture_pie=mixture_pie, rho_mean=rho_mean, extra_likelihood_emphasis=extra_likelihood_emphasis)
meta_data_file_for_this_run = 'logs/' + domain_name + '/' + str(number_demonstrations) + '/naive_controller/' + experiment_type + '/' + str(simulation_iterator) + '/meta_data.pkl'
meta_data_for_this_run = {TRAINING_TASK_ITERATION_KEY: task_iterator_trained_on, TASKS_TRAINED_ON_KEY: tasks_trained_on}
with open(meta_data_file_for_this_run, 'wb') as f:
pickle.dump(meta_data_for_this_run, f)
print(BLUE('Validation phase'))
validate_BBB(domain_name=domain_name, controller_identity=configuration_identity, configuration_identity=configuration_identity)
###### BBB Controller ######
if controller == 'BBB':
print(RED('BBB controller invoked'))
stats='first_run'
did_succeed = False
all_gathered_x, all_gathered_y = None, None
tasks_trained_on, tasks_encountered, task_iterator_trained_on, all_thresholds, all_stats = [], [], [], [], []
current_task_identity = COPY_OF_ALL_MUJOCO_TASK_IDENTITIES[0]
tasks_encountered.append(current_task_identity)
detector = Detector(domain_name=domain_name, start_monitoring_at_time_step=start_monitoring_at_time_step, initial_threshold=initial_detector_threshold, detector_m=detector_m, detector_c=detector_c)
print(GREEN('Starting runs for the BBB controller'))
for task_iterator in range(len(COPY_OF_ALL_MUJOCO_TASK_IDENTITIES)):
print(RED('Simulation iteration is ' + str(simulation_iterator) + ', task iterator is ' + str(task_iterator) + ', and current task is ' + str(current_task_identity)))
detector.reset()
configuration_identity = 'logs/' + domain_name + '/' + str(number_demonstrations) + '/bbb_controller/detector_c_' + str(detector_c) + '_detector_m_' + str(detector_m) + '/' + experiment_type + '/' + str(simulation_iterator) + '/' + str(task_iterator) + '/'
if not os.path.exists(configuration_identity):
os.makedirs(configuration_identity)
if not did_succeed:
training_logs_configuration_identity = configuration_identity + 'training/'
if not os.path.exists(training_logs_configuration_identity):
os.makedirs(training_logs_configuration_identity)
current_controllers_identity = configuration_identity
tasks_trained_on.append(current_task_identity)
task_iterator_trained_on.append(task_iterator)
moving_windows_x, moving_windows_y, drift_per_time_step, moving_windows_x_size = getDemonstrationsFromTask(domain_name=domain_name, task_identity=current_task_identity, window_size=window_size, number_demonstrations=number_demonstrations)
if all_gathered_x is None:
all_gathered_x, all_gathered_y = copy.deepcopy(moving_windows_x), copy.deepcopy(moving_windows_y)
else:
all_gathered_x, all_gathered_y = np.append(all_gathered_x, moving_windows_x, axis=0), np.append(all_gathered_y, moving_windows_y, axis=0)
disposible_training_x, disposible_training_y = copy.deepcopy(all_gathered_x), copy.deepcopy(all_gathered_y)
mean_x, deviation_x = get_mean_and_deviation(data = disposible_training_x)
disposible_training_x = NORMALIZE(disposible_training_x, mean_x, deviation_x)
mean_y, deviation_y = get_mean_and_deviation(data = disposible_training_y)
disposible_training_y = NORMALIZE(disposible_training_y, mean_y, deviation_y)
file_name_to_save_meta_data = training_logs_configuration_identity + 'training_meta_data.pkl'
#meta_data_to_store = {MEAN_KEY_X: mean_x, DEVIATION_KEY_X: deviation_x, MEAN_KEY_Y:mean_y, DEVIATION_KEY_Y:deviation_y,
# DRIFT_PER_TIME_STEP_KEY: drift_per_time_step, MOVING_WINDOWS_X_SIZE_KEY: moving_windows_x_size,
# WINDOW_SIZE_KEY: window_size, TASKS_TRAINED_ON_KEY: tasks_trained_on, TASKS_ENCOUNTERED_KEY: tasks_encountered,
# STATS_KEY: stats}
meta_data_to_store = {MEAN_KEY_X: mean_x, DEVIATION_KEY_X: deviation_x, MEAN_KEY_Y:mean_y, DEVIATION_KEY_Y:deviation_y,
DRIFT_PER_TIME_STEP_KEY: drift_per_time_step, MOVING_WINDOWS_X_SIZE_KEY: moving_windows_x_size,
WINDOW_SIZE_KEY: window_size}
with open(file_name_to_save_meta_data, 'wb') as f:
pickle.dump(meta_data_to_store, f)
print(BLUE('Training phase'))
train_BBB(data_x=disposible_training_x, data_y=disposible_training_y, configuration_identity=training_logs_configuration_identity, epochs=epochs, number_mini_batches=number_mini_batches,
activation_unit=activation_unit, learning_rate=learning_rate, hidden_units=hidden_units, number_samples_variance_reduction=number_samples_variance_reduction, precision_alpha=precision_alpha,
weights_prior_mean_1=weights_prior_mean_1, weights_prior_mean_2=weights_prior_mean_2, weights_prior_deviation_1=weights_prior_deviation_1, weights_prior_deviation_2=weights_prior_deviation_2,
mixture_pie=mixture_pie, rho_mean=rho_mean, extra_likelihood_emphasis=extra_likelihood_emphasis)
#_, average_uncertainty, _ = run_on_itself(domain_name=domain_name, task_identities=current_task_identity, controller_identity= current_controllers_identity)
_, average_uncertainty, _ = run_on_itself(domain_name=domain_name, task_identities=tasks_trained_on, controller_identity= current_controllers_identity)
#### Ground the threshold according to the quantitative value of uncertainty on the current task ####
if adapt_detector_threshold:
detector.threshold = average_uncertainty
all_thresholds.append(detector.threshold)
all_stats.append(stats)
meta_data_file_for_this_run = 'logs/' + domain_name + '/' + str(number_demonstrations) + '/bbb_controller/detector_c_' + str(detector_c) + '_detector_m_' + str(detector_m) + '/' + experiment_type + '/' + str(simulation_iterator) + '/meta_data.pkl'
meta_data_for_this_run = {TRAINING_TASK_ITERATION_KEY: task_iterator_trained_on, DETECTOR_THRESHOLD_KEY: all_thresholds, TASKS_TRAINED_ON_KEY: tasks_trained_on, TASKS_ENCOUNTERED_KEY: tasks_encountered, STATS_KEY: all_stats}
with open(meta_data_file_for_this_run, 'wb') as f:
pickle.dump(meta_data_for_this_run, f)
#need_training = False
print(BLUE('Validation phase'))
validate_BBB(domain_name=domain_name, controller_identity=current_controllers_identity, configuration_identity=configuration_identity)
if task_iterator == (len(COPY_OF_ALL_MUJOCO_TASK_IDENTITIES) - 1):
break
current_task_identity = COPY_OF_ALL_MUJOCO_TASK_IDENTITIES[task_iterator + 1]
tasks_encountered.append(current_task_identity)
#did_succeed, average_uncertainty, stats = run_on_itself(domain_name=domain_name, task_identities=current_task_identity, controller_identity=current_controllers_identity, detector=detector)
did_succeed, average_uncertainty, stats = run_on_itself(domain_name=domain_name, task_identities=[current_task_identity], controller_identity=current_controllers_identity, detector=detector)
did_succeed = str_to_bool(did_succeed)
#if not did_succeed:
# need_training = True
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-et', '--experiment_type', type=str, help='Experiment Type', choices=['active_learning_proof_of_concept', 'data_efficient_active_learning'])
parser.add_argument('-c', '--controller_type', type=str, help='Controller Type', choices=['NAIVE', 'BBB'])
parser.add_argument('-dn', '--domain_name', type=str, help='MuJoCo domain', choices=['HalfCheetah', 'Swimmer'])
parser.add_argument('-ws', '--window_size', type=int, help='Number of time-steps in a moving window', default=2)
parser.add_argument('-nd', '--number_demonstrations', type=int, help='Number demonstrations per request', default=10)
parser.add_argument('-adt', '--adapt_detector_threshold', type=str, help='Adaptive Detector Threshold', choices=['True', 'False'], default='True')
parser.add_argument('-mts', '--start_monitoring_at_time_step', type=int, help='Time step to start monitoring BBB controller uncertainty', default=200)
parser.add_argument('-dc', '--detector_c', type=float, help='Scaling factor for the detector threshold', default=2.5)
parser.add_argument('-dm', '--detector_m', type=int, help='Number of last few time-steps to smoothen predictive uncertainty', default=50)
parser.add_argument('-idt', '--initial_detector_threshold', type=float, help='The detector threshold to start with or the value of non-adaptive threshold', default=0.3)
parser.add_argument('-sio', '--simulation_iteration_onset', type=int, help='simulation_iteration_onset', default=0)
parser.add_argument('-tsr', '--total_simulation_runs', type=int, help='Total Simulation Runs', default=5)
args = parser.parse_args()
data_efficient_imitation_across_multiple_tasks(experiment_type=args.experiment_type, controller=args.controller_type, domain_name=args.domain_name, window_size=args.window_size, number_demonstrations=args.number_demonstrations, adapt_detector_threshold=str_to_bool(args.adapt_detector_threshold),
start_monitoring_at_time_step=args.start_monitoring_at_time_step, detector_c=args.detector_c, detector_m=args.detector_m, initial_detector_threshold=args.initial_detector_threshold, epochs = 10001, number_mini_batches = 20,
activation_unit = 'RELU', learning_rate = 0.001, hidden_units= [90, 30, 10], number_samples_variance_reduction = 25, precision_alpha = 0.01,
weights_prior_mean_1 = 0., weights_prior_mean_2 = 0., weights_prior_deviation_1 = 0.4, weights_prior_deviation_2 = 0.4, mixture_pie = 0.7, rho_mean = -3.,
extra_likelihood_emphasis = 10000000000000000., simulation_iteration_onset=args.simulation_iteration_onset, total_simulation_runs=args.total_simulation_runs)
|
the-stack_106_17637
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.cloud.vision.v1p3beta1",
manifest={
"Vertex",
"NormalizedVertex",
"BoundingPoly",
"NormalizedBoundingPoly",
"Position",
},
)
class Vertex(proto.Message):
r"""A vertex represents a 2D point in the image.
NOTE: the vertex coordinates are in the same scale as the
original image.
Attributes:
x (int):
X coordinate.
y (int):
Y coordinate.
"""
x = proto.Field(proto.INT32, number=1,)
y = proto.Field(proto.INT32, number=2,)
class NormalizedVertex(proto.Message):
r"""A vertex represents a 2D point in the image.
NOTE: the normalized vertex coordinates are relative to the
original image and range from 0 to 1.
Attributes:
x (float):
X coordinate.
y (float):
Y coordinate.
"""
x = proto.Field(proto.FLOAT, number=1,)
y = proto.Field(proto.FLOAT, number=2,)
class BoundingPoly(proto.Message):
r"""A bounding polygon for the detected image annotation.
Attributes:
vertices (Sequence[google.cloud.vision_v1p3beta1.types.Vertex]):
The bounding polygon vertices.
normalized_vertices (Sequence[google.cloud.vision_v1p3beta1.types.NormalizedVertex]):
The bounding polygon normalized vertices.
"""
vertices = proto.RepeatedField(proto.MESSAGE, number=1, message="Vertex",)
normalized_vertices = proto.RepeatedField(
proto.MESSAGE, number=2, message="NormalizedVertex",
)
class NormalizedBoundingPoly(proto.Message):
r"""A normalized bounding polygon around a portion of an image.
Attributes:
vertices (Sequence[google.cloud.vision_v1p3beta1.types.NormalizedVertex]):
Normalized vertices of the bounding polygon.
"""
vertices = proto.RepeatedField(proto.MESSAGE, number=1, message="NormalizedVertex",)
class Position(proto.Message):
r"""A 3D position in the image, used primarily for Face detection
landmarks. A valid Position must have both x and y coordinates.
The position coordinates are in the same scale as the original
image.
Attributes:
x (float):
X coordinate.
y (float):
Y coordinate.
z (float):
Z coordinate (or depth).
"""
x = proto.Field(proto.FLOAT, number=1,)
y = proto.Field(proto.FLOAT, number=2,)
z = proto.Field(proto.FLOAT, number=3,)
__all__ = tuple(sorted(__protobuf__.manifest))
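# Illustrative usage sketch (not part of the generated module): proto-plus
# messages accept keyword construction, e.g.
#   box = BoundingPoly(vertices=[Vertex(x=0, y=0), Vertex(x=10, y=0),
#                                Vertex(x=10, y=10), Vertex(x=0, y=10)])
#   box.vertices[2].x  # -> 10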
|
the-stack_106_17638
|
# Copyright (c) 2012 NetApp, Inc.
# Copyright (c) 2014 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import hashlib
import json
import os
import re
import tempfile
import time
from oslo_concurrency import processutils as putils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import units
from cinder import compute
from cinder import db
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder.image import image_utils
from cinder.volume import driver
LOG = logging.getLogger(__name__)
nas_opts = [
# TODO(eharney): deprecate nas_ip and change this to nas_host
cfg.StrOpt('nas_ip',
default='',
help='IP address or Hostname of NAS system.'),
cfg.StrOpt('nas_login',
default='admin',
help='User name to connect to NAS system.'),
cfg.StrOpt('nas_password',
default='',
help='Password to connect to NAS system.',
secret=True),
cfg.IntOpt('nas_ssh_port',
default=22,
help='SSH port to use to connect to NAS system.'),
cfg.StrOpt('nas_private_key',
default='',
help='Filename of private key to use for SSH authentication.'),
cfg.StrOpt('nas_secure_file_operations',
default='auto',
help=('Allow network-attached storage systems to operate in a '
'secure environment where root level access is not '
'permitted. If set to False, access is as the root user '
'and insecure. If set to True, access is not as root. '
'If set to auto, a check is done to determine if this is '
'a new installation: True is used if so, otherwise '
'False. Default is auto.')),
cfg.StrOpt('nas_secure_file_permissions',
default='auto',
help=('Set more secure file permissions on network-attached '
'storage volume files to restrict broad other/world '
'access. If set to False, volumes are created with open '
'permissions. If set to True, volumes are created with '
'permissions for the cinder user and group (660). If '
'set to auto, a check is done to determine if '
'this is a new installation: True is used if so, '
'otherwise False. Default is auto.')),
cfg.StrOpt('nas_share_path',
default='',
help=('Path to the share to use for storing Cinder volumes. '
'For example: "/srv/export1" for an NFS server export '
'available at 10.0.5.10:/srv/export1 .')),
cfg.StrOpt('nas_mount_options',
default=None,
help=('Options used to mount the storage backend file system '
'where Cinder volumes are stored.'))
]
CONF = cfg.CONF
CONF.register_opts(nas_opts)
class RemoteFSDriver(driver.VolumeDriver):
"""Common base for drivers that work like NFS."""
driver_volume_type = None
driver_prefix = None
volume_backend_name = None
SHARE_FORMAT_REGEX = r'.+:/.+'
def __init__(self, *args, **kwargs):
super(RemoteFSDriver, self).__init__(*args, **kwargs)
self.shares = {}
self._mounted_shares = []
self._execute_as_root = True
self._is_voldb_empty_at_startup = kwargs.pop('is_vol_db_empty', None)
if self.configuration:
self.configuration.append_config_values(nas_opts)
def check_for_setup_error(self):
"""Just to override parent behavior."""
pass
def initialize_connection(self, volume, connector):
"""Allow connection to connector and return connection info.
:param volume: volume reference
:param connector: connector reference
"""
data = {'export': volume['provider_location'],
'name': volume['name']}
if volume['provider_location'] in self.shares:
data['options'] = self.shares[volume['provider_location']]
return {
'driver_volume_type': self.driver_volume_type,
'data': data,
'mount_point_base': self._get_mount_point_base()
}
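    # For an NFS-backed share the returned structure typically looks like the
    # following (all values are illustrative only):
    #   {'driver_volume_type': 'nfs',
    #    'data': {'export': '192.168.0.10:/srv/export1',
    #             'name': 'volume-<uuid>',
    #             'options': '-o nosharecache'},
    #    'mount_point_base': '/var/lib/cinder/mnt'}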
def do_setup(self, context):
"""Any initialization the volume driver does while starting."""
super(RemoteFSDriver, self).do_setup(context)
# Validate the settings for our secure file options.
self.configuration.nas_secure_file_permissions = \
self.configuration.nas_secure_file_permissions.lower()
self.configuration.nas_secure_file_operations = \
self.configuration.nas_secure_file_operations.lower()
valid_secure_opts = ['auto', 'true', 'false']
secure_options = {'nas_secure_file_permissions':
self.configuration.nas_secure_file_permissions,
'nas_secure_file_operations':
self.configuration.nas_secure_file_operations}
for opt_name, opt_value in secure_options.iteritems():
if opt_value not in valid_secure_opts:
err_parms = {'name': opt_name, 'value': opt_value}
msg = _("NAS config '%(name)s=%(value)s' invalid. Must be "
"'auto', 'true', or 'false'") % err_parms
LOG.error(msg)
raise exception.InvalidConfigurationValue(msg)
def _get_mount_point_base(self):
"""Returns the mount point base for the remote fs.
This method facilitates returning mount point base
for the specific remote fs. Override this method
in the respective driver to return the entry to be
used while attach/detach using brick in cinder.
If not overridden then it returns None without
raising exception to continue working for cases
when not used with brick.
"""
LOG.debug("Driver specific implementation needs to return"
" mount_point_base.")
return None
def create_volume(self, volume):
"""Creates a volume.
:param volume: volume reference
:returns: provider_location update dict for database
"""
self._ensure_shares_mounted()
volume['provider_location'] = self._find_share(volume['size'])
LOG.info(_LI('casted to %s') % volume['provider_location'])
self._do_create_volume(volume)
return {'provider_location': volume['provider_location']}
def _do_create_volume(self, volume):
"""Create a volume on given remote share.
:param volume: volume reference
"""
volume_path = self.local_path(volume)
volume_size = volume['size']
if getattr(self.configuration,
self.driver_prefix + '_sparsed_volumes'):
self._create_sparsed_file(volume_path, volume_size)
else:
self._create_regular_file(volume_path, volume_size)
self._set_rw_permissions(volume_path)
def _ensure_shares_mounted(self):
"""Look for remote shares in the flags and tries to mount them
locally.
"""
mounted_shares = []
self._load_shares_config(getattr(self.configuration,
self.driver_prefix +
'_shares_config'))
for share in self.shares.keys():
try:
self._ensure_share_mounted(share)
mounted_shares.append(share)
except Exception as exc:
LOG.error(_LE('Exception during mounting %s') % (exc,))
self._mounted_shares = mounted_shares
LOG.debug('Available shares %s' % self._mounted_shares)
def create_cloned_volume(self, volume, src_vref):
raise NotImplementedError()
def delete_volume(self, volume):
"""Deletes a logical volume.
:param volume: volume reference
"""
if not volume['provider_location']:
LOG.warn(_LW('Volume %s does not have '
'provider_location specified, '
'skipping'), volume['name'])
return
self._ensure_share_mounted(volume['provider_location'])
mounted_path = self.local_path(volume)
self._delete(mounted_path)
def ensure_export(self, ctx, volume):
"""Synchronously recreates an export for a logical volume."""
self._ensure_share_mounted(volume['provider_location'])
def create_export(self, ctx, volume):
"""Exports the volume.
Can optionally return a dictionary of changes
to the volume object to be persisted.
"""
pass
def remove_export(self, ctx, volume):
"""Removes an export for a logical volume."""
pass
def delete_snapshot(self, snapshot):
"""Do nothing for this driver, but allow manager to handle deletion
of snapshot in error state.
"""
pass
def _delete(self, path):
# Note(lpetrut): this method is needed in order to provide
# interoperability with Windows as it will be overridden.
self._execute('rm', '-f', path, run_as_root=self._execute_as_root)
def _create_sparsed_file(self, path, size):
"""Creates a sparse file of a given size in GiB."""
self._execute('truncate', '-s', '%sG' % size,
path, run_as_root=self._execute_as_root)
def _create_regular_file(self, path, size):
"""Creates a regular file of given size in GiB."""
block_size_mb = 1
block_count = size * units.Gi / (block_size_mb * units.Mi)
self._execute('dd', 'if=/dev/zero', 'of=%s' % path,
'bs=%dM' % block_size_mb,
'count=%d' % block_count,
run_as_root=self._execute_as_root)
def _create_qcow2_file(self, path, size_gb):
"""Creates a QCOW2 file of a given size in GiB."""
self._execute('qemu-img', 'create', '-f', 'qcow2',
'-o', 'preallocation=metadata',
path, str(size_gb * units.Gi),
run_as_root=self._execute_as_root)
def _set_rw_permissions(self, path):
"""Sets access permissions for given NFS path.
Volume file permissions are set based upon the value of
secure_file_permissions: 'true' sets secure access permissions and
'false' sets more open (insecure) access permissions.
:param path: the volume file path.
"""
if self.configuration.nas_secure_file_permissions == 'true':
permissions = '660'
LOG.debug('File path %s is being set with permissions: %s' %
(path, permissions))
else:
permissions = 'ugo+rw'
parms = {'path': path, 'perm': permissions}
LOG.warn(_LW('%(path)s is being set with open permissions: '
'%(perm)s') % parms)
self._execute('chmod', permissions, path,
run_as_root=self._execute_as_root)
def _set_rw_permissions_for_all(self, path):
"""Sets 666 permissions for the path."""
self._execute('chmod', 'ugo+rw', path,
run_as_root=self._execute_as_root)
def _set_rw_permissions_for_owner(self, path):
"""Sets read-write permissions to the owner for the path."""
self._execute('chmod', 'u+rw', path,
run_as_root=self._execute_as_root)
def local_path(self, volume):
"""Get volume path (mounted locally fs path) for given volume
:param volume: volume reference
"""
remotefs_share = volume['provider_location']
return os.path.join(self._get_mount_point_for_share(remotefs_share),
volume['name'])
def copy_image_to_volume(self, context, volume, image_service, image_id):
"""Fetch the image from image_service and write it to the volume."""
run_as_root = self._execute_as_root
image_utils.fetch_to_raw(context,
image_service,
image_id,
self.local_path(volume),
self.configuration.volume_dd_blocksize,
size=volume['size'],
run_as_root=run_as_root)
# NOTE (leseb): Set the virtual size of the image
# the raw conversion overwrote the destination file
# (which had the correct size)
# with the fetched glance image size,
# thus the initial 'size' parameter is not honored
# this sets the size to the one asked in the first place by the user
# and then verify the final virtual size
image_utils.resize_image(self.local_path(volume), volume['size'],
run_as_root=run_as_root)
data = image_utils.qemu_img_info(self.local_path(volume),
run_as_root=run_as_root)
virt_size = data.virtual_size / units.Gi
if virt_size != volume['size']:
raise exception.ImageUnacceptable(
image_id=image_id,
reason=(_("Expected volume size was %d") % volume['size'])
+ (_(" but size is now %d") % virt_size))
def copy_volume_to_image(self, context, volume, image_service, image_meta):
"""Copy the volume to the specified image."""
image_utils.upload_volume(context,
image_service,
image_meta,
self.local_path(volume))
def _read_config_file(self, config_file):
# Returns list of lines in file
with open(config_file) as f:
return f.readlines()
def _load_shares_config(self, share_file=None):
self.shares = {}
if all((self.configuration.nas_ip,
self.configuration.nas_share_path)):
LOG.debug('Using nas_ip and nas_share_path configuration.')
nas_ip = self.configuration.nas_ip
nas_share_path = self.configuration.nas_share_path
share_address = '%s:%s' % (nas_ip, nas_share_path)
if not re.match(self.SHARE_FORMAT_REGEX, share_address):
msg = (_("Share %s ignored due to invalid format. Must "
"be of form address:/export. Please check the "
"nas_ip and nas_share_path settings."),
share_address)
raise exception.InvalidConfigurationValue(msg)
self.shares[share_address] = self.configuration.nas_mount_options
elif share_file is not None:
LOG.debug('Loading shares from %s.' % share_file)
for share in self._read_config_file(share_file):
# A configuration line may be either:
# host:/vol_name
# or
# host:/vol_name -o options=123,rw --other
if not share.strip():
# Skip blank or whitespace-only lines
continue
if share.startswith('#'):
continue
share_info = share.split(' ', 1)
# results in share_info =
# [ 'address:/vol', '-o options=123,rw --other' ]
share_address = share_info[0].strip().decode('unicode_escape')
share_opts = None
if len(share_info) > 1:
share_opts = share_info[1].strip()
if not re.match(self.SHARE_FORMAT_REGEX, share_address):
LOG.error(_LE("Share %s ignored due to invalid format. "
"Must be of form address:/export."),
share_address)
continue
self.shares[share_address] = share_opts
LOG.debug("shares loaded: %s", self.shares)
def _get_mount_point_for_share(self, path):
raise NotImplementedError()
def terminate_connection(self, volume, connector, **kwargs):
"""Disallow connection from connector."""
pass
def get_volume_stats(self, refresh=False):
"""Get volume stats.
If 'refresh' is True, update the stats first.
"""
if refresh or not self._stats:
self._update_volume_stats()
return self._stats
def _update_volume_stats(self):
"""Retrieve stats info from volume group."""
data = {}
backend_name = self.configuration.safe_get('volume_backend_name')
data['volume_backend_name'] = backend_name or self.volume_backend_name
data['vendor_name'] = 'Open Source'
data['driver_version'] = self.get_version()
data['storage_protocol'] = self.driver_volume_type
self._ensure_shares_mounted()
global_capacity = 0
global_free = 0
for share in self._mounted_shares:
capacity, free, used = self._get_capacity_info(share)
global_capacity += capacity
global_free += free
data['total_capacity_gb'] = global_capacity / float(units.Gi)
data['free_capacity_gb'] = global_free / float(units.Gi)
data['reserved_percentage'] = 0
data['QoS_support'] = False
self._stats = data
def _do_mount(self, cmd, ensure, share):
"""Finalize mount command.
:param cmd: command to do the actual mount
:param ensure: boolean to allow remounting a share with a warning
:param share: description of the share for error reporting
"""
try:
self._execute(*cmd, run_as_root=True)
except putils.ProcessExecutionError as exc:
if ensure and 'already mounted' in exc.stderr:
LOG.warn(_LW("%s is already mounted"), share)
else:
raise
def _get_capacity_info(self, share):
raise NotImplementedError()
def _find_share(self, volume_size_in_gib):
raise NotImplementedError()
def _ensure_share_mounted(self, share):
raise NotImplementedError()
def secure_file_operations_enabled(self):
"""Determine if driver is operating in Secure File Operations mode.
The Cinder Volume driver needs to query if this driver is operating
in a secure file mode; check our nas_secure_file_operations flag.
"""
if self.configuration.nas_secure_file_operations == 'true':
return True
return False
def set_nas_security_options(self, is_new_cinder_install):
"""Determine the setting to use for Secure NAS options.
This method must be overridden by child wishing to use secure
NAS file operations. This base method will set the NAS security
options to false.
"""
doc_html = "http://docs.openstack.org/admin-guide-cloud/content" \
"/nfs_backend.html"
self.configuration.nas_secure_file_operations = 'false'
LOG.warn(_LW("The NAS file operations will be run as root: allowing "
"root level access at the storage backend. This is "
"considered an insecure NAS environment. "
"Please see %s for information on a secure NAS "
"configuration.") %
doc_html)
self.configuration.nas_secure_file_permissions = 'false'
LOG.warn(_LW("The NAS file permissions mode will be 666 (allowing "
"other/world read & write access). This is considered an "
"insecure NAS environment. Please see %s for information "
"on a secure NFS configuration.") %
doc_html)
def _determine_nas_security_option_setting(self, nas_option, mount_point,
is_new_cinder_install):
"""Determine NAS security option setting when 'auto' is assigned.
This method determines the final 'true'/'false' setting of an NAS
security option when the default value of 'auto' has been detected.
If the nas option isn't 'auto' then its current value is used.
:param nas_option: The NAS security option value loaded from config.
:param mount_point: Mount where indicator file is written.
:param is_new_cinder_install: boolean for new Cinder installation.
:return string: 'true' or 'false' for new option setting.
"""
if nas_option == 'auto':
# For auto detection, we first check to see if we have been
# through this process before by checking for the existence of
# the Cinder secure environment indicator file.
file_name = '.cinderSecureEnvIndicator'
file_path = os.path.join(mount_point, file_name)
if os.path.isfile(file_path):
nas_option = 'true'
LOG.info(_LI('Cinder secure environment '
'indicator file exists.'))
else:
# The indicator file does not exist. If it is a new
# installation, set to 'true' and create the indicator file.
if is_new_cinder_install:
nas_option = 'true'
try:
with open(file_path, 'w') as fh:
fh.write('Detector file for Cinder secure '
'environment usage.\n')
fh.write('Do not delete this file.\n')
# Set the permissions on our special marker file to
# protect from accidental removal (owner write only).
self._execute('chmod', '640', file_path,
run_as_root=False)
LOG.info(_LI('New Cinder secure environment indicator'
' file created at path %s.') % file_path)
except IOError as err:
                        LOG.error(_LE('Failed to create Cinder secure '
'environment indicator file: %s') %
format(err))
else:
# For existing installs, we default to 'false'. The
# admin can always set the option at the driver config.
nas_option = 'false'
return nas_option
class RemoteFSSnapDriver(RemoteFSDriver):
"""Base class for remotefs drivers implementing qcow2 snapshots.
Driver must implement:
_local_volume_dir(self, volume)
"""
def __init__(self, *args, **kwargs):
self._remotefsclient = None
self.base = None
self._nova = None
super(RemoteFSSnapDriver, self).__init__(*args, **kwargs)
def do_setup(self, context):
super(RemoteFSSnapDriver, self).do_setup(context)
self._nova = compute.API()
def _local_volume_dir(self, volume):
share = volume['provider_location']
local_dir = self._get_mount_point_for_share(share)
return local_dir
def _local_path_volume(self, volume):
path_to_disk = os.path.join(
self._local_volume_dir(volume),
volume['name'])
return path_to_disk
def _get_new_snap_path(self, snapshot):
vol_path = self.local_path(snapshot['volume'])
snap_path = '%s.%s' % (vol_path, snapshot['id'])
return snap_path
def _local_path_volume_info(self, volume):
return '%s%s' % (self.local_path(volume), '.info')
def _read_file(self, filename):
"""This method is to make it easier to stub out code for testing.
Returns a string representing the contents of the file.
"""
with open(filename, 'r') as f:
return f.read()
def _write_info_file(self, info_path, snap_info):
if 'active' not in snap_info.keys():
msg = _("'active' must be present when writing snap_info.")
raise exception.RemoteFSException(msg)
with open(info_path, 'w') as f:
json.dump(snap_info, f, indent=1, sort_keys=True)
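    # The info file written here is a small JSON document that maps snapshot ids
    # to the image file holding their data, plus an 'active' pointer to the top
    # of the backing chain, roughly (ids shortened for illustration):
    #   {
    #     "active": "volume-1234.7c3a",
    #     "7c3a": "volume-1234.7c3a"
    #   }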
def _qemu_img_info_base(self, path, volume_name, basedir):
"""Sanitize image_utils' qemu_img_info.
This code expects to deal only with relative filenames.
"""
info = image_utils.qemu_img_info(path)
if info.image:
info.image = os.path.basename(info.image)
if info.backing_file:
backing_file_template = \
"(%(basedir)s/[0-9a-f]+/)?%" \
"(volname)s(.(tmp-snap-)?[0-9a-f-]+)?$" % {
'basedir': basedir,
'volname': volume_name
}
if not re.match(backing_file_template, info.backing_file):
msg = _("File %(path)s has invalid backing file "
"%(bfile)s, aborting.") % {'path': path,
'bfile': info.backing_file}
raise exception.RemoteFSException(msg)
info.backing_file = os.path.basename(info.backing_file)
return info
def _qemu_img_info(self, path, volume_name):
raise NotImplementedError()
def _img_commit(self, path):
self._execute('qemu-img', 'commit', path,
run_as_root=self._execute_as_root)
self._delete(path)
def _rebase_img(self, image, backing_file, volume_format):
self._execute('qemu-img', 'rebase', '-u', '-b', backing_file, image,
'-F', volume_format, run_as_root=self._execute_as_root)
def _read_info_file(self, info_path, empty_if_missing=False):
"""Return dict of snapshot information.
:param: info_path: path to file
:param: empty_if_missing: True=return empty dict if no file
"""
if not os.path.exists(info_path):
if empty_if_missing is True:
return {}
return json.loads(self._read_file(info_path))
def _get_backing_chain_for_path(self, volume, path):
"""Returns list of dicts containing backing-chain information.
Includes 'filename', and 'backing-filename' for each
applicable entry.
Consider converting this to use --backing-chain and --output=json
when environment supports qemu-img 1.5.0.
:param volume: volume reference
:param path: path to image file at top of chain
"""
output = []
info = self._qemu_img_info(path, volume['name'])
new_info = {}
new_info['filename'] = os.path.basename(path)
new_info['backing-filename'] = info.backing_file
output.append(new_info)
while new_info['backing-filename']:
filename = new_info['backing-filename']
path = os.path.join(self._local_volume_dir(volume), filename)
info = self._qemu_img_info(path, volume['name'])
backing_filename = info.backing_file
new_info = {}
new_info['filename'] = filename
new_info['backing-filename'] = backing_filename
output.append(new_info)
return output
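    # Example of the returned structure for a volume with a single snapshot
    # (file names are illustrative only):
    #   [{'filename': 'volume-1234.7c3a', 'backing-filename': 'volume-1234'},
    #    {'filename': 'volume-1234', 'backing-filename': None}]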
def _get_hash_str(self, base_str):
"""Return a string that represents hash of base_str
(in a hex format).
"""
return hashlib.md5(base_str).hexdigest()
def _get_mount_point_for_share(self, share):
"""Return mount point for share.
:param share: example 172.18.194.100:/var/fs
"""
return self._remotefsclient.get_mount_point(share)
def _get_available_capacity(self, share):
"""Calculate available space on the share.
:param share: example 172.18.194.100:/var/fs
"""
mount_point = self._get_mount_point_for_share(share)
out, _ = self._execute('df', '--portability', '--block-size', '1',
mount_point,
run_as_root=self._execute_as_root)
out = out.splitlines()[1]
size = int(out.split()[1])
available = int(out.split()[3])
return available, size
def _get_capacity_info(self, remotefs_share):
available, size = self._get_available_capacity(remotefs_share)
return size, available, size - available
def _get_mount_point_base(self):
return self.base
def _ensure_share_writable(self, path):
"""Ensure that the Cinder user can write to the share.
If not, raise an exception.
:param path: path to test
:raises: RemoteFSException
:returns: None
"""
prefix = '.cinder-write-test-' + str(os.getpid()) + '-'
try:
tempfile.NamedTemporaryFile(prefix=prefix, dir=path)
except OSError:
msg = _('Share at %(dir)s is not writable by the '
'Cinder volume service. Snapshot operations will not be '
'supported.') % {'dir': path}
raise exception.RemoteFSException(msg)
def _copy_volume_to_image(self, context, volume, image_service,
image_meta):
"""Copy the volume to the specified image."""
# If snapshots exist, flatten to a temporary image, and upload it
active_file = self.get_active_image_from_info(volume)
active_file_path = os.path.join(self._local_volume_dir(volume),
active_file)
info = self._qemu_img_info(active_file_path, volume['name'])
backing_file = info.backing_file
root_file_fmt = info.file_format
tmp_params = {
'prefix': '%s.temp_image.%s' % (volume['id'], image_meta['id']),
'suffix': '.img'
}
with image_utils.temporary_file(**tmp_params) as temp_path:
if backing_file or (root_file_fmt != 'raw'):
# Convert due to snapshots
# or volume data not being stored in raw format
# (upload_volume assumes raw format input)
image_utils.convert_image(active_file_path, temp_path, 'raw')
upload_path = temp_path
else:
upload_path = active_file_path
image_utils.upload_volume(context,
image_service,
image_meta,
upload_path)
def get_active_image_from_info(self, volume):
"""Returns filename of the active image from the info file."""
info_file = self._local_path_volume_info(volume)
snap_info = self._read_info_file(info_file, empty_if_missing=True)
if not snap_info:
# No info file = no snapshots exist
vol_path = os.path.basename(self.local_path(volume))
return vol_path
return snap_info['active']
def _create_cloned_volume(self, volume, src_vref):
LOG.info(_LI('Cloning volume %(src)s to volume %(dst)s') %
{'src': src_vref['id'],
'dst': volume['id']})
if src_vref['status'] != 'available':
msg = _("Volume status must be 'available'.")
raise exception.InvalidVolume(msg)
volume_name = CONF.volume_name_template % volume['id']
volume_info = {'provider_location': src_vref['provider_location'],
'size': src_vref['size'],
'id': volume['id'],
'name': volume_name,
'status': src_vref['status']}
temp_snapshot = {'volume_name': volume_name,
'size': src_vref['size'],
'volume_size': src_vref['size'],
'name': 'clone-snap-%s' % src_vref['id'],
'volume_id': src_vref['id'],
'id': 'tmp-snap-%s' % src_vref['id'],
'volume': src_vref}
self._create_snapshot(temp_snapshot)
try:
self._copy_volume_from_snapshot(temp_snapshot,
volume_info,
volume['size'])
finally:
self._delete_snapshot(temp_snapshot)
return {'provider_location': src_vref['provider_location']}
def _delete_stale_snapshot(self, snapshot):
info_path = self._local_path_volume_info(snapshot['volume'])
snap_info = self._read_info_file(info_path)
snapshot_file = snap_info[snapshot['id']]
active_file = self.get_active_image_from_info(snapshot['volume'])
snapshot_path = os.path.join(
self._local_volume_dir(snapshot['volume']), snapshot_file)
if (snapshot_file == active_file):
return
LOG.info(_LI('Deleting stale snapshot: %s') % snapshot['id'])
self._delete(snapshot_path)
del(snap_info[snapshot['id']])
self._write_info_file(info_path, snap_info)
def _delete_snapshot(self, snapshot):
"""Delete a snapshot.
If volume status is 'available', delete snapshot here in Cinder
using qemu-img.
If volume status is 'in-use', calculate what qcow2 files need to
merge, and call to Nova to perform this operation.
:raises: InvalidVolume if status not acceptable
:raises: RemoteFSException(msg) if operation fails
:returns: None
"""
LOG.debug('Deleting snapshot %s:' % snapshot['id'])
volume_status = snapshot['volume']['status']
if volume_status not in ['available', 'in-use']:
msg = _('Volume status must be "available" or "in-use".')
raise exception.InvalidVolume(msg)
vol_path = self._local_volume_dir(snapshot['volume'])
self._ensure_share_writable(vol_path)
# Determine the true snapshot file for this snapshot
# based on the .info file
info_path = self._local_path_volume_info(snapshot['volume'])
snap_info = self._read_info_file(info_path, empty_if_missing=True)
if snapshot['id'] not in snap_info:
# If snapshot info file is present, but snapshot record does not
# exist, do not attempt to delete.
# (This happens, for example, if snapshot_create failed due to lack
# of permission to write to the share.)
LOG.info(_LI('Snapshot record for %s is not present, allowing '
'snapshot_delete to proceed.') % snapshot['id'])
return
snapshot_file = snap_info[snapshot['id']]
LOG.debug('snapshot_file for this snap is: %s' % snapshot_file)
snapshot_path = os.path.join(
self._local_volume_dir(snapshot['volume']),
snapshot_file)
snapshot_path_img_info = self._qemu_img_info(
snapshot_path,
snapshot['volume']['name'])
base_file = snapshot_path_img_info.backing_file
if base_file is None:
# There should always be at least the original volume
# file as base.
LOG.warning(_LW('No backing file found for %s, allowing '
'snapshot to be deleted.'), snapshot_path)
# Snapshot may be stale, so just delete it and update the
# info file instead of blocking
return self._delete_stale_snapshot(snapshot)
base_path = os.path.join(vol_path, base_file)
base_file_img_info = self._qemu_img_info(base_path,
snapshot['volume']['name'])
# Find what file has this as its backing file
active_file = self.get_active_image_from_info(snapshot['volume'])
active_file_path = os.path.join(vol_path, active_file)
if volume_status == 'in-use':
# Online delete
context = snapshot['context']
new_base_file = base_file_img_info.backing_file
base_id = None
for key, value in snap_info.iteritems():
if value == base_file and key != 'active':
base_id = key
break
if base_id is None:
# This means we are deleting the oldest snapshot
msg = 'No %(base_id)s found for %(file)s' % {
'base_id': 'base_id',
'file': snapshot_file}
LOG.debug(msg)
online_delete_info = {
'active_file': active_file,
'snapshot_file': snapshot_file,
'base_file': base_file,
'base_id': base_id,
'new_base_file': new_base_file
}
return self._delete_snapshot_online(context,
snapshot,
online_delete_info)
if snapshot_file == active_file:
# There is no top file
# T0 | T1 |
# base | snapshot_file | None
# (guaranteed to| (being deleted, |
            # exist)        | committed down) |
self._img_commit(snapshot_path)
# Active file has changed
snap_info['active'] = base_file
else:
# T0 | T1 | T2 | T3
# base | snapshot_file | higher_file | highest_file
# (guaranteed to | (being deleted, | (guaranteed to | (may exist)
            # exist, not    | committed down) | exist, needs   |
# used here) | | ptr update) |
backing_chain = self._get_backing_chain_for_path(
snapshot['volume'], active_file_path)
# This file is guaranteed to exist since we aren't operating on
# the active file.
higher_file = next((os.path.basename(f['filename'])
for f in backing_chain
if f.get('backing-filename', '') ==
snapshot_file),
None)
if higher_file is None:
msg = _('No file found with %s as backing file.') %\
snapshot_file
raise exception.RemoteFSException(msg)
higher_id = next((i for i in snap_info
if snap_info[i] == higher_file
and i != 'active'),
None)
if higher_id is None:
msg = _('No snap found with %s as backing file.') %\
higher_file
raise exception.RemoteFSException(msg)
self._img_commit(snapshot_path)
higher_file_path = os.path.join(vol_path, higher_file)
base_file_fmt = base_file_img_info.file_format
self._rebase_img(higher_file_path, base_file, base_file_fmt)
# Remove snapshot_file from info
del(snap_info[snapshot['id']])
self._write_info_file(info_path, snap_info)
def _create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot.
Snapshot must not be the active snapshot. (offline)
"""
if snapshot['status'] != 'available':
msg = _('Snapshot status must be "available" to clone.')
raise exception.InvalidSnapshot(msg)
self._ensure_shares_mounted()
volume['provider_location'] = self._find_share(volume['size'])
self._do_create_volume(volume)
self._copy_volume_from_snapshot(snapshot,
volume,
volume['size'])
return {'provider_location': volume['provider_location']}
def _copy_volume_from_snapshot(self, snapshot, volume, volume_size):
raise NotImplementedError()
def _do_create_snapshot(self, snapshot, backing_filename,
new_snap_path):
"""Create a QCOW2 file backed by another file.
:param snapshot: snapshot reference
:param backing_filename: filename of file that will back the
new qcow2 file
:param new_snap_path: filename of new qcow2 file
"""
backing_path_full_path = os.path.join(
self._local_volume_dir(snapshot['volume']),
backing_filename)
command = ['qemu-img', 'create', '-f', 'qcow2', '-o',
'backing_file=%s' % backing_path_full_path, new_snap_path]
self._execute(*command, run_as_root=self._execute_as_root)
info = self._qemu_img_info(backing_path_full_path,
snapshot['volume']['name'])
backing_fmt = info.file_format
command = ['qemu-img', 'rebase', '-u',
'-b', backing_filename,
'-F', backing_fmt,
new_snap_path]
self._execute(*command, run_as_root=self._execute_as_root)
self._set_rw_permissions(new_snap_path)
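    # Sketch of the commands issued above (hypothetical paths): for a snapshot of
    # volume-1234 whose active image is volume-1234, this runs roughly
    #   qemu-img create -f qcow2 -o backing_file=<dir>/volume-1234 <new_snap_path>
    #   qemu-img rebase -u -b volume-1234 -F <backing format> <new_snap_path>
    # The unsafe rebase rewrites the backing pointer to the bare filename so the
    # chain stays valid wherever the share happens to be mounted.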
def _create_snapshot(self, snapshot):
"""Create a snapshot.
If volume is attached, call to Nova to create snapshot, providing a
qcow2 file. Cinder creates and deletes qcow2 files, but Nova is
responsible for transitioning the VM between them and handling live
transfers of data between files as required.
If volume is detached, create locally with qemu-img. Cinder handles
manipulation of qcow2 files.
A file named volume-<uuid>.info is stored with the volume
data and is a JSON table which contains a mapping between
Cinder snapshot UUIDs and filenames, as these associations
will change as snapshots are deleted.
Basic snapshot operation:
1. Initial volume file:
volume-1234
2. Snapshot created:
volume-1234 <- volume-1234.aaaa
volume-1234.aaaa becomes the new "active" disk image.
If the volume is not attached, this filename will be used to
attach the volume to a VM at volume-attach time.
If the volume is attached, the VM will switch to this file as
part of the snapshot process.
Note that volume-1234.aaaa represents changes after snapshot
'aaaa' was created. So the data for snapshot 'aaaa' is actually
in the backing file(s) of volume-1234.aaaa.
This file has a qcow2 header recording the fact that volume-1234 is
its backing file. Delta changes since the snapshot was created are
stored in this file, and the backing file (volume-1234) does not
change.
info file: { 'active': 'volume-1234.aaaa',
'aaaa': 'volume-1234.aaaa' }
3. Second snapshot created:
volume-1234 <- volume-1234.aaaa <- volume-1234.bbbb
volume-1234.bbbb now becomes the "active" disk image, recording
changes made to the volume.
info file: { 'active': 'volume-1234.bbbb', (* changed!)
'aaaa': 'volume-1234.aaaa',
'bbbb': 'volume-1234.bbbb' } (* added!)
4. Snapshot deletion when volume is attached ('in-use' state):
* When first snapshot is deleted, Cinder calls Nova for online
snapshot deletion. Nova deletes snapshot with id "aaaa" and
makes snapshot with id "bbbb" point to the base image.
Snapshot with id "bbbb" is the active image.
volume-1234 <- volume-1234.bbbb
info file: { 'active': 'volume-1234.bbbb',
'bbbb': 'volume-1234.bbbb'
}
* When second snapshot is deleted, Cinder calls Nova for online
snapshot deletion. Nova deletes snapshot with id "bbbb" by
pulling volume-1234's data into volume-1234.bbbb. This
(logically) removes snapshot with id "bbbb" and the active
file remains the same.
volume-1234.bbbb
info file: { 'active': 'volume-1234.bbbb' }
TODO (deepakcs): Change this once Nova supports blockCommit for
in-use volumes.
5. Snapshot deletion when volume is detached ('available' state):
* When first snapshot is deleted, Cinder does the snapshot
deletion. volume-1234.aaaa is removed from the snapshot chain.
The data from it is merged into its parent.
volume-1234.bbbb is rebased, having volume-1234 as its new
parent.
volume-1234 <- volume-1234.bbbb
info file: { 'active': 'volume-1234.bbbb',
'bbbb': 'volume-1234.bbbb'
}
* When second snapshot is deleted, Cinder does the snapshot
deletion. volume-1234.aaaa is removed from the snapshot chain.
The base image, volume-1234 becomes the active image for this
volume again.
volume-1234
info file: { 'active': 'volume-1234' } (* changed!)
"""
status = snapshot['volume']['status']
if status not in ['available', 'in-use']:
msg = _('Volume status must be "available" or "in-use"'
' for snapshot. (is %s)') % status
raise exception.InvalidVolume(msg)
info_path = self._local_path_volume_info(snapshot['volume'])
snap_info = self._read_info_file(info_path, empty_if_missing=True)
backing_filename = self.get_active_image_from_info(
snapshot['volume'])
new_snap_path = self._get_new_snap_path(snapshot)
if status == 'in-use':
self._create_snapshot_online(snapshot,
backing_filename,
new_snap_path)
else:
self._do_create_snapshot(snapshot,
backing_filename,
new_snap_path)
snap_info['active'] = os.path.basename(new_snap_path)
snap_info[snapshot['id']] = os.path.basename(new_snap_path)
self._write_info_file(info_path, snap_info)
def _create_snapshot_online(self, snapshot, backing_filename,
new_snap_path):
# Perform online snapshot via Nova
context = snapshot['context']
self._do_create_snapshot(snapshot,
backing_filename,
new_snap_path)
connection_info = {
'type': 'qcow2',
'new_file': os.path.basename(new_snap_path),
'snapshot_id': snapshot['id']
}
try:
result = self._nova.create_volume_snapshot(
context,
snapshot['volume_id'],
connection_info)
LOG.debug('nova call result: %s' % result)
except Exception as e:
LOG.error(_LE('Call to Nova to create snapshot failed'))
LOG.exception(e)
raise e
# Loop and wait for result
# Nova will call Cinderclient to update the status in the database
# An update of progress = '90%' means that Nova is done
seconds_elapsed = 0
increment = 1
timeout = 600
while True:
s = db.snapshot_get(context, snapshot['id'])
if s['status'] == 'creating':
if s['progress'] == '90%':
# Nova tasks completed successfully
break
time.sleep(increment)
seconds_elapsed += increment
elif s['status'] == 'error':
msg = _('Nova returned "error" status '
'while creating snapshot.')
raise exception.RemoteFSException(msg)
LOG.debug('Status of snapshot %(id)s is now %(status)s' % {
'id': snapshot['id'],
'status': s['status']
})
if 10 < seconds_elapsed <= 20:
increment = 2
elif 20 < seconds_elapsed <= 60:
increment = 5
elif 60 < seconds_elapsed:
increment = 10
if seconds_elapsed > timeout:
msg = _('Timed out while waiting for Nova update '
'for creation of snapshot %s.') % snapshot['id']
raise exception.RemoteFSException(msg)
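    # Polling sketch (values taken from the loop above): the wait between db
    # checks grows from 1s to 2s after 10s elapsed, to 5s after 20s and to 10s
    # after 60s, and the method gives up with RemoteFSException once 600s pass
    # without Nova reporting 90% progress.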
def _delete_snapshot_online(self, context, snapshot, info):
# Update info over the course of this method
# active file never changes
info_path = self._local_path_volume(snapshot['volume']) + '.info'
snap_info = self._read_info_file(info_path)
if info['active_file'] == info['snapshot_file']:
# blockRebase/Pull base into active
# info['base'] => snapshot_file
file_to_delete = info['base_file']
if info['base_id'] is None:
# Passing base=none to blockRebase ensures that
# libvirt blanks out the qcow2 backing file pointer
new_base = None
else:
new_base = info['new_base_file']
snap_info[info['base_id']] = info['snapshot_file']
delete_info = {'file_to_merge': new_base,
'merge_target_file': None, # current
'type': 'qcow2',
'volume_id': snapshot['volume']['id']}
del(snap_info[snapshot['id']])
else:
# blockCommit snapshot into base
# info['base'] <= snapshot_file
# delete record of snapshot
file_to_delete = info['snapshot_file']
delete_info = {'file_to_merge': info['snapshot_file'],
'merge_target_file': info['base_file'],
'type': 'qcow2',
'volume_id': snapshot['volume']['id']}
del(snap_info[snapshot['id']])
try:
self._nova.delete_volume_snapshot(
context,
snapshot['id'],
delete_info)
except Exception as e:
LOG.error(_LE('Call to Nova delete snapshot failed'))
LOG.exception(e)
raise e
# Loop and wait for result
# Nova will call Cinderclient to update the status in the database
# An update of progress = '90%' means that Nova is done
seconds_elapsed = 0
increment = 1
timeout = 7200
while True:
s = db.snapshot_get(context, snapshot['id'])
if s['status'] == 'deleting':
if s['progress'] == '90%':
# Nova tasks completed successfully
break
else:
msg = ('status of snapshot %s is '
'still "deleting"... waiting') % snapshot['id']
LOG.debug(msg)
time.sleep(increment)
seconds_elapsed += increment
else:
msg = _('Unable to delete snapshot %(id)s, '
'status: %(status)s.') % {'id': snapshot['id'],
'status': s['status']}
raise exception.RemoteFSException(msg)
if 10 < seconds_elapsed <= 20:
increment = 2
elif 20 < seconds_elapsed <= 60:
increment = 5
elif 60 < seconds_elapsed:
increment = 10
if seconds_elapsed > timeout:
msg = _('Timed out while waiting for Nova update '
'for deletion of snapshot %(id)s.') %\
{'id': snapshot['id']}
raise exception.RemoteFSException(msg)
# Write info file updated above
self._write_info_file(info_path, snap_info)
# Delete stale file
path_to_delete = os.path.join(
self._local_volume_dir(snapshot['volume']), file_to_delete)
self._execute('rm', '-f', path_to_delete, run_as_root=True)
|
the-stack_106_17639
|
# Copyright 2018-present Kensho Technologies, LLC.
import random
from .utils import create_edge_statement, create_vertex_statement, get_random_limbs, get_uuid
SPECIES_LIST = (
"Nazgul",
"Pteranodon",
"Dragon",
"Hippogriff",
)
FOOD_LIST = (
"Bacon",
"Lembas",
"Blood pie",
)
NUM_FOODS = 2
def _create_food_statement(food_name):
"""Return a SQL statement to create a Food vertex."""
field_name_to_value = {"name": food_name, "uuid": get_uuid()}
return create_vertex_statement("Food", field_name_to_value)
def _create_species_statement(species_name):
"""Return a SQL statement to create a Species vertex."""
field_name_to_value = {"name": species_name, "limbs": get_random_limbs(), "uuid": get_uuid()}
return create_vertex_statement("Species", field_name_to_value)
def _create_species_eats_statement(from_name, to_name):
"""Return a SQL statement to create a Species_Eats edge."""
if to_name in SPECIES_LIST:
to_class = "Species"
elif to_name in FOOD_LIST:
to_class = "Food"
else:
raise AssertionError("Invalid name for Species_Eats endpoint: {}".format(to_name))
return create_edge_statement("Species_Eats", "Species", from_name, to_class, to_name)
def get_species_generation_commands():
"""Return a list of SQL statements to create all species vertices."""
command_list = []
for food_name in FOOD_LIST:
command_list.append(_create_food_statement(food_name))
for species_name in SPECIES_LIST:
command_list.append(_create_species_statement(species_name))
for species_name in SPECIES_LIST:
for food_or_species_name in random.sample(SPECIES_LIST + FOOD_LIST, NUM_FOODS): # nosec
command_list.append(_create_species_eats_statement(species_name, food_or_species_name))
return command_list
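# Usage sketch (output shape only; the exact SQL text comes from the helpers in
# .utils and is not reproduced here): each returned entry is a single statement
# string, e.g. roughly "create vertex Food set name = 'Bacon', uuid = '...'",
# followed by Species vertices and then Species_Eats edges.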
|
the-stack_106_17642
|
#############################################################################################################
##
## Source code for training: initialization, the training loop, and periodic testing.
##
#############################################################################################################
import glob2
import os
import argparse
import sys
import cv2
import torch
import agent
import numpy as np
from data_loader import Generator
from parameters import Parameters
import test
import evaluation
import util
import copy
p = Parameters()
###############################################################
##
## Training
##
###############################################################
def Training(epoch_model, loss_model, flag):
print('Training')
####################################################################
    ## Hyperparameters
####################################################################
    print('Initializing hyperparameters')
#########################################################################
## Get dataset
#########################################################################
print("Get dataset")
loader = Generator()
##############################
## Get agent and model
##############################
print('Get agent')
if p.model_path == "":
lane_agent = agent.Agent()
else:
lane_agent = agent.Agent()
lane_agent.load_weights(epoch_model, f"tensor({loss_model})")
##############################
## Check GPU
##############################
print('Setup GPU mode')
if torch.cuda.is_available():
lane_agent.cuda()
#torch.backends.cudnn.benchmark=True
##############################
## Loop for training
##############################
print('Training loop')
step = 0
sampling_list = None
loss_though_epoch = 0
min_loss = 9999
if flag == True:
begin_epoch = epoch_model + 1
else:
begin_epoch = 0
for epoch in range(begin_epoch, p.n_epoch):
lane_agent.training_mode()
for inputs, target_lanes, target_h, test_image, data_list in loader.Generate(sampling_list):
#training
#util.visualize_points(inputs[0], target_lanes[0], target_h[0])
print("epoch : " + str(epoch))
print("step : " + str(step))
try:
loss_p = lane_agent.train(inputs, target_lanes, target_h, epoch, lane_agent, data_list)
except:
continue
torch.cuda.synchronize()
loss_p = loss_p.cpu().data
loss_though_epoch = loss_p
if step%1000 == 0:
lane_agent.save_model(int(step/1000), loss_p)
testing(lane_agent, test_image, step, loss_p)
step += 1
if loss_though_epoch < min_loss:
try:
best_model = glob2.glob('savefile/best*')[0]
best_loss = float(best_model.split('_')[1][7:-1])
if loss_though_epoch < best_loss:
os.remove(best_model)
print(f'Best model: ({epoch}, {loss_though_epoch})')
lane_agent.save_model('best', loss_though_epoch)
min_loss = loss_though_epoch
except:
print(f'Best model: ({epoch}, {loss_though_epoch})')
lane_agent.save_model('best', loss_though_epoch)
min_loss = loss_though_epoch
lane_agent.save_model(int(epoch), loss_though_epoch)
sampling_list = copy.deepcopy(lane_agent.get_data_list())
lane_agent.sample_reset()
#evaluation:turn it off when training.
# if epoch >= 0 and epoch%1 == 0:
# print("evaluation")
# lane_agent.evaluate_mode()
# th_list = [0.8]
# index = [3]
# lane_agent.save_model(int(step/100), loss_p)
# for idx in index:
# print("generate result")
# test.evaluation(loader, lane_agent, index = idx, name="test_result_"+str(epoch)+"_"+str(idx)+".json")
# for idx in index:
# print("compute score")
# with open("/home/kym/Dropbox/eval_result2_"+str(idx)+"_.txt", 'a') as make_file:
# make_file.write( "epoch : " + str(epoch) + " loss : " + str(loss_p.cpu().data) )
# make_file.write(evaluation.LaneEval.bench_one_submit("test_result_"+str(epoch)+"_"+str(idx)+".json", "test_label.json"))
# make_file.write("\n")
# with open("eval_result_"+str(idx)+"_.txt", 'a') as make_file:
# make_file.write( "epoch : " + str(epoch) + " loss : " + str(loss_p.cpu().data) )
# make_file.write(evaluation.LaneEval.bench_one_submit("test_result_"+str(epoch)+"_"+str(idx)+".json", "test_label.json"))
# make_file.write("\n")
if int(step)>700000:
break
def testing(lane_agent, test_image, step, loss):
lane_agent.evaluate_mode()
_, _, ti = test.test(lane_agent, np.array([test_image]))
cv2.imwrite('test_result/result_'+str(step)+'_'+str(loss)+'.png', ti[0])
lane_agent.training_mode()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--pretrained_model', type=str, default='32_tensor(1.1001)_lane_detection_network.pkl')
parser.add_argument('-m' ,'--model_weight', type=str)
args = vars(parser.parse_args())
flag = False
if args['model_weight']:
model = args['model_weight']
flag = True
else:
model = args['pretrained_model']
epoch_model, loss_model = int(model.split('_')[0]), model.split('_')[1][7:-1]
Training(epoch_model, loss_model, flag)
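# Invocation sketch (script name assumed, checkpoint name hypothetical):
#   python train.py -m "12_tensor(0.9123)_lane_detection_network.pkl"
# loads that checkpoint, sets flag=True and resumes from epoch 13; without -m,
# flag stays False and training starts from epoch 0 using the default
# pretrained_model argument.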
|
the-stack_106_17643
|
# Copyright 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log
from oslo_serialization import jsonutils
from oslo_utils import versionutils
from nova import objects
from nova.objects import base as obj_base
from nova.objects import fields
LOG = log.getLogger(__name__)
@obj_base.NovaObjectRegistry.register_if(False)
class LiveMigrateData(obj_base.NovaObject):
fields = {
'is_volume_backed': fields.BooleanField(),
'migration': fields.ObjectField('Migration'),
}
def to_legacy_dict(self, pre_migration_result=False):
legacy = {}
if self.obj_attr_is_set('is_volume_backed'):
legacy['is_volume_backed'] = self.is_volume_backed
if self.obj_attr_is_set('migration'):
legacy['migration'] = self.migration
if pre_migration_result:
legacy['pre_live_migration_result'] = {}
return legacy
def from_legacy_dict(self, legacy):
if 'is_volume_backed' in legacy:
self.is_volume_backed = legacy['is_volume_backed']
if 'migration' in legacy:
self.migration = legacy['migration']
@classmethod
def detect_implementation(cls, legacy_dict):
if 'instance_relative_path' in legacy_dict:
obj = LibvirtLiveMigrateData()
elif 'image_type' in legacy_dict:
obj = LibvirtLiveMigrateData()
elif 'migrate_data' in legacy_dict:
obj = XenapiLiveMigrateData()
else:
obj = LiveMigrateData()
obj.from_legacy_dict(legacy_dict)
return obj
@obj_base.NovaObjectRegistry.register
class LibvirtLiveMigrateBDMInfo(obj_base.NovaObject):
VERSION = '1.0'
fields = {
# FIXME(danms): some of these can be enums?
'serial': fields.StringField(),
'bus': fields.StringField(),
'dev': fields.StringField(),
'type': fields.StringField(),
'format': fields.StringField(nullable=True),
'boot_index': fields.IntegerField(nullable=True),
'connection_info_json': fields.StringField(),
}
# NOTE(danms): We don't have a connection_info object right
# now, and instead mostly store/pass it as JSON that we're
# careful with. When we get a connection_info object in the
# future, we should use it here, so make this easy to convert
# for later.
@property
def connection_info(self):
return jsonutils.loads(self.connection_info_json)
@connection_info.setter
def connection_info(self, info):
self.connection_info_json = jsonutils.dumps(info)
def as_disk_info(self):
info_dict = {
'dev': self.dev,
'bus': self.bus,
'type': self.type,
}
if self.obj_attr_is_set('format') and self.format:
info_dict['format'] = self.format
if self.obj_attr_is_set('boot_index') and self.boot_index is not None:
info_dict['boot_index'] = str(self.boot_index)
return info_dict
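    # Illustrative result (hypothetical values): a BDM with dev='vdb',
    # bus='virtio', type='disk', format='qcow2' and boot_index=1 yields
    # {'dev': 'vdb', 'bus': 'virtio', 'type': 'disk', 'format': 'qcow2',
    #  'boot_index': '1'} -- note that boot_index is stringified.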
@obj_base.NovaObjectRegistry.register
class LibvirtLiveMigrateData(LiveMigrateData):
# Version 1.0: Initial version
# Version 1.1: Added target_connect_addr
# Version 1.2: Added 'serial_listen_ports' to allow live migration with
# serial console.
VERSION = '1.2'
fields = {
'filename': fields.StringField(),
# FIXME: image_type should be enum?
'image_type': fields.StringField(),
'block_migration': fields.BooleanField(),
'disk_over_commit': fields.BooleanField(),
'disk_available_mb': fields.IntegerField(nullable=True),
'is_shared_instance_path': fields.BooleanField(),
'is_shared_block_storage': fields.BooleanField(),
'instance_relative_path': fields.StringField(),
'graphics_listen_addr_vnc': fields.IPAddressField(nullable=True),
'graphics_listen_addr_spice': fields.IPAddressField(nullable=True),
'serial_listen_addr': fields.StringField(nullable=True),
'serial_listen_ports': fields.ListOfIntegersField(),
'bdms': fields.ListOfObjectsField('LibvirtLiveMigrateBDMInfo'),
'target_connect_addr': fields.StringField(nullable=True),
}
def obj_make_compatible(self, primitive, target_version):
super(LibvirtLiveMigrateData, self).obj_make_compatible(
primitive, target_version)
target_version = versionutils.convert_version_to_tuple(target_version)
if target_version < (1, 2):
if 'serial_listen_ports' in primitive:
del primitive['serial_listen_ports']
if target_version < (1, 1) and 'target_connect_addr' in primitive:
del primitive['target_connect_addr']
def _bdms_to_legacy(self, legacy):
if not self.obj_attr_is_set('bdms'):
return
legacy['volume'] = {}
for bdmi in self.bdms:
legacy['volume'][bdmi.serial] = {
'disk_info': bdmi.as_disk_info(),
'connection_info': bdmi.connection_info}
def _bdms_from_legacy(self, legacy_pre_result):
self.bdms = []
volume = legacy_pre_result.get('volume', {})
for serial in volume:
vol = volume[serial]
bdmi = objects.LibvirtLiveMigrateBDMInfo(serial=serial)
bdmi.connection_info = vol['connection_info']
bdmi.bus = vol['disk_info']['bus']
bdmi.dev = vol['disk_info']['dev']
bdmi.type = vol['disk_info']['type']
if 'format' in vol:
bdmi.format = vol['disk_info']['format']
if 'boot_index' in vol:
bdmi.boot_index = int(vol['disk_info']['boot_index'])
self.bdms.append(bdmi)
def to_legacy_dict(self, pre_migration_result=False):
LOG.debug('Converting to legacy: %s' % self)
legacy = super(LibvirtLiveMigrateData, self).to_legacy_dict()
keys = (set(self.fields.keys()) -
set(LiveMigrateData.fields.keys()) - {'bdms'})
legacy.update({k: getattr(self, k) for k in keys
if self.obj_attr_is_set(k)})
graphics_vnc = legacy.pop('graphics_listen_addr_vnc', None)
graphics_spice = legacy.pop('graphics_listen_addr_spice', None)
transport_target = legacy.pop('target_connect_addr', None)
live_result = {
'graphics_listen_addrs': {
'vnc': graphics_vnc and str(graphics_vnc),
'spice': graphics_spice and str(graphics_spice),
},
'serial_listen_addr': legacy.pop('serial_listen_addr', None),
'target_connect_addr': transport_target,
}
if pre_migration_result:
legacy['pre_live_migration_result'] = live_result
self._bdms_to_legacy(live_result)
LOG.debug('Legacy result: %s' % legacy)
return legacy
def from_legacy_dict(self, legacy):
LOG.debug('Converting legacy dict to obj: %s' % legacy)
super(LibvirtLiveMigrateData, self).from_legacy_dict(legacy)
keys = set(self.fields.keys()) - set(LiveMigrateData.fields.keys())
for k in keys - {'bdms'}:
if k in legacy:
setattr(self, k, legacy[k])
if 'pre_live_migration_result' in legacy:
pre_result = legacy['pre_live_migration_result']
self.graphics_listen_addr_vnc = \
pre_result['graphics_listen_addrs'].get('vnc')
self.graphics_listen_addr_spice = \
pre_result['graphics_listen_addrs'].get('spice')
self.target_connect_addr = pre_result.get('target_connect_addr')
if 'serial_listen_addr' in pre_result:
self.serial_listen_addr = pre_result['serial_listen_addr']
self._bdms_from_legacy(pre_result)
LOG.debug('Converted object: %s' % self)
def is_on_shared_storage(self):
return self.is_shared_block_storage or self.is_shared_instance_path
@obj_base.NovaObjectRegistry.register
class XenapiLiveMigrateData(LiveMigrateData):
VERSION = '1.0'
fields = {
'block_migration': fields.BooleanField(nullable=True),
'destination_sr_ref': fields.StringField(nullable=True),
'migrate_send_data': fields.DictOfStringsField(nullable=True),
'sr_uuid_map': fields.DictOfStringsField(),
'kernel_file': fields.StringField(),
'ramdisk_file': fields.StringField(),
}
def to_legacy_dict(self, pre_migration_result=False):
legacy = super(XenapiLiveMigrateData, self).to_legacy_dict()
if self.obj_attr_is_set('block_migration'):
legacy['block_migration'] = self.block_migration
if self.obj_attr_is_set('migrate_send_data'):
legacy['migrate_data'] = {
'migrate_send_data': self.migrate_send_data,
'destination_sr_ref': self.destination_sr_ref,
}
live_result = {
'sr_uuid_map': ('sr_uuid_map' in self and self.sr_uuid_map
or {}),
}
if pre_migration_result:
legacy['pre_live_migration_result'] = live_result
return legacy
def from_legacy_dict(self, legacy):
super(XenapiLiveMigrateData, self).from_legacy_dict(legacy)
if 'block_migration' in legacy:
self.block_migration = legacy['block_migration']
else:
self.block_migration = False
if 'migrate_data' in legacy:
self.migrate_send_data = \
legacy['migrate_data']['migrate_send_data']
self.destination_sr_ref = \
legacy['migrate_data']['destination_sr_ref']
if 'pre_live_migration_result' in legacy:
self.sr_uuid_map = \
legacy['pre_live_migration_result']['sr_uuid_map']
@obj_base.NovaObjectRegistry.register
class HyperVLiveMigrateData(LiveMigrateData):
VERSION = '1.0'
|
the-stack_106_17644
|
# Copyright (C) 2020 Electronic Arts Inc. All rights reserved.
import numpy
class Physics:
def __init__(self, game):
self.game = game
def Update(self, verbosity):
self.BoardCollisionUpdate(max(0, verbosity - 1))
self.PlayerCollisionUpdate(max(0, verbosity - 1))
def BoardCollisionUpdate(self, verbosity):
# rectify collisions against boards
arena = self.game.arena
radius = self.game.rules.player_radius
min_x = arena.min_x + radius
max_x = arena.max_x - radius
min_z = arena.min_z + radius
max_z = arena.max_z - radius
for player in self.game.players:
position = player.GetPosition(self.game)
velocity = player.GetVelocity(self.game)
if position[0] < min_x:
position[0] = min_x
velocity[0] = 0
if position[0] > max_x:
position[0] = max_x
velocity[0] = 0
if position[1] < min_z:
position[1] = min_z
velocity[1] = 0
if position[1] > max_z:
position[1] = max_z
velocity[1] = 0
player.SetPosition(self.game, position)
player.SetVelocity(self.game, velocity)
def PlayerCollisionUpdate(self, verbosity):
for player1 in self.game.players:
for player2 in self.game.players:
if player1 is player2:
continue
if numpy.linalg.norm(player1.GetPosition(self.game) - player2.GetPosition(
self.game)) <= self.game.rules.player_radius * 2:
control_player = self.game.control.GetControl()
if self.game.rules.enable_player_collisions:
center = (player1.GetPosition(self.game) + player2.GetPosition(
self.game)) * 0.5
avg_vel = (player1.GetVelocity(self.game) + player2.GetVelocity(
self.game)) * 0.5
for player in [player1, player2]:
delta = player.GetPosition(self.game) - center
dist = numpy.linalg.norm(delta)
if dist > 0.001:
direction = delta / dist
else:
direction = numpy.array([1.0, 0.0])
player.SetPosition(self.game,
center + direction * self.game.rules.player_radius * 1.01)
player.SetVelocity(self.game, avg_vel)
if player1 is control_player and player1.team_side != player2.team_side and player2.GetActionTime(
self.game) == 0:
self.game.CompleteCheck(player1, player2)
elif player2 is control_player and player2.team_side != player1.team_side and player1.GetActionTime(
self.game) == 0:
self.game.CompleteCheck(player2, player1)
def InterceptTest(self, source, target, players, verbosity):
        # Test each player in the list for interception; if no one intercepts,
        # the pass/shot succeeds.
        # Interception is a probability based on the ratio of the distance from
        # the interceptor to the intercept point and the distance of the
        # intercept point from the start.
        # As a baseline the probability is simply the ratio, so if the
        # interceptor distance is 0 (along the path) the interception is 100%,
        # and if the interceptor is just as far away it is 0%.
        # Interception priority goes to the player closest to the start.
traj_delta = (target - source)
traj_distance = numpy.linalg.norm(traj_delta) + 1e-10
traj_dir = traj_delta / traj_distance
if verbosity:
print('intercept test tick %d %f,%f to %f,%f' % (
self.game.tick, source[0], source[1], target[0], target[1]))
through_chance = 1.0
intercepting_player = None
shortest_intercept = traj_distance + 1.0
for player in players:
# project player onto trajectory to find unconstrained intercept point
player_source_delta = player.GetPosition(self.game) - source
intercept_source_dist = traj_dir.dot(player_source_delta)
# if behind the trajectory there is no intercept
if intercept_source_dist > 0.0:
# find distance from player to intercept
if intercept_source_dist > traj_distance:
# adjust intercept point if source or target is closer
if verbosity: print(
'player %s intercept_source_dist %f > %f moving intercept to target' % (
player.name, intercept_source_dist, traj_distance))
intercept = target
intercept_source_dist = traj_distance
else:
intercept = source + traj_dir * intercept_source_dist
player_intercept_dist = numpy.linalg.norm(player.GetPosition(self.game) - intercept)
closest_dist = max(0.0,
player_intercept_dist - self.game.rules.player_intercept_speed * intercept_source_dist)
if closest_dist > self.game.rules.max_intercept_dist:
prob = 0.0
else:
prob = (self.game.rules.max_intercept_chance - self.game.rules.min_intercept_chance) \
* closest_dist / self.game.rules.max_intercept_dist + self.game.rules.min_intercept_chance
through_chance *= 1.0 - prob
if closest_dist < shortest_intercept:
r = numpy.random.random()
# prob = 1.0 - self.game.rules.intercept_scale * player_intercept_dist / traj_distance
if r < prob:
intercepting_player = player
shortest_intercept = closest_dist
if verbosity: print(
'player %s random %f < probability %f so intercepted' % (
player.name, r, prob), 'closest_dist', closest_dist,
'player_intercept_dist', player_intercept_dist, 'intercept_source_dist',
intercept_source_dist)
else:
if verbosity: print(
'player %s random %f > probability %f so not intercepted' % (
player.name, r, prob), 'closest_dist', closest_dist,
'player_intercept_dist', player_intercept_dist, 'intercept_source_dist',
intercept_source_dist)
else:
if verbosity: print(
'player %s player_intercept_dist %f >= shortest_intercept %f so skipping' % (
player.name, player_intercept_dist, shortest_intercept))
else:
if verbosity: print('player %s intercept_source_dist %f is behind' % (
player.name, intercept_source_dist))
return intercepting_player, through_chance
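    # Worked example of the probability model above (rules values are
    # hypothetical): with player_intercept_speed=0.5 and an interceptor 4.0
    # units from an intercept point that lies 6.0 units along the trajectory,
    # closest_dist = max(0, 4.0 - 0.5 * 6.0) = 1.0; with max_intercept_dist=2.0,
    # min_intercept_chance=0.1 and max_intercept_chance=0.9 the interception
    # probability is (0.9 - 0.1) * 1.0 / 2.0 + 0.1 = 0.5.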
|
the-stack_106_17648
|
#!/usr/bin/env python
#
# Use the raw transactions API to spend bcashs received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a bcashd or BCash-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
"""Return the default location of the BCash data directory"""
if platform.system() == "Darwin":
return os.path.expanduser("~/Library/Application Support/BCashCore/")
elif platform.system() == "Windows":
return os.path.join(os.environ['APPDATA'], "BCashCore")
return os.path.expanduser("~/.bcashcore")
def read_bitcoin_config(dbdir):
"""Read the bcash.conf file from dbdir, returns dictionary of settings"""
from ConfigParser import SafeConfigParser
class FakeSecHead(object):
def __init__(self, fp):
self.fp = fp
self.sechead = '[all]\n'
def readline(self):
if self.sechead:
try: return self.sechead
finally: self.sechead = None
else:
s = self.fp.readline()
if s.find('#') != -1:
s = s[0:s.find('#')].strip() +"\n"
return s
config_parser = SafeConfigParser()
config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "bcash.conf"))))
return dict(config_parser.items("all"))
def connect_JSON(config):
"""Connect to a BCash JSON-RPC server"""
testnet = config.get('testnet', '0')
testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
if not 'rpcport' in config:
config['rpcport'] = 29800 if testnet else 19800
connect = "http://%s:%[email protected]:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
try:
result = ServiceProxy(connect)
# ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
# but also make sure the bcashd we're talking to is/isn't testnet:
if result.getmininginfo()['testnet'] != testnet:
sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
sys.exit(1)
return result
except:
sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
sys.exit(1)
def unlock_wallet(bcashd):
info = bcashd.getinfo()
if 'unlocked_until' not in info:
return True # wallet is not encrypted
t = int(info['unlocked_until'])
if t <= time.time():
try:
passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
bcashd.walletpassphrase(passphrase, 5)
except:
sys.stderr.write("Wrong passphrase\n")
info = bcashd.getinfo()
return int(info['unlocked_until']) > time.time()
def list_available(bcashd):
address_summary = dict()
address_to_account = dict()
for info in bcashd.listreceivedbyaddress(0):
address_to_account[info["address"]] = info["account"]
unspent = bcashd.listunspent(0)
for output in unspent:
# listunspent doesn't give addresses, so:
rawtx = bcashd.getrawtransaction(output['txid'], 1)
vout = rawtx["vout"][output['vout']]
pk = vout["scriptPubKey"]
# This code only deals with ordinary pay-to-bcash-address
# or pay-to-script-hash outputs right now; anything exotic is ignored.
if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
continue
address = pk["addresses"][0]
if address in address_summary:
address_summary[address]["total"] += vout["value"]
address_summary[address]["outputs"].append(output)
else:
address_summary[address] = {
"total" : vout["value"],
"outputs" : [output],
"account" : address_to_account.get(address, "")
}
return address_summary
def select_coins(needed, inputs):
# Feel free to improve this, this is good enough for my simple needs:
outputs = []
have = Decimal("0.0")
n = 0
while have < needed and n < len(inputs):
outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]})
have += inputs[n]["amount"]
n += 1
return (outputs, have-needed)
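# Illustrative run (hypothetical amounts): needing Decimal('1.5') with unspent
# inputs of 1.0 and 0.7 BTC, select_coins returns both outpoints and a change
# amount of Decimal('0.2'), which create_tx later sends back to the last
# from-address.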
def create_tx(bcashd, fromaddresses, toaddress, amount, fee):
all_coins = list_available(bcashd)
total_available = Decimal("0.0")
needed = amount+fee
potential_inputs = []
for addr in fromaddresses:
if addr not in all_coins:
continue
potential_inputs.extend(all_coins[addr]["outputs"])
total_available += all_coins[addr]["total"]
if total_available < needed:
sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed));
sys.exit(1)
#
# Note:
# Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
# Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
# Decimals, I'm casting amounts to float before sending them to bcashd.
#
outputs = { toaddress : float(amount) }
(inputs, change_amount) = select_coins(needed, potential_inputs)
if change_amount > BASE_FEE: # don't bother with zero or tiny change
change_address = fromaddresses[-1]
if change_address in outputs:
outputs[change_address] += float(change_amount)
else:
outputs[change_address] = float(change_amount)
rawtx = bcashd.createrawtransaction(inputs, outputs)
signed_rawtx = bcashd.signrawtransaction(rawtx)
if not signed_rawtx["complete"]:
sys.stderr.write("signrawtransaction failed\n")
sys.exit(1)
txdata = signed_rawtx["hex"]
return txdata
def compute_amount_in(bcashd, txinfo):
result = Decimal("0.0")
for vin in txinfo['vin']:
in_info = bcashd.getrawtransaction(vin['txid'], 1)
vout = in_info['vout'][vin['vout']]
result = result + vout['value']
return result
def compute_amount_out(txinfo):
result = Decimal("0.0")
for vout in txinfo['vout']:
result = result + vout['value']
return result
def sanity_test_fee(bcashd, txdata_hex, max_fee):
class FeeError(RuntimeError):
pass
try:
txinfo = bcashd.decoderawtransaction(txdata_hex)
total_in = compute_amount_in(bcashd, txinfo)
total_out = compute_amount_out(txinfo)
if total_in-total_out > max_fee:
raise FeeError("Rejecting transaction, unreasonable fee of "+str(total_in-total_out))
tx_size = len(txdata_hex)/2
kb = tx_size/1000 # integer division rounds down
if kb > 1 and fee < BASE_FEE:
raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
if total_in < 0.01 and fee < BASE_FEE:
raise FeeError("Rejecting no-fee, tiny-amount transaction")
# Exercise for the reader: compute transaction priority, and
# warn if this is a very-low-priority transaction
except FeeError as err:
sys.stderr.write((str(err)+"\n"))
sys.exit(1)
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--from", dest="fromaddresses", default=None,
help="addresses to get bcashs from")
parser.add_option("--to", dest="to", default=None,
help="address to get send bcashs to")
parser.add_option("--amount", dest="amount", default=None,
help="amount to send")
parser.add_option("--fee", dest="fee", default="0.0",
help="fee to include")
parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
help="location of bcash.conf file with RPC username/password (default: %default)")
parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
help="Use the test network")
parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
help="Don't broadcast the transaction, just create and print the transaction data")
(options, args) = parser.parse_args()
check_json_precision()
config = read_bitcoin_config(options.datadir)
if options.testnet: config['testnet'] = True
bcashd = connect_JSON(config)
if options.amount is None:
address_summary = list_available(bcashd)
for address,info in address_summary.iteritems():
n_transactions = len(info['outputs'])
if n_transactions > 1:
print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
else:
print("%s %.8f %s"%(address, info['total'], info['account']))
else:
fee = Decimal(options.fee)
amount = Decimal(options.amount)
while unlock_wallet(bcashd) == False:
pass # Keep asking for passphrase until they get it right
txdata = create_tx(bcashd, options.fromaddresses.split(","), options.to, amount, fee)
sanity_test_fee(bcashd, txdata, amount*Decimal("0.01"))
if options.dry_run:
print(txdata)
else:
txid = bcashd.sendrawtransaction(txdata)
print(txid)
if __name__ == '__main__':
main()
|
the-stack_106_17649
|
class Student:
def __init__(self,name):
self.name = name
self.exp = 0
self.lesson = 0
#call Function
#self.AddEXP(10)
#student1.name
# self = student1
def Hello(self):
        print('Hello! My name is {}'.format(self.name))
    def Coding(self):
        print('{}: coding a program...'.format(self.name))
        self.exp += 5
        self.lesson += 1
    def ShowEXP(self):
        print('- {} has {} EXP'.format(self.name,self.exp))
        print('- has attended {} lessons so far'.format(self.lesson))
def AddEXP(self,score):
self.exp += score #self.exp = self.exp + score
self.lesson += 1
class SpecialStudent(Student):
def __init__(self,name,father):
super().__init__(name)
self.father = father
mafia = ['Bill Gates','Thomas Edison']
if father in mafia:
self.exp += 100
def AddEXP(self,score):
self.exp += (score*3) #self.exp = self.exp + score
self.lesson += 1
def AskEXP(self,score=10):
        print('Teacher!! Please give me {} bonus EXP'.format(score))
self.AddEXP(score)
if __name__ == '__main__':
print('======1 Jan======')
student0 = SpecialStudent('Mark Zuckerberg','Bill Gates')
student0.ShowEXP()
student1 = Student('Albert')
print(student1.name)
student1.Hello()
print('----------')
student2 = Student('Steve')
print(student2.name)
student2.Hello()
print('======2 Jan=====')
    print('-----uncle: who wants to learn coding?---(10 exp)----')
student1.AddEXP(10)
print('======3 Jan=====')
student1.name = 'Albert Einstein'
    print('How much EXP does everyone have now?')
print(student1.name,student1.exp)
print(student2.name,student2.exp)
print('======4 Jan=====')
for i in range(5):
student2.Coding()
student1.ShowEXP()
student2.ShowEXP()
|
the-stack_106_17652
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-variable, unused-argument
"""Schedule for pooling operators"""
import tvm
from .. import tag
from .. import generic
@generic.schedule_adaptive_pool.register(["opengl"])
def schedule_adaptive_pool(outs):
"""Schedule for adaptive pool.
Parameters
----------
outs: Array of Tensor
The computation graph description of global_pool
in the format of an array of tensors.
Returns
-------
s: Schedule
The computation schedule for adaptive pool.
"""
outs = [outs] if isinstance(outs, tvm.tensor.Tensor) else outs
s = tvm.create_schedule([x.op for x in outs])
scheduled_ops = []
def _schedule(Pool):
if Pool.op in s.outputs:
Out = Pool
else:
Out = outs[0].op.output(0)
s[Pool].opengl()
s[Out].opengl()
def traverse(OP):
"""Internal traverse function"""
# inline all one-to-one-mapping operators except the last stage (output)
if tag.is_broadcast(OP.tag):
if OP not in s.outputs:
s[OP].opengl()
for tensor in OP.input_tensors:
if isinstance(tensor.op, tvm.tensor.ComputeOp) and tensor.op not in scheduled_ops:
traverse(tensor.op)
# schedule global_pool
elif OP.tag.startswith('adaptive_pool'):
Pool = OP.output(0)
_schedule(Pool)
else:
raise RuntimeError("Unsupported operator: %s" % OP.tag)
scheduled_ops.append(OP)
traverse(outs[0].op)
return s
@generic.schedule_pool.register(["opengl"])
def schedule_pool(outs, layout):
"""Schedule for pool.
Parameters
----------
outs: Array of Tensor
The computation graph description of pool
in the format of an array of tensors.
layout: str
Data layout.
Returns
-------
s: Schedule
The computation schedule for pool.
"""
outs = [outs] if isinstance(outs, tvm.tensor.Tensor) else outs
s = tvm.create_schedule([x.op for x in outs])
scheduled_ops = []
def _schedule(PaddedInput, Pool):
if isinstance(PaddedInput.op, tvm.tensor.ComputeOp):
s[PaddedInput].opengl()
if Pool.op in s.outputs:
Out = Pool
else:
Out = outs[0].op.output(0)
s[Pool].opengl()
s[Out].opengl()
def traverse(OP):
"""Internal traverse function"""
# inline all one-to-one-mapping operators except the last stage (output)
if tag.is_broadcast(OP.tag):
if OP not in s.outputs:
s[OP].compute_inline()
for tensor in OP.input_tensors:
if tensor.op not in scheduled_ops and isinstance(tensor.op, tvm.tensor.ComputeOp):
traverse(tensor.op)
# schedule pool
elif OP.tag.startswith('pool'):
PaddedInput = OP.input_tensors[0]
Pool = OP.output(0)
_schedule(PaddedInput, Pool)
else:
raise RuntimeError("Unsupported operator: %s" % OP.tag)
scheduled_ops.append(OP)
traverse(outs[0].op)
return s
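# Usage sketch (tensors are hypothetical): given outs produced by a topi pooling
# compute, something like
#   s = schedule_pool(outs, layout="NCHW")
# returns a schedule whose padding and pool stages are marked for OpenGL codegen
# via s[stage].opengl().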
|
the-stack_106_17654
|
import json
from starlette.testclient import TestClient
from syndio_backend_test import api, main, sql
DATA = [
{"id": 1, "gender": "male"},
{"id": 2, "gender": "male"},
{"id": 3, "gender": "male"},
{"id": 4, "gender": "female"},
{"id": 5, "gender": "female"},
{"id": 6, "gender": "female"},
]
def test_sql():
assert list(sql.get_rows(sql.get_db_conn(), "employees")) == DATA
def test_api():
res = TestClient(api.API).get("employees")
assert res.status_code == 200
assert res.json() == DATA
|
the-stack_106_17655
|
# Adapted from test_file.py by Daniel Stutzbach
#from __future__ import unicode_literals
import sys
import os
import unittest
from array import array
from weakref import proxy
from test.test_support import (TESTFN, findfile, check_warnings, run_unittest,
make_bad_fd)
from UserList import UserList
import _fileio
class AutoFileTests(unittest.TestCase):
# file tests for which a test file is automatically set up
def setUp(self):
self.f = _fileio._FileIO(TESTFN, 'w')
def tearDown(self):
if self.f:
self.f.close()
os.remove(TESTFN)
def testWeakRefs(self):
# verify weak references
p = proxy(self.f)
p.write(bytes(range(10)))
self.assertEquals(self.f.tell(), p.tell())
self.f.close()
self.f = None
self.assertRaises(ReferenceError, getattr, p, 'tell')
def testSeekTell(self):
self.f.write(bytes(bytearray(range(20))))
self.assertEquals(self.f.tell(), 20)
self.f.seek(0)
self.assertEquals(self.f.tell(), 0)
self.f.seek(10)
self.assertEquals(self.f.tell(), 10)
self.f.seek(5, 1)
self.assertEquals(self.f.tell(), 15)
self.f.seek(-5, 1)
self.assertEquals(self.f.tell(), 10)
self.f.seek(-5, 2)
self.assertEquals(self.f.tell(), 15)
def testAttributes(self):
# verify expected attributes exist
f = self.f
self.assertEquals(f.mode, "wb")
self.assertEquals(f.closed, False)
# verify the attributes are readonly
for attr in 'mode', 'closed':
self.assertRaises((AttributeError, TypeError),
setattr, f, attr, 'oops')
def testReadinto(self):
# verify readinto
self.f.write(bytes(bytearray([1, 2])))
self.f.close()
a = array('b', b'x'*10)
self.f = _fileio._FileIO(TESTFN, 'r')
n = self.f.readinto(a)
self.assertEquals(array('b', [1, 2]), a[:n])
def testRepr(self):
self.assertEquals(repr(self.f),
"_fileio._FileIO(%d, %s)" % (self.f.fileno(),
repr(self.f.mode)))
def testErrors(self):
f = self.f
self.assert_(not f.isatty())
self.assert_(not f.closed)
#self.assertEquals(f.name, TESTFN)
self.assertRaises(ValueError, f.read, 10) # Open for reading
f.close()
self.assert_(f.closed)
f = _fileio._FileIO(TESTFN, 'r')
self.assertRaises(TypeError, f.readinto, "")
self.assert_(not f.closed)
f.close()
self.assert_(f.closed)
def testMethods(self):
methods = ['fileno', 'isatty', 'read', 'readinto',
'seek', 'tell', 'truncate', 'write', 'seekable',
'readable', 'writable']
if sys.platform.startswith('atheos'):
methods.remove('truncate')
self.f.close()
self.assert_(self.f.closed)
for methodname in methods:
method = getattr(self.f, methodname)
# should raise on closed file
self.assertRaises(ValueError, method)
def testOpendir(self):
# Issue 3703: opening a directory should fill the errno
# Windows always returns "[Errno 13]: Permission denied
# Unix calls dircheck() and returns "[Errno 21]: Is a directory"
try:
_fileio._FileIO('.', 'r')
except IOError as e:
self.assertNotEqual(e.errno, 0)
self.assertEqual(e.filename, ".")
else:
self.fail("Should have raised IOError")
class OtherFileTests(unittest.TestCase):
def testAbles(self):
try:
f = _fileio._FileIO(TESTFN, "w")
self.assertEquals(f.readable(), False)
self.assertEquals(f.writable(), True)
self.assertEquals(f.seekable(), True)
f.close()
f = _fileio._FileIO(TESTFN, "r")
self.assertEquals(f.readable(), True)
self.assertEquals(f.writable(), False)
self.assertEquals(f.seekable(), True)
f.close()
f = _fileio._FileIO(TESTFN, "a+")
self.assertEquals(f.readable(), True)
self.assertEquals(f.writable(), True)
self.assertEquals(f.seekable(), True)
self.assertEquals(f.isatty(), False)
f.close()
if sys.platform != "win32":
try:
f = _fileio._FileIO("/dev/tty", "a")
except EnvironmentError:
# When run in a cron job there just aren't any
# ttys, so skip the test. This also handles other
# OS'es that don't support /dev/tty.
pass
else:
f = _fileio._FileIO("/dev/tty", "a")
self.assertEquals(f.readable(), False)
self.assertEquals(f.writable(), True)
if sys.platform != "darwin" and \
not sys.platform.startswith('freebsd') and \
not sys.platform.startswith('sunos'):
# Somehow /dev/tty appears seekable on some BSDs
self.assertEquals(f.seekable(), False)
self.assertEquals(f.isatty(), True)
f.close()
finally:
os.unlink(TESTFN)
def testModeStrings(self):
# check invalid mode strings
for mode in ("", "aU", "wU+", "rw", "rt"):
try:
f = _fileio._FileIO(TESTFN, mode)
except ValueError:
pass
else:
f.close()
self.fail('%r is an invalid file mode' % mode)
def testUnicodeOpen(self):
# verify repr works for unicode too
f = _fileio._FileIO(str(TESTFN), "w")
f.close()
os.unlink(TESTFN)
def testInvalidFd(self):
self.assertRaises(ValueError, _fileio._FileIO, -10)
self.assertRaises(OSError, _fileio._FileIO, make_bad_fd())
def testBadModeArgument(self):
# verify that we get a sensible error message for bad mode argument
bad_mode = "qwerty"
try:
f = _fileio._FileIO(TESTFN, bad_mode)
except ValueError as msg:
if msg.args[0] != 0:
s = str(msg)
if s.find(TESTFN) != -1 or s.find(bad_mode) == -1:
self.fail("bad error message for invalid mode: %s" % s)
# if msg.args[0] == 0, we're probably on Windows where there may be
# no obvious way to discover why open() failed.
else:
f.close()
self.fail("no error for invalid mode: %s" % bad_mode)
def testTruncateOnWindows(self):
def bug801631():
# SF bug <http://www.python.org/sf/801631>
# "file.truncate fault on windows"
f = _fileio._FileIO(TESTFN, 'w')
f.write(bytes(bytearray(range(11))))
f.close()
f = _fileio._FileIO(TESTFN,'r+')
data = f.read(5)
if data != bytes(bytearray(range(5))):
self.fail("Read on file opened for update failed %r" % data)
if f.tell() != 5:
self.fail("File pos after read wrong %d" % f.tell())
f.truncate()
if f.tell() != 5:
self.fail("File pos after ftruncate wrong %d" % f.tell())
f.close()
size = os.path.getsize(TESTFN)
if size != 5:
self.fail("File size after ftruncate wrong %d" % size)
try:
bug801631()
finally:
os.unlink(TESTFN)
def testAppend(self):
try:
f = open(TESTFN, 'wb')
f.write(b'spam')
f.close()
f = open(TESTFN, 'ab')
f.write(b'eggs')
f.close()
f = open(TESTFN, 'rb')
d = f.read()
f.close()
self.assertEqual(d, b'spameggs')
finally:
try:
os.unlink(TESTFN)
except:
pass
def testInvalidInit(self):
self.assertRaises(TypeError, _fileio._FileIO, "1", 0, 0)
def testWarnings(self):
with check_warnings() as w:
self.assertEqual(w.warnings, [])
self.assertRaises(TypeError, _fileio._FileIO, [])
self.assertEqual(w.warnings, [])
self.assertRaises(ValueError, _fileio._FileIO, "/some/invalid/name", "rt")
self.assertEqual(w.warnings, [])
def test_main():
# Historically, these tests have been sloppy about removing TESTFN.
# So get rid of it no matter what.
try:
run_unittest(AutoFileTests, OtherFileTests)
finally:
if os.path.exists(TESTFN):
os.unlink(TESTFN)
if __name__ == '__main__':
test_main()
|
the-stack_106_17661
|
#!/usr/bin/env python3
#
# Copyright 2021 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Classes and functions borrowed from https://opendev.org/zuul/zuul
#
import collections
from copy import deepcopy
import io
import yaml
from znoyder.lib import logger
from znoyder.lib import utils
from znoyder.lib.exceptions import JobTypeError
from znoyder.lib.exceptions import TriggerTypeError
from znoyder.lib.exceptions import YAMLDuplicateKeyError
LOG = logger.LOG
class JobTriggerType(object):
"""Enumeration for the Job Trigger Type"""
# TEMPLATES is treated at the same level as the trigger types
RANGE = (TEMPLATES, CHECK, GATE, POST, EXPERIMENTAL) = range(5)
@staticmethod
def to_str(column) -> str:
return {
JobTriggerType.TEMPLATES: 'templates',
JobTriggerType.CHECK: 'check',
JobTriggerType.GATE: 'gate',
JobTriggerType.POST: 'post',
JobTriggerType.EXPERIMENTAL: 'experimental',
}[column]
@staticmethod
def to_type(trigger) -> int:
types = {
'templates': JobTriggerType.TEMPLATES,
'check': JobTriggerType.CHECK,
'gate': JobTriggerType.GATE,
'post': JobTriggerType.POST,
'experimental': JobTriggerType.EXPERIMENTAL,
}
if trigger not in types:
raise TriggerTypeError("Job trigger type is not valid: %s" %
trigger)
return types[trigger]
@staticmethod
def get_job_types_str(job_trigger_types) -> list:
"""Gets job types as strings
Args:
job_trigger_types (:obj:`list`): Trigger types as JobTriggerType
Returns:
(:obj:`list`): list of trigger types as strings
"""
# Convert to list if one trigger type was passed
job_trigger_types = [job_trigger_types] if type(job_trigger_types) \
is int else job_trigger_types
for job_type in job_trigger_types:
if job_type not in JobTriggerType.RANGE:
raise JobTypeError("Job type is not valid: %s" %
job_trigger_types)
# We are interested only in jobs defined in the project
trigger_types = list(map(lambda x: JobTriggerType.to_str(x),
job_trigger_types))
return trigger_types
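# Illustrative usage sketch (not part of the upstream module; the values
# follow directly from the mappings above):
#
#     JobTriggerType.to_str(JobTriggerType.CHECK)    # -> 'check'
#     JobTriggerType.to_type('gate')                 # -> JobTriggerType.GATE
#     JobTriggerType.get_job_types_str(
#         [JobTriggerType.CHECK, JobTriggerType.GATE])   # -> ['check', 'gate']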
class ZuulMark:
# The yaml mark class differs between the C and python versions.
# The C version does not provide a snippet, and also appears to
# lose data under some circumstances.
def __init__(self, start_mark, end_mark, stream):
self.name = start_mark.name
self.index = start_mark.index
self.line = start_mark.line
self.end_line = end_mark.line
self.end_index = end_mark.index
self.column = start_mark.column
self.end_column = end_mark.column
self.snippet = stream[start_mark.index:end_mark.index]
def __str__(self):
return ' in "{name}", line {line}, column {column}'.format(
name=self.name,
line=self.line + 1,
column=self.column + 1,
)
def __eq__(self, other):
return (self.line == other.line and
self.snippet == other.snippet)
def serialize(self):
return {
"name": self.name,
"index": self.index,
"line": self.line,
"end_line": self.end_line,
"end_index": self.end_index,
"column": self.column,
"end_column": self.end_column,
"snippet": self.snippet,
}
@classmethod
def deserialize(cls, data):
o = cls.__new__(cls)
o.__dict__.update(data)
return o
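# Illustrative round-trip sketch (assumed usage, not from the upstream code):
# given a ZuulMark instance `mark`, it can be serialized to a plain dict and
# restored without re-parsing the YAML stream:
#
#     data = mark.serialize()
#     restored = ZuulMark.deserialize(data)
#     assert restored == mark   # __eq__ compares only line and snippet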
# See the ZuulSafeLoader class in configloader.py from the zuul project
class ZuulSafeLoader(yaml.SafeLoader):
zuul_node_types = frozenset(('job', 'nodeset', 'secret', 'pipeline',
'project', 'project-template',
'semaphore', 'queue', 'pragma'))
def __init__(self, stream, context):
wrapped_stream = io.StringIO(stream)
wrapped_stream.name = str(context)
super(ZuulSafeLoader, self).__init__(wrapped_stream)
self.add_multi_constructor('!encrypted/', self.construct_encrypted)
self.name = str(context)
self.zuul_context = context
self.zuul_stream = stream
@classmethod
def construct_encrypted(cls, loader, tag_suffix, node):
return loader.construct_sequence(node)
def construct_mapping(self, node, deep=False):
keys = set()
for k, v in node.value:
# The key << needs to be treated specially since that will merge
# the anchor into the mapping and not create a key on its own.
if k.value == '<<':
continue
if not isinstance(k.value, collections.abc.Hashable):
# This happens with "foo: {{ bar }}"
# This will raise an error in the superclass
# construct_mapping below; ignore it for now.
continue
if k.value in keys:
mark = ZuulMark(node.start_mark, node.end_mark,
self.zuul_stream)
raise YAMLDuplicateKeyError(k.value, node, self.zuul_context,
mark)
keys.add(k.value)
r = super(ZuulSafeLoader, self).construct_mapping(node, deep)
keys = frozenset(r.keys())
if len(keys) == 1 and keys.intersection(self.zuul_node_types):
d = list(r.values())[0]
if isinstance(d, dict):
d['_start_mark'] = ZuulMark(node.start_mark,
node.end_mark,
self.zuul_stream)
d['_source_context'] = self.zuul_context
return r
class ZuulProject(object):
"""A Project represents top level component.
It may define or use jobs directly as well job templates.
Args:
project_name (:obj:`str`): Name of the project e.g. neutron
project_path (:obj:`str`): Local path to the project directory
"""
def __init__(self, project_name=None, project_path=None, templates=None):
if templates is None:
templates = []
self.project_name = project_name
self.project_path = project_path
self.all_templates = templates # defined by other projects
self.project_templates = [] # used by this project
self.defined_templates = [] # defined by this project
self.project_jobs = []
self.config_paths = []
if project_path and not self.project_name:
self.project_name = project_path.strip("/").split("/")[-1]
def get_project_config_files(self) -> list:
if self.config_paths:
return self.config_paths
self.config_paths = utils.get_config_paths(self.project_path)
return self.config_paths
def get_list_of_jobs(self, job_trigger_types=None) -> list:
"""Gets list of jobs for a particular project.
This does not include jobs defined under templates.
Args:
job_trigger_types (:obj:`list`): Trigger types as JobTriggerType
Returns:
(:obj:`list`): list of ZuulJob objects associated with the project
"""
if job_trigger_types is None:
job_trigger_types = []
trigger_types = JobTriggerType.get_job_types_str(job_trigger_types)
LOG.debug('Discovering jobs for trigger: %s' %
trigger_types)
CASE = 'project'
for config_file in self.get_project_config_files():
projects = self._get_entries_from_config(config_file, CASE)
for project in projects:
for trigger_type in trigger_types:
if trigger_type in project:
jobs = project.get(trigger_type).get('jobs')
if not jobs:
continue
for job in jobs:
z_jobs = self._get_jobs_from_entry(job,
trigger_type)
self.project_jobs.extend(z_jobs)
return self.project_jobs
def get_list_of_used_templates(self) -> list:
"""Gets list of templates used by a project.
Returns:
(:obj:`list`): list of ZuulProjectTemplate objects
associated with the project
"""
LOG.debug('Discovering templates')
CASE = 'project'
for config_file in self.get_project_config_files():
project = self._get_entries_from_config(config_file, CASE)
template_str = JobTriggerType.to_str(JobTriggerType.TEMPLATES)
for used_template in project:
if template_str in used_template:
templates = used_template.get(template_str)
LOG.debug("Found templates %s" % templates)
for template in templates:
zuul_template = self._get_available_template(template)
self.project_templates.append(zuul_template)
return self.project_templates
def _get_available_template(self, template_name) -> object:
"""Gets a template from those passed in by other projects
Args:
template_name (:obj:`str`): Name of the template
Returns:
(:obj:`ZuulProjectTemplate`): project template
"""
found = False
template_obj = None
for template in self.all_templates:
if str(template) == template_name:
found = True
template_obj = template
break
if not found:
LOG.warning("Used template not found in base templates: %s"
% template_name)
template_obj = ZuulProjectTemplate(template_name)
return template_obj
def get_list_of_defined_templates(self, job_trigger_types=None) -> list:
"""Gets list of templates defined in a project.
Args:
job_trigger_types (:obj:`list`): Trigger types as JobTriggerType
Returns:
(:obj:`list`): list of ZuulProjectTemplate objects
associated with the project
"""
if job_trigger_types is None:
job_trigger_types = []
trigger_types = JobTriggerType.get_job_types_str(job_trigger_types)
LOG.debug('Discovering templates and jobs for trigger: %s' %
trigger_types)
CASE = 'project-template'
for config_file in self.get_project_config_files():
project = self._get_entries_from_config(config_file, CASE)
for template in project:
p_template = ZuulProjectTemplate(template.get('name'),
self.project_name,
template)
for trigger in trigger_types:
if trigger in template:
jobs = template.get(trigger).get('jobs')
for job_r in jobs:
job = self._get_jobs_from_entry(job_r, trigger)
p_template.associate_job(job)
self.defined_templates.append(p_template)
return self.defined_templates
def _get_entries_from_config(self, config_file, config_section) -> list:
"""Helper function to get part of the config
Args:
config_file (:obj:`str`): path to the config file
config_section (:obj:`str`): config section
Returns:
(:obj:`list`): partial config
"""
LOG.debug('Discovering section "%s" from config: %s' %
(config_section, config_file))
config = []
with open(config_file, 'r') as file:
data = file.read()
loader = ZuulSafeLoader(data, 'null').get_single_data()
for entry in loader:
if config_section in entry:
config.append(entry.get(config_section))
return config
def _get_jobs_from_entry(self, job_entry, trigger_type) -> list:
"""Helper function to standardize job object as the zuul job entry
may be just a string or dictionary.
Args:
job_entry (:obj:`str`): zuul entry for the job
trigger_type (:obj:`str`): Trigger type as JobTriggerType
Returns:
(:obj:`list`): list of ZuulJob objects associated with the project
"""
jobs = []
if isinstance(job_entry, str):
LOG.debug('Found %s job: %s' % (trigger_type, job_entry))
jobs.append(ZuulJob(job_entry, trigger_type, {}))
else:
for job_name, job_data in job_entry.items():
LOG.debug('Found %s job: %s with options %s' %
(trigger_type, job_name, job_data))
jobs.append(ZuulJob(job_name, trigger_type, job_data))
return jobs
class ZuulProjectTemplate(object):
"""Project Template defines jobs which can be used by multiple
Zuul Projects.
Args:
template_name (:obj:`str`): Template name
"""
def __init__(self, template_name, template_project=None,
template_data=None):
if template_data is None:
template_data = {}
# Project that defines template
self.template_project = template_project
self.template_name = template_name
self.template_data = template_data
self.template_jobs = []
def associate_job(self, job):
if job not in self.template_jobs:
self.template_jobs.extend(job)
else:
LOG.warning('JOB %s defined multiple times' % job)
def get_jobs(self, job_trigger_type):
"""Get jobs associated with template with specific trigger type
Args:
job_trigger_type(:obj:`JobTriggerType`): Trigger type
Returns:
(:obj:`list`): list of ZuulJob objects associated with the project
"""
jobs = []
for job in self.template_jobs:
trigger_type = JobTriggerType.to_type(job.job_trigger_type)
if trigger_type in job_trigger_type:
jobs.append(job)
return jobs
def __str__(self) -> str:
return self.template_name
def __repr__(self) -> str:
return self.template_name
class ZuulJob(object):
"""Zuul Job representation
Args:
job_name (:obj:`str`): Job name
job_trigger_type(:obj:`str`): Trigger type e.g. check/gate/post
job_data(:obj:`dict`): JSON job data
"""
def __init__(self, job_name, job_trigger_type, job_data=None):
if job_data is None:
job_data = {}
self.job_name = job_name
self.job_trigger_type = job_trigger_type
self.job_data = deepcopy(job_data)
def __str__(self) -> str:
return self.job_name
def __repr__(self) -> str:
return self.job_name
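# Minimal end-to-end sketch (the project path below is hypothetical):
#
#     project = ZuulProject(project_path='/tmp/src/neutron')
#     jobs = project.get_list_of_jobs([JobTriggerType.CHECK, JobTriggerType.GATE])
#     templates = project.get_list_of_used_templates()
#
# Both calls scan the zuul config files found under the project path and
# return ZuulJob / ZuulProjectTemplate objects respectively.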
|
the-stack_106_17662
|
# -*- coding: utf-8 -*-
import logging
from airflow.hooks.postgres_hook import PostgresHook as AirflowPostgresHook
from airflow.exceptions import AirflowException
class PostgresHook(AirflowPostgresHook):
def update_row(self, table, rows, primary_key=None, commit_every=0):
"""
A generic way to update a set of rows in a table;
the whole set of updates is treated as one transaction
:param table: Name of the target table
:type table: str
:param rows: The rows to update in the table
:type rows: iterable of namedtuple
:param primary_key: The name of the primary key column
:type primary_key: str
:param commit_every: The maximum number of rows to update in one
transaction. Set to 0 to update all rows in one transaction.
:type commit_every: int
"""
conn = self.get_conn()
if self.supports_autocommit:
self.set_autocommit(conn, False)
conn.commit()
cur = conn.cursor()
row_number = 0
row_total = len(rows)
for row in rows:
row_number += 1
# l = ["%s = '%s'" % (k, v) for k, v in row.iteritems()]
if not hasattr(row, primary_key):
raise AirflowException('Row without ID for update: {}...'.format(row))
where = "%s = %s" % (primary_key, self._serialize_cell(getattr(row, primary_key), conn))
values = ["%s = %s" % (field, self._serialize_cell(getattr(row, field), conn)) for field in row._fields]
logging.info("Updating line {} of {}. {}...".format(
row_number,
row_total,
where
)
)
cur.execute(
"UPDATE {0} SET {1} WHERE {2};".format(
table,
", ".join(values),
where
)
)
if commit_every and row_number % commit_every == 0:
conn.commit()
logging.info("Loaded {row_number} into {table} rows so far...".format(**locals()))
conn.commit()
cur.close()
conn.close()
logging.info("Done loading. Loaded a total of {row_number} rows...".format(**locals()))
|
the-stack_106_17663
|
import json
import os
import sys
import pika
import spotipy
from spotipy.oauth2 import SpotifyClientCredentials
import imports.broker as broker
import imports.db as db
from imports.logging import get_logger
import imports.requests
READING_QUEUE_NAME = "albums"
WRITING_QUEUE_NAME = "tracks"
MAX_RETRY_ATTEMPTS = 10
log = get_logger(os.path.basename(__file__))
def main():
def update_album(cursor, data):
copyrights = []
for copyright in data["copyrights"]:
copyrights.append(copyright["text"])
try:
cursor.execute(
"UPDATE albums SET popularity=%s, label=%s, copyrights=%s WHERE spotify_id=%s;",
(
data["popularity"],
data["label"],
copyrights,
data["id"],
),
)
except Exception as e:
log.error("Unhandled exception", exception=e, exc_info=True)
else:
log.info("💿 Album's details updated", spotify_id=data["id"], object="album")
consume_channel = broker.create_channel(READING_QUEUE_NAME)
publish_channel = broker.create_channel(WRITING_QUEUE_NAME)
db_connection, cursor = db.init_connection()
sp = spotipy.Spotify(
auth_manager=SpotifyClientCredentials(),
retries=3,
status_retries=3,
backoff_factor=0.3,
)
# Build an artist data lookup (name -> [genres, popularity, followers]) to enrich the tracks
cursor.execute("SELECT name, genres, popularity, followers FROM artists")
artist_data = {
artist_data[0]: [artist_data[1], artist_data[2], artist_data[3]]
for artist_data in cursor.fetchall()
}
def callback(ch, method, properties, body):
message = json.loads(body.decode())
album_id = message["spotify_id"]
album_name = message["album_name"]
album_artist = message["album_artist"]
album_artist_spotify_id = message["album_artist_spotify_id"]
log.info(
"💿 Processing album", spotify_id=album_id, album=album_name, object="album"
)
attempts = 0
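# Retry the album lookup up to MAX_RETRY_ATTEMPTS times. Note that if every
# attempt raises, `result` is never assigned and update_album() below will
# fail with a NameError.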
while attempts < MAX_RETRY_ATTEMPTS:
try:
result = sp.album(album_id=album_id)
log.info(
"Trying API request",
attempt=attempts,
spotify_id=album_id,
object="album",
)
break
except Exception as e:
attempts += 1
log.exception(
"Unhandled exception",
exception=e,
attempt=attempts,
spotify_id=album_id,
object="album",
exc_info=True,
)
update_album(cursor, result)
tracks = result["tracks"]["items"]
album_release_date = result["release_date"]
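# Spotify reports release_date with year, year-month, or full day precision;
# pad partial dates to a full ISO date so they fit a SQL DATE column. Year
# "0000" is mapped to "0001", presumably because year zero is not a valid date.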
if len(album_release_date) == 4:
if "0000" == album_release_date:
album_release_date = "0001"
album_release_date += "-01-01"
elif len(album_release_date) == 7:
album_release_date += "-01"
for i, item in enumerate(tracks):
artists = []
genres = []
main_artist_popularity = None
main_artist_followers = None
for artist in item["artists"]:
artists.append(artist["name"])
# We use the data of the last artist on the track that exists in the `artists` table.
# We should either grab that data from Spotify
# TODO: or take the maximum across the artists we have (the most popular is the most important)
if artist["name"] in artist_data:
genres.extend(artist_data[artist["name"]][0])
main_artist_popularity = artist_data[artist["name"]][1]
main_artist_followers = artist_data[artist["name"]][2]
genres = list(set(genres))
try:
cursor.execute(
"INSERT INTO tracks (spotify_id, name, main_artist, main_artist_popularity, main_artist_followers, all_artists, all_artists_string, release_date, genres, genres_string, track_number, disc_number, duration_ms, explicit, preview_url, from_album, from_album_spotify_id, album_artist, album_artist_spotify_id) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s) ON CONFLICT DO NOTHING;",
(
item["id"],
item["name"],
artists[0],
main_artist_popularity,
main_artist_followers,
artists,
" ".join(artists),
album_release_date,
genres,
" ".join(genres),
item["track_number"],
item["disc_number"],
item["duration_ms"],
item["explicit"],
item["preview_url"],
album_name,
album_id,
album_artist,
album_artist_spotify_id,
),
)
except Exception as e:
log.exception("Unhandled exception", exception=e, exc_info=True)
else:
log.info(
"🎧 Track " + ("saved" if cursor.rowcount else "exists"),
spotify_id=item["id"],
status="saved",
object="track",
)
# Publish to queue only if it was added (which means it was not in the db yet)
if cursor.rowcount:
publish_channel.basic_publish(
exchange="",
routing_key=WRITING_QUEUE_NAME,
body=item["id"],
properties=pika.BasicProperties(
delivery_mode=pika.spec.PERSISTENT_DELIVERY_MODE
),
)
ch.basic_ack(method.delivery_tag)
consume_channel.basic_qos(prefetch_count=1)
consume_channel.basic_consume(
on_message_callback=callback, queue=READING_QUEUE_NAME
)
print(" [*] Waiting for messages. To exit press CTRL+C")
consume_channel.start_consuming()
# Clean up and close connections
broker.close_connection()
db.close_connection(db_connection, cursor)
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
print("Interrupted")
try:
sys.exit(0)
except SystemExit:
os._exit(0)
|
the-stack_106_17664
|
"""
WNT Client
==========
Simple example of how to communicate with the
Wirepas Network Tool (WNT) services.
.. Copyright:
Copyright 2019 Wirepas Ltd under Apache License, Version 2.0.
See file LICENSE for full license details.
"""
from .handlers import Backend
from .settings import WNTSettings
from wirepas_backend_client.tools import ParserHelper, LoggerHelper
def main():
""" Main entrypoint to connect and talk to a WNT instance """
PARSER = ParserHelper(description="WNT client arguments")
PARSER.add_file_settings()
PARSER.add_wnt()
PARSER.add_fluentd()
SETTINGS = PARSER.settings(settings_class=WNTSettings)
LOGGER = LoggerHelper(
module_name="wm-wnt-viewer", args=SETTINGS, level=SETTINGS.debug_level
).setup()
if SETTINGS.sanity():
Backend(settings=SETTINGS, logger=LOGGER).run(False)
else:
print("There is something wrong with your WNT arguments.")
print(SETTINGS)
if __name__ == "__main__":
main()
|
the-stack_106_17665
|
###############################################################################
#
# Metadata - A class for writing the Excel XLSX Metadata file.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright 2013-2021, John McNamara, [email protected]
#
from . import xmlwriter
class Metadata(xmlwriter.XMLwriter):
"""
A class for writing the Excel XLSX Metadata file.
"""
###########################################################################
#
# Public API.
#
###########################################################################
def __init__(self):
"""
Constructor.
"""
super(Metadata, self).__init__()
###########################################################################
#
# Private API.
#
###########################################################################
def _assemble_xml_file(self):
# Assemble and write the XML file.
# Write the XML declaration.
self._xml_declaration()
# Write the metadata element.
self._write_metadata()
# Write the metadataTypes element.
self._write_metadata_types()
# Write the futureMetadata element.
self._write_future_metadata()
# Write the cellMetadata element.
self._write_cell_metadata()
self._xml_end_tag('metadata')
# Close the file.
self._xml_close()
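# For orientation: the private writers below emit a metadata.xml part nested as
# metadata > metadataTypes > metadataType, futureMetadata > bk > extLst > ext >
# xda:dynamicArrayProperties, and cellMetadata > bk > rc, which Excel uses to
# mark dynamic array formula cells.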
###########################################################################
#
# XML methods.
#
###########################################################################
def _write_metadata(self):
# Write the <metadata> element.
xmlns = 'http://schemas.openxmlformats.org/spreadsheetml/2006/main'
schema = 'http://schemas.microsoft.com/office'
xmlns_xda = schema + '/spreadsheetml/2017/dynamicarray'
attributes = [
('xmlns', xmlns),
('xmlns:xda', xmlns_xda),
]
self._xml_start_tag('metadata', attributes)
def _write_metadata_types(self):
# Write the <metadataTypes> element.
attributes = [('count', 1)]
self._xml_start_tag('metadataTypes', attributes)
# Write the metadataType element.
self._write_metadata_type()
self._xml_end_tag('metadataTypes')
def _write_metadata_type(self):
# Write the <metadataType> element.
attributes = [
('name', 'XLDAPR'),
('minSupportedVersion', 120000),
('copy', 1),
('pasteAll', 1),
('pasteValues', 1),
('merge', 1),
('splitFirst', 1),
('rowColShift', 1),
('clearFormats', 1),
('clearComments', 1),
('assign', 1),
('coerce', 1),
('cellMeta', 1),
]
self._xml_empty_tag('metadataType', attributes)
def _write_future_metadata(self):
# Write the <futureMetadata> element.
attributes = [
('name', 'XLDAPR'),
('count', 1),
]
self._xml_start_tag('futureMetadata', attributes)
self._xml_start_tag('bk')
self._xml_start_tag('extLst')
# Write the ext element.
self._write_ext()
self._xml_end_tag('extLst')
self._xml_end_tag('bk')
self._xml_end_tag('futureMetadata')
def _write_ext(self):
# Write the <ext> element.
attributes = [('uri', '{bdbb8cdc-fa1e-496e-a857-3c3f30c029c3}')]
self._xml_start_tag('ext', attributes)
# Write the xda:dynamicArrayProperties element.
self._write_xda_dynamic_array_properties()
self._xml_end_tag('ext')
def _write_xda_dynamic_array_properties(self):
# Write the <xda:dynamicArrayProperties> element.
attributes = [
('fDynamic', 1),
('fCollapsed', 0),
]
self._xml_empty_tag('xda:dynamicArrayProperties', attributes)
def _write_cell_metadata(self):
# Write the <cellMetadata> element.
attributes = [('count', 1)]
self._xml_start_tag('cellMetadata', attributes)
self._xml_start_tag('bk')
# Write the rc element.
self._write_rc()
self._xml_end_tag('bk')
self._xml_end_tag('cellMetadata')
def _write_rc(self):
# Write the <rc> element.
attributes = [
('t', 1),
('v', 0),
]
self._xml_empty_tag('rc', attributes)
|
the-stack_106_17668
|
#!/usr/bin/env python3
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Run this script from the root of the repository to update all translations from
transifex.
It will do the following automatically:
- fetch all translations using the tx tool
- post-process them into valid and committable format
- remove invalid control characters
- remove location tags (makes diffs less noisy)
TODO:
- auto-add new translations to the build system according to the translation process
'''
import subprocess
import re
import sys
import os
import io
import xml.etree.ElementTree as ET
# Name of transifex tool
TX = 'tx'
# Name of source language file
SOURCE_LANG = 'oro_en.ts'
# Directory with locale files
LOCALE_DIR = 'src/qt/locale'
# Minimum number of messages for translation to be considered at all
MIN_NUM_MESSAGES = 10
def check_at_repository_root():
if not os.path.exists('.git'):
print('No .git directory found')
print('Execute this script at the root of the repository', file=sys.stderr)
sys.exit(1)
def fetch_all_translations():
if subprocess.call([TX, 'pull', '-f', '-a']):
print('Error while fetching translations', file=sys.stderr)
sys.exit(1)
def find_format_specifiers(s):
'''Find all format specifiers in a string.'''
pos = 0
specifiers = []
while True:
percent = s.find('%', pos)
if percent < 0:
break
try:
specifiers.append(s[percent+1])
except:
print('Failed to get specifier')
pos = percent+2
return specifiers
def split_format_specifiers(specifiers):
'''Split format specifiers between numeric (Qt) and others (strprintf)'''
numeric = []
other = []
for s in specifiers:
if s in {'1','2','3','4','5','6','7','8','9'}:
numeric.append(s)
else:
other.append(s)
# If both numeric format specifiers and "others" are used, assume we're dealing
# with a Qt-formatted message. In the case of Qt formatting (see https://doc.qt.io/qt-5/qstring.html#arg)
# only numeric formats are replaced at all. This means "(percentage: %1%)" is valid, without needing
# any kind of escaping that would be necessary for strprintf. Without this, this function
# would wrongly detect '%)' as a printf format specifier.
if numeric:
other = []
# numeric (Qt) can be present in any order, others (strprintf) must be in specified order
return set(numeric),other
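# For example, find_format_specifiers('%1 of %2 (%s)') returns ['1', '2', 's']
# and split_format_specifiers of that returns ({'1', '2'}, []): the presence of
# numeric (Qt) specifiers causes the strprintf-style ones to be ignored.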
def sanitize_string(s):
'''Sanitize string for printing'''
return s.replace('\n',' ')
def check_format_specifiers(source, translation, errors, numerus):
source_f = split_format_specifiers(find_format_specifiers(source))
# assert that no source messages contain both Qt and strprintf format specifiers
# if this fails, go change the source as this is hacky and confusing!
assert(not(source_f[0] and source_f[1]))
try:
translation_f = split_format_specifiers(find_format_specifiers(translation))
except IndexError:
errors.append("Parse error in translation for '%s': '%s'" % (sanitize_string(source), sanitize_string(translation)))
return False
else:
if source_f != translation_f:
if numerus and source_f == (set(), ['n']) and translation_f == (set(), []) and translation.find('%') == -1:
# Allow numerus translations to omit %n specifier (usually when it only has one possible value)
return True
errors.append("Mismatch between '%s' and '%s'" % (sanitize_string(source), sanitize_string(translation)))
return False
return True
def all_ts_files(suffix=''):
for filename in os.listdir(LOCALE_DIR):
# process only language files, and do not process source language
if not filename.endswith('.ts'+suffix) or filename == SOURCE_LANG+suffix:
continue
if suffix: # remove provided suffix
filename = filename[0:-len(suffix)]
filepath = os.path.join(LOCALE_DIR, filename)
yield(filename, filepath)
FIX_RE = re.compile(b'[\x00-\x09\x0b\x0c\x0e-\x1f]')
def remove_invalid_characters(s):
'''Remove invalid characters from translation string'''
return FIX_RE.sub(b'', s)
# Override the cdata escape function to make our output match Qt's (optional, just for
# cleaner diffs for comparison, disabled by default)
_orig_escape_cdata = None
def escape_cdata(text):
text = _orig_escape_cdata(text)
text = text.replace("'", ''')
text = text.replace('"', '"')
return text
def postprocess_translations(reduce_diff_hacks=False):
print('Checking and postprocessing...')
if reduce_diff_hacks:
global _orig_escape_cdata
_orig_escape_cdata = ET._escape_cdata
ET._escape_cdata = escape_cdata
for (filename,filepath) in all_ts_files():
os.rename(filepath, filepath+'.orig')
have_errors = False
for (filename,filepath) in all_ts_files('.orig'):
# pre-fixups to cope with transifex output
parser = ET.XMLParser(encoding='utf-8')  # need to override encoding because 'utf8' is not understood, only 'utf-8' is
with open(filepath + '.orig', 'rb') as f:
data = f.read()
# remove control characters; this must be done over the entire file otherwise the XML parser will fail
data = remove_invalid_characters(data)
tree = ET.parse(io.BytesIO(data), parser=parser)
# iterate over all messages in file
root = tree.getroot()
for context in root.findall('context'):
for message in context.findall('message'):
numerus = message.get('numerus') == 'yes'
source = message.find('source').text
translation_node = message.find('translation')
# pick all numerusforms
if numerus:
translations = [i.text for i in translation_node.findall('numerusform')]
else:
translations = [translation_node.text]
for translation in translations:
if translation is None:
continue
errors = []
valid = check_format_specifiers(source, translation, errors, numerus)
for error in errors:
print('%s: %s' % (filename, error))
if not valid: # set type to unfinished and clear string if invalid
translation_node.clear()
translation_node.set('type', 'unfinished')
have_errors = True
# Remove location tags
for location in message.findall('location'):
message.remove(location)
# Remove entire message if it is an unfinished translation
if translation_node.get('type') == 'unfinished':
context.remove(message)
# check if document is (virtually) empty, and remove it if so
num_messages = 0
for context in root.findall('context'):
for message in context.findall('message'):
num_messages += 1
if num_messages < MIN_NUM_MESSAGES:
print('Removing %s, as it contains only %i messages' % (filepath, num_messages))
continue
# write fixed-up tree
# if diff reduction requested, replace some XML to 'sanitize' to qt formatting
if reduce_diff_hacks:
out = io.BytesIO()
tree.write(out, encoding='utf-8')
out = out.getvalue()
out = out.replace(b' />', b'/>')
with open(filepath, 'wb') as f:
f.write(out)
else:
tree.write(filepath, encoding='utf-8')
return have_errors
if __name__ == '__main__':
check_at_repository_root()
fetch_all_translations()
postprocess_translations()
|
the-stack_106_17669
|
import random
from collections import OrderedDict
from datetime import datetime, timedelta
import django_tables2 as tables
import olympia.core.logger
from django.conf import settings
from django.contrib.humanize.templatetags.humanize import naturaltime
from django.db.models import Count, F, Q
from django.template import loader
from django.urls import reverse
from django.utils import translation
from django.utils.translation import gettext_lazy as _, ngettext
from olympia import amo
from olympia.access import acl
from olympia.activity.models import ActivityLog
from olympia.activity.utils import log_and_notify, send_activity_mail
from olympia.addons.models import Addon, AddonApprovalsCounter, AddonReviewerFlags
from olympia.amo.templatetags.jinja_helpers import absolutify
from olympia.amo.utils import to_language
from olympia.constants.promoted import RECOMMENDED
from olympia.lib.crypto.signing import sign_file
from olympia.reviewers.models import (
AutoApprovalSummary,
ReviewerScore,
ReviewerSubscription,
ViewUnlistedAllList,
get_flags,
get_flags_for_row,
)
from olympia.reviewers.templatetags.jinja_helpers import format_score
from olympia.users.models import UserProfile
from olympia.users.utils import get_task_user
from olympia.versions.models import VersionReviewerFlags
import jinja2
log = olympia.core.logger.getLogger('z.mailer')
class ItemStateTable(object):
def increment_item(self):
self.item_number += 1
def set_page(self, page):
self.item_number = page.start_index()
def safe_substitute(string, *args):
return string % tuple(jinja2.escape(arg) for arg in args)
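# safe_substitute escapes every argument before interpolation, e.g.
# safe_substitute('<a href="%s">%s</a>', url, name) keeps the surrounding
# markup intact while neutralising any HTML in url or name.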
class ReviewerQueueTable(tables.Table, ItemStateTable):
addon_name = tables.Column(verbose_name=_('Add-on'))
addon_type_id = tables.Column(verbose_name=_('Type'))
waiting_time_min = tables.Column(verbose_name=_('Waiting Time'))
flags = tables.Column(verbose_name=_('Flags'), orderable=False)
class Meta:
orderable = True
@classmethod
def get_queryset(cls, admin_reviewer=False):
return cls.Meta.model.objects.all()
def render_addon_name(self, record):
url = reverse('reviewers.review', args=[record.addon_slug])
self.increment_item()
return '<a href="%s">%s <em>%s</em></a>' % (
url,
jinja2.escape(record.addon_name),
jinja2.escape(record.latest_version),
)
def render_addon_type_id(self, record):
return amo.ADDON_TYPE[record.addon_type_id]
def render_flags(self, record):
if not hasattr(record, 'flags'):
record.flags = get_flags_for_row(record)
return ''.join(
'<div class="app-icon ed-sprite-%s" title="%s"></div>' % flag
for flag in record.flags
)
@classmethod
def translate_sort_cols(cls, colname):
legacy_sorts = {
'name': 'addon_name',
'age': 'waiting_time_min',
'type': 'addon_type_id',
}
return legacy_sorts.get(colname, colname)
def render_waiting_time_min(self, record):
if record.waiting_time_min == 0:
r = _('moments ago')
elif record.waiting_time_hours == 0:
# L10n: first argument is number of minutes
r = ngettext('{0} minute', '{0} minutes', record.waiting_time_min).format(
record.waiting_time_min
)
elif record.waiting_time_days == 0:
# L10n: first argument is number of hours
r = ngettext('{0} hour', '{0} hours', record.waiting_time_hours).format(
record.waiting_time_hours
)
else:
# L10n: first argument is number of days
r = ngettext('{0} day', '{0} days', record.waiting_time_days).format(
record.waiting_time_days
)
return jinja2.escape(r)
@classmethod
def default_order_by(cls):
return '-waiting_time_min'
class ViewUnlistedAllListTable(tables.Table, ItemStateTable):
id = tables.Column(verbose_name=_('ID'))
addon_name = tables.Column(verbose_name=_('Add-on'))
guid = tables.Column(verbose_name=_('GUID'))
authors = tables.Column(verbose_name=_('Authors'), orderable=False)
@classmethod
def get_queryset(cls, admin_reviewer=False):
return ViewUnlistedAllList.objects.all()
def render_addon_name(self, record):
url = reverse(
'reviewers.review',
args=[
'unlisted',
record.addon_slug if record.addon_slug is not None else record.id,
],
)
self.increment_item()
return safe_substitute('<a href="%s">%s</a>', url, record.addon_name)
def render_guid(self, record):
return safe_substitute('%s', record.guid)
def render_authors(self, record):
authors = record.authors
if not len(authors):
return ''
more = ' '.join(safe_substitute('%s', uname) for (_, uname) in authors)
author_links = ' '.join(
safe_substitute(
'<a href="%s">%s</a>', UserProfile.create_user_url(id_), uname
)
for (id_, uname) in authors[0:3]
)
return '<span title="%s">%s%s</span>' % (
more,
author_links,
' ...' if len(authors) > 3 else '',
)
@classmethod
def default_order_by(cls):
return '-id'
def view_table_factory(viewqueue):
class ViewQueueTable(ReviewerQueueTable):
@classmethod
def get_queryset(cls, admin_reviewer=False):
return viewqueue.objects.all()
return ViewQueueTable
class ModernAddonQueueTable(ReviewerQueueTable):
addon_name = tables.Column(verbose_name=_('Add-on'), accessor='name')
# Override empty_values for flags so that they can be displayed even if the
# model does not have a flags attribute.
flags = tables.Column(verbose_name=_('Flags'), empty_values=(), orderable=False)
last_human_review = tables.DateTimeColumn(
verbose_name=_('Last Review'),
accessor='addonapprovalscounter__last_human_review',
)
code_weight = tables.Column(
verbose_name=_('Code Weight'),
accessor='_current_version__autoapprovalsummary__code_weight',
)
metadata_weight = tables.Column(
verbose_name=_('Metadata Weight'),
accessor='_current_version__autoapprovalsummary__metadata_weight',
)
weight = tables.Column(
verbose_name=_('Total Weight'),
accessor='_current_version__autoapprovalsummary__weight',
)
score = tables.Column(
verbose_name=_('Maliciousness Score'),
accessor='_current_version__autoapprovalsummary__score',
)
class Meta(ReviewerQueueTable.Meta):
fields = (
'addon_name',
'flags',
'last_human_review',
'code_weight',
'metadata_weight',
'weight',
'score',
)
# Exclude base fields ReviewerQueueTable has that we don't want.
exclude = (
'addon_type_id',
'waiting_time_min',
)
orderable = False
def render_flags(self, record):
if not hasattr(record, 'flags'):
record.flags = get_flags(record, record.current_version)
return super(ModernAddonQueueTable, self).render_flags(record)
def _get_addon_name_url(self, record):
return reverse('reviewers.review', args=[record.slug])
def render_addon_name(self, record):
url = self._get_addon_name_url(record)
return '<a href="%s">%s <em>%s</em></a>' % (
url,
jinja2.escape(record.name),
jinja2.escape(record.current_version),
)
def render_last_human_review(self, value):
return naturaltime(value) if value else ''
def render_weight(self, *, record, value):
return '<span title="%s">%d</span>' % (
'\n'.join(
record.current_version.autoapprovalsummary.get_pretty_weight_info()
),
value,
)
def render_score(self, value):
return format_score(value)
render_last_content_review = render_last_human_review
class PendingRejectionTable(ModernAddonQueueTable):
deadline = tables.Column(
verbose_name=_('Pending Rejection Deadline'),
accessor='_current_version__reviewerflags__pending_rejection',
)
class Meta(ModernAddonQueueTable.Meta):
fields = (
'addon_name',
'flags',
'last_human_review',
'deadline',
'code_weight',
'metadata_weight',
'weight',
'score',
)
@classmethod
def get_queryset(cls, admin_reviewer=False):
return Addon.objects.get_pending_rejection_queue(admin_reviewer=admin_reviewer)
def render_deadline(self, value):
return naturaltime(value) if value else ''
class AutoApprovedTable(ModernAddonQueueTable):
@classmethod
def get_queryset(cls, admin_reviewer=False):
return Addon.objects.get_auto_approved_queue(admin_reviewer=admin_reviewer)
class ContentReviewTable(AutoApprovedTable):
last_updated = tables.DateTimeColumn(verbose_name=_('Last Updated'))
class Meta(ReviewerQueueTable.Meta):
fields = ('addon_name', 'flags', 'last_updated')
# Exclude base fields ReviewerQueueTable has that we don't want.
exclude = (
'addon_type_id',
'last_human_review',
'waiting_time_min',
'code_weight',
'metadata_weight',
'weight',
)
orderable = False
@classmethod
def get_queryset(cls, admin_reviewer=False):
return Addon.objects.get_content_review_queue(admin_reviewer=admin_reviewer)
def render_last_updated(self, value):
return naturaltime(value) if value else ''
def _get_addon_name_url(self, record):
return reverse('reviewers.review', args=['content', record.slug])
class ScannersReviewTable(AutoApprovedTable):
listed_text = _('Listed versions needing human review ({0})')
unlisted_text = _('Unlisted versions needing human review ({0})')
@classmethod
def get_queryset(cls, admin_reviewer=False):
return Addon.objects.get_scanners_queue(admin_reviewer=admin_reviewer).annotate(
unlisted_versions_that_need_human_review=Count(
'versions',
filter=Q(
versions__needs_human_review=True,
versions__channel=amo.RELEASE_CHANNEL_UNLISTED,
),
),
listed_versions_that_need_human_review=Count(
'versions',
filter=Q(
versions__needs_human_review=True,
versions__channel=amo.RELEASE_CHANNEL_LISTED,
),
),
)
def render_addon_name(self, record):
rval = [jinja2.escape(record.name)]
if record.listed_versions_that_need_human_review:
url = reverse('reviewers.review', args=[record.slug])
rval.append(
'<a href="%s">%s</a>'
% (
url,
self.listed_text.format(
record.listed_versions_that_need_human_review
),
)
)
if record.unlisted_versions_that_need_human_review:
url = reverse('reviewers.review', args=['unlisted', record.slug])
rval.append(
'<a href="%s">%s</a>'
% (
url,
self.unlisted_text.format(
record.unlisted_versions_that_need_human_review
),
)
)
return ''.join(rval)
class MadReviewTable(ScannersReviewTable):
listed_text = _('Listed version')
unlisted_text = _('Unlisted versions ({0})')
@classmethod
def get_queryset(cls, admin_reviewer=False):
return Addon.objects.get_mad_queue(admin_reviewer=admin_reviewer).annotate(
unlisted_versions_that_need_human_review=Count(
'versions',
filter=Q(
versions__reviewerflags__needs_human_review_by_mad=True,
versions__channel=amo.RELEASE_CHANNEL_UNLISTED,
),
),
listed_versions_that_need_human_review=F(
'_current_version__reviewerflags__needs_human_review_by_mad'
),
)
class ReviewHelper(object):
"""
A class that builds enough to render the form back to the user and
process off to the correct handler.
"""
def __init__(self, request=None, addon=None, version=None, content_review=False):
self.handler = None
self.required = {}
self.addon = addon
self.version = version
self.content_review = content_review
self.set_review_handler(request)
self.actions = self.get_actions(request)
@property
def redirect_url(self):
return self.handler.redirect_url
def set_data(self, data):
self.handler.set_data(data)
def set_review_handler(self, request):
"""Set the handler property."""
if self.version and self.version.channel == amo.RELEASE_CHANNEL_UNLISTED:
self.handler = ReviewUnlisted(
request,
self.addon,
self.version,
'unlisted',
content_review=self.content_review,
)
elif self.addon.status == amo.STATUS_NOMINATED:
self.handler = ReviewAddon(
request,
self.addon,
self.version,
'nominated',
content_review=self.content_review,
)
else:
self.handler = ReviewFiles(
request,
self.addon,
self.version,
'pending',
content_review=self.content_review,
)
def get_actions(self, request):
actions = OrderedDict()
if request is None:
# If request is not set, it means we are just (ab)using the
# ReviewHelper for its `handler` attribute and we don't care about
# the actions.
return actions
# 2 kind of checks are made for the review page.
# - Base permission checks to access the review page itself, done in
# the review() view
# - A more specific check for each action, done below, restricting
# their availability while not affecting whether the user can see
# the review page or not.
version_is_unlisted = (
self.version and self.version.channel == amo.RELEASE_CHANNEL_UNLISTED
)
promoted_group = self.addon.promoted_group(currently_approved=False)
# Default permissions / admin needed values if it's just a regular
# code review, nothing fancy.
permission = amo.permissions.ADDONS_REVIEW
permission_post_review = amo.permissions.ADDONS_REVIEW
is_admin_needed = (
self.addon.needs_admin_content_review or self.addon.needs_admin_code_review
)
is_admin_needed_post_review = is_admin_needed
# More complex/specific cases.
if promoted_group == RECOMMENDED:
permission = amo.permissions.ADDONS_RECOMMENDED_REVIEW
permission_post_review = permission
elif version_is_unlisted:
is_admin_needed = self.addon.needs_admin_code_review
permission = amo.permissions.ADDONS_REVIEW_UNLISTED
permission_post_review = permission
elif promoted_group.admin_review:
is_admin_needed = is_admin_needed_post_review = True
elif self.content_review:
is_admin_needed = self.addon.needs_admin_content_review
permission = amo.permissions.ADDONS_CONTENT_REVIEW
elif self.addon.type == amo.ADDON_STATICTHEME:
is_admin_needed = self.addon.needs_admin_theme_review
permission = amo.permissions.STATIC_THEMES_REVIEW
permission_post_review = permission
# In addition, if the latest (or current for post-review) version is
# pending rejection, an admin is needed.
if self.version and self.version.pending_rejection:
is_admin_needed = True
if self.addon.current_version and self.addon.current_version.pending_rejection:
is_admin_needed_post_review = True
# Whatever permission values we set, we override if an admin is needed.
if is_admin_needed:
permission = amo.permissions.REVIEWS_ADMIN
if is_admin_needed_post_review:
permission_post_review = amo.permissions.REVIEWS_ADMIN
# Is the current user a reviewer for this kind of add-on ?
is_reviewer = acl.is_reviewer(request, self.addon)
# Is the current user an appropriate reviewer, not only for this kind
# of add-on, but also for the state the add-on is in ? (Allows more
# impactful actions).
is_appropriate_reviewer = acl.action_allowed_user(request.user, permission)
is_appropriate_reviewer_post_review = acl.action_allowed_user(
request.user, permission_post_review
)
addon_is_complete = self.addon.status not in (
amo.STATUS_NULL,
amo.STATUS_DELETED,
)
addon_is_incomplete_and_version_is_unlisted = (
self.addon.status == amo.STATUS_NULL and version_is_unlisted
)
addon_is_reviewable = (
addon_is_complete or addon_is_incomplete_and_version_is_unlisted
)
version_is_unreviewed = self.version and self.version.is_unreviewed
addon_is_valid = self.addon.is_public() or self.addon.is_unreviewed()
addon_is_valid_and_version_is_listed = (
addon_is_valid
and self.version
and self.version.channel == amo.RELEASE_CHANNEL_LISTED
)
current_version_is_listed_and_auto_approved = (
self.version
and self.version.channel == amo.RELEASE_CHANNEL_LISTED
and self.addon.current_version
and self.addon.current_version.was_auto_approved
)
version_is_blocked = self.version and self.version.is_blocked
# Special logic for availability of reject multiple action:
if version_is_unlisted:
can_reject_multiple = is_appropriate_reviewer
elif (
self.content_review
or promoted_group.pre_review
or self.addon.type == amo.ADDON_STATICTHEME
):
can_reject_multiple = (
addon_is_valid_and_version_is_listed and is_appropriate_reviewer
)
else:
# When doing a code review, this action is also available to
# users with Addons:PostReview even if the current version hasn't
# been auto-approved, provided that the add-on isn't marked as
# needing admin review.
can_reject_multiple = addon_is_valid_and_version_is_listed and (
is_appropriate_reviewer or is_appropriate_reviewer_post_review
)
# Definitions for all actions.
actions['public'] = {
'method': self.handler.approve_latest_version,
'minimal': False,
'details': _(
'This will approve, sign, and publish this '
'version. The comments will be sent to the '
'developer.'
),
'label': _('Approve'),
'available': (
not self.content_review
and addon_is_reviewable
and version_is_unreviewed
and is_appropriate_reviewer
and not version_is_blocked
),
}
actions['reject'] = {
'method': self.handler.reject_latest_version,
'label': _('Reject'),
'details': _(
'This will reject this version and remove it '
'from the queue. The comments will be sent '
'to the developer.'
),
'minimal': False,
'available': (
not self.content_review
# We specifically don't let the individual reject action be
# available for unlisted review. `reject_latest_version` isn't
# currently implemented for unlisted.
and addon_is_valid_and_version_is_listed
and version_is_unreviewed
and is_appropriate_reviewer
),
}
actions['approve_content'] = {
'method': self.handler.approve_content,
'label': _('Approve Content'),
'details': _(
'This records your approbation of the '
'content of the latest public version, '
'without notifying the developer.'
),
'minimal': False,
'comments': False,
'available': (
self.content_review
and addon_is_valid_and_version_is_listed
and is_appropriate_reviewer
),
}
actions['confirm_auto_approved'] = {
'method': self.handler.confirm_auto_approved,
'label': _('Confirm Approval'),
'details': _(
'The latest public version of this add-on was '
'automatically approved. This records your '
'confirmation of the approval of that version, '
'without notifying the developer.'
),
'minimal': True,
'comments': False,
'available': (
not self.content_review
and addon_is_valid_and_version_is_listed
and current_version_is_listed_and_auto_approved
and is_appropriate_reviewer_post_review
),
}
actions['reject_multiple_versions'] = {
'method': self.handler.reject_multiple_versions,
'label': _('Reject Multiple Versions'),
'minimal': True,
'delayable': not version_is_unlisted,
'versions': True,
'details': _(
'This will reject the selected versions. '
'The comments will be sent to the developer.'
),
'available': (can_reject_multiple),
}
actions['block_multiple_versions'] = {
'method': self.handler.block_multiple_versions,
'label': _('Block Multiple Versions'),
'minimal': True,
'versions': True,
'comments': False,
'details': _(
'This will disable the selected approved '
'versions silently, and open up the block creation '
'admin page.'
),
'available': (
self.addon.type != amo.ADDON_STATICTHEME
and version_is_unlisted
and is_appropriate_reviewer
),
}
actions['confirm_multiple_versions'] = {
'method': self.handler.confirm_multiple_versions,
'label': _('Confirm Multiple Versions'),
'minimal': True,
'versions': True,
'details': _(
'This will confirm approval of the selected '
'versions without notifying the developer.'
),
'comments': False,
'available': (
self.addon.type != amo.ADDON_STATICTHEME
and version_is_unlisted
and is_appropriate_reviewer
),
}
actions['reply'] = {
'method': self.handler.reviewer_reply,
'label': _('Reviewer reply'),
'details': _(
'This will send a message to the developer. '
'You will be notified when they reply.'
),
'minimal': True,
'available': (
self.version is not None
and is_reviewer
and (not promoted_group.admin_review or is_appropriate_reviewer)
),
}
actions['super'] = {
'method': self.handler.process_super_review,
'label': _('Request super-review'),
'details': _(
'If you have concerns about this add-on that '
'an admin reviewer should look into, enter '
'your comments in the area below. They will '
'not be sent to the developer.'
),
'minimal': True,
'available': (self.version is not None and is_reviewer),
}
actions['comment'] = {
'method': self.handler.process_comment,
'label': _('Comment'),
'details': _(
'Make a comment on this version. The developer '
"won't be able to see this."
),
'minimal': True,
'available': (is_reviewer),
}
return OrderedDict(
((key, action) for key, action in actions.items() if action['available'])
)
def process(self):
action = self.handler.data.get('action', '')
if not action:
raise NotImplementedError
return self.actions[action]['method']()
class ReviewBase(object):
def __init__(
self, request, addon, version, review_type, content_review=False, user=None
):
self.request = request
if request:
self.user = user or self.request.user
self.human_review = True
else:
# Use the addons team go-to user "Mozilla" for the automatic
# validations.
self.user = user or get_task_user()
self.human_review = False
self.addon = addon
self.version = version
self.review_type = (
'theme_%s' if addon.type == amo.ADDON_STATICTHEME else 'extension_%s'
) % review_type
self.files = self.version.unreviewed_files if self.version else []
self.content_review = content_review
self.redirect_url = None
def set_addon(self, **kw):
"""Alter addon, set reviewed timestamp on version being reviewed."""
self.addon.update(**kw)
self.version.update(reviewed=datetime.now())
def set_data(self, data):
self.data = data
def set_files(self, status, files, hide_disabled_file=False):
"""Change the files to be the new status."""
for file in files:
file.datestatuschanged = datetime.now()
file.reviewed = datetime.now()
if hide_disabled_file:
file.hide_disabled_file()
file.status = status
file.save()
def set_promoted(self):
group = self.addon.promoted_group(currently_approved=False)
if group and group.pre_review:
# These addons shouldn't be attempted for auto approval anyway,
# but double check that the cron job isn't trying to approve it.
assert not self.user.id == settings.TASK_USER_ID
self.addon.promotedaddon.approve_for_version(self.version)
def clear_all_needs_human_review_flags_in_channel(self):
"""Clear needs_human_review flags on all versions in the same channel.
To be called when approving a listed version: For listed, the version
reviewers are approving is always the latest listed one, and then users
are supposed to automatically get the update to that version, so we
don't need to care about older ones anymore.
"""
# Do a mass UPDATE.
self.addon.versions.filter(
needs_human_review=True, channel=self.version.channel
).update(needs_human_review=False)
# Another one for the needs_human_review_by_mad flag.
VersionReviewerFlags.objects.filter(
version__addon=self.addon,
version__channel=self.version.channel,
).update(needs_human_review_by_mad=False)
# Also reset it on self.version in case this instance is saved later.
self.version.needs_human_review = False
def clear_specific_needs_human_review_flags(self, version):
"""Clear needs_human_review flags on a specific version."""
if version.needs_human_review:
version.update(needs_human_review=False)
if version.needs_human_review_by_mad:
version.reviewerflags.update(needs_human_review_by_mad=False)
def log_action(self, action, version=None, files=None, timestamp=None):
details = {
'comments': self.data.get('comments', ''),
'reviewtype': self.review_type.split('_')[1],
}
if files is None and self.files:
files = self.files
if files is not None:
details['files'] = [f.id for f in files]
if version is None and self.version:
version = self.version
if version is not None:
details['version'] = version.version
args = (self.addon, version)
else:
args = (self.addon,)
if timestamp is None:
timestamp = datetime.now()
kwargs = {'user': self.user, 'created': timestamp, 'details': details}
self.log_entry = ActivityLog.create(action, *args, **kwargs)
def notify_email(
self, template, subject, perm_setting='reviewer_reviewed', version=None
):
"""Notify the authors that their addon has been reviewed."""
if version is None:
version = self.version
data = self.data.copy() if self.data else {}
data.update(self.get_context_data())
data['tested'] = ''
os, app = data.get('operating_systems'), data.get('applications')
if os and app:
data['tested'] = 'Tested on %s with %s' % (os, app)
elif os and not app:
data['tested'] = 'Tested on %s' % os
elif not os and app:
data['tested'] = 'Tested with %s' % app
subject = subject % (data['name'], self.version.version if self.version else '')
unique_id = (
self.log_entry.id
if hasattr(self, 'log_entry')
else random.randrange(100000)
)
message = loader.get_template('reviewers/emails/%s.ltxt' % template).render(
data
)
send_activity_mail(
subject,
message,
version,
self.addon.authors.all(),
settings.ADDONS_EMAIL,
unique_id,
perm_setting=perm_setting,
)
def get_context_data(self):
addon_url = self.addon.get_url_path(add_prefix=False)
# We need to display the name in some language that is relevant to the
# recipient(s) instead of using the reviewer's. addon.default_locale
# should work.
if self.addon.name and self.addon.name.locale != self.addon.default_locale:
lang = to_language(self.addon.default_locale)
with translation.override(lang):
# Force a reload of translations for this addon.
addon = Addon.unfiltered.get(pk=self.addon.pk)
else:
addon = self.addon
review_url_kw = {'addon_id': self.addon.pk}
if self.version and self.version.channel == amo.RELEASE_CHANNEL_UNLISTED:
review_url_kw['channel'] = 'unlisted'
dev_ver_url = reverse('devhub.addons.versions', args=[self.addon.id])
else:
dev_ver_url = self.addon.get_dev_url('versions')
return {
'name': addon.name,
'number': self.version.version if self.version else '',
'reviewer': self.user.reviewer_name or self.user.name,
'addon_url': absolutify(addon_url),
'dev_versions_url': absolutify(dev_ver_url),
'review_url': absolutify(
reverse('reviewers.review', kwargs=review_url_kw, add_prefix=False)
),
'comments': self.data.get('comments'),
'SITE_URL': settings.SITE_URL,
}
def reviewer_reply(self):
# Default to reviewer reply action.
action = amo.LOG.REVIEWER_REPLY_VERSION
if self.version:
if (
self.version.channel == amo.RELEASE_CHANNEL_UNLISTED
and not self.version.reviewed
):
self.version.update(reviewed=datetime.now())
log.info(
'Sending reviewer reply for %s to authors and other '
'recipients' % self.addon
)
log_and_notify(
action,
self.data['comments'],
self.user,
self.version,
perm_setting='individual_contact',
detail_kwargs={'reviewtype': self.review_type.split('_')[1]},
)
def sign_files(self):
assert not (self.version and self.version.is_blocked)
for file_ in self.files:
if file_.is_experiment:
ActivityLog.create(amo.LOG.EXPERIMENT_SIGNED, file_, user=self.user)
sign_file(file_)
def process_comment(self):
self.log_action(amo.LOG.COMMENT_VERSION)
update_reviewed = (
self.version
and self.version.channel == amo.RELEASE_CHANNEL_UNLISTED
and not self.version.reviewed
)
if update_reviewed:
self.version.update(reviewed=datetime.now())
def approve_latest_version(self):
"""Approve the add-on latest version (potentially setting the add-on to
approved if it was awaiting its first review)."""
# Safeguard to force implementation for unlisted add-ons to completely
# override this method.
assert self.version.channel == amo.RELEASE_CHANNEL_LISTED
# Safeguard to make sure this action is not used for content review
# (it should use confirm_auto_approved instead).
assert not self.content_review
# Sign addon.
self.sign_files()
# Hold onto the status before we change it.
status = self.addon.status
# Save files first, because set_addon checks to make sure there
# is at least one public file or it won't make the addon public.
self.set_files(amo.STATUS_APPROVED, self.files)
self.set_promoted()
if self.set_addon_status:
self.set_addon(status=amo.STATUS_APPROVED)
if self.human_review:
# No need for a human review anymore in this channel.
self.clear_all_needs_human_review_flags_in_channel()
# Clear pending rejection since we approved that version.
VersionReviewerFlags.objects.filter(
version=self.version,
).update(pending_rejection=None)
# An approval took place so we can reset this.
AddonReviewerFlags.objects.update_or_create(
addon=self.addon,
defaults={'auto_approval_disabled_until_next_approval': False},
)
# The counter can be incremented.
AddonApprovalsCounter.increment_for_addon(addon=self.addon)
# Assign reviewer incentive scores.
ReviewerScore.award_points(
self.user, self.addon, status, version=self.version
)
else:
# Automatic approval, reset the counter.
AddonApprovalsCounter.reset_for_addon(addon=self.addon)
self.log_action(amo.LOG.APPROVE_VERSION)
template = '%s_to_approved' % self.review_type
if self.review_type in ['extension_pending', 'theme_pending']:
subject = 'Mozilla Add-ons: %s %s Updated'
else:
subject = 'Mozilla Add-ons: %s %s Approved'
self.notify_email(template, subject)
self.log_public_message()
log.info('Sending email for %s' % (self.addon))
def reject_latest_version(self):
"""Reject the add-on latest version (potentially setting the add-on
back to incomplete if it was awaiting its first review)."""
# Safeguard to force implementation for unlisted add-ons to completely
# override this method.
assert self.version.channel == amo.RELEASE_CHANNEL_LISTED
# Safeguard to make sure this action is not used for content review
# (it should use reject_multiple_versions instead).
assert not self.content_review
# Hold onto the status before we change it.
status = self.addon.status
if self.set_addon_status:
self.set_addon(status=amo.STATUS_NULL)
self.set_files(amo.STATUS_DISABLED, self.files, hide_disabled_file=True)
if self.human_review:
# Clear needs human review flags, but only on the latest version:
# it's the only version we can be certain that the reviewer looked
# at.
self.clear_specific_needs_human_review_flags(self.version)
# Assign reviewer incentive scores.
ReviewerScore.award_points(
self.user, self.addon, status, version=self.version
)
self.log_action(amo.LOG.REJECT_VERSION)
template = '%s_to_rejected' % self.review_type
subject = "Mozilla Add-ons: %s %s didn't pass review"
self.notify_email(template, subject)
self.log_sandbox_message()
log.info('Sending email for %s' % (self.addon))
def process_super_review(self):
"""Mark an add-on as needing admin code, content, or theme review."""
addon_type = self.addon.type
if addon_type == amo.ADDON_STATICTHEME:
needs_admin_property = 'needs_admin_theme_review'
log_action_type = amo.LOG.REQUEST_ADMIN_REVIEW_THEME
elif self.content_review:
needs_admin_property = 'needs_admin_content_review'
log_action_type = amo.LOG.REQUEST_ADMIN_REVIEW_CONTENT
else:
needs_admin_property = 'needs_admin_code_review'
log_action_type = amo.LOG.REQUEST_ADMIN_REVIEW_CODE
AddonReviewerFlags.objects.update_or_create(
addon=self.addon, defaults={needs_admin_property: True}
)
self.log_action(log_action_type)
log.info('%s for %s' % (log_action_type.short, self.addon))
def approve_content(self):
"""Approve content of an add-on."""
channel = self.version.channel
version = self.addon.current_version
# Content review only action.
assert self.content_review
# Doesn't make sense for unlisted versions.
assert channel == amo.RELEASE_CHANNEL_LISTED
# Like confirm auto approval, the approve content action should not
# show the comment box, so override the text in case the reviewer
# switched between actions and accidentally submitted some comments from
# another action.
self.data['comments'] = ''
# When doing a content review, don't increment the approvals counter,
# just record the date of the content approval and log it.
AddonApprovalsCounter.approve_content_for_addon(addon=self.addon)
self.log_action(amo.LOG.APPROVE_CONTENT, version=version)
# Assign reviewer incentive scores.
if self.human_review:
is_post_review = channel == amo.RELEASE_CHANNEL_LISTED
ReviewerScore.award_points(
self.user,
self.addon,
self.addon.status,
version=version,
post_review=is_post_review,
content_review=self.content_review,
)
def confirm_auto_approved(self):
"""Confirm an auto-approval decision."""
channel = self.version.channel
if channel == amo.RELEASE_CHANNEL_LISTED:
# When doing an approval in listed channel, the version we care
# about is always current_version and *not* self.version.
# This allows reviewers to confirm approval of a public add-on even
# when their latest version is disabled.
version = self.addon.current_version
else:
# For unlisted, we just use self.version.
version = self.version
# The confirm auto-approval action should not show the comment box,
# so override the text in case the reviewer switched between actions
# and accidentally submitted some comments from another action.
self.data['comments'] = ''
self.log_action(amo.LOG.CONFIRM_AUTO_APPROVED, version=version)
if self.human_review:
# Mark the approval as confirmed (handle DoesNotExist, it may have
# been auto-approved before we unified workflow for unlisted and
# listed).
try:
version.autoapprovalsummary.update(confirmed=True)
except AutoApprovalSummary.DoesNotExist:
pass
if channel == amo.RELEASE_CHANNEL_LISTED:
# Clear needs human review flags on past versions in channel.
self.clear_all_needs_human_review_flags_in_channel()
AddonApprovalsCounter.increment_for_addon(addon=self.addon)
else:
# For unlisted versions, only drop the needs_human_review flag
# on the latest version.
self.clear_specific_needs_human_review_flags(self.version)
# Clear the "pending_rejection" flag for all versions (Note that
# the action should only be accessible to admins if the current
# version is pending rejection).
VersionReviewerFlags.objects.filter(
version__addon=self.addon,
version__channel=channel,
).update(pending_rejection=None)
# Assign reviewer incentive scores.
is_post_review = channel == amo.RELEASE_CHANNEL_LISTED
ReviewerScore.award_points(
self.user,
self.addon,
self.addon.status,
version=version,
post_review=is_post_review,
content_review=self.content_review,
)
def reject_multiple_versions(self):
"""Reject a list of versions.
Note: this is used in blocklist.utils.disable_addon_for_block for both
listed and unlisted versions (human_review=False)."""
# self.version and self.files won't point to the versions we want to
# modify in this action, so set them to None before finding the right
# versions.
status = self.addon.status
latest_version = self.version
self.version = None
self.files = None
now = datetime.now()
if self.data.get('delayed_rejection'):
pending_rejection_deadline = now + timedelta(
days=int(self.data['delayed_rejection_days'])
)
else:
pending_rejection_deadline = None
if pending_rejection_deadline:
action_id = (
amo.LOG.REJECT_CONTENT_DELAYED
if self.content_review
else amo.LOG.REJECT_VERSION_DELAYED
)
log.info(
'Marking %s versions %s for delayed rejection'
% (self.addon, ', '.join(str(v.pk) for v in self.data['versions']))
)
else:
action_id = (
amo.LOG.REJECT_CONTENT
if self.content_review
else amo.LOG.REJECT_VERSION
)
log.info(
'Making %s versions %s disabled'
% (self.addon, ', '.join(str(v.pk) for v in self.data['versions']))
)
for version in self.data['versions']:
files = version.files.all()
if not pending_rejection_deadline:
self.set_files(amo.STATUS_DISABLED, files, hide_disabled_file=True)
self.log_action(action_id, version=version, files=files, timestamp=now)
if self.human_review:
# Clear needs human review flags on rejected versions, we
# consider that the reviewer looked at them before rejecting.
self.clear_specific_needs_human_review_flags(version)
# (Re)set pending_rejection. Could be reset to None if doing an
# immediate rejection.
VersionReviewerFlags.objects.update_or_create(
version=version,
defaults={'pending_rejection': pending_rejection_deadline},
)
if pending_rejection_deadline:
# A delayed rejection implies the next version should be manually
# reviewed and the developers should be notified again once the
# deadline is close.
AddonReviewerFlags.objects.update_or_create(
addon=self.addon,
defaults={
'notified_about_expiring_delayed_rejections': False,
'auto_approval_disabled_until_next_approval': True,
},
)
# The reviewer should be automatically subscribed to any new
# versions posted to the same channel.
ReviewerSubscription.objects.get_or_create(
user=self.user, addon=self.addon, channel=latest_version.channel
)
else:
# An immediate one might require the add-on status to change.
self.addon.update_status()
# Assign reviewer incentive scores and send email, if it's a human
# reviewer: if it's not, it's coming from some automation where we
# don't need to notify the developer (we should already have done that
# before) and don't need to award points.
if self.human_review:
channel = latest_version.channel
# Send the email to the developer. We need to pass the latest
# version of the add-on instead of one of the versions we rejected,
# it will be used to generate a token allowing the developer to
# reply, and that only works with the latest version.
self.data['version_numbers'] = ', '.join(
str(v.version) for v in self.data['versions']
)
if pending_rejection_deadline:
template = 'reject_multiple_versions_with_delay'
subject = 'Mozilla Add-ons: %s%s will be disabled on addons.mozilla.org'
elif (
self.addon.status != amo.STATUS_APPROVED
and channel == amo.RELEASE_CHANNEL_LISTED
):
template = 'reject_multiple_versions_disabled_addon'
subject = (
'Mozilla Add-ons: %s%s has been disabled on addons.mozilla.org'
)
else:
template = 'reject_multiple_versions'
subject = 'Mozilla Add-ons: Versions disabled for %s%s'
log.info('Sending email for %s' % (self.addon))
self.notify_email(template, subject, version=latest_version)
ReviewerScore.award_points(
self.user,
self.addon,
status,
version=latest_version,
post_review=True,
content_review=self.content_review,
)
def notify_about_auto_approval_delay(self, version):
"""Notify developers of the add-on when their version has not been
auto-approved for a while."""
template = 'held_for_review'
subject = 'Mozilla Add-ons: %s %s is pending review'
AddonReviewerFlags.objects.update_or_create(
addon=self.addon, defaults={'notified_about_auto_approval_delay': True}
)
self.data['version'] = version
self.notify_email(template, subject, version=version)
def confirm_multiple_versions(self):
raise NotImplementedError # only implemented for unlisted below.
def block_multiple_versions(self):
raise NotImplementedError # only implemented for unlisted below.
class ReviewAddon(ReviewBase):
set_addon_status = True
def log_public_message(self):
log.info('Making %s public' % (self.addon))
def log_sandbox_message(self):
log.info('Making %s disabled' % (self.addon))
class ReviewFiles(ReviewBase):
set_addon_status = False
def log_public_message(self):
log.info(
'Making %s files %s public'
% (self.addon, ', '.join([f.filename for f in self.files]))
)
def log_sandbox_message(self):
log.info(
'Making %s files %s disabled'
% (self.addon, ', '.join([f.filename for f in self.files]))
)
class ReviewUnlisted(ReviewBase):
def approve_latest_version(self):
"""Set an unlisted addon version files to public."""
assert self.version.channel == amo.RELEASE_CHANNEL_UNLISTED
# Sign addon.
self.sign_files()
for file_ in self.files:
ActivityLog.create(amo.LOG.UNLISTED_SIGNED, file_, user=self.user)
self.set_files(amo.STATUS_APPROVED, self.files)
template = 'unlisted_to_reviewed_auto'
subject = 'Mozilla Add-ons: %s %s signed and ready to download'
self.log_action(amo.LOG.APPROVE_VERSION)
if self.human_review:
self.clear_specific_needs_human_review_flags(self.version)
self.notify_email(template, subject, perm_setting=None)
log.info(
'Making %s files %s public'
% (self.addon, ', '.join([f.filename for f in self.files]))
)
log.info('Sending email for %s' % (self.addon))
def block_multiple_versions(self):
min_version = ('0', None)
max_version = ('*', None)
for version in self.data['versions']:
version_str = version.version
if not min_version[1] or version_str < min_version[1]:
min_version = (version, version_str)
if not max_version[1] or version_str > max_version[1]:
max_version = (version, version_str)
params = f'?min={min_version[0].pk}&max={max_version[0].pk}'
self.redirect_url = (
reverse('admin:blocklist_block_addaddon', args=(self.addon.pk,)) + params
)
def confirm_multiple_versions(self):
"""Confirm approval on a list of versions."""
# There shouldn't be any comments for this action.
self.data['comments'] = ''
timestamp = datetime.now()
for version in self.data['versions']:
self.log_action(
amo.LOG.CONFIRM_AUTO_APPROVED, version=version, timestamp=timestamp
)
if self.human_review:
# Mark summary as confirmed if it exists.
try:
version.autoapprovalsummary.update(confirmed=True)
except AutoApprovalSummary.DoesNotExist:
pass
# Clear needs_human_review on the confirmed versions; we consider
# that the reviewer looked at all versions they are approving.
self.clear_specific_needs_human_review_flags(version)
|
the-stack_106_17670
|
"""The ozw integration."""
import asyncio
from contextlib import suppress
import json
import logging
from openzwavemqtt import OZWManager, OZWOptions
from openzwavemqtt.const import (
EVENT_INSTANCE_EVENT,
EVENT_NODE_ADDED,
EVENT_NODE_CHANGED,
EVENT_NODE_REMOVED,
EVENT_VALUE_ADDED,
EVENT_VALUE_CHANGED,
EVENT_VALUE_REMOVED,
CommandClass,
ValueType,
)
from openzwavemqtt.models.node import OZWNode
from openzwavemqtt.models.value import OZWValue
from openzwavemqtt.util.mqtt_client import MQTTClient
from homeassistant.components import mqtt
from homeassistant.components.hassio.handler import HassioAPIError
from homeassistant.config_entries import ConfigEntry, ConfigEntryState
from homeassistant.const import EVENT_HOMEASSISTANT_STOP
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers.device_registry import async_get_registry as get_dev_reg
from homeassistant.helpers.dispatcher import async_dispatcher_send
from . import const
from .const import (
CONF_INTEGRATION_CREATED_ADDON,
CONF_USE_ADDON,
DATA_UNSUBSCRIBE,
DOMAIN,
MANAGER,
NODES_VALUES,
PLATFORMS,
TOPIC_OPENZWAVE,
)
from .discovery import DISCOVERY_SCHEMAS, check_node_schema, check_value_schema
from .entity import (
ZWaveDeviceEntityValues,
create_device_id,
create_device_name,
create_value_id,
)
from .services import ZWaveServices
from .websocket_api import async_register_api
_LOGGER = logging.getLogger(__name__)
DATA_DEVICES = "zwave-mqtt-devices"
DATA_STOP_MQTT_CLIENT = "ozw_stop_mqtt_client"
async def async_setup_entry( # noqa: C901
hass: HomeAssistant, entry: ConfigEntry
) -> bool:
"""Set up ozw from a config entry."""
hass.data.setdefault(DOMAIN, {})
ozw_data = hass.data[DOMAIN][entry.entry_id] = {}
ozw_data[DATA_UNSUBSCRIBE] = []
data_nodes = {}
hass.data[DOMAIN][NODES_VALUES] = data_values = {}
removed_nodes = []
manager_options = {"topic_prefix": f"{TOPIC_OPENZWAVE}/"}
if entry.unique_id is None:
hass.config_entries.async_update_entry(entry, unique_id=DOMAIN)
if entry.data.get(CONF_USE_ADDON):
# Do not use MQTT integration. Use own MQTT client.
# Retrieve discovery info from the OpenZWave add-on.
discovery_info = await hass.components.hassio.async_get_addon_discovery_info(
"core_zwave"
)
if not discovery_info:
_LOGGER.error("Failed to get add-on discovery info")
raise ConfigEntryNotReady
discovery_info_config = discovery_info["config"]
host = discovery_info_config["host"]
port = discovery_info_config["port"]
username = discovery_info_config["username"]
password = discovery_info_config["password"]
mqtt_client = MQTTClient(host, port, username=username, password=password)
manager_options["send_message"] = mqtt_client.send_message
else:
mqtt_entries = hass.config_entries.async_entries("mqtt")
if not mqtt_entries or mqtt_entries[0].state is not ConfigEntryState.LOADED:
_LOGGER.error("MQTT integration is not set up")
return False
mqtt_entry = mqtt_entries[0] # MQTT integration only has one entry.
@callback
def send_message(topic, payload):
if mqtt_entry.state is not ConfigEntryState.LOADED:
_LOGGER.error("MQTT integration is not set up")
return
hass.async_create_task(mqtt.async_publish(hass, topic, json.dumps(payload)))
manager_options["send_message"] = send_message
options = OZWOptions(**manager_options)
manager = OZWManager(options)
hass.data[DOMAIN][MANAGER] = manager
@callback
def async_node_added(node):
# Caution: This is also called on (re)start.
_LOGGER.debug("[NODE ADDED] node_id: %s", node.id)
data_nodes[node.id] = node
if node.id not in data_values:
data_values[node.id] = []
@callback
def async_node_changed(node):
_LOGGER.debug("[NODE CHANGED] node_id: %s", node.id)
data_nodes[node.id] = node
# notify devices about the node change
if node.id not in removed_nodes:
hass.async_create_task(async_handle_node_update(hass, node))
@callback
def async_node_removed(node):
_LOGGER.debug("[NODE REMOVED] node_id: %s", node.id)
data_nodes.pop(node.id)
# node added/removed events also happen on (re)starts of hass/mqtt/ozw
# clean up the device/entity registry if we know this node is permanently deleted
# the entities themselves are removed by the values logic
if node.id in removed_nodes:
hass.async_create_task(async_handle_remove_node(hass, node))
removed_nodes.remove(node.id)
@callback
def async_instance_event(message):
event = message["event"]
event_data = message["data"]
_LOGGER.debug("[INSTANCE EVENT]: %s - data: %s", event, event_data)
# The actual removal action of a Z-Wave node is reported as an instance event.
# Only when this event is detected do we clean up the device and entities from hass.
# Note: Find a more elegant way of doing this, e.g. a notification of this event from OZW
if event in ("removenode", "removefailednode") and "Node" in event_data:
removed_nodes.append(event_data["Node"])
@callback
def async_value_added(value):
node = value.node
# Clean up node.node_id and node.id use. They are the same.
node_id = value.node.node_id
# Filter out CommandClasses we're definitely not interested in.
if value.command_class in (CommandClass.MANUFACTURER_SPECIFIC,):
return
_LOGGER.debug(
"[VALUE ADDED] node_id: %s - label: %s - value: %s - value_id: %s - CC: %s",
value.node.id,
value.label,
value.value,
value.value_id_key,
value.command_class,
)
node_data_values = data_values[node_id]
# Check if this value should be tracked by an existing entity
value_unique_id = create_value_id(value)
for values in node_data_values:
values.async_check_value(value)
if values.values_id == value_unique_id:
return # this value already has an entity
# Run discovery on it and see if any entities need to be created
for schema in DISCOVERY_SCHEMAS:
if not check_node_schema(node, schema):
continue
if not check_value_schema(
value, schema[const.DISC_VALUES][const.DISC_PRIMARY]
):
continue
values = ZWaveDeviceEntityValues(hass, options, schema, value)
values.async_setup()
# This is legacy and can be cleaned up since we are in the main thread:
# We create a new list and update the reference here so that
# the list can be safely iterated over in the main thread
data_values[node_id] = node_data_values + [values]
@callback
def async_value_changed(value):
# if an entity belonging to this value needs updating,
# it's handled within the entity logic
_LOGGER.debug(
"[VALUE CHANGED] node_id: %s - label: %s - value: %s - value_id: %s - CC: %s",
value.node.id,
value.label,
value.value,
value.value_id_key,
value.command_class,
)
# Handle a scene activation message
if value.command_class in (
CommandClass.SCENE_ACTIVATION,
CommandClass.CENTRAL_SCENE,
):
async_handle_scene_activated(hass, value)
return
@callback
def async_value_removed(value):
_LOGGER.debug(
"[VALUE REMOVED] node_id: %s - label: %s - value: %s - value_id: %s - CC: %s",
value.node.id,
value.label,
value.value,
value.value_id_key,
value.command_class,
)
# signal all entities using this value for removal
value_unique_id = create_value_id(value)
async_dispatcher_send(hass, const.SIGNAL_DELETE_ENTITY, value_unique_id)
# remove value from our local list
node_data_values = data_values[value.node.id]
node_data_values[:] = [
item for item in node_data_values if item.values_id != value_unique_id
]
# Listen to events for node and value changes
for event, event_callback in (
(EVENT_NODE_ADDED, async_node_added),
(EVENT_NODE_CHANGED, async_node_changed),
(EVENT_NODE_REMOVED, async_node_removed),
(EVENT_VALUE_ADDED, async_value_added),
(EVENT_VALUE_CHANGED, async_value_changed),
(EVENT_VALUE_REMOVED, async_value_removed),
(EVENT_INSTANCE_EVENT, async_instance_event),
):
ozw_data[DATA_UNSUBSCRIBE].append(options.listen(event, event_callback))
# Register Services
services = ZWaveServices(hass, manager)
services.async_register()
# Register WebSocket API
async_register_api(hass)
@callback
def async_receive_message(msg):
manager.receive_message(msg.topic, msg.payload)
async def start_platforms():
await asyncio.gather(
*(
hass.config_entries.async_forward_entry_setup(entry, platform)
for platform in PLATFORMS
)
)
if entry.data.get(CONF_USE_ADDON):
mqtt_client_task = asyncio.create_task(mqtt_client.start_client(manager))
async def async_stop_mqtt_client(event=None):
"""Stop the mqtt client.
Do not unsubscribe the manager topic.
"""
mqtt_client_task.cancel()
with suppress(asyncio.CancelledError):
await mqtt_client_task
ozw_data[DATA_UNSUBSCRIBE].append(
hass.bus.async_listen_once(
EVENT_HOMEASSISTANT_STOP, async_stop_mqtt_client
)
)
ozw_data[DATA_STOP_MQTT_CLIENT] = async_stop_mqtt_client
else:
ozw_data[DATA_UNSUBSCRIBE].append(
await mqtt.async_subscribe(
hass, f"{manager.options.topic_prefix}#", async_receive_message
)
)
hass.async_create_task(start_platforms())
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload a config entry."""
# cleanup platforms
unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
if not unload_ok:
return False
# unsubscribe all listeners
for unsubscribe_listener in hass.data[DOMAIN][entry.entry_id][DATA_UNSUBSCRIBE]:
unsubscribe_listener()
if entry.data.get(CONF_USE_ADDON):
async_stop_mqtt_client = hass.data[DOMAIN][entry.entry_id][
DATA_STOP_MQTT_CLIENT
]
await async_stop_mqtt_client()
hass.data[DOMAIN].pop(entry.entry_id)
return True
async def async_remove_entry(hass: HomeAssistant, entry: ConfigEntry) -> None:
"""Remove a config entry."""
if not entry.data.get(CONF_INTEGRATION_CREATED_ADDON):
return
try:
await hass.components.hassio.async_stop_addon("core_zwave")
except HassioAPIError as err:
_LOGGER.error("Failed to stop the OpenZWave add-on: %s", err)
return
try:
await hass.components.hassio.async_uninstall_addon("core_zwave")
except HassioAPIError as err:
_LOGGER.error("Failed to uninstall the OpenZWave add-on: %s", err)
async def async_handle_remove_node(hass: HomeAssistant, node: OZWNode):
"""Handle the removal of a Z-Wave node, removing all traces in device/entity registry."""
dev_registry = await get_dev_reg(hass)
# grab device in device registry attached to this node
dev_id = create_device_id(node)
device = dev_registry.async_get_device({(DOMAIN, dev_id)})
if not device:
return
devices_to_remove = [device.id]
# also grab slave devices (node instances)
for item in dev_registry.devices.values():
if item.via_device_id == device.id:
devices_to_remove.append(item.id)
# remove all devices in registry related to this node
# note: removal of entity registry is handled by core
for dev_id in devices_to_remove:
dev_registry.async_remove_device(dev_id)
async def async_handle_node_update(hass: HomeAssistant, node: OZWNode):
"""
Handle a node updated event from OZW.
Meaning some of the basic info like name/model is updated.
We want these changes to be pushed to the device registry.
"""
dev_registry = await get_dev_reg(hass)
# grab device in device registry attached to this node
dev_id = create_device_id(node)
device = dev_registry.async_get_device({(DOMAIN, dev_id)})
if not device:
return
# update device in device registry with (updated) info
for item in dev_registry.devices.values():
if device.id not in (item.id, item.via_device_id):
continue
dev_name = create_device_name(node)
dev_registry.async_update_device(
item.id,
manufacturer=node.node_manufacturer_name,
model=node.node_product_name,
name=dev_name,
)
@callback
def async_handle_scene_activated(hass: HomeAssistant, scene_value: OZWValue):
"""Handle a (central) scene activation message."""
node_id = scene_value.node.id
ozw_instance_id = scene_value.ozw_instance.id
scene_id = scene_value.index
scene_label = scene_value.label
if scene_value.command_class == CommandClass.SCENE_ACTIVATION:
# legacy/network scene
scene_value_id = scene_value.value
scene_value_label = scene_value.label
else:
# central scene command
if scene_value.type != ValueType.LIST:
return
scene_value_label = scene_value.value["Selected"]
scene_value_id = scene_value.value["Selected_id"]
_LOGGER.debug(
"[SCENE_ACTIVATED] ozw_instance: %s - node_id: %s - scene_id: %s - scene_value_id: %s",
ozw_instance_id,
node_id,
scene_id,
scene_value_id,
)
# Simply forward it to the hass event bus
hass.bus.async_fire(
const.EVENT_SCENE_ACTIVATED,
{
const.ATTR_INSTANCE_ID: ozw_instance_id,
const.ATTR_NODE_ID: node_id,
const.ATTR_SCENE_ID: scene_id,
const.ATTR_SCENE_LABEL: scene_label,
const.ATTR_SCENE_VALUE_ID: scene_value_id,
const.ATTR_SCENE_VALUE_LABEL: scene_value_label,
},
)
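# Minimal consumption sketch for the event fired above (illustrative only, not part of this
# integration; the event name comes from const.py and the listener below is just an example):
#   hass.bus.async_listen(const.EVENT_SCENE_ACTIVATED, lambda event: _LOGGER.debug(event.data))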
|
the-stack_106_17672
|
#!Measurement
'''
baseline:
after: true
before: false
counts: 120
detector: H2
mass: 39.862
settling_time: 15.0
default_fits: nominal
equilibration:
eqtime: 1.0
inlet: H
inlet_delay: 3
outlet: V
use_extraction_eqtime: true
multicollect:
counts: 400
detector: L2(CDD)
isotope: Ar36
peakcenter:
after: false
before: false
detector: H2
detectors:
- H1
- L2(CDD)
isotope: Ar36
integration_time: 1.048576
peakhop:
hops_name: ''
use_peak_hop: false
'''
ACTIVE_DETECTORS=('H2','H1','AX','L1','L2(CDD)')
def main():
info('unknown measurement script')
activate_detectors(*ACTIVE_DETECTORS)
if mx.peakcenter.before:
peak_center(detector=mx.peakcenter.detector,isotope=mx.peakcenter.isotope)
if mx.baseline.before:
baselines(ncounts=mx.baseline.counts,mass=mx.baseline.mass, detector=mx.baseline.detector,
settling_time=mx.baseline.settling_time)
position_magnet(mx.multicollect.isotope, detector=mx.multicollect.detector)
#sniff the gas during equilibration
if mx.equilibration.use_extraction_eqtime:
eqt = eqtime
else:
eqt = mx.equilibration.eqtime
'''
Equilibrate is non-blocking so use a sniff or sleep as a placeholder
e.g. sniff(<equilibration_time>) or sleep(<equilibration_time>)
'''
equilibrate(eqtime=eqt, inlet=mx.equilibration.inlet, outlet=mx.equilibration.outlet,
delay=mx.equilibration.inlet_delay)
set_time_zero()
sniff(eqt)
set_fits()
set_baseline_fits()
#multicollect on active detectors
multicollect(ncounts=mx.multicollect.counts, integration_time=1.048576)
if mx.baseline.after:
#set_integration_time(4.194)
baselines(ncounts=mx.baseline.counts,mass=mx.baseline.mass, detector=mx.baseline.detector,
settling_time=mx.baseline.settling_time)
#set_integration_time(1.049)
if mx.peakcenter.after:
activate_detectors(*mx.peakcenter.detectors, **{'peak_center':True})
peak_center(detector=mx.peakcenter.detector,isotope=mx.peakcenter.isotope,
integration_time=mx.peakcenter.integration_time)
if use_cdd_warming:
gosub('warm_cdd', argv=(mx.equilibration.outlet,))
info('finished measure script')
|
the-stack_106_17673
|
"""
Authors: The Vollab Developers 2004-2021
License: BSD 3 clause
Plot implied volatility calculated by Fast Fourier Transform.
Uses the Lets Be Rational library for fast calculation of Black-Scholes implied volatility.
"""
import argparse
import json
import functools
import matplotlib.pyplot as plt
import numpy as np
import env
import vollab as vl
def plot_local_vol(characteristic_function_name, params):
"""
Display plot of local volatility surface.
Args:
characteristic_function_name: The name of the model.
params: Dictionary of parameter values.
"""
tenors = [1.0, 2.0, 3.0, 4.0, 5.0]
# create market parameters
market_params = vl.MarketParams()
market_params.__dict__.update(params)
# create the characteristic function
characteristic_function = vl.create_characteristic_function(characteristic_function_name)
characteristic_function.__dict__.update(params)
# select the range of strikes to plot
strike_selector = functools.partial(vl.select_strike,
0.7 * market_params.spot,
1.3 * market_params.spot)
# calculate the local vol surface
strikes, tenors, local_vol_surface = vl.compute_local_vol_matrix(characteristic_function,
market_params,
strike_selector,
tenors)
tol = 1e-3 if characteristic_function_name == 'BlackScholes' else 1e-8
vl.plot_surface(strikes, tenors, np.transpose(local_vol_surface),
"Local volatility as a function of strike and maturity.",
"Strike", "Maturity", "Volatility",
tol=tol)
print("Close the plot window to continue...")
plt.show()
def main():
""" The main entry point function.
"""
parser = argparse.ArgumentParser()
parser.add_argument("-m", "--model",
help="The stochastic model",
default="Heston",
choices=vl.characteristic_function_names())
# parameters are a dictionary.
parser.add_argument("-p", "--params",
help="The parameter dictionary.",
default="{}",
type=json.loads)
# parse args
args = parser.parse_args()
# do the plot
plot_local_vol(args.model, args.params)
if __name__ == "__main__":
main()
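# Example invocation (illustrative; the script filename is assumed and the keys accepted in
# --params depend on the chosen characteristic function and on vl.MarketParams, e.g. "spot"):
#   python plot_local_vol.py --model Heston --params '{"spot": 100.0}'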
|
the-stack_106_17674
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayEcoEprintPrinterDeleteModel(object):
def __init__(self):
self._client_id = None
self._client_secret = None
self._eprint_token = None
self._machine_code = None
@property
def client_id(self):
return self._client_id
@client_id.setter
def client_id(self, value):
self._client_id = value
@property
def client_secret(self):
return self._client_secret
@client_secret.setter
def client_secret(self, value):
self._client_secret = value
@property
def eprint_token(self):
return self._eprint_token
@eprint_token.setter
def eprint_token(self, value):
self._eprint_token = value
@property
def machine_code(self):
return self._machine_code
@machine_code.setter
def machine_code(self, value):
self._machine_code = value
def to_alipay_dict(self):
params = dict()
if self.client_id:
if hasattr(self.client_id, 'to_alipay_dict'):
params['client_id'] = self.client_id.to_alipay_dict()
else:
params['client_id'] = self.client_id
if self.client_secret:
if hasattr(self.client_secret, 'to_alipay_dict'):
params['client_secret'] = self.client_secret.to_alipay_dict()
else:
params['client_secret'] = self.client_secret
if self.eprint_token:
if hasattr(self.eprint_token, 'to_alipay_dict'):
params['eprint_token'] = self.eprint_token.to_alipay_dict()
else:
params['eprint_token'] = self.eprint_token
if self.machine_code:
if hasattr(self.machine_code, 'to_alipay_dict'):
params['machine_code'] = self.machine_code.to_alipay_dict()
else:
params['machine_code'] = self.machine_code
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayEcoEprintPrinterDeleteModel()
if 'client_id' in d:
o.client_id = d['client_id']
if 'client_secret' in d:
o.client_secret = d['client_secret']
if 'eprint_token' in d:
o.eprint_token = d['eprint_token']
if 'machine_code' in d:
o.machine_code = d['machine_code']
return o
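# Minimal usage sketch (all values below are placeholders, not real credentials):
#   model = AlipayEcoEprintPrinterDeleteModel()
#   model.client_id = 'demo_client_id'
#   model.machine_code = 'demo_machine_code'
#   payload = model.to_alipay_dict()
#   restored = AlipayEcoEprintPrinterDeleteModel.from_alipay_dict(payload)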
|
the-stack_106_17676
|
import panflute as pf
from .meta import Meta, MetaFilter, metapreparemethod
from .Number import NumberFilter
from . import utils
class TableCaptionReplace(MetaFilter, NumberFilter):
@metapreparemethod
def prepare(self, doc, meta):
self.top_level = meta.chaptersDepth if meta.chapters else ''
self.tableSeqName = meta.tableTitle # the caption prefix text is the same as the SEQ field content, so Word produces the same prefix
self.chapDelim = self.top_level and meta.chapDelim
self.auto_labels = meta.autoTblLabels
self.section_no = pf.RawInline(
self.top_level and
f'''<w:fldSimple w:instr=" STYLEREF {self.top_level} \\s"/>''', format="openxml")
self.table_no = pf.RawInline(
f'''<w:fldSimple w:instr=" SEQ {self.tableSeqName} \\* ARABIC \\s {self.top_level}"/>''',
format="openxml")
self.table_no2 = pf.RawInline(
f'''<w:fldSimple w:instr=" SEQ {self.tableSeqName} \\c \\* ARABIC \\s {self.top_level} "/>''',
format="openxml")
# Repeat the previous number
def generateTableNumber(self, identifier=''):
# Build the table number
return [
pf.Str(self.meta.tableTitle),
pf.Span(self.section_no,
pf.Str(self.chapDelim),
self.table_no,
identifier=identifier),
pf.Str(self.meta.titleDelim)
]
def generateTableNumber2(self, identifier=''):
# Build the table number (second caption)
return [
pf.Str(self.meta.tableTitle2),
pf.Span(self.section_no,
pf.Str(self.chapDelim),
self.table_no2,
),
pf.Str(self.meta.titleDelim)
]
def isSecondCaptionSeparator(self, item):
return isinstance(item, pf.RawInline) and item.format == 'tex' and item.text == self.meta.secondCaptionSeparator
def action(self, elem, doc):
if isinstance(elem, pf.Table):
elem: pf.Table
# pf.debug("Table!")
# The structure under a Table is Table.Caption [Plain]
# table.caption.content[0].content yields the individual elements inside the Plain
# If the caption content has length 0, the table caption is empty
# If the table has no caption, return the table as-is without further processing
if len(elem.caption.content) == 0:
return [elem, pf.Para(pf.Span()) if self.meta.isParaAfterTable else pf.Null()]
# Get the caption elements
captionContent = elem.caption.content[0].content
numberinfo = self.getNumberingInfo(utils.stripLabel(captionContent))
isNeedNumber = numberinfo['numbering']
identifier = numberinfo['identifier']
# Check whether a second caption exists
for item in captionContent:
if self.isSecondCaptionSeparator(item) and item.index < len(captionContent):
# A second caption only exists when there are elements after the second-caption separator
hasSecondCaption = True
secondCaptionIndex = item.index
break
else:
hasSecondCaption = False
# Build the text content for the two caption levels
if hasSecondCaption:
firstCaption = pf.Span(
*captionContent[:secondCaptionIndex], identifier=identifier+":c" if identifier else '')
secondCaption = pf.Span(
*captionContent[secondCaptionIndex+1:], identifier=identifier+':sc' if identifier else '')
else:
firstCaption = pf.Span(
*captionContent, identifier=identifier+":c" if identifier else '')
# Build the new caption content from the two caption levels and the table number
new_caption = []
if isNeedNumber:
new_caption.extend(self.generateTableNumber(identifier))
new_caption.append(firstCaption)
if hasSecondCaption:
new_caption.append(pf.LineBreak())
if isNeedNumber:
new_caption.extend(self.generateTableNumber2(identifier))
new_caption.append(secondCaption)
elem.caption.content[0].content = pf.ListContainer(*new_caption)
return [elem, pf.Para(pf.Span()) if self.meta.isParaAfterTable else pf.Null()]
def main(doc=None, meta=None):
replacer = TableCaptionReplace(meta=meta)
return pf.run_filter(replacer.action, prepare=replacer.prepare, doc=doc)
if __name__ == "__main__":
main()
|
the-stack_106_17677
|
import codecs
import os
from setuptools import find_packages, setup
VERSION = "0.0.5"
AUTHOR = "Free Law Project"
EMAIL = "[email protected]"
HERE = os.path.abspath(os.path.dirname(__file__))
def read(*parts):
"""
Build an absolute path from *parts* and return the contents of the
resulting file. Assume UTF-8 encoding.
"""
with codecs.open(os.path.join(HERE, *parts), "rb", "utf-8") as file:
return file.read()
reqs_path = HERE + "/requirements.txt"
with open(reqs_path) as reqs_file:
reqs = reqs_file.read().splitlines()
setup(
name="bankruptcy",
description="A bankruptcy document parser.",
license="BSD",
url="https://github.com/freelawproject/bankruptcy-parser",
version=VERSION,
author=AUTHOR,
author_email=EMAIL,
maintainer=AUTHOR,
maintainer_email=EMAIL,
keywords=["legal", "document", "bankruptcy", "PDF", "form"],
long_description=read("README.rst"),
packages=find_packages(exclude=("tests",)),
include_package_data=True,
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Natural Language :: English",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
"Topic :: Software Development :: Libraries :: Python Modules",
],
install_requires=reqs,
test_suite="tests",
)
|
the-stack_106_17681
|
from flask import current_app
from sqlalchemy import desc, and_
from sqlalchemy.orm import aliased
from sqlalchemy.dialects.postgresql import insert
from app import db
from app.dao.dao_utils import transactional
from app.models import InboundSms, InboundSmsHistory, Service, ServiceDataRetention, SMS_TYPE
from app.utils import midnight_n_days_ago
@transactional
def dao_create_inbound_sms(inbound_sms):
db.session.add(inbound_sms)
def dao_get_inbound_sms_for_service(service_id, user_number=None, *, limit_days=None, limit=None):
q = InboundSms.query.filter(
InboundSms.service_id == service_id
).order_by(
InboundSms.created_at.desc()
)
if limit_days is not None:
start_date = midnight_n_days_ago(limit_days)
q = q.filter(InboundSms.created_at >= start_date)
if user_number:
q = q.filter(InboundSms.user_number == user_number)
if limit:
q = q.limit(limit)
return q.all()
def dao_get_paginated_inbound_sms_for_service_for_public_api(
service_id,
older_than=None,
page_size=None
):
if page_size is None:
page_size = current_app.config['PAGE_SIZE']
filters = [InboundSms.service_id == service_id]
if older_than:
older_than_created_at = db.session.query(
InboundSms.created_at).filter(InboundSms.id == older_than).as_scalar()
filters.append(InboundSms.created_at < older_than_created_at)
query = InboundSms.query.filter(*filters)
return query.order_by(desc(InboundSms.created_at)).paginate(
per_page=page_size
).items
def dao_count_inbound_sms_for_service(service_id, limit_days):
return InboundSms.query.filter(
InboundSms.service_id == service_id,
InboundSms.created_at >= midnight_n_days_ago(limit_days)
).count()
def _insert_inbound_sms_history(subquery, query_limit=10000):
offset = 0
inbound_sms_query = db.session.query(
*[x.name for x in InboundSmsHistory.__table__.c]
).filter(InboundSms.id.in_(subquery))
inbound_sms_count = inbound_sms_query.count()
while offset < inbound_sms_count:
statement = insert(InboundSmsHistory).from_select(
InboundSmsHistory.__table__.c,
inbound_sms_query.limit(query_limit).offset(offset)
)
statement = statement.on_conflict_do_nothing(
constraint="inbound_sms_history_pkey"
)
db.session.connection().execute(statement)
offset += query_limit
def _delete_inbound_sms(datetime_to_delete_from, query_filter):
query_limit = 10000
subquery = db.session.query(
InboundSms.id
).filter(
InboundSms.created_at < datetime_to_delete_from,
*query_filter
).limit(
query_limit
).subquery()
deleted = 0
# set to nonzero just to enter the loop
number_deleted = 1
while number_deleted > 0:
_insert_inbound_sms_history(subquery, query_limit=query_limit)
number_deleted = InboundSms.query.filter(InboundSms.id.in_(subquery)).delete(synchronize_session='fetch')
deleted += number_deleted
return deleted
@transactional
def delete_inbound_sms_older_than_retention():
current_app.logger.info('Deleting inbound sms for services with flexible data retention')
flexible_data_retention = ServiceDataRetention.query.join(
ServiceDataRetention.service,
Service.inbound_number
).filter(
ServiceDataRetention.notification_type == SMS_TYPE
).all()
deleted = 0
for f in flexible_data_retention:
n_days_ago = midnight_n_days_ago(f.days_of_retention)
current_app.logger.info("Deleting inbound sms for service id: {}".format(f.service_id))
deleted += _delete_inbound_sms(n_days_ago, query_filter=[InboundSms.service_id == f.service_id])
current_app.logger.info('Deleting inbound sms for services without flexible data retention')
seven_days_ago = midnight_n_days_ago(7)
deleted += _delete_inbound_sms(seven_days_ago, query_filter=[
InboundSms.service_id.notin_(x.service_id for x in flexible_data_retention),
])
current_app.logger.info('Deleted {} inbound sms'.format(deleted))
return deleted
def dao_get_inbound_sms_by_id(service_id, inbound_id):
return InboundSms.query.filter_by(
id=inbound_id,
service_id=service_id
).one()
def dao_get_paginated_most_recent_inbound_sms_by_user_number_for_service(
service_id,
page,
limit_days
):
"""
This query starts from inbound_sms and joins onto itself to find the most recent row for each user_number.
Equivalent sql:
SELECT t1.*
FROM inbound_sms t1
LEFT OUTER JOIN inbound_sms AS t2 ON (
-- identifying
t1.user_number = t2.user_number AND
t1.service_id = t2.service_id AND
-- ordering
t1.created_at < t2.created_at
)
WHERE t2.id IS NULL AND t1.service_id = :service_id
ORDER BY t1.created_at DESC;
LIMIT 50 OFFSET :page
"""
t2 = aliased(InboundSms)
q = db.session.query(
InboundSms
).outerjoin(
t2,
and_(
InboundSms.user_number == t2.user_number,
InboundSms.service_id == t2.service_id,
InboundSms.created_at < t2.created_at,
)
).filter(
t2.id == None, # noqa
InboundSms.service_id == service_id,
InboundSms.created_at >= midnight_n_days_ago(limit_days)
).order_by(
InboundSms.created_at.desc()
)
return q.paginate(
page=page,
per_page=current_app.config['PAGE_SIZE']
)
|
the-stack_106_17682
|
import math
import os
from random import random
from typing import List, Dict
import cv2
import json
import numpy as np
from pycocotools.coco import COCO
from torch.utils.data import Dataset
from pedrec.configs.dataset_configs import CocoDatasetConfig, get_coco_dataset_cfg_default
from pedrec.configs.pedrec_net_config import PedRecNet50Config
from pedrec.datasets.dataset_helper import get_skeleton_2d_affine_transform
from pedrec.models.constants.dataset_constants import DatasetType
from pedrec.models.constants.skeleton_pedrec import SKELETON_PEDREC_JOINTS
from pedrec.models.data_structures import ImageSize
from pedrec.utils.augmentation_helper import get_affine_transforms, get_affine_transform
from pedrec.utils.bb_helper import get_center_bb_from_tl_bb, bb_to_center_scale
from pedrec.utils.skeleton_helper import flip_lr_joints
from pedrec.utils.skeleton_helper_3d import flip_lr_orientation
class CocoDataset(Dataset):
def __init__(self, dataset_path: str, mode: DatasetType, cfg: CocoDatasetConfig,
input_size: ImageSize, transform, flip_all: bool = False):
self.mode = mode
self.flip_all: bool = flip_all
self.cfg = cfg
self.input_size = input_size
self.dataset_path = dataset_path
self.annotation_path = os.path.join(dataset_path, "annotations")
if mode == DatasetType.TRAIN:
self.annotation_path = os.path.join(self.annotation_path, "person_keypoints_train2017.json")
self.img_dir = os.path.join(self.dataset_path, 'train2017')
elif mode == DatasetType.VALIDATE:
self.annotation_path = os.path.join(self.annotation_path, "person_keypoints_val2017.json")
self.img_dir = os.path.join(self.dataset_path, 'val2017')
else:
self.annotation_path = None
self.img_dir = os.path.join(self.dataset_path, 'test2017')
self.mhbow_gt = self.get_mhbow_gt()
self.coco = COCO(self.annotation_path)
coco_classes = [cat['name'] for cat in self.coco.loadCats(self.coco.getCatIds())]
self.transform = transform
self.classes = ['__background__'] + coco_classes
self.num_classes = len(self.classes)
self.num_non_humans = 0
self._class_to_ind = dict(zip(self.classes, range(self.num_classes)))
self._class_to_coco_ind = dict(zip(coco_classes, self.coco.getCatIds()))
self._coco_ind_to_class_ind = dict([(self._class_to_coco_ind[cls],
self._class_to_ind[cls])
for cls in self.classes[1:]])
self.num_joints = len(SKELETON_PEDREC_JOINTS)
self.orientation_dummy = np.array([[0, 0, 0, 0, 0], [0, 0, 0, 0, 0]], dtype=np.float32)
self.no_human_joint_dummy = np.zeros((self.num_joints, 5), dtype=np.float32)
self.no_human_joint_dummy[4] = 1
self.threed_dummy = np.zeros((self.num_joints, 6), dtype=np.float32)
self.env_position_dummy = np.array([0, 0, 0], dtype=np.float32)
# self.threed_not_available_dummy = np.zeros((self.num_joints, 1), dtype=np.float32)
self.is_real_img = np.ones((1), dtype=np.float32)
self.load_gt()
self.__length = len(self.annotations)
def get_mhbow_gt(self):
if self.mode == DatasetType.TRAIN:
annot_path = os.path.join(self.dataset_path, "annotations", "train_hoe.json")
else:
annot_path = os.path.join(self.dataset_path, "annotations", "val_hoe.json")
with open(annot_path, 'r') as obj_file:
annot = json.load(obj_file)
return annot
def load_gt(self):
self.annotations = []
img_ids = self.coco.getImgIds()
for img_id in img_ids:
img_info = self.coco.loadImgs(img_id)[0]
img_size = np.array([img_info['width'], img_info['height']], dtype=np.float32)
annotation_ids = self.coco.getAnnIds(img_id, iscrowd=False)
annotations = self.coco.loadAnns(annotation_ids)
person_annotations, non_person_annotations = self.get_clean_bb_annotations(annotations, img_size)
for annotation in person_annotations:
orientation = self.orientation_dummy
if self.cfg.use_mebow_orientation:
mhbow_key = f"{img_id}_{annotation['id']}"
if mhbow_key in self.mhbow_gt:
orientation = self.orientation_dummy.copy()
degrees = self.mhbow_gt[mhbow_key] + 90
if degrees < 0:
degrees += 360
if degrees > 360:
degrees -= 360
orientation[0, 1] = math.radians(degrees) / (2 * math.pi)
orientation[0, 4] = 1
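# Worked example of the normalization above: a MEBOW value of 300 becomes
# 300 + 90 = 390 -> 30 degrees, stored as math.radians(30) / (2 * math.pi) = 30 / 360 ~ 0.083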
# mhbow_orientation =
filename = "{}.jpg".format(str(annotation["image_id"]).zfill(12))
bb = annotation['clean_bb']
bb = get_center_bb_from_tl_bb(bb)
joints = self.get_joint_data(annotation)
center, scale = bb_to_center_scale(bb, self.input_size)
self.annotations.append({
"coco_id": annotation['id'],
"img_filename": filename,
"joints": joints,
"orientation": orientation,
"bb": bb,
"center": center,
"scale": scale,
"img_size": img_size,
"is_human": True
})
# for annotation in non_person_annotations:
# if (self.mode == DatasetType.TRAIN and self.num_non_humans > 50000) or \
# (self.mode == DatasetType.VALIDATE and self.num_non_humans > 10000):
# continue
# self.num_non_humans += 1
# filename = "{}.jpg".format(str(annotation["image_id"]).zfill(12))
# bb = annotation['clean_bb']
# bb = get_center_bb_from_tl_bb(bb)
# joints = self.get_joint_data(annotation)
# center, scale = bb_to_center_scale(bb, self.input_size)
# self.annotations.append({
# "img_filename": filename,
# "joints": joints,
# "bb": bb,
# "center": center,
# "scale": scale,
# "img_size": img_size,
# "is_human": False
# })
def get_joint_data(self, annotation: Dict[str, any]):
joint_data = annotation["keypoints"]
joints = np.zeros((len(SKELETON_PEDREC_JOINTS), 5), dtype=np.float32)
for joint_num in range(17):
curr_idx = joint_num * 3
joints[joint_num][0] = joint_data[curr_idx]
joints[joint_num][1] = joint_data[curr_idx + 1]
joints[joint_num][2] = 1
visibility = joint_data[curr_idx + 2]
if visibility == 0:
joints[joint_num][0] = 0
joints[joint_num][1] = 0
joints[joint_num][2] = 0 # score
joints[joint_num][3] = 0 # visibility
joints[joint_num][4] = 1 # joint supported by dataset
else:
joints[joint_num][2] = 1 # score
joints[joint_num][3] = 1 # visibility
joints[joint_num][4] = 1 # joint supported by dataset
return joints
def get_clean_bb_annotations(self, annotations: List[Dict[str, any]], img_size: np.array):
person_annotations = []
non_person_annotations = []
for annotation in annotations:
x, y, w, h = annotation['bbox']
x1 = np.max((0, x))
y1 = np.max((0, y))
x2 = np.min((img_size[0] - 1, x1 + np.max((0, w - 1))))
y2 = np.min((img_size[1] - 1, y1 + np.max((0, h - 1))))
if x2 >= x1 and y2 >= y1:
annotation['clean_bb'] = [x1, y1, x2 - x1, y2 - y1]
if not self.is_valid_person_annotation(annotation):
non_person_annotations.append(annotation)
continue
person_annotations.append(annotation)
else:
print("WAHHH")
return person_annotations, non_person_annotations
def is_valid_person_annotation(self, annotation: Dict[str, any]):
cls = self._coco_ind_to_class_ind[annotation['category_id']]
if cls != 1:
return False
if max(annotation['keypoints']) == 0:
return False
if annotation['area'] <= 0:
return False
return True
def __len__(self):
return self.__length
def __getitem__(self, index):
annotations = self.annotations[index]
img_path = os.path.join(self.img_dir, annotations['img_filename'])
# PoseResnet pretrain was trained on BGR, thus keep it for now
# img = cv2.imread(img_path)
img = cv2.cvtColor(cv2.imread(img_path), cv2.COLOR_BGR2RGB)
img_orig = img.copy()
orientation = annotations['orientation'].copy()
skeleton = annotations['joints'].copy()
center = annotations['center'].copy()
scale = annotations['scale'].copy()
rotation = 0
if self.flip_all:
img = img[:, ::-1, :]
skeleton = flip_lr_joints(skeleton, img.shape[1])
center[0] = img.shape[1] - center[0] - 1
orientation = flip_lr_orientation(orientation)
if self.mode == DatasetType.TRAIN:
scale = scale * np.clip(np.random.randn() * self.cfg.scale_factor + 1,
1 - self.cfg.scale_factor,
1 + self.cfg.scale_factor)
if random() <= 0.6:
rotation = np.clip(np.random.randn() * self.cfg.rotation_factor,
-self.cfg.rotation_factor * 2,
self.cfg.rotation_factor * 2)
if self.cfg.flip and random() <= 0.5:
img = img[:, ::-1, :]
skeleton = flip_lr_joints(skeleton, img.shape[1])
center[0] = img.shape[1] - center[0] - 1
orientation = flip_lr_orientation(orientation)
trans, trans_inv = get_affine_transforms(center, scale, rotation, self.input_size, add_inv=True)
# transx = get_affine_transform(center, scale, rotation, self.input_size)
skeleton = get_skeleton_2d_affine_transform(skeleton, trans, self.input_size)
if np.max(skeleton[:, 2]) == 0: # augmentation screwed up, use unaugmented
img = img_orig.copy()
center = annotations['center'].copy()
scale = annotations['scale'].copy()
rotation = 0
skeleton = annotations['joints'].copy()
trans, trans_inv = get_affine_transforms(center, scale, rotation, self.input_size, add_inv=True)
# trans = get_affine_transform(center, scale, rotation, self.input_size)
skeleton = get_skeleton_2d_affine_transform(skeleton, trans, self.input_size)
model_input = cv2.warpAffine(
img,
trans,
(int(self.input_size.width), int(self.input_size.height)),
flags=cv2.INTER_LINEAR)
# if np.max(skeleton[:, 2]) == 0: # still screwed
# print(f"No visible skeleton COCO: {index} - {annotations['img_filename']}")
skeleton[:, 0] /= model_input.shape[1]
skeleton[:, 1] /= model_input.shape[0]
if self.transform:
model_input = self.transform(model_input)
if np.max(skeleton) > 1:
raise ValueError("WTF")
# skeleton = np.concatenate((skeleton[:, 0:2], self.threed_dummy, skeleton[:, 2:], self.threed_dummy), axis=1)
# np.insert(skeleton, 2, 0, axis=0)
# returns model_input, skeleton, center, scale, rotation, is_real_img
return model_input, {
"skeleton": skeleton,
"skeleton_3d": self.threed_dummy,
"center": center,
"scale": scale,
"rotation": rotation,
"is_real_img": self.is_real_img,
"orientation": orientation,
"env_position_2d": self.env_position_dummy,
"trans_inv": trans_inv.astype(np.float32),
"img_path": img_path,
"idx": index,
"img_size": annotations["img_size"],
# "coco_id": annotations["coco_id"]
}
if __name__ == "__main__":
cfg = PedRecNet50Config()
dataset_cfg = get_coco_dataset_cfg_default()
# MS Coco
dataset_name = "MSCOCO"
dataset = CocoDataset("data/datasets/COCO", DatasetType.VALIDATE, dataset_cfg,
cfg.model.input_size, None)
test = dataset[49]
a = 1
|
the-stack_106_17684
|
import json
import sqlite3, pyodbc
import time
import keyring
import networkx as nx
import dash
import dash_auth
import dash_core_components as dcc
import dash_html_components as html
import pandas as pd
import plotly.graph_objs as go
from dash.dependencies import Input, Output, State, MATCH, ALL
import dash_bootstrap_components as dbc
import plotly.express as px
"""
Requires Dash. Recommended install:
1) Install Anaconda3
2) Install Dash: conda install -c conda-forge dash
3) Install dash-bootstrap: conda install -c conda-forge dash-bootstrap-components
4) Optional pyodbc to connect to mssql: conda install -c conda-forge pyodbc
5) pip install dash-auth
6) conda install tabulate
"""
# Experimental visualization of totalnum data using the output of totalnum_builddb. Uses dash.
# Now supports deployment, multiple sessions, and gunicorn!
# To run with gunicorn: GUNICORN_CMD_ARGS="--bind=0.0.0.0" gunicorn 'totalnum_dashboard_new:initApp("/path/to/database")'
# by Jeff Klann, PHD 10-2018 to 6-2020
instruction = """
1. Optionally choose a site from the dropdown
2. The options in the checkboxes start at the top level of the ontology. To navigate down the tree, check one box, click the right arrow button, and its children are displayed. Likewise, to navigate up the tree, click the left arrow button.
3. Graphs:
* Top-left: Check boxes as desired. Temporal totalnum will appear below, showing the trend in # patients at each refresh.
* Top-right: Check boxes as desired. Site-by-site totalnum will appear, showing the breakdown in # patients per site (max among all refreshes).
* Bottom: Click left or right arrow as desired. Network graph of current ontology level and its children are displayed, with node size indicating the totalnum per item (at selected site, max among all refreshes).
"""
app = dash.Dash(external_stylesheets=[dbc.themes.CERULEAN],suppress_callback_exceptions=False)
# App Auth
# Set username and password in local install, instead of hello world
auth = dash_auth.BasicAuth(
app,
[['hello','world']]
)
# App Layout
app.layout = html.Div([
html.Span(id='hoo'),
dbc.NavbarSimple(children=[
dbc.Button('Help', id='help',color='primary')
],brand="Patient Count Data Quality Dashboard",brand_href='#',brand_style={'align':'left'}),
dbc.Modal(
[
dbc.ModalHeader("Totalnum Dashboard Help"),
dbc.ModalBody(dcc.Markdown(instruction)),
dbc.ModalFooter(
#dbc.Button("Close", id="close", className="ml-auto")
),
],
id="modalHelp",
size='xl'
),
dbc.Row([dbc.Col("Site selection:",width=1),dbc.Col(dbc.RadioItems(id='site', options=[],inline=True),width=4)],justify='center'),
#dbc.Row([dbc.Col("Mode:",width=1),dbc.Col(dbc.RadioItems(id='mode', options=[{'label':'Explore','value':'X'},{'label':'Site Variability','value':'V'},{'label':'Trends over Time','value':'T'},{'label':'Missingness','value':'M'}],inline=True),width=6)],justify='left'),
dbc.Row([
dbc.Col(children=[dbc.Tabs([
dbc.Tab(html.Div([
dcc.Checklist(id='items', options=[{'label': 'No options', 'value': 'none'}], value=[],
labelStyle={'display': 'none'}),
dbc.ButtonGroup([
dbc.Button("no options")
],vertical=True,id="navbuttons"),
html.Br(),
dbc.Button('<--', id='unzoom', outline=True, color='primary'),
dbc.Button('-->', id='zoom', outline=True, color='primary')
],style={'width': '300px','margin':'20px'}),label='Navigate Terms')
])],width=2),
dbc.Col(children=[dbc.Tabs([
# Summary tab
dbc.Tab(dbc.Card([dbc.CardHeader('',id='summary_head'),dbc.CardBody(dcc.Markdown('',id='summary'))],color='secondary',style={'width': '800px'}),label='Summary',tab_id='summary_tab',label_style={"color": "blue"}),
# Explorer tab
dbc.Tab(dbc.Table([html.Tr([
html.Td(dbc.Tabs([
dbc.Tab(dcc.Graph(id='hlevel_graph'),label='Trends Over Time',tab_id='hlevel_tab',disabled=True),
dbc.Tab(dcc.Graph(id='bars_graph',figure=go.Figure(data=go.Bar(y=[2, 3, 1]))),label='Trends Across Sites',tab_id='bars_tab',disabled=True)
],id='graphTabs'))] )
]),label="Explorer",tab_id='explorer_tab',label_style={"color": "blue"}),
# Site variability tab
dbc.Tab(dbc.Tabs([dbc.Tab(
html.Div([dbc.Row([dbc.Col(dcc.Slider(id='slider_siteoutlier',min=0,max=4,step=0.1,value=1)),dbc.Col(html.P('Threshold',id='slidertext_siteoutlier'))]),dbc.Row([
#dbc.Col(dcc.Checklist(id='items_siteoutlier', options=[{'label': 'No options', 'value': 'none'}], value=[],
# labelStyle={'display': 'block'}),width={"size":3,"offset":1}),
dbc.Col(dcc.Graph(id='siteoutlier_graph'),width=9)
])
]),label='Explore'),
dbc.Tab(html.Div("Variability Report (not yet implemented)",id='report_div'),label='Report')]
),label='Site Variability',tab_id='siteoutlier_tab',label_style={"color":"blue"}),
# Missingness tab
dbc.Tab(dbc.Tabs([
# Missingness xplore
dbc.Tab(dcc.Graph(id='missing_graph'),label='Explore',tab_id='missgraph_tab'),
# Missingness report
dbc.Tab([
dbc.Alert("All non-leaf items missing at this site, but not missing at least one other site. Red indicates missing, black just shows hierarchical structure."),
html.Div("# Missingness",id='missing_div'),
dbc.ListGroup([dbc.ListGroupItem(active=True)],id='missing')
],label='Report')
]),label="Missingness",tab_id='missing_tab',label_style={"color": "blue"})
],id='mainTabs',active_tab='summary_tab')],width=9)
]),
#dcc.Graph(id='tree_graph'),
html.Div('written by Jeffrey Klann, PhD'), html.Br(),
# html.Div('errmsghere-todo-', id='msg0'),
html.Div('app_state', id='app_state'),
html.Br()
])
""" For SQLite, dbtype = "SQLITE" and db = filename of db
For MSSQL, dbtype = "MSSQL" and db is pyodbc format: DSN=MYMSSQL;UID=myuser;PWD=mypassword
For MSSQL, dbtype = "MSSQL" and db is a dict of server, user, password, and db (e.g., dbo)
"""
def initApp(*, dbtype="SQLITE",db="/Users/jklann/Google Drive/SCILHS Phase II/Committee, Cores, Panels/Informatics & Technology Core/totalnums/joined/totalnums.db"):
global conn,app,dbstyle,sites
# Initialize dashboard-wide globals
if dbtype=="SQLITE":
conn = sqlite3.connect(db,detect_types=sqlite3.PARSE_DECLTYPES, check_same_thread=False) # Parameter converts datetimes
elif dbtype=="MSSQL":
conn = pyodbc.connect(db)
# Store db type - let's call it db style
dbstyle=dbtype
print("Dash version:"+str(dcc.__version__))
# Get site list and store it in a global
sites = pd.read_sql("select distinct site from totalnums", conn).site.tolist()
if 'All' not in sites: sites.append('All')
options = list(map(lambda a: {'label': a, 'value': a}, sites))
#options.append({'label':'All','value':'All'})
app.layout['site'].options = options
app.layout['site'].value = 'All'
#app.layout['site'].children=[dbc.DropdownMenuItem(x) for x in sites]
return app.server
# This callback just clears the checkboxes when the button is pressed, otherwise they are never cleared when the
# options are updated and hidden checkboxes accumulate in the state.
@app.callback(
Output('items', 'value'),
[Input('zoom', 'n_clicks'), Input('unzoom', 'n_clicks')]
)
def clearTheChecks(clix, unclix):
return []
# New callback to print help
@app.callback(
Output('modalHelp','is_open'),
[Input('help','n_clicks')],
[State('modalHelp','is_open')]
)
def cbHelp(help,is_open):
if help:
return not is_open
return is_open
# New callback - switch explorer tabs depending on site or All selected
@app.callback(
Output('graphTabs','active_tab'),
[Input('site', 'value')],
[State('app_state', 'children')]
)
def cbSiteSwitchTab(site,app_state):
if site=='All':
return 'bars_tab'
else:
return 'hlevel_tab'
# New callback to print a summary header when a site is selected
@app.callback(
Output('summary_head', 'children'),
[Input('site','value')],
[State('app_state','children')]
)
def cbSummaryHead(site,app_state):
global conn
if site is not None and site!='All':
return site+' Summary'
elif site=='All':
return 'All sites selected'
return ""
# New callback to print a summary when a site is selected
"""@app.callback(
Output('summary', 'children'),
[Input('site','value')],
[State('app_state','children')]
)
def cbSummary(site,app_state):
global conn
if site is not None and site!='All':
appstatedict = json.loads(app_state)
if appstatedict['tab']=='summary_tab' and appstatedict['action']!='':
# TODO: This query is specific to COVID ontology and should reflect some principled way of determining elements for a summary table
query = "select domain, c_name, agg_count from totalnums_recent_joined j inner join toplevel_fullnames f on f.fullname_int=j.fullname_int where c_hlevel<6 and site='" + site + "'"
dfsum = pd.read_sql_query(query, conn)
return dfsum.to_markdown()
elif site=="All":
return "Select a single site for a summary."
return ""
"""
# New callback: update outlier options when the slider is changed
@app.callback(
Output('slidertext_siteoutlier', 'children'),
[Input('slider_siteoutlier','value')],
[State('app_state','children')]
)
def cbSiteoutlierSliderText(slider,app_state):
return "Threshold: " + str(slider)
# New callback: clear the active site selection when a tab is changed
@app.callback(
Output('site', 'value'),
[Input('mainTabs','active_tab')],
[State('app_state','children'),State('site','value')]
)
def cbActiveTabSiteAdjustment(active_tab,app_state,already_site):
print("ACTIVE" + already_site)
return ""
# New callback: update outlier options when the slider is changed
"""@app.callback(
Output('items_siteoutlier', 'options'),
[Input('slider_siteoutlier','value'),Input('site','value')],
[State('app_state','children')]
)"""
def cbSiteoutlierItems(slider,site,app_state):
global conn
c = conn.cursor()
#appstatedict = json.loads(app_state)
if site!='All':
sql = "select c_fullname AS value, c_name AS label from outliers_sites o inner join bigfullname b on o.fullname_int=b.fullname_int where site='%s' and agg_count>average+(%s*stdev)" % (
site,slider)
#sql = "select distinct c_fullname AS value,c_name AS label from totalnums_oldcols t inner join bigfullname b on t.fullname_int=b.fullname_int where c_hlevel='%s' and site='%s' and c_fullname like '%s'" % (
# str(appstatedict['hlevel']), appstatedict['site'], '\\'.join(appstatedict['path']) + '\\%')
items = pd.read_sql_query(sql, conn).to_dict('records')
print(sql)
return items
# New callback: display missingness markdown when a site is selected
@app.callback(
Output('missing_div', 'children'),
[Input('site','value')],
[State('app_state','children')]
)
def cbMissingMd(site,app_state):
global conn
if (app_state == 'app_state'): return {}
appstatedict = json.loads(app_state)
if appstatedict['tab'] == 'missing_tab':
c = conn.cursor()
# TODO: This is all non-leaf missingness. Probably want to look at leaf variance between sites vs. annotated high level missing
query = """select c_fullname, c_tooltip, c_hlevel, c_name from bigfullname fn inner join totalnums_recent r on r.fullname_int=fn.fullname_int
where fn.fullname_int not in (select fullname_int from totalnums_recent where site='{{}}') and c_visualattributes not like 'L%' and c_visualattributes not like '_H%'
order by c_hlevel """
query=query.replace("{{}}",site)
df = pd.read_sql_query(query,conn)
df=df.sort_values(by='c_fullname',axis=0,ascending=True)
        # I had this on one line but it was too hard to debug
# Compute a readable string for missingness
retval = []
current = []
for x in df.to_dict('records'):
parensplit = x['c_tooltip'].split('\\') # Compute this here bc hlevel is unreliable
if len(parensplit)>2:
for c,s in enumerate(parensplit[2:],start=1):
if s not in current:
txtcolor='red' if c==len(parensplit[2:]) else 'black'
retval.append(html.P(s,style={'margin-top':0,'margin-bottom':0,'padding':0,'margin-left':10*c,'color':('red' if c==len(parensplit[2:]) else 'black')}))
#retval.append(("#"*c)+' ' + str(c) + '.' + ('*'+s+'*' if c==len(parensplit[2:]) else s))
current=parensplit
return retval
#return [dbc.ListGroupItem(x['c_tooltip'].split('\\')[2] + ":" + x['c_name']) if x['c_hlevel']>2 else + ":" + x['c_name']) for x in df.to_dict('records')]
# New callback: display missingness when a site is selected
#@app.callback(
# Output('missing', 'children'),
# [Input('site','value')],
# [State('app_state','children')]
#)
def cbMissing(site,app_state):
global conn
c = conn.cursor()
# TODO: This is all non-leaf missingness. Probably want to look at leaf variance between sites vs. annotated high level missing
query = """select c_tooltip, c_hlevel, c_name from bigfullname fn inner join totalnums_recent r on r.fullname_int=fn.fullname_int
where fn.fullname_int not in (select fullname_int from totalnums_recent where site='{{}}') and c_visualattributes not like 'L%' and c_visualattributes not like '_H%'
order by c_hlevel """
query=query.replace("{{}}",site)
df = pd.read_sql_query(query,conn)
    # I had this on one line but it was too hard to debug
# Compute a readable string for missingness
retval = []
for x in df.to_dict('records'):
parensplit = x['c_tooltip'].split('\\') # Compute this here bc hlevel is unreliable
if len(parensplit)>2:
txt = parensplit[2] + ":" + x['c_name']
else:
txt = x['c_name']
retval.append(dbc.ListGroupItem(txt))
return retval
#return [dbc.ListGroupItem(x['c_tooltip'].split('\\')[2] + ":" + x['c_name']) if x['c_hlevel']>2 else + ":" + x['c_name']) for x in df.to_dict('records')]
# Run this when the app starts to set the state of things
# Also updates the state JSON when a button is clicked or the dropdown is used
@app.callback(
Output('app_state','children'),
[Input({'type': 'navbutton', 'index': ALL}, 'n_clicks'),Input('zoom', 'n_clicks'), Input('unzoom', 'n_clicks'), Input('site', 'value'),Input('mainTabs','active_tab'),Input('slider_siteoutlier','value')],
[State('items', 'value'), State('items', 'options'),State('app_state','children')]
)
def cbController(nclick_values,zoomclix,unzoomclix,site,tab,slider,checks,options,appstate):
global conn,dbstyle, sites,globalDbFile
if appstate=='app_state':
# New version of Dash, cannot share sqlite across windows
#initApp(db=globalDbFile)
# Initialize the app
c = conn.cursor()
zoom_clix = 0
unzoom_clix = 0
c.execute("select min(c_hlevel) from bigfullname")
hlevel = c.fetchone()[0]
minhlevel = hlevel
#query = "select top 1 c_fullname from bigfullname where c_hlevel=?" if dbstyle=="MSSQL" else "select c_fullname from bigfullname where c_hlevel=? limit 1"
#c.execute(query, str(hlevel)) # Limit 1 for PGSQL
c.execute("select c_fullname from bigfullname where c_hlevel="+str(hlevel)+" limit 1") # NOT SUPPORTING MSSQL RIGHT NOW TODO
pathstart = c.fetchone()[0]
pathstart = pathstart[0:pathstart[1:].find('\\') + 1]
path = [pathstart]
site = 'All' if 'All' in sites else sites[0] # There must be at least 1 site
app_state = {'action':'','zoom_clix': 0, 'unzoom_clix': 0, 'hlevel':hlevel,'minhlevel': minhlevel, 'path': path, 'site': site,'tab':tab, "slider":slider, 'selected':[], 'selected_new':""}
return json.dumps(app_state)
appstatedict = json.loads(appstate)
# If slider was moved but not button click, or callback called on startup, or multiple checked or nothing checked
unclix = 0 if unzoomclix is None else unzoomclix
clix=0 if zoomclix is None else zoomclix
if (slider and slider != appstatedict['slider']):
        # Slider moved
appstatedict['slider']=slider
if (tab and tab != appstatedict['tab']):
# Tab changed
appstatedict['tab']=tab
if (site and site != appstatedict['site']):
# Site changed!
appstatedict['site'] = site if site else 'All'
print("New site selected:" + site)
print("Controller - New Site selected")
appstatedict['action']='site'
elif unclix != appstatedict['unzoom_clix']:
appstatedict['unzoom_clix'] = unclix
if int(appstatedict['hlevel']) > int(appstatedict['minhlevel']):
appstatedict['hlevel'] = int(appstatedict['hlevel']) - 1
appstatedict['path'] = appstatedict['path'][:-1]
appstatedict['action']='unzoom'
appstatedict['selected_new']=''
appstatedict['selected']=[]
print("Controller - Unzoom:" + str(appstatedict['path']))
#elif len(checks) == 0 or len(checks) > 1:
# appstatedict['action']='none'
# print("Controller - no action")
elif appstatedict['zoom_clix'] != clix:
appstatedict['zoom_clix'] = clix
appstatedict['hlevel'] = int(appstatedict['hlevel']) + 1
# Use checkbox - appstatedict['path'].append(checks[0][checks[0][:-1].rfind('\\') + 1:-1])
#appstatedict['path'].append(appstatedict['selected_new'][appstatedict['selected_new'][:-1].rfind('\\') + 1:-1])
        # new version just rebuilds the path string each time, in case there are segments (like in ACT) that just provide version info and are not part of the hierarchy
#appstatedict['path']=appstatedict['selected_new'].split('\\')[:-1]
# even newer version only adds one path element for reverse navigation but preserves untraversed segments in the path
appstatedict['path'].append(
appstatedict['selected_new'][appstatedict['selected_new'].find('\\'.join(appstatedict['path']))+len('\\'.join(appstatedict['path']))+1:-1])
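        # Worked illustration of the slice above (hypothetical ontology values):
        #   path = ['\\ACT', 'Diagnoses']  ->  '\\'.join(path) == '\\ACT\\Diagnoses'
        #   selected_new = '\\ACT\\Diagnoses\\Circulatory\\'
        #   the slice pulls out 'Circulatory', so path becomes ['\\ACT', 'Diagnoses', 'Circulatory']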
appstatedict['action']='zoom'
appstatedict['selected_new'] = ''
appstatedict['selected'] = []
print("Controller - Zoom:" + str(appstatedict['path']))
# Nav buttons were clicked
# Index is the index element of the id
# nclicks is an int of the number of clicks for this button
# values is a list of nclicks for all buttons in the current list
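    # Illustration (hypothetical values): for a pattern-matching id such as
    #   {'type': 'navbutton', 'index': '\\ACT\\Diagnoses\\'}
    # dash.callback_context.triggered[0]['prop_id'] looks roughly like
    #   '{"index":"\\ACT\\Diagnoses\\","type":"navbutton"}.n_clicks'
    # so the [:-9] below strips the trailing '.n_clicks' and json.loads recovers the id dict,
    # whose 'index' is the c_fullname of the clicked button.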
if 'index' in dash.callback_context.triggered[0]['prop_id']:
parsedContext = json.loads(dash.callback_context.triggered[0]['prop_id'][:-9])
index=parsedContext['index']
nclicks = dash.callback_context.triggered[0]['value']
appstatedict['action']='navclick'
appstatedict['selected_new']=index
if index not in appstatedict['selected']:
appstatedict['selected'].append(index)
else:
appstatedict['selected'].remove(index)
return json.dumps(appstatedict)
# This is the callback when someone clicks the zoom button, which moves down the hierarchy
# It also needs to handle the base case of just setting the state of the items.
# THIS VERSION DOES IT WITH BUTTONS!
@app.callback(
Output('navbuttons', 'children'),
# [Input('zoom', 'n_clicks'), Input('unzoom', 'n_clicks')],
[Input('app_state','children')],
[State('items', 'value'), State('navbuttons', 'children')]
)
def cbNavigateButtons(state, checks, options):
global conn
if (state=='app_state'): return options
appstatedict = json.loads(state)
    # Update only if we navigated the ontology or if we're in the outlier tab (which has a bunch of things)
if appstatedict['action'] in ('zoom','unzoom','') or appstatedict['tab']=='siteoutlier_tab':
#sql_select = "select distinct c_fullname AS value,c_name AS label from totalnums_oldcols t inner join bigfullname b on t.fullname_int=b.fullname_int "
if appstatedict['tab']=='explorer_tab':
sql_select = "select distinct c_visualattributes, c_fullname AS value,c_name AS label from bigfullname " # speedup compared to totalnums_recent_joined
elif appstatedict['tab']=='siteoutlier_tab':
sql_select = "select distinct abs(pct-average)>(%s*stdev) as outlier,c_fullname AS value,c_name AS label, c_visualattributes from outliers_sites_pct t inner join bigfullname b on t.fullname_int=b.fullname_int " % (
str(appstatedict['slider']))
elif appstatedict['tab']=='missing_tab':
sql_select = """select c_fullname, c_tooltip, c_hlevel, c_name from bigfullname fn inner join totalnums_recent r on r.fullname_int=fn.fullname_int
where fn.fullname_int not in (select fullname_int from totalnums_recent where site='{{}}') and c_visualattributes not like 'L%' and c_visualattributes not like '_H%'
order by c_hlevel """
sql_select = """select case when notin is null then 1 else 0 end outlier, c_fullname as value, c_name as label, c_visualattributes from
(select distinct fn.c_fullname, fn.c_tooltip, fn.c_hlevel, fn.c_name, fn.c_visualattributes, notin.fullname_int notin from bigfullname fn inner join totalnums_recent r on r.fullname_int=fn.fullname_int
left outer join (select * from totalnums_recent where site='{{}}') notin on notin.fullname_int=fn.fullname_int
where c_visualattributes not like 'L%' and c_visualattributes not like '_H%'
order by c_hlevel) x """
sql_select = sql_select.replace("{{}}", appstatedict['site'])
else:
sql_select = "select distinct c_fullname AS value,c_name AS label, c_visualattributes from bigfullname " # speedup compared to totalnums_recent_joined
# Compute the items for the checkboxes and return
# Special logic to get all items in ontology if missing tab or all sites are selected
if appstatedict['site']=='All' or appstatedict['tab']=='missing_tab' or appstatedict['tab']=='explorer_tab':
sql_where=" where c_hlevel='%s' and c_fullname like '%s'" % (
str(appstatedict['hlevel']), '\\'.join(appstatedict['path']) + '\\%')
else:
sql_where = " where c_hlevel='%s' and site='%s' and c_fullname like '%s'" % (
str(appstatedict['hlevel']), appstatedict['site'], '\\'.join(appstatedict['path']) + '\\%')
items = pd.read_sql_query(sql_select+sql_where, conn).to_dict('records')
print(sql_select+sql_where)
out = []
for i in items:
out.append(dbc.Button(i['label'],className="mr-1", id={'type':'navbutton','index':i['value']},
style={'font-size':'10pt'},
color=('danger' if 'outlier' in i and i['outlier']==1 else 'dark'),
outline=(True if i['c_visualattributes'] in ('FAE','FA','FA ','CA ','CAE','CA') else False)))
return out
if appstatedict['action']=='navclick':
appstatedict['selected']
return options
# This callback draws the graph whenever checkboxes change or site is changed
@app.callback(
Output('hlevel_graph', 'figure'),
[Input('app_state', 'children')],
[State('navbuttons', 'children'), State('hlevel_graph', 'figure')]
)
def cbLineGraphButtons(state, navbuttons,oldfig):
global conn
if (state=='app_state'): return {}
start = time.time()
appstatedict = json.loads(state)
if appstatedict['action'] in ('navclick','zoom','site') and appstatedict['tab']=='explorer_tab':
# Get just the available data in the df
sql = "select distinct c_fullname,refresh_date,c_name,c from totalnums_oldcols t inner join bigfullname b on t.fullname_int=b.fullname_int where c_hlevel='%s' and site='%s' and c_fullname like '%s' order by refresh_date asc" % (
appstatedict['hlevel'], appstatedict['site'], '\\'.join(appstatedict['path']) + '\\%')
print(sql)
dfsub = pd.read_sql_query(sql, conn)
traces = []
ymax = 0
for n in appstatedict['selected']:
xf = dfsub[dfsub.c_fullname == n]
if len(xf) > 0:
traces.append(
go.Scatter(x=xf['refresh_date'], y=xf['c'], text=xf.iloc[0, :].c_name, name=xf.iloc[0, :].c_name,
marker={'size': 15}, mode='lines+markers'))
ymax=max(ymax,xf.groupby(by='c_fullname').max()['c'].values[0]) # Fix 11-19 - put the legend in the right place
Cstd=xf['c'].std()
Cmean=xf['c'].mean()
Clow = Cmean - 3*Cstd
if Clow<0: Clow=0
#traces.append(go.Scatter(x=[xf['refresh_date'].min(),xf['refresh_date'].max()],y=[Cmean,Cmean],name='mean of '+xf.iloc[0,:].c_name,mode='lines')) # Mean
#traces.append(go.Scatter(x=[xf['refresh_date'].min(), xf['refresh_date'].max()], y=[Cmean+3*Cstd, Cmean+3*Cstd],
# name='high control of ' + xf.iloc[0, :].c_name, mode='lines'))
#traces.append(go.Scatter(x=[xf['refresh_date'].min(), xf['refresh_date'].max()], y=[Clow, Clow],
# name='low control of ' + xf.iloc[0, :].c_name, mode='lines'))
print("Graph time:"+str(time.time()-start)+",traces:"+str(len(traces)))
layout = {'legend':{'x':0,'y':ymax},'showlegend':True}
return {'data': traces, 'layout': layout}
return oldfig if oldfig is not None else {}
# This callback draws the bar graph whenever checkboxes change
@app.callback(
Output('bars_graph', 'figure'),
[Input('app_state', 'children')],
[State('navbuttons', 'children')]
)
def cbBarGraphButtons(state,navbuttons):
global conn
if (state=='app_state'): return {}
start = time.time()
appstatedict = json.loads(state)
if appstatedict['tab'] == 'explorer_tab':
sql = "select distinct c_fullname,site,c_name,max(c) c from totalnums_oldcols t inner join bigfullname b on t.fullname_int=b.fullname_int where site!='All' and c_hlevel='%s' and c_fullname like '%s' group by c_fullname,site,c_name" % (
appstatedict['hlevel'], '\\'.join(appstatedict['path']) + '\\%')
print(sql)
dfsub = pd.read_sql_query(sql, conn)
"""traces = []
ymax = 0
for n in appstatedict['selected']:
xf = dfsub[dfsub.c_fullname == n]
if len(xf) > 0:
fig = px.bar"""
figure = go.Figure(data=go.Bar(y=[5, 5, 1]))
traces=[]
x = [
["BB+", "BB+", "BB+", "BB", "BB", "BB"],
[16, 17, 18, 16, 17, 18, ]
]
fig = go.Figure()
fig.add_bar(x=x, y=[1, 2, 3, 4, 5, 6])
fig.add_bar(x=x, y=[6, 5, 4, 3, 2, 1])
fig.update_layout(barmode="relative")
return figure
return
""""# Get just the available data in the df
sql = "select distinct c_fullname,site,c_name,max(c) c from totalnums_oldcols t inner join bigfullname b on t.fullname_int=b.fullname_int where site!='All' and c_hlevel='%s' and c_fullname like '%s' group by c_fullname,site,c_name" % (
appstatedict['hlevel'], '\\'.join(appstatedict['path']) + '\\%')
print(sql)
dfsub = pd.read_sql_query(sql, conn)
traces = []
ymax=0
for n in appstatedict['selected']:
xf = dfsub[dfsub.c_fullname == n]
if len(xf) > 0:
ymax = max(ymax, xf.groupby(by='c_fullname').max()['c'].values[0])
traces.append(
#go.Bar(x=xf['site'].tolist(), y=xf['c'].tolist(), text=xf.iloc[0, :].c_name, name=xf.iloc[0, :].c_name))
go.Bar(x=xf['site'].tolist(), y=xf['c'].tolist()))
# marker={'size': 15}, mode='lines+markers'))
print("Bar time:"+str(time.time()-start))
#layout = {'legend':{'x':0,'y':ymax},'showlegend':True}
layout = go.Layout(barmode='stack')
if len(traces)>0:
print(traces)
return {'data': traces, 'layout': layout}
#return {'data': traces}"""
# This callback draws the site-outlier bar graph whenever the app state changes
@app.callback(
Output('siteoutlier_graph', 'figure'),
#[Input('items_siteoutlier', 'value')],
[Input('app_state', 'children')],
[State('navbuttons', 'children'), State('hlevel_graph', 'figure')]
)
def cbSiteoutlierGraph(state,navbuttons,oldfig):
global conn
if (state=='app_state'): return {}
start = time.time()
appstatedict = json.loads(state)
    if (appstatedict['site'] and appstatedict['site']=='All') or appstatedict['tab']!='siteoutlier_tab': return {} # 'All' sites not supported here; a single site must be chosen for comparison
# Get just the available data in the df
sql = "select distinct c_fullname,site,c_name,max(pct) c from totalnums_recent_pct t inner join bigfullname b on t.fullname_int=b.fullname_int where site!='All' and c_hlevel='%s' and c_fullname like '%s' group by c_fullname,site,c_name" % (
appstatedict['hlevel'], '\\'.join(appstatedict['path']) + '\\%')
dfsub = pd.read_sql_query(sql, conn)
print(sql)
# Also get average
sql = "select distinct c_fullname,c_name,average avg, (%s*stdev) stdev from outliers_sites_pct t inner join bigfullname b on t.fullname_int=b.fullname_int where site!='All' and c_hlevel='%s' and c_fullname like '%s' group by c_fullname,site,c_name" % (
str(appstatedict['slider']),appstatedict['hlevel'], '\\'.join(appstatedict['path']) + '\\%')
dfavg = pd.read_sql_query(sql, conn)
print(sql)
traces = []
n=appstatedict['selected_new']
xf = dfsub[dfsub.c_fullname == n]
xavg = dfavg[dfavg.c_fullname == n]
graph_title= 'Nothing selected'
# Add avg
if len(xavg) > 0:
traces.append(
go.Bar(x=['average' for x in xavg['avg']], y=xavg['avg'], error_y=dict(type='data',visible=True,array=xavg['stdev']),
text=xavg.iloc[0, :].c_name, name='avg'))
#traces.append(
# go.Bar(x=['avg + %s * stdev' % str(appstatedict['slider']) for x in xavg['stdev']], y=xavg['stdev'],
# text=xavg.iloc[0, :].c_name, name='%s * stdev' % str(appstatedict['slider'])))
if len(xf) > 0:
xf_site = xf[xf['site']==appstatedict['site']]
xf_notsite=xf[xf['site']!=appstatedict['site']]
# Site color red or green depending on outlier status
print(xf_site['c'].iloc[0])
site_color= 'rgba(204,50,50,1)' if (abs(xavg['avg']-xf_site['c'].iloc[0])-xavg['stdev']).iloc[0]>0 else 'rgba(50,204,50,1)'
graph_title = xf_site.iloc[0, :].c_name
# Site value
traces.append(
go.Bar(x=xf_site['site'], y=xf_site['c'],
marker={'color':site_color},text=xf_site.iloc[0, :].c_name, name=xf_site['site'].iloc[0]))
# Other site values
traces.append(
go.Bar(x=xf_notsite['site'], y=xf_notsite['c'], text=xf_notsite.iloc[0, :].c_name,
                   marker={'color':'rgba(204,204,204,1)'},name='Other sites'))
# Add selected site avg
#traces.append(
# go.Bar(x=[appstatedict['site']], y=xavg['stdev'], text=xf.iloc[0, :].c_name, name=xf.iloc[0, :].c_name))
print("Bar time:"+str(time.time()-start))
layout = go.Layout(barmode='stack', title=graph_title)
return {'data': traces,'layout':layout}
if __name__=='__main__':
# MSSQL
""" password_mssql = keyring.get_password(service_name='db.totalnums_mssql',username='i2b2') # You need to previously have set it with set_password
db = {'server':'localMSSQL','user':'i2b2','password':password_mssql,'db':'dbo'}
db="DSN=localMSSQL;UID=i2b2;PWD="+password_mssql
initApp(dbtype='MSSQL',db=db)
"""
# SQLite
globalDbFile = "/Users/jeffklann/HMS/Projects/ACT/totalnum_data/reports/totalnums.db"
initApp(
db="/Users/jeffklann/HMS/Projects/ACT/totalnum_data/reports/totalnums.db")
app.run_server(debug=False,threaded=False)
|
the-stack_106_17686
|
# -*- coding: utf-8 -*-
"""Implements Session to control USB Raw devices
Loosely based on PyUSBTMC: a python module to handle
USB-TMC (Test and Measurement class) devices, by Noboru Yamamot, Accl. Lab, KEK, JAPAN.
This file is an offspring of the Lantz Project.
:copyright: 2014-2020 by PyVISA-py Authors, see AUTHORS for more details.
:license: MIT, see LICENSE for more details.
"""
from .usbtmc import USBRaw as USBRaw
from .usbutil import find_devices, find_interfaces
def find_raw_devices(
vendor=None, product=None, serial_number=None, custom_match=None, **kwargs
):
"""Find connected USB RAW devices. See usbutil.find_devices for more info.
"""
def is_usbraw(dev):
if custom_match and not custom_match(dev):
return False
return bool(find_interfaces(dev, bInterfaceClass=0xFF, bInterfaceSubClass=0xFF))
return find_devices(vendor, product, serial_number, is_usbraw, **kwargs)
class USBRawDevice(USBRaw):
RECV_CHUNK = 1024 ** 2
find_devices = staticmethod(find_raw_devices)
def __init__(self, vendor=None, product=None, serial_number=None, **kwargs):
super(USBRawDevice, self).__init__(vendor, product, serial_number, **kwargs)
if not (self.usb_recv_ep and self.usb_send_ep):
raise ValueError(
"USBRAW device must have both Bulk-In and Bulk-out endpoints."
)
def write(self, data):
"""Send raw bytes to the instrument.
:param data: bytes to be sent to the instrument
:type data: bytes
"""
begin, end, size = 0, 0, len(data)
bytes_sent = 0
raw_write = super(USBRawDevice, self).write
while not end > size:
begin = end
end = begin + self.RECV_CHUNK
bytes_sent += raw_write(data[begin:end])
return bytes_sent
def read(self, size):
"""Read raw bytes from the instrument.
:param size: amount of bytes to be sent to the instrument
:type size: integer
:return: received bytes
:return type: bytes
"""
raw_read = super(USBRawDevice, self).read
received = bytearray()
while not len(received) >= size:
resp = raw_read(self.RECV_CHUNK)
received.extend(resp)
return bytes(received)
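# Hedged usage sketch (vendor/product IDs are placeholders; assumes a matching USB raw
# device with Bulk-In and Bulk-Out endpoints is attached):
#   dev = USBRawDevice(vendor=0x1234, product=0x5678)
#   sent = dev.write(b"*IDN?\n")
#   reply = dev.read(64)   # blocks until 64 bytes have been received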
|
the-stack_106_17687
|
'''
Function:
    Game over interface
Author:
    Charles
WeChat Official Account:
    Charles的皮卡丘
'''
import sys
import pygame
# Game over interface
class EndInterface(pygame.sprite.Sprite):
def __init__(self, WIDTH, HEIGHT):
pygame.sprite.Sprite.__init__(self)
self.imgs = ['./resource/imgs/end/gameover.png']
self.image = pygame.image.load(self.imgs[0]).convert()
self.rect = self.image.get_rect()
self.rect.center = (WIDTH/2, HEIGHT/2)
# just pass
def update(self):
pass
# Continue button
class ContinueButton(pygame.sprite.Sprite):
def __init__(self, position=(400, 409)):
pygame.sprite.Sprite.__init__(self)
self.imgs = ['./resource/imgs/end/continue_black.png', './resource/imgs/end/continue_red.png']
self.img_1 = pygame.image.load(self.imgs[0]).convert()
self.img_2 = pygame.image.load(self.imgs[1]).convert()
self.image = self.img_1
self.rect = self.image.get_rect()
self.rect.center = position
def update(self):
mouse_pos = pygame.mouse.get_pos()
if self.rect.collidepoint(mouse_pos):
self.image = self.img_2
else:
self.image = self.img_1
# Game over class
class END():
def __init__(self, WIDTH, HEIGHT):
self.EI = EndInterface(WIDTH, HEIGHT)
self.CB = ContinueButton()
self.components = pygame.sprite.LayeredUpdates(self.EI, self.CB)
    # Called externally
def update(self, screen):
clock = pygame.time.Clock()
background = pygame.Surface(screen.get_size())
count = 0
flag = True
while True:
count += 1
clock.tick(60)
self.components.clear(screen, background)
self.components.update()
if count % 10 == 0:
count = 0
flag = not flag
if flag:
self.components.draw(screen)
else:
screen.blit(self.EI.image, self.EI.rect)
pygame.display.flip()
for event in pygame.event.get():
if event.type == pygame.QUIT:
                    pygame.quit()
                    sys.exit(0)
elif event.type == pygame.MOUSEBUTTONDOWN:
if event.button == 1:
mouse_pos = pygame.mouse.get_pos()
if self.CB.rect.collidepoint(mouse_pos):
return True
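# Hedged usage sketch (assumes pygame and a display were initialized by the caller;
# WIDTH/HEIGHT are placeholders for the window size):
#   screen = pygame.display.set_mode((WIDTH, HEIGHT))
#   end_screen = END(WIDTH, HEIGHT)
#   restart = end_screen.update(screen)   # loops until Continue is clicked, then returns True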
|
the-stack_106_17691
|
import json
import random
import pytest
from indy_common.authorize.auth_constraints import AuthConstraintForbidden
from indy_common.constants import RS_CONTEXT_TYPE_VALUE, JSON_LD_CONTEXT, RS_SCHEMA_TYPE_VALUE, \
RS_MAPPING_TYPE_VALUE, RS_ENCODING_TYPE_VALUE, RS_CRED_DEF_TYPE_VALUE, RICH_SCHEMA, RICH_SCHEMA_ENCODING, \
RICH_SCHEMA_MAPPING, RICH_SCHEMA_CRED_DEF, RICH_SCHEMA_PRES_DEF, RS_PRES_DEF_TYPE_VALUE
from indy_node.test.api.helper import sdk_write_rich_schema_object_and_check, \
sdk_build_rich_schema_request
from indy_node.test.helper import rich_schemas_enabled_scope
from indy_node.test.rich_schema.templates import W3C_BASE_CONTEXT, RICH_SCHEMA_EX1, RICH_SCHEMA_ENCODING_EX1, \
RICH_SCHEMA_MAPPING_EX1, RICH_SCHEMA_CRED_DEF_EX1, RICH_SCHEMA_PRES_DEF_EX1
from plenum.common.constants import TXN_PAYLOAD_METADATA_REQ_ID
from plenum.common.exceptions import RequestRejectedException, RequestNackedException
from plenum.common.types import OPERATION
from plenum.common.util import randomString
from plenum.test.helper import sdk_sign_and_submit_req, sdk_get_and_check_replies
@pytest.fixture(scope="module")
def tconf(tconf):
with rich_schemas_enabled_scope(tconf):
yield tconf
# The order of creation is essential as some rich schema object reference others by ID
# Encoding's id must be equal to the one used in RICH_SCHEMA_MAPPING_EX1
@pytest.mark.parametrize('txn_type, rs_type, content, rs_id',
[(JSON_LD_CONTEXT, RS_CONTEXT_TYPE_VALUE, W3C_BASE_CONTEXT, randomString()),
(RICH_SCHEMA, RS_SCHEMA_TYPE_VALUE, RICH_SCHEMA_EX1, RICH_SCHEMA_EX1['@id']),
(RICH_SCHEMA_ENCODING, RS_ENCODING_TYPE_VALUE, RICH_SCHEMA_ENCODING_EX1, "did:sov:1x9F8ZmxuvDqRiqqY29x6dx9oU4qwFTkPbDpWtwGbdUsrCD"),
(RICH_SCHEMA_MAPPING, RS_MAPPING_TYPE_VALUE, RICH_SCHEMA_MAPPING_EX1, RICH_SCHEMA_MAPPING_EX1['@id']),
(RICH_SCHEMA_CRED_DEF, RS_CRED_DEF_TYPE_VALUE, RICH_SCHEMA_CRED_DEF_EX1, randomString()),
(RICH_SCHEMA_PRES_DEF, RS_PRES_DEF_TYPE_VALUE, RICH_SCHEMA_PRES_DEF_EX1, RICH_SCHEMA_PRES_DEF_EX1['@id'])])
def test_send_rich_schema_obj(looper, sdk_pool_handle, sdk_wallet_endorser,
txn_type, rs_type, content, rs_id):
# 1. check that write is successful
request = sdk_build_rich_schema_request(looper, sdk_wallet_endorser,
txn_type, rs_id=rs_id, rs_name=randomString(),
rs_version='1.0', rs_type=rs_type,
rs_content=json.dumps(content))
req = sdk_sign_and_submit_req(sdk_pool_handle, sdk_wallet_endorser, request)
rep1 = sdk_get_and_check_replies(looper, [req])
# 2. check that sending the same request gets the same reply
req = sdk_sign_and_submit_req(sdk_pool_handle, sdk_wallet_endorser, request)
rep2 = sdk_get_and_check_replies(looper, [req])
assert rep1 == rep2
# 3. check that using a different reqId for the same request gets an error
request2 = json.loads(request)
request2[TXN_PAYLOAD_METADATA_REQ_ID] = random.randint(10, 1000000000)
request2 = json.dumps(request2)
with pytest.raises(RequestRejectedException,
match=str(AuthConstraintForbidden())):
req = sdk_sign_and_submit_req(sdk_pool_handle, sdk_wallet_endorser, request2)
sdk_get_and_check_replies(looper, [req])
@pytest.mark.parametrize('txn_type, rs_type, content, rs_id',
[(JSON_LD_CONTEXT, RS_CONTEXT_TYPE_VALUE, W3C_BASE_CONTEXT, randomString()),
(RICH_SCHEMA, RS_SCHEMA_TYPE_VALUE, RICH_SCHEMA_EX1, RICH_SCHEMA_EX1['@id']),
(RICH_SCHEMA_ENCODING, RS_ENCODING_TYPE_VALUE, RICH_SCHEMA_ENCODING_EX1, "did:sov:1x9F8ZmxuvDqRiqqY29x6dx9oU4qwFTkPbDpWtwGbdUsrCD"),
(RICH_SCHEMA_MAPPING, RS_MAPPING_TYPE_VALUE, RICH_SCHEMA_MAPPING_EX1, RICH_SCHEMA_MAPPING_EX1['@id']),
(RICH_SCHEMA_CRED_DEF, RS_CRED_DEF_TYPE_VALUE, RICH_SCHEMA_CRED_DEF_EX1, randomString()),
(RICH_SCHEMA_PRES_DEF, RS_PRES_DEF_TYPE_VALUE, RICH_SCHEMA_PRES_DEF_EX1, RICH_SCHEMA_PRES_DEF_EX1['@id'])])
@pytest.mark.parametrize('missing_field',
["id", "rsName", "rsVersion", "content", "rsType"])
def test_validate_fail_missing_fields(looper, sdk_pool_handle, sdk_wallet_endorser,
txn_type, rs_type, content, rs_id, missing_field):
request = sdk_build_rich_schema_request(looper, sdk_wallet_endorser,
txn_type, rs_id=rs_id, rs_name=randomString(),
rs_version='1.0', rs_type=rs_type,
rs_content=json.dumps(content))
request = json.loads(request)
request[OPERATION].pop(missing_field, None)
request = json.dumps(request)
with pytest.raises(RequestNackedException, match='missed fields'):
req = sdk_sign_and_submit_req(sdk_pool_handle, sdk_wallet_endorser, request)
sdk_get_and_check_replies(looper, [req])
|
the-stack_106_17692
|
# Natural Language Toolkit: Word Sense Disambiguation Algorithms
#
# Authors: Liling Tan <[email protected]>,
# Dmitrijs Milajevs <[email protected]>
#
# Copyright (C) 2001-2018 NLTK Project
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
from nltk.corpus import wordnet
def lesk(context_sentence, ambiguous_word, pos=None, synsets=None):
"""Return a synset for an ambiguous word in a context.
:param iter context_sentence: The context sentence where the ambiguous word
occurs, passed as an iterable of words.
:param str ambiguous_word: The ambiguous word that requires WSD.
:param str pos: A specified Part-of-Speech (POS).
:param iter synsets: Possible synsets of the ambiguous word.
:return: ``lesk_sense`` The Synset() object with the highest signature overlaps.
This function is an implementation of the original Lesk algorithm (1986) [1].
Usage example::
>>> lesk(['I', 'went', 'to', 'the', 'bank', 'to', 'deposit', 'money', '.'], 'bank', 'n')
Synset('savings_bank.n.02')
[1] Lesk, Michael. "Automatic sense disambiguation using machine
readable dictionaries: how to tell a pine cone from an ice cream
cone." Proceedings of the 5th Annual International Conference on
Systems Documentation. ACM, 1986.
http://dl.acm.org/citation.cfm?id=318728
"""
context = set(context_sentence)
if synsets is None:
synsets = wordnet.synsets(ambiguous_word)
if pos:
synsets = [ss for ss in synsets if str(ss.pos()) == pos]
if not synsets:
return None
_, sense = max(
(len(context.intersection(ss.definition().split())), ss) for ss in synsets
)
return sense
|
the-stack_106_17693
|
"""
This file offers the methods to automatically retrieve the graph Candidatus Peregrinibacteria bacterium GW2011_GWA2_38_36.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def CandidatusPeregrinibacteriaBacteriumGw2011Gwa23836(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
"""Return new instance of the Candidatus Peregrinibacteria bacterium GW2011_GWA2_38_36 graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False
        Whether to load the graph as directed or undirected.
By default false.
preprocess: bool = True
Whether to preprocess the graph to be loaded in
optimal time and memory.
load_nodes: bool = True,
Whether to load the nodes vocabulary or treat the nodes
simply as a numeric range.
verbose: int = 2,
        Whether to show loading bars during the retrieval and building
of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
cache_path: str = "graphs"
Where to store the downloaded graphs.
version: str = "links.v11.5"
The version of the graph to retrieve.
The available versions are:
- homology.v11.5
- physical.links.v11.5
- links.v11.5
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
    Instance of Candidatus Peregrinibacteria bacterium GW2011_GWA2_38_36 graph.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
return AutomaticallyRetrievedGraph(
graph_name="CandidatusPeregrinibacteriaBacteriumGw2011Gwa23836",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
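# Hedged usage sketch (requires the ensmallen package and, on first use, network access
# to download the STRING data):
#   graph = CandidatusPeregrinibacteriaBacteriumGw2011Gwa23836(directed=False)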
|
the-stack_106_17695
|
import time
"""
The knapsack problem is a problem in combinatorial optimization: Given a set of items, each with a weight and a value,
determine the number of each item to include in a collection so that the total weight is less than or equal to a given limit
and the total value is as large as possible. It derives its name from the problem faced by someone who is constrained by a
fixed-size knapsack and must fill it with the most valuable items.
"""
def knapsack_without_dynamic(bag_limit, items_value, items_weight, number_items):
"""
This function calculate in recursive mode the value.
Parameters:
int bag_limit
list items_value
list items_weight
int number_items
Returns:
int
"""
if number_items == 0 or bag_limit == 0:
return 0
if items_weight[number_items - 1] > bag_limit:
return knapsack_without_dynamic(bag_limit, items_value, items_weight, number_items - 1)
else:
return max(items_value[number_items - 1] + knapsack_without_dynamic(bag_limit - items_weight[number_items - 1],
items_value, items_weight, number_items - 1), knapsack_without_dynamic(bag_limit, items_value, items_weight, number_items - 1))
def knapsack_with_dynamic(bag_limit, items_value, items_weight, number_items):
"""
    This function calculates and stores intermediate results in a matrix, from which the solution is read at the end (bottom-up dynamic programming).
Parameters:
int bag_limit
list items_value
list items_weight
int number_items
Returns:
int
"""
matrix = [[0 for x in range(bag_limit + 1)] for x in range(number_items + 1)]
# Table in bottom up manner
for i in range(number_items + 1):
for w in range(bag_limit + 1):
if i == 0 or w == 0:
matrix[i][w] = 0
elif items_weight[i - 1] <= w:
matrix[i][w] = max(items_value[i - 1] + matrix[i - 1][w - items_weight[i - 1]], matrix[i - 1][w])
else:
matrix[i][w] = matrix[i - 1][w]
    return matrix[number_items][bag_limit]
def execution_time(started):
print('Executed in:', round(time.time() - started, 4), ' seconds')
items_value = [50, 100, 150, 200, 250, 300, 350, 400, 500, 600, 700, 800, 900, 1000, 1100, 1200, 1300]
items_weight = [8, 16, 32, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140, 150, 160, 170]
total_items = len(items_value)
limit = 1150
# without dynamic programming
start = time.time()
print(knapsack_without_dynamic(limit, items_value, items_weight, total_items))
execution_time(start)
# with dynamic programing
start = time.time()
print(knapsack_with_dynamic(limit, items_value, items_weight, total_items))
execution_time(start)
|
the-stack_106_17696
|
import pickle
from collections import OrderedDict
from distutils.version import LooseVersion
import cloudpickle
import numpy as np
import pytest
import torch
from torch import nn
from pytorch_lightning.metrics.metric import Metric, MetricCollection
torch.manual_seed(42)
class Dummy(Metric):
name = "Dummy"
def __init__(self):
super().__init__()
self.add_state("x", torch.tensor(0.0), dist_reduce_fx=None)
def update(self):
pass
def compute(self):
pass
class DummyList(Metric):
name = "DummyList"
def __init__(self):
super().__init__()
self.add_state("x", list(), dist_reduce_fx=None)
def update(self):
pass
def compute(self):
pass
def test_inherit():
Dummy()
def test_add_state():
a = Dummy()
a.add_state("a", torch.tensor(0), "sum")
assert a._reductions["a"](torch.tensor([1, 1])) == 2
a.add_state("b", torch.tensor(0), "mean")
assert np.allclose(a._reductions["b"](torch.tensor([1.0, 2.0])).numpy(), 1.5)
a.add_state("c", torch.tensor(0), "cat")
assert a._reductions["c"]([torch.tensor([1]), torch.tensor([1])]).shape == (2, )
with pytest.raises(ValueError):
a.add_state("d1", torch.tensor(0), 'xyz')
with pytest.raises(ValueError):
a.add_state("d2", torch.tensor(0), 42)
with pytest.raises(ValueError):
a.add_state("d3", [torch.tensor(0)], 'sum')
with pytest.raises(ValueError):
a.add_state("d4", 42, 'sum')
def custom_fx(x):
return -1
a.add_state("e", torch.tensor(0), custom_fx)
assert a._reductions["e"](torch.tensor([1, 1])) == -1
def test_add_state_persistent():
a = Dummy()
a.add_state("a", torch.tensor(0), "sum", persistent=True)
assert "a" in a.state_dict()
a.add_state("b", torch.tensor(0), "sum", persistent=False)
if LooseVersion(torch.__version__) >= LooseVersion("1.6.0"):
assert "b" not in a.state_dict()
def test_reset():
class A(Dummy):
pass
class B(DummyList):
pass
a = A()
assert a.x == 0
a.x = torch.tensor(5)
a.reset()
assert a.x == 0
b = B()
assert isinstance(b.x, list) and len(b.x) == 0
b.x = torch.tensor(5)
b.reset()
assert isinstance(b.x, list) and len(b.x) == 0
def test_update():
class A(Dummy):
def update(self, x):
self.x += x
a = A()
assert a.x == 0
assert a._computed is None
a.update(1)
assert a._computed is None
assert a.x == 1
a.update(2)
assert a.x == 3
assert a._computed is None
def test_compute():
class A(Dummy):
def update(self, x):
self.x += x
def compute(self):
return self.x
a = A()
assert 0 == a.compute()
assert 0 == a.x
a.update(1)
assert a._computed is None
assert a.compute() == 1
assert a._computed == 1
a.update(2)
assert a._computed is None
assert a.compute() == 3
assert a._computed == 3
# called without update, should return cached value
a._computed = 5
assert a.compute() == 5
def test_hash():
class A(Dummy):
pass
class B(DummyList):
pass
a1 = A()
a2 = A()
assert hash(a1) != hash(a2)
b1 = B()
b2 = B()
assert hash(b1) == hash(b2)
assert isinstance(b1.x, list) and len(b1.x) == 0
b1.x.append(torch.tensor(5))
assert isinstance(hash(b1), int) # <- check that nothing crashes
assert isinstance(b1.x, list) and len(b1.x) == 1
b2.x.append(torch.tensor(5))
# Sanity:
assert isinstance(b2.x, list) and len(b2.x) == 1
# Now that they have tensor contents, they should have different hashes:
assert hash(b1) != hash(b2)
def test_forward():
class A(Dummy):
def update(self, x):
self.x += x
def compute(self):
return self.x
a = A()
assert a(5) == 5
assert a._forward_cache == 5
assert a(8) == 8
assert a._forward_cache == 8
assert a.compute() == 13
class DummyMetric1(Dummy):
def update(self, x):
self.x += x
def compute(self):
return self.x
class DummyMetric2(Dummy):
def update(self, y):
self.x -= y
def compute(self):
return self.x
def test_pickle(tmpdir):
# doesn't tests for DDP
a = DummyMetric1()
a.update(1)
metric_pickled = pickle.dumps(a)
metric_loaded = pickle.loads(metric_pickled)
assert metric_loaded.compute() == 1
metric_loaded.update(5)
assert metric_loaded.compute() == 6
metric_pickled = cloudpickle.dumps(a)
metric_loaded = cloudpickle.loads(metric_pickled)
assert metric_loaded.compute() == 1
def test_state_dict(tmpdir):
""" test that metric states can be removed and added to state dict """
metric = Dummy()
assert metric.state_dict() == OrderedDict()
metric.persistent(True)
assert metric.state_dict() == OrderedDict(x=0)
metric.persistent(False)
assert metric.state_dict() == OrderedDict()
def test_child_metric_state_dict():
""" test that child metric states will be added to parent state dict """
class TestModule(nn.Module):
def __init__(self):
super().__init__()
self.metric = Dummy()
self.metric.add_state('a', torch.tensor(0), persistent=True)
self.metric.add_state('b', [], persistent=True)
self.metric.register_buffer('c', torch.tensor(0))
module = TestModule()
expected_state_dict = {
'metric.a': torch.tensor(0),
'metric.b': [],
'metric.c': torch.tensor(0),
}
assert module.state_dict() == expected_state_dict
@pytest.mark.skipif(not torch.cuda.is_available(), reason="Test requires GPU.")
def test_device_and_dtype_transfer(tmpdir):
metric = DummyMetric1()
assert metric.x.is_cuda is False
assert metric.x.dtype == torch.float32
metric = metric.to(device='cuda')
assert metric.x.is_cuda
metric = metric.double()
assert metric.x.dtype == torch.float64
metric = metric.half()
assert metric.x.dtype == torch.float16
def test_metric_collection(tmpdir):
m1 = DummyMetric1()
m2 = DummyMetric2()
metric_collection = MetricCollection([m1, m2])
# Test correct dict structure
assert len(metric_collection) == 2
assert metric_collection['DummyMetric1'] == m1
assert metric_collection['DummyMetric2'] == m2
# Test correct initialization
for name, metric in metric_collection.items():
assert metric.x == 0, f'Metric {name} not initialized correctly'
# Test every metric gets updated
metric_collection.update(5)
for name, metric in metric_collection.items():
assert metric.x.abs() == 5, f'Metric {name} not updated correctly'
# Test compute on each metric
metric_collection.update(-5)
metric_vals = metric_collection.compute()
assert len(metric_vals) == 2
for name, metric_val in metric_vals.items():
assert metric_val == 0, f'Metric {name}.compute not called correctly'
# Test that everything is reset
for name, metric in metric_collection.items():
assert metric.x == 0, f'Metric {name} not reset correctly'
# Test pickable
metric_pickled = pickle.dumps(metric_collection)
metric_loaded = pickle.loads(metric_pickled)
assert isinstance(metric_loaded, MetricCollection)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="Test requires GPU.")
def test_device_and_dtype_transfer_metriccollection(tmpdir):
m1 = DummyMetric1()
m2 = DummyMetric2()
metric_collection = MetricCollection([m1, m2])
for _, metric in metric_collection.items():
assert metric.x.is_cuda is False
assert metric.x.dtype == torch.float32
metric_collection = metric_collection.to(device='cuda')
for _, metric in metric_collection.items():
assert metric.x.is_cuda
metric_collection = metric_collection.double()
for _, metric in metric_collection.items():
assert metric.x.dtype == torch.float64
metric_collection = metric_collection.half()
for _, metric in metric_collection.items():
assert metric.x.dtype == torch.float16
def test_metric_collection_wrong_input(tmpdir):
""" Check that errors are raised on wrong input """
m1 = DummyMetric1()
# Not all input are metrics (list)
with pytest.raises(ValueError):
_ = MetricCollection([m1, 5])
# Not all input are metrics (dict)
with pytest.raises(ValueError):
_ = MetricCollection({'metric1': m1, 'metric2': 5})
# Same metric passed in multiple times
with pytest.raises(ValueError, match='Encountered two metrics both named *.'):
_ = MetricCollection([m1, m1])
# Not a list or dict passed in
with pytest.raises(ValueError, match='Unknown input to MetricCollection.'):
_ = MetricCollection(m1)
def test_metric_collection_args_kwargs(tmpdir):
""" Check that args and kwargs gets passed correctly in metric collection,
Checks both update and forward method
"""
m1 = DummyMetric1()
m2 = DummyMetric2()
metric_collection = MetricCollection([m1, m2])
# args gets passed to all metrics
metric_collection.update(5)
assert metric_collection['DummyMetric1'].x == 5
assert metric_collection['DummyMetric2'].x == -5
metric_collection.reset()
_ = metric_collection(5)
assert metric_collection['DummyMetric1'].x == 5
assert metric_collection['DummyMetric2'].x == -5
metric_collection.reset()
# kwargs gets only passed to metrics that it matches
metric_collection.update(x=10, y=20)
assert metric_collection['DummyMetric1'].x == 10
assert metric_collection['DummyMetric2'].x == -20
metric_collection.reset()
_ = metric_collection(x=10, y=20)
assert metric_collection['DummyMetric1'].x == 10
assert metric_collection['DummyMetric2'].x == -20
|
the-stack_106_17697
|
#!/usr/bin/env python -w
#
#
import argparse
import os
# method should be forward or reverse
def Scan(pred_out,threshold,penality,method):
print("### Start {} scaning".format(method))
scan_out = []
predict = dict()
coor2pas = dict()
for pas_id,score in pred_out:
chromosome,coor,strand = pas_id.split(':')
coor = int(coor)
predict[coor] = score
coor2pas[coor] = pas_id
if 'forward' in method:
predict[100000000000] = 1
coor2pas[100000000000] = 'chr'
coor_list = list(coor2pas.keys())
coor_list.sort()
maxPos=0 ## position of peak
start=0 ## peak start
end = 0 ## peak end
peak = 0 ## peak coor
elif 'reverse' in method:
predict[-1] = 1
coor2pas[-1] = 'chr'
coor_list = list(coor2pas.keys())
coor_list.sort(reverse=True)
maxPos= coor_list[0] ## position of peak
start= coor_list[0] ## peak start
end = coor_list[0] ## peak end
peak = coor_list[0] ## peak coor
else:
print('### Error: {} has not been defined'.format(method))
sum=0
maxPoint=0 ## score of peak
peak_score = 0 ##peak score
for coor in coor_list:
score = predict[coor]
if(abs(coor-end)>1):
if(maxPoint>threshold and sum>0):
newpas_id = coor2pas[maxPos]
scan_out.append((newpas_id,maxPoint,maxPos,start,end,peak))
start = coor
end = coor
if(score>0.5):
maxPos = coor
maxPoint = score
peak_score = score
sum = score
else:
maxPos = coor
maxPoint = 0
peak_score = 0
sum = 0
elif(score < 0.5):
sum -= penality
if(sum <= 0):
if(maxPoint > threshold):
newpas_id = coor2pas[maxPos]
scan_out.append((newpas_id,maxPoint,maxPos,start,end,peak))
start = coor
sum=0
maxPoint = 0
peak_score = 0
end = coor
else:
sum += score
if(peak_score < score):
peak_score = score
peak = coor
if(maxPoint < sum):
maxPoint = sum
maxPos = coor
if(sum<1):
start = coor
maxPoint = sum
maxPos = coor
end=coor
print("### End {} scaning".format(method))
return scan_out
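# Hedged usage sketch (hypothetical coordinates/scores): each pred_out entry is a
# ("chromosome:coordinate:strand", score) pair, and each returned tuple is
# (pas_id, cumulative_score, max_position, start, end, peak).
#   example_pred = [("chr1:1000:+", 0.9), ("chr1:1001:+", 0.8), ("chr1:1003:+", 0.2)]
#   peaks = Scan(example_pred, threshold=0, penality=1, method='forward')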
def args():
### Argument Parser
parser = argparse.ArgumentParser()
parser.add_argument('--pred_out', default=None, help='output list from evaluation')
    parser.add_argument('--threshold', default=0,type=int,help='peaks whose cumulative score is lower than this threshold will be filtered out')
    parser.add_argument('--penality', default=1,type=int,help='penalty applied for prediction scores lower than 0.5')
parser.add_argument('--method', default='forward',type=str,help='method for scanning, forward or reverse')
args = parser.parse_args()
pred_out = args.pred_out
threshold = args.threshold
penality = args.penality
method = args.method
return pred_out,threshold,penality,method
if __name__ == "__main__":
scan_out = Scan(*args())
|