filename | text
---|---
the-stack_106_15434
|
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 14 01:24:41 2016
@author: yxl
"""
import wx, os
import wx.lib.agw.aui as aui
from .canvas import Canvas
from ..core.manager import ImageManager, WindowsManager
from ..core.manager import ShotcutManager
from .. import IPy, root_dir
import weakref
class CanvasPanel(wx.Panel):
"""CanvasFrame: derived from the wx.core.Frame"""
## TODO: Main frame ???
def __init__(self, parent=None):
wx.Panel.__init__(self, parent)
#self.SetBackgroundColour( wx.SystemSettings.GetColour( wx.SYS_COLOUR_3DLIGHT ) )
self.SetSizeHints( wx.Size( 560,-1 ), wx.DefaultSize )
WindowsManager.add(self)
self.SetSizeHints( wx.DefaultSize, wx.DefaultSize )
self.SetBackgroundColour( wx.Colour( 255, 255, 255 ) )
sizer = wx.BoxSizer( wx.VERTICAL )
self.txt_info = wx.StaticText( self, wx.ID_ANY,
'500*500 pixels 173k',
wx.DefaultPosition, wx.DefaultSize, 0 )
self.txt_info.Wrap( -1 )
sizer.Add( self.txt_info, 0, wx.ALL, 0 )
self.canvas = Canvas(self)
self.canvas.set_handler(self.set_info)
self.handle = None
sizer.Add( self.canvas, 1, wx.EXPAND |wx.ALL, 0 )
self.page = wx.ScrollBar( self, wx.ID_ANY,
wx.DefaultPosition, wx.DefaultSize, wx.SB_HORIZONTAL)
self.page.SetScrollbar(0,0,0,0, refresh=True)
sizer.Add( self.page, 0, wx.ALL|wx.EXPAND, 0 )
self.page.Hide()
self.SetSizer(sizer)
self.Layout()
self.Bind(wx.EVT_SCROLL, self.on_scroll)
# panel.Bind(wx.EVT_CHAR, self.OnKeyDown)
self.opage = 0
#self.Fit()
#self.SetAcceleratorTable(IPy.curapp.shortcut)
'''
def SetTitle(self, title):
parent = self.GetParent()
if not IPy.aui: parent.SetTitle(title)
else: parent.SetPageText(parent.GetPageIndex(self), title)
#print(dir(parent)) #parent.DeletePage(parent.GetPageIndex(self))
'''
def set_handler(self, handle=None):
self.handle = handle
def set_info(self, ips, resize=False):
stk = 'stack' if ips.is3d else 'list'
label='{}/{}; {} {}x{} pixels; {}; {} M'.format(ips.cur+1, ips.get_nslices(),
stk if ips.get_nslices()>1 else '',ips.size[0], ips.size[1],
ips.imgtype, round(ips.get_nbytes()/1024.0/1024.0, 2))
if label != self.txt_info.GetLabel(): self.txt_info.SetLabel(label)
if ips.get_nslices() != self.opage:
self.opage = ips.get_nslices()
if ips.get_nslices()==1 and self.page.Shown:
self.page.Hide()
resize = True
if ips.get_nslices()>1 and not self.page.Shown:
self.page.Show()
resize = True
self.page.SetScrollbar(0, 0, ips.get_nslices()-1, 0, refresh=True)
if resize:
if IPy.uimode()!='ipy': self.Fit()
else:
#self.SetSizer(self.GetSizer())
self.Layout()
#self.GetSizer().Layout()
if not self.handle is None: self.handle(ips, resize)
#print('CanvasFrame:set_info')
#self.page.Show()
def set_ips(self, ips):
self.ips = ips
self.canvas.set_ips(ips)
def on_scroll(self, event):
self.ips.cur = self.page.GetThumbPosition()
self.ips.update()
self.canvas.on_idle(None)
def close(self):
parent = self.GetParent()
if IPy.uimode()=='ij':
parent.Close()
if IPy.uimode()=='ipy':
idx = parent.GetPageIndex(self)
parent.DeletePage(idx)
self.set_handler()
self.canvas.set_handler()
WindowsManager.remove(self)
def __del__(self):pass
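# The ``ips`` object handled by CanvasPanel is duck-typed. The stub below is
# a minimal sketch of the interface it needs, inferred only from
# set_info()/on_scroll() in this file (the names are not taken from the real
# ImagePy image class):
class _ExampleIps:
    def __init__(self):
        self.cur, self.is3d = 0, False             # current slice index, 3D flag
        self.size, self.imgtype = (500, 500), '8-bit'
        self.title, self.tool = 'example', None
    def get_nslices(self): return 1                # number of slices in the stack
    def get_nbytes(self): return 500 * 500         # raw pixel data size in bytes
    def update(self): pass                         # request a redraw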
class CanvasFrame(wx.Frame):
"""CanvasFrame: derived from the wx.core.Frame"""
## TODO: Main frame ???
def __init__(self, parent=None):
wx.Frame.__init__ ( self, parent, id = wx.ID_ANY,
title = wx.EmptyString,
pos = wx.DefaultPosition,
size = wx.Size( -1,-1 ),
style = wx.DEFAULT_FRAME_STYLE|wx.TAB_TRAVERSAL )
self.canvaspanel = CanvasPanel(self)
logopath = os.path.join(root_dir, 'data/logo.ico')
self.SetIcon(wx.Icon(logopath, wx.BITMAP_TYPE_ICO))
self.Bind(wx.EVT_ACTIVATE, self.on_valid)
self.SetAcceleratorTable(IPy.curapp.shortcut)
self.Bind(wx.EVT_CLOSE, self.on_close)
self.canvaspanel.set_handler(self.set_title)
def set_ips(self, ips):
self.canvaspanel.set_ips(ips)
def set_title(self, ips, resized):
title = ips.title + ('' if ips.tool is None else ' [%s]' % ips.tool.title)
self.SetTitle(title)
if resized: self.Fit()
def on_valid(self, event):
if event.GetActive():
ImageManager.add(self.canvaspanel.ips)
def on_close(self, event):
self.canvaspanel.set_handler()
self.canvaspanel.canvas.set_handler()
WindowsManager.remove(self.canvaspanel)
event.Skip()
class ImgArtProvider(aui.AuiDefaultDockArt):
def __init__(self, img):
aui.AuiDefaultDockArt.__init__(self)
self.bitmap = wx.Bitmap(img, wx.BITMAP_TYPE_PNG)
def DrawBackground(self, dc, window, orient, rect):
aui.AuiDefaultDockArt.DrawBackground(self, dc, window, orient, rect)
memDC = wx.MemoryDC()
memDC.SelectObject(self.bitmap)
w, h = self.bitmap.GetWidth(), self.bitmap.GetHeight()
dc.Blit((rect[2]-w)//2, (rect[3]-h)//2, w, h, memDC, 0, 0, wx.COPY, True)
#dc.DrawBitmap(self.bitmap, 0, 0)
#dc.DrawRectangle(rect)
class CanvasNoteBook(wx.lib.agw.aui.AuiNotebook):
def __init__(self, parent):
wx.lib.agw.aui.AuiNotebook.__init__( self, parent, wx.ID_ANY,
wx.DefaultPosition, wx.DefaultSize, wx.lib.agw.aui.AUI_NB_DEFAULT_STYLE )
self.Bind( wx.lib.agw.aui.EVT_AUINOTEBOOK_PAGE_CHANGED, self.on_pagevalid)
self.Bind( wx.lib.agw.aui.EVT_AUINOTEBOOK_PAGE_CLOSE, self.on_close)
self.SetArtProvider(aui.AuiSimpleTabArt())
def set_background(self, img):
self.GetAuiManager().SetArtProvider(ImgArtProvider(img))
def add_page(self, panel, ips):
self.AddPage(panel, ips.title, True, wx.NullBitmap )
self.Refresh()
panel.set_handler(lambda ips, res, pan=panel: self.set_title(ips, pan))
def set_title(self, ips, panel):
title = ips.title + ('' if ips.tool is None else ' [%s]' % ips.tool.title)
self.SetPageText(self.GetPageIndex(panel), title)
def on_pagevalid(self, event):
ImageManager.add(event.GetEventObject().GetPage(event.GetSelection()).ips)
def on_close(self, event):
print('page close')
event.GetEventObject().GetPage(event.GetSelection()).set_handler()
event.GetEventObject().GetPage(event.GetSelection()).canvas.set_handler()
WindowsManager.remove(event.GetEventObject().GetPage(event.GetSelection()))
class VirturlCanvas:
instance = []
class Canvas:
def __init__(self, ips):
self.ips = ips
def __del__(self):
print('virturl canvas deleted!')
def __init__(self, ips):
self.ips = ips
self.canvas = VirturlCanvas.Canvas(ips)
VirturlCanvas.instance.append(self)
ImageManager.add(self)
def close(self): VirturlCanvas.instance.remove(self)
if __name__=='__main__':
app = wx.PySimpleApp()
CanvasFrame().Show(True)
app.MainLoop()
|
the-stack_106_15436
|
# coding: utf-8
"""zmq Socket class"""
# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.
import errno as errno_mod
from ._cffi import lib as C, ffi
nsp = new_sizet_pointer = lambda length: ffi.new('size_t*', length)
new_uint64_pointer = lambda: (ffi.new('uint64_t*'), nsp(ffi.sizeof('uint64_t')))
new_int64_pointer = lambda: (ffi.new('int64_t*'), nsp(ffi.sizeof('int64_t')))
new_int_pointer = lambda: (ffi.new('int*'), nsp(ffi.sizeof('int')))
new_binary_data = lambda length: (
ffi.new('char[%d]' % (length)),
nsp(ffi.sizeof('char') * length),
)
value_uint64_pointer = lambda val: (ffi.new('uint64_t*', val), ffi.sizeof('uint64_t'))
value_int64_pointer = lambda val: (ffi.new('int64_t*', val), ffi.sizeof('int64_t'))
value_int_pointer = lambda val: (ffi.new('int*', val), ffi.sizeof('int'))
value_binary_data = lambda val, length: (
ffi.new('char[%d]' % (length + 1), val),
ffi.sizeof('char') * length,
)
ZMQ_FD_64BIT = ffi.sizeof('ZMQ_FD_T') == 8
IPC_PATH_MAX_LEN = C.get_ipc_path_max_len()
from .message import Frame
from .constants import RCVMORE
from .utils import _retry_sys_call
import zmq
from zmq.error import ZMQError, _check_rc, _check_version
from zmq.utils.strtypes import unicode
def new_pointer_from_opt(option, length=0):
from zmq.sugar.constants import (
int64_sockopts,
bytes_sockopts,
fd_sockopts,
)
if option in int64_sockopts or (ZMQ_FD_64BIT and option in fd_sockopts):
return new_int64_pointer()
elif option in bytes_sockopts:
return new_binary_data(length)
else:
# default
return new_int_pointer()
def value_from_opt_pointer(option, opt_pointer, length=0):
from zmq.sugar.constants import (
int64_sockopts,
bytes_sockopts,
fd_sockopts,
)
if option in int64_sockopts or (ZMQ_FD_64BIT and option in fd_sockopts):
return int(opt_pointer[0])
elif option in bytes_sockopts:
return ffi.buffer(opt_pointer, length)[:]
else:
return int(opt_pointer[0])
def initialize_opt_pointer(option, value, length=0):
from zmq.sugar.constants import (
int64_sockopts,
bytes_sockopts,
fd_sockopts,
)
if option in int64_sockopts or (ZMQ_FD_64BIT and option in fd_sockopts):
return value_int64_pointer(value)
elif option in bytes_sockopts:
return value_binary_data(value, length)
else:
return value_int_pointer(value)
class Socket(object):
context = None
socket_type = None
_zmq_socket = None
_closed = None
_ref = None
_shadow = False
copy_threshold = 0
def __init__(self, context=None, socket_type=None, shadow=None):
self.context = context
if shadow is not None:
if isinstance(shadow, Socket):
shadow = shadow.underlying
self._zmq_socket = ffi.cast("void *", shadow)
self._shadow = True
else:
self._shadow = False
self._zmq_socket = C.zmq_socket(context._zmq_ctx, socket_type)
if self._zmq_socket == ffi.NULL:
raise ZMQError()
self._closed = False
@property
def underlying(self):
"""The address of the underlying libzmq socket"""
return int(ffi.cast('size_t', self._zmq_socket))
def _check_closed_deep(self):
"""thorough check of whether the socket has been closed,
even if by another entity (e.g. ctx.destroy).
Only used by the `closed` property.
returns True if closed, False otherwise
"""
if self._closed:
return True
try:
self.get(zmq.TYPE)
except ZMQError as e:
if e.errno == zmq.ENOTSOCK:
self._closed = True
return True
else:
raise
return False
@property
def closed(self):
return self._check_closed_deep()
def close(self, linger=None):
rc = 0
if not self._closed and hasattr(self, '_zmq_socket'):
if self._zmq_socket is not None:
if linger is not None:
self.set(zmq.LINGER, linger)
rc = C.zmq_close(self._zmq_socket)
self._closed = True
if rc < 0:
_check_rc(rc)
def bind(self, address):
if isinstance(address, unicode):
address_b = address.encode('utf8')
else:
address_b = address
if isinstance(address, bytes):
address = address_b.decode('utf8')
rc = C.zmq_bind(self._zmq_socket, address_b)
if rc < 0:
if IPC_PATH_MAX_LEN and C.zmq_errno() == errno_mod.ENAMETOOLONG:
path = address.split('://', 1)[-1]
msg = (
'ipc path "{0}" is longer than {1} '
'characters (sizeof(sockaddr_un.sun_path)).'.format(
path, IPC_PATH_MAX_LEN
)
)
raise ZMQError(C.zmq_errno(), msg=msg)
elif C.zmq_errno() == errno_mod.ENOENT:
path = address.split('://', 1)[-1]
msg = 'No such file or directory for ipc path "{0}".'.format(path)
raise ZMQError(C.zmq_errno(), msg=msg)
else:
_check_rc(rc)
def unbind(self, address):
_check_version((3, 2), "unbind")
if isinstance(address, unicode):
address = address.encode('utf8')
rc = C.zmq_unbind(self._zmq_socket, address)
_check_rc(rc)
def connect(self, address):
if isinstance(address, unicode):
address = address.encode('utf8')
rc = C.zmq_connect(self._zmq_socket, address)
_check_rc(rc)
def disconnect(self, address):
_check_version((3, 2), "disconnect")
if isinstance(address, unicode):
address = address.encode('utf8')
rc = C.zmq_disconnect(self._zmq_socket, address)
_check_rc(rc)
def set(self, option, value):
length = None
if isinstance(value, unicode):
raise TypeError("unicode not allowed, use bytes")
if isinstance(value, bytes):
if option not in zmq.constants.bytes_sockopts:
raise TypeError("not a bytes sockopt: %s" % option)
length = len(value)
c_data = initialize_opt_pointer(option, value, length)
c_value_pointer = c_data[0]
c_sizet = c_data[1]
_retry_sys_call(
C.zmq_setsockopt,
self._zmq_socket,
option,
ffi.cast('void*', c_value_pointer),
c_sizet,
)
def get(self, option):
c_data = new_pointer_from_opt(option, length=255)
c_value_pointer = c_data[0]
c_sizet_pointer = c_data[1]
_retry_sys_call(
C.zmq_getsockopt, self._zmq_socket, option, c_value_pointer, c_sizet_pointer
)
sz = c_sizet_pointer[0]
v = value_from_opt_pointer(option, c_value_pointer, sz)
if (
option != zmq.IDENTITY
and option in zmq.constants.bytes_sockopts
and v.endswith(b'\0')
):
v = v[:-1]
return v
def _send_copy(self, buf, flags):
"""Send a copy of a bufferable"""
zmq_msg = ffi.new('zmq_msg_t*')
if not isinstance(buf, bytes):
# cast any bufferable data to bytes via memoryview
buf = memoryview(buf).tobytes()
c_message = ffi.new('char[]', buf)
rc = C.zmq_msg_init_size(zmq_msg, len(buf))
_check_rc(rc)
C.memcpy(C.zmq_msg_data(zmq_msg), c_message, len(buf))
_retry_sys_call(C.zmq_msg_send, zmq_msg, self._zmq_socket, flags)
rc2 = C.zmq_msg_close(zmq_msg)
_check_rc(rc2)
def _send_frame(self, frame, flags):
"""Send a Frame on this socket in a non-copy manner."""
# Always copy the Frame so the original message isn't garbage collected.
# This doesn't do a real copy, just a reference.
frame_copy = frame.fast_copy()
zmq_msg = frame_copy.zmq_msg
_retry_sys_call(C.zmq_msg_send, zmq_msg, self._zmq_socket, flags)
tracker = frame_copy.tracker
frame_copy.close()
return tracker
def send(self, data, flags=0, copy=False, track=False):
if isinstance(data, unicode):
raise TypeError("Message must be in bytes, not a unicode object")
if copy and not isinstance(data, Frame):
return self._send_copy(data, flags)
else:
close_frame = False
if isinstance(data, Frame):
if track and not data.tracker:
raise ValueError('Not a tracked message')
frame = data
else:
if self.copy_threshold:
buf = memoryview(data)
# always copy messages smaller than copy_threshold
if buf.nbytes < self.copy_threshold:
self._send_copy(buf, flags)
return zmq._FINISHED_TRACKER
frame = Frame(data, track=track, copy_threshold=self.copy_threshold)
close_frame = True
tracker = self._send_frame(frame, flags)
if close_frame:
frame.close()
return tracker
def recv(self, flags=0, copy=True, track=False):
if copy:
zmq_msg = ffi.new('zmq_msg_t*')
C.zmq_msg_init(zmq_msg)
else:
frame = zmq.Frame(track=track)
zmq_msg = frame.zmq_msg
try:
_retry_sys_call(C.zmq_msg_recv, zmq_msg, self._zmq_socket, flags)
except Exception:
if copy:
C.zmq_msg_close(zmq_msg)
raise
if not copy:
return frame
_buffer = ffi.buffer(C.zmq_msg_data(zmq_msg), C.zmq_msg_size(zmq_msg))
_bytes = _buffer[:]
rc = C.zmq_msg_close(zmq_msg)
_check_rc(rc)
return _bytes
def monitor(self, addr, events=-1):
"""s.monitor(addr, flags)
Start publishing socket events on inproc.
See libzmq docs for zmq_monitor for details.
Note: requires libzmq >= 3.2
Parameters
----------
addr : str
The inproc url used for monitoring. Passing None as
the addr will cause an existing socket monitor to be
deregistered.
events : int [default: zmq.EVENT_ALL]
The zmq event bitmask for which events will be sent to the monitor.
"""
_check_version((3, 2), "monitor")
if events < 0:
events = zmq.EVENT_ALL
if addr is None:
addr = ffi.NULL
if isinstance(addr, unicode):
addr = addr.encode('utf8')
rc = C.zmq_socket_monitor(self._zmq_socket, addr, events)
_check_rc(rc)
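# A hedged usage sketch of the monitor API above, written against the public
# pyzmq interface (``parse_monitor_message`` lives in zmq.utils.monitor in
# recent pyzmq releases):
#
#     import zmq
#     from zmq.utils.monitor import parse_monitor_message
#
#     ctx = zmq.Context()
#     sock = ctx.socket(zmq.REQ)
#     sock.monitor("inproc://events", zmq.EVENT_ALL)    # publish events
#     mon = ctx.socket(zmq.PAIR)
#     mon.connect("inproc://events")
#     evt = parse_monitor_message(mon.recv_multipart()) # after some activity
#     sock.monitor(None)                                # deregister the monitor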
__all__ = ['Socket', 'IPC_PATH_MAX_LEN']
|
the-stack_106_15437
|
"""
HDF5 database module.
Store the traces in an HDF5 array using pytables.
Implementation Notes
--------------------
This version supports arbitrary objects through ObjectAtom and VLArray
constructs. Ordinary numerical objects are stored in a Table. Each chain
is stored in an individual group called ``chain#``.
Additional Dependencies
-----------------------
* HDF5 version 1.6.5, required by pytables.
* pytables version 2 and up. <http://sourceforge.net/projects/pytables/>
"""
import numpy as np
from numpy import zeros, shape, asarray, hstack, size, dtype
import pymc
from pymc.database import base, pickle
from copy import copy
import tables
import os
import warnings
import sys
import traceback
import warnings
from pymc import six
__all__ = ['Trace', 'Database', 'load']
class TraceObject(base.Trace):
"""HDF5 Trace for Objects."""
def __init__(self, name, getfunc=None, db=None, vlarrays=None):
"""Create a Trace instance.
:Parameters:
obj : pymc object
A Stochastic or Deterministic object.
name : string
The trace object name. This is used only if no `obj` is given.
db : Database instance
The database owning this Trace.
vlarrays : sequence
The nodes storing the data for this object.
"""
base.Trace.__init__(self, name, getfunc=getfunc, db=db)
if vlarrays is None:
vlarrays = []
self._vlarrays = vlarrays # This should be a dict keyed by chain.
def tally(self, chain):
"""Adds current value to trace"""
# try:
self._vlarrays[chain].append(self._getfunc())
# except:
# print self._vlarrays, chain
# raise AttributeError
def __getitem__(self, index):
"""Mimic NumPy indexing for arrays."""
chain = self._chain
if chain is not None:
vlarrays = [self._vlarrays[chain]]
else:
vlarrays = self._vlarrays
for i, vlarray in enumerate(vlarrays):
if i == 0:
out = np.asarray(vlarray[index])
else:
out = np.hstack((out, vlarray[index]))
return out
def gettrace(self, burn=0, thin=1, chain=-1, slicing=None):
"""Return the trace (last by default).
:Parameters:
burn : integer
The number of transient steps to skip.
thin : integer
Keep one in thin.
chain : integer
The index of the chain to fetch. If None, return all chains. The
default is to return the last chain.
slicing : slice object
A slice overriding the burn and thin assignment.
"""
if chain is not None:
vlarrays = [self._vlarrays[chain]]
else:
vlarrays = self._vlarrays
for i, vlarray in enumerate(vlarrays):
if slicing is not None:
burn, stop, thin = slicing.start, slicing.stop, slicing.step
if slicing is None or stop is None:
stop = len(vlarray)
col = vlarray[burn:stop:thin]
if i == 0:
data = np.asarray(col)
else:
data = hstack((data, col))
return data
__call__ = gettrace
def length(self, chain=-1):
"""Return the length of the trace.
:Parameters:
chain : int or None
The chain index. If None, returns the combined length of all chains.
"""
if chain is not None:
return len(self._vlarrays[chain])
else:
return sum(map(len, self._vlarrays))
class Trace(base.Trace):
"""HDF5 trace
Database backend based on the HDF5 format.
"""
def tally(self, chain):
"""Adds current value to trace"""
self.db._rows[chain][self.name] = self._getfunc()
def __getitem__(self, index):
"""Mimic NumPy indexing for arrays."""
chain = self._chain
if chain is not None:
tables = [self.db._gettables()[chain], ]
else:
tables = self.db._gettables()
out = np.asarray(tables[0].col(self.name))
for table in tables[1:]:
out = np.append(out, table.col(self.name), axis=0)
return out[index]
def gettrace(self, burn=0, thin=1, chain=-1, slicing=None):
"""Return the trace (last by default).
:Parameters:
burn : integer
The number of transient steps to skip.
thin : integer
Keep one in thin.
chain : integer
The index of the chain to fetch. If None, return all chains. The
default is to return the last chain.
slicing : slice object
A slice overriding the burn and thin assignment.
"""
if chain is not None:
tables = [self.db._gettables()[chain], ]
else:
tables = self.db._gettables()
for i, table in enumerate(tables):
if slicing is not None:
burn, stop, thin = slicing.start, slicing.stop, slicing.step
if slicing is None or stop is None:
stop = table.nrows
col = table.read(start=burn, stop=stop, step=thin, field=self.name)
if i == 0:
data = np.asarray(col)
else:
data = np.append(data, col, axis=0)
return data
__call__ = gettrace
def hdf5_col(self, chain=-1):
"""Return a pytables column object.
:Parameters:
chain : integer
The index of the chain.
.. note::
This method is specific to the ``hdf5`` backend.
"""
return self.db._tables[chain].colinstances[self.name]
def length(self, chain=-1):
"""Return the length of the trace.
:Parameters:
chain : int or None
The chain index. If None, returns the combined length of all chains.
"""
if chain is not None:
tables = [self.db._gettables()[chain], ]
else:
tables = self.db._gettables()
n = asarray([table.nrows for table in tables])
return n.sum()
class Database(pickle.Database):
"""HDF5 database
Create an HDF5 file <model>.h5. Each chain is stored in a group, and the
stochastics and deterministics are stored as arrays in each group.
"""
def __init__(self, dbname, dbmode='a',
dbcomplevel=0, dbcomplib='zlib', **kwds):
"""Create an HDF5 database instance, where samples are stored in tables.
:Parameters:
dbname : string
Name of the hdf5 file.
dbmode : {'a', 'w', 'r'}
File mode: 'a': append, 'w': overwrite, 'r': read-only.
dbcomplevel : integer (0-9)
Compression level, 0: no compression.
dbcomplib : string
Compression library (zlib, bzip2, lzo)
:Notes:
* zlib has a good compression ratio, although somewhat slow, and
reasonably fast decompression.
* LZO is a fast compression library offering however a low compression
ratio.
* bzip2 has an excellent compression ratio but requires more CPU.
"""
self.__name__ = 'hdf5'
self.dbname = dbname
self.__Trace__ = Trace
self.mode = dbmode
self.trace_names = []
# A list of sequences of names of the objects to tally.
self._traces = {} # A dictionary of the Trace objects.
# Deprecation of complevel and complib
# Remove in V2.1
if 'complevel' in kwds:
warnings.warn(
'complevel has been replaced with dbcomplevel.',
DeprecationWarning)
dbcomplevel = kwds.get('complevel')
if 'complib' in kwds:
warnings.warn(
'complib has been replaced with dbcomplib.',
DeprecationWarning)
dbcomplib = kwds.get('complib')
db_exists = os.path.exists(self.dbname)
self._h5file = tables.openFile(self.dbname, self.mode)
default_filter = tables.Filters(
complevel=dbcomplevel,
complib=dbcomplib)
if self.mode == 'r' or (self.mode == 'a' and db_exists):
self.filter = getattr(self._h5file, 'filters', default_filter)
else:
self.filter = default_filter
self._tables = self._gettables(
) # This should be a dict keyed by chain.
self._rows = len(
self._tables) * [None,
] # This should be a dict keyed by chain.
self._chains = [
gr for gr in self._h5file.listNodes(
"/") if gr._v_name[
:5] == 'chain'] # This should be a dict keyed by chain.
self.chains = len(self._chains)
# LOAD LOGIC
if self.chains > 0:
# Create traces from objects stored in Table.
db = self
for k in db._tables[-1].colnames:
db._traces[k] = Trace(name=k, db=db)
setattr(db, k, db._traces[k])
# Walk nodes proceed from top to bottom, so we need to invert
# the list to have the chains in chronological order.
objects = {}
for chain in self._chains:
for node in db._h5file.walkNodes(chain, classname='VLArray'):
if node._v_name != '_state_':
try:
objects[node._v_name].append(node)
except:
objects[node._v_name] = [node, ]
# Note that the list vlarrays is in reverse order.
for k, vlarrays in six.iteritems(objects):
db._traces[k] = TraceObject(name=k, db=db, vlarrays=vlarrays)
setattr(db, k, db._traces[k])
# Restore table attributes.
# This restores the sampler's state for the last chain.
table = db._tables[-1]
for k in table.attrs._v_attrnamesuser:
setattr(db, k, getattr(table.attrs, k))
# Restore group attributes.
for k in db._chains[-1]._f_listNodes():
if k.__class__ not in [tables.Table, tables.Group]:
setattr(db, k.name, k)
varnames = db._tables[-1].colnames + objects.keys()
db.trace_names = db.chains * [varnames, ]
def connect_model(self, model):
"""Link the Database to the Model instance.
In case a new database is created from scratch, ``connect_model``
creates Trace objects for all tallyable pymc objects defined in
`model`.
If the database is being loaded from an existing file, ``connect_model``
restore the objects trace to their stored value.
:Parameters:
model : pymc.Model instance
An instance holding the pymc objects defining a statistical
model (stochastics, deterministics, data, ...)
"""
# Changed this to allow non-Model models. -AP
if isinstance(model, pymc.Model):
self.model = model
else:
raise AttributeError('Not a Model instance.')
# Restore the state of the Model from an existing Database.
# The `load` method will have already created the Trace objects.
if hasattr(self, '_state_'):
names = set()
for morenames in self.trace_names:
names.update(morenames)
for name, fun in six.iteritems(model._funs_to_tally):
if name in self._traces:
self._traces[name]._getfunc = fun
names.remove(name)
if len(names) > 0:
raise RuntimeError(
"Some objects from the database have not been assigned a getfunc: %s" %
', '.join(names))
# Create a fresh new state. This is now taken care of in initialize.
else:
for name, fun in six.iteritems(model._funs_to_tally):
if np.array(fun()).dtype is np.dtype('object'):
self._traces[
name] = TraceObject(
name,
getfunc=fun,
db=self)
else:
self._traces[name] = Trace(name, getfunc=fun, db=self)
def nchains(self):
"""Return the number of existing chains."""
return len(self._h5file.listNodes('/'))
def _initialize(self, funs_to_tally, length):
"""
Create a group named ``Chain#`` to store all data for this chain.
The group contains one pyTables Table, and at least one subgroup
called ``group#``. This subgroup holds ObjectAtoms, which can hold
pymc objects whose value is not a numerical array.
There is too much stuff in here. ObjectAtoms should get initialized
"""
i = self.chains
self._chains.append(
self._h5file.createGroup(
"/",
'chain%d' %
i,
'Chain #%d' %
i))
current_object_group = self._h5file.createGroup(
self._chains[-1],
'group0',
'Group storing objects.')
group_counter = 0
object_counter = 0
# Create the Table in the chain# group, and ObjectAtoms in
# chain#/group#.
table_descr = {}
for name, fun in six.iteritems(funs_to_tally):
arr = asarray(fun())
if arr.dtype is np.dtype('object'):
self._traces[name]._vlarrays.append(self._h5file.createVLArray(
current_object_group,
name,
tables.ObjectAtom(),
title=name + ' samples.',
filters=self.filter))
object_counter += 1
if object_counter % 4096 == 0:
group_counter += 1
current_object_group = self._h5file.createGroup(
self._chains[-1],
'group%d' % group_counter, 'Group storing objects.')
else:
table_descr[name] = tables.Col.from_dtype(
dtype((arr.dtype, arr.shape)))
table = self._h5file.createTable(self._chains[-1],
'PyMCsamples',
table_descr,
title='PyMC samples',
filters=self.filter,
expectedrows=length)
self._tables.append(table)
self._rows.append(self._tables[-1].row)
# Store data objects
for object in self.model.observed_stochastics:
if object.keep_trace is True:
setattr(table.attrs, object.__name__, object.value)
# Make sure the variables have a corresponding Trace instance.
for name, fun in six.iteritems(funs_to_tally):
if name not in self._traces:
if np.array(fun()).dtype is np.dtype('object'):
self._traces[
name] = TraceObject(
name,
getfunc=fun,
db=self)
else:
self._traces[name] = Trace(name, getfunc=fun, db=self)
self._traces[name]._initialize(self.chains, length)
self.trace_names.append(funs_to_tally.keys())
self.chains += 1
def tally(self, chain=-1):
chain = range(self.chains)[chain]
for name in self.trace_names[chain]:
try:
self._traces[name].tally(chain)
except:
cls, inst, tb = sys.exc_info()
warnings.warn("""
Error tallying %s, will not try to tally it again this chain.
Did you make all the same variables and step methods tallyable
as were tallyable last time you used the database file?
Error:
%s""" % (name, ''.join(traceback.format_exception(cls, inst, tb))))
self.trace_names[chain].remove(name)
self._rows[chain].append()
self._tables[chain].flush()
self._rows[chain] = self._tables[chain].row
def _finalize(self, chain=-1):
"""Close file."""
# add attributes. Computation time.
# self._tables[chain].flush()
self._h5file.flush()
def savestate(self, state, chain=-1):
"""Store a dictionnary containing the state of the Model and its
StepMethods."""
cur_chain = self._chains[chain]
if hasattr(cur_chain, '_state_'):
cur_chain._state_[0] = state
else:
s = self._h5file.createVLArray(
cur_chain,
'_state_',
tables.ObjectAtom(),
title='The saved state of the sampler',
filters=self.filter)
s.append(state)
self._h5file.flush()
def getstate(self, chain=-1):
if len(self._chains) == 0:
return {}
elif hasattr(self._chains[chain], '_state_'):
if len(self._chains[chain]._state_) > 0:
return self._chains[chain]._state_[0]
else:
return {}
else:
return {}
def _model_trace_description(self):
"""Return a description of the table and the ObjectAtoms to be created.
:Returns:
table_description : dict
A Description of the pyTables table.
ObjectAtomsn : dict
A
in terms of PyTables
columns, and a"""
D = {}
for name, fun in six.iteritems(self.model._funs_to_tally):
arr = asarray(fun())
D[name] = tables.Col.from_dtype(dtype((arr.dtype, arr.shape)))
return D, {}
def _file_trace_description(self):
"""Return a description of the last trace stored in the database."""
table = self._gettables()[-1][0]
return table.description
def _check_compatibility(self):
"""Make sure the next objects to be tallied are compatible with the
stored trace."""
stored_descr = self._file_trace_description()
try:
for k, v in self._model_trace_description():
assert(stored_descr[k][0] == v[0])
except:
raise ValueError(
"The objects to tally are incompatible with the objects stored in the file.")
def _gettables(self):
"""Return a list of hdf5 tables name PyMCsamples.
"""
groups = self._h5file.listNodes("/")
if len(groups) == 0:
return []
else:
return [gr.PyMCsamples for gr in groups if gr._v_name[:5] == 'chain']
def close(self):
self._h5file.close()
def add_attr(self, name, object, description='', chain=-1, array=False):
"""Add an attribute to the chain.
description may not be supported for every data type.
if array is true, create an Array object.
"""
if not np.isscalar(chain):
raise TypeError("chain must be a scalar integer.")
table = self._tables[chain]
if array is False:
table.setAttr(name, object)
obj = getattr(table.attrs, name)
else:
# Create an array in the group
if description == '':
description = name
group = table._g_getparent()
self._h5file.createArray(group, name, object, description)
obj = getattr(group, name)
setattr(self, name, obj)
def load(dbname, dbmode='a'):
"""Load an existing hdf5 database.
Return a Database instance.
:Parameters:
filename : string
Name of the hdf5 database to open.
mode : 'a', 'r'
File mode : 'a': append, 'r': read-only.
"""
if dbmode == 'w':
raise AttributeError("dbmode='w' not allowed for load.")
db = Database(dbname, dbmode=dbmode)
return db
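# A minimal usage sketch (hedged; ``M`` and 'some_variable' are placeholders
# for an existing pymc model and one of its tallied variables):
#
#     S = pymc.MCMC(M, db='hdf5', dbname='trace.h5',
#                   dbmode='w', dbcomplevel=5, dbcomplib='zlib')
#     S.sample(10000)
#     S.db.close()
#
#     db = load('trace.h5', dbmode='r')        # reopen later, read-only
#     samples = db.trace('some_variable')[:]   # tallied values as an array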
# TODO: Check this. It seems that pickle is pymc.database.pickle, not the
# pickle module.
def save_sampler(sampler):
"""
Dumps a sampler into its hdf5 database.
"""
db = sampler.db
fnode = tables.filenode.newnode(db._h5file, where='/', name='__sampler__')
import pickle
pickle.dump(sampler, fnode)
def restore_sampler(fname):
"""
Creates a new sampler from an hdf5 database.
"""
hf = tables.openFile(fname)
fnode = hf.root.__sampler__
import pickle
sampler = pickle.load(fnode)
return sampler
|
the-stack_106_15441
|
# -*- coding: utf-8 -*-
"""
:codeauthor: Pedro Algarvio ([email protected])
tests.integration.shell.minion
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
# Import python libs
from __future__ import absolute_import
import getpass
import logging
import os
import platform
import sys
# Import salt libs
import salt.utils.files
import salt.utils.platform
import salt.utils.yaml
# Import Salt Testing libs
import tests.integration.utils
# Import 3rd-party libs
from salt.ext import six
from tests.integration.utils import testprogram
from tests.support.case import ShellCase
from tests.support.mixins import ShellCaseCommonTestsMixin
from tests.support.runtests import RUNTIME_VARS
from tests.support.unit import skipIf
log = logging.getLogger(__name__)
DEBUG = True
class MinionTest(ShellCase, testprogram.TestProgramCase, ShellCaseCommonTestsMixin):
"""
Various integration tests for the salt-minion executable.
"""
_call_binary_ = "salt-minion"
_test_minions = (
"minion",
"subminion",
)
def _run_initscript(
self, init_script, minions, minion_running, action, exitstatus=None, message=""
):
"""
Wrapper that runs the initscript for the configured minions and
verifies the results.
"""
user = getpass.getuser()
ret = init_script.run(
[action],
catch_stderr=True,
with_retcode=True,
env={
"SALTMINION_CONFIGS": "\n".join(
[
"{0} {1}".format(user, minion.abs_path(minion.config_dir))
for minion in minions
]
),
},
timeout=90,
)
for line in ret[0]:
log.debug("script: salt-minion: stdout: {0}".format(line))
for line in ret[1]:
log.debug("script: salt-minion: stderr: {0}".format(line))
log.debug("exit status: {0}".format(ret[2]))
if six.PY3:
std_out = b"\nSTDOUT:".join(ret[0])
std_err = b"\nSTDERR:".join(ret[1])
else:
std_out = "\nSTDOUT:".join(ret[0])
std_err = "\nSTDERR:".join(ret[1])
# Check minion state
for minion in minions:
self.assertEqual(
minion.is_running(),
minion_running,
'script action "{0}" should result in minion "{1}" {2} and is not.\nSTDOUT:{3}\nSTDERR:{4}'.format(
action,
minion.name,
["stopped", "running"][minion_running],
std_out,
std_err,
),
)
if exitstatus is not None:
self.assertEqual(
ret[2],
exitstatus,
'script action "{0}" {1} exited {2}, must be {3}\nSTDOUT:{4}\nSTDERR:{5}'.format(
action, message, ret[2], exitstatus, std_out, std_err,
),
)
return ret
def _initscript_setup(self, minions):
"""Re-usable setup for running salt-minion tests"""
_minions = []
for mname in minions:
pid_file = "salt-{0}.pid".format(mname)
minion = testprogram.TestDaemonSaltMinion(
name=mname,
root_dir="init_script",
config_dir=os.path.join("etc", mname),
parent_dir=self._test_dir,
pid_file=pid_file,
configs={
"minion": {
"map": {
"pidfile": os.path.join("var", "run", pid_file),
"sock_dir": os.path.join("var", "run", "salt", mname),
"log_file": os.path.join("var", "log", "salt", mname),
},
},
},
)
# Call setup here to ensure config and script exist
minion.setup()
_minions.append(minion)
# Need salt-call, salt-minion for wrapper script
salt_call = testprogram.TestProgramSaltCall(
root_dir="init_script", parent_dir=self._test_dir
)
# Ensure that run-time files are generated
salt_call.setup()
sysconf_dir = os.path.dirname(_minions[0].abs_path(_minions[0].config_dir))
cmd_env = {
"PATH": ":".join(
[salt_call.abs_path(salt_call.script_dir), os.getenv("PATH")]
),
"SALTMINION_DEBUG": "1" if DEBUG else "",
"SALTMINION_PYTHON": sys.executable,
"SALTMINION_SYSCONFDIR": sysconf_dir,
"SALTMINION_BINDIR": _minions[0].abs_path(_minions[0].script_dir),
}
default_dir = os.path.join(sysconf_dir, "default")
if not os.path.exists(default_dir):
os.makedirs(default_dir)
with salt.utils.files.fopen(os.path.join(default_dir, "salt"), "w") as defaults:
# Test suite is quite slow - extend the timeout
defaults.write("TIMEOUT=60\n" "TICK=1\n")
init_script = testprogram.TestProgram(
name="init:salt-minion",
program=os.path.join(RUNTIME_VARS.CODE_DIR, "pkg", "rpm", "salt-minion"),
env=cmd_env,
)
return _minions, salt_call, init_script
@skipIf(True, "Disabled. Test suite hanging")
def test_linux_initscript(self):
"""
Various tests of the init script to verify that it properly controls a salt minion.
"""
pform = platform.uname()[0].lower()
if pform not in ("linux",):
self.skipTest(
"salt-minion init script is unavailable on {1}".format(platform)
)
minions, _, init_script = self._initscript_setup(self._test_minions)
try:
# These tests are grouped together, rather than split into individual test functions,
# because subsequent tests leverage the state from the previous test which minimizes
# setup for each test.
# I take visual readability with aligned columns over strict PEP8
# (bad-whitespace) Exactly one space required after comma
# pylint: disable=C0326
ret = self._run_initscript(
init_script, minions[:1], False, "bogusaction", 2
)
ret = self._run_initscript(
init_script, minions[:1], False, "reload", 3
) # Not implemented
ret = self._run_initscript(
init_script, minions[:1], False, "stop", 0, "when not running"
)
ret = self._run_initscript(
init_script, minions[:1], False, "status", 3, "when not running"
)
ret = self._run_initscript(
init_script, minions[:1], False, "condrestart", 7, "when not running"
)
ret = self._run_initscript(
init_script, minions[:1], False, "try-restart", 7, "when not running"
)
ret = self._run_initscript(
init_script, minions, True, "start", 0, "when not running"
)
ret = self._run_initscript(
init_script, minions, True, "status", 0, "when running"
)
# Verify that PIDs match
mpids = {}
for line in ret[0]:
segs = line.decode(__salt_system_encoding__).split()
minfo = segs[0].split(":")
mpids[minfo[-1]] = int(segs[-1]) if segs[-1].isdigit() else None
for minion in minions:
self.assertEqual(
minion.daemon_pid,
mpids[minion.name],
'PID in "{0}" is {1} and does not match status PID {2}'.format(
minion.abs_path(minion.pid_path),
minion.daemon_pid,
mpids[minion.name],
),
)
ret = self._run_initscript(
init_script, minions, True, "start", 0, "when running"
)
ret = self._run_initscript(
init_script, minions, True, "condrestart", 0, "when running"
)
ret = self._run_initscript(
init_script, minions, True, "try-restart", 0, "when running"
)
ret = self._run_initscript(
init_script, minions, False, "stop", 0, "when running"
)
finally:
# Ensure that minions are shutdown
for minion in minions:
minion.shutdown()
@skipIf(salt.utils.platform.is_windows(), "Skip on Windows OS")
def test_exit_status_unknown_user(self):
"""
Ensure correct exit status when the minion is configured to run as an unknown user.
Skipped on windows because daemonization not supported
"""
minion = testprogram.TestDaemonSaltMinion(
name="unknown_user",
configs={"minion": {"map": {"user": "some_unknown_user_xyz"}}},
parent_dir=self._test_dir,
)
# Call setup here to ensure config and script exist
minion.setup()
stdout, stderr, status = minion.run(
args=["-d"], catch_stderr=True, with_retcode=True,
)
try:
self.assert_exit_status(
status,
"EX_NOUSER",
message="unknown user not on system",
stdout=stdout,
stderr=tests.integration.utils.decode_byte_list(stderr),
)
finally:
# Although the start-up should fail, call shutdown() to set the
# internal _shutdown flag and avoid the registered atexit calls to
# cause timeout exceptions and respective traceback
minion.shutdown()
# @skipIf(salt.utils.platform.is_windows(), 'Skip on Windows OS')
def test_exit_status_unknown_argument(self):
"""
Ensure correct exit status when an unknown argument is passed to salt-minion.
"""
minion = testprogram.TestDaemonSaltMinion(
name="unknown_argument", parent_dir=self._test_dir,
)
# Call setup here to ensure config and script exist
minion.setup()
stdout, stderr, status = minion.run(
args=["-d", "--unknown-argument"], catch_stderr=True, with_retcode=True,
)
try:
self.assert_exit_status(
status,
"EX_USAGE",
message="unknown argument",
stdout=stdout,
stderr=tests.integration.utils.decode_byte_list(stderr),
)
finally:
# Although the start-up should fail, call shutdown() to set the
# internal _shutdown flag and avoid the registered atexit calls to
# cause timeout exceptions and respective traceback
minion.shutdown()
@skipIf(salt.utils.platform.is_windows(), "Skip on Windows OS")
def test_exit_status_correct_usage(self):
"""
Ensure correct exit status when salt-minion starts correctly.
Skipped on windows because daemonization not supported
"""
minion = testprogram.TestDaemonSaltMinion(
name="correct_usage", parent_dir=self._test_dir,
)
# Call setup here to ensure config and script exist
minion.setup()
stdout, stderr, status = minion.run(
args=["-d"], catch_stderr=True, with_retcode=True,
)
self.assert_exit_status(
status, "EX_OK", message="correct usage", stdout=stdout, stderr=stderr
)
minion.shutdown(wait_for_orphans=3)
|
the-stack_106_15443
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional import fixtures as func_fixtures
from nova.tests.functional import integrated_helpers
from nova.tests.unit import fake_network
import nova.tests.unit.image.fake
from nova.tests.unit import policy_fixture
from nova.virt import fake
class TestRescheduleWithVolumesAttached(
test.TestCase, integrated_helpers.InstanceHelperMixin):
"""Regression test for bug 1784353 introduced in Queens.
This regression test asserts that volume backed instances fail to start
when rescheduled due to their volume attachments being deleted by cleanup
code within the compute layer after an initial failure to spawn.
"""
def setUp(self):
super(TestRescheduleWithVolumesAttached, self).setUp()
# Use the new attach flow fixture for cinder
cinder_fixture = nova_fixtures.CinderFixtureNewAttachFlow(self)
self.cinder = self.useFixture(cinder_fixture)
self.useFixture(policy_fixture.RealPolicyFixture())
self.useFixture(nova_fixtures.NeutronFixture(self))
fake_network.set_stub_network_methods(self)
self.useFixture(func_fixtures.PlacementFixture())
api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
api_version='v2.1'))
self.api = api_fixture.admin_api
nova.tests.unit.image.fake.stub_out_image_service(self)
self.addCleanup(nova.tests.unit.image.fake.FakeImageService_reset)
self.flags(compute_driver='fake.FakeRescheduleDriver')
self.start_service('conductor')
self.start_service('scheduler')
# Start two computes to allow the instance to be rescheduled
fake.set_nodes(['host1'])
self.addCleanup(fake.restore_nodes)
self.host1 = self.start_service('compute', host='host1')
fake.set_nodes(['host2'])
self.host2 = self.start_service('compute', host='host2')
self.image_id = self.api.get_images()[0]['id']
self.flavor_id = self.api.get_flavors()[0]['id']
def test_reschedule_with_volume_attached(self):
# Boot a volume backed instance
volume_id = nova_fixtures.CinderFixture.IMAGE_BACKED_VOL
server_request = {
'name': 'server',
'flavorRef': self.flavor_id,
'block_device_mapping_v2': [{
'boot_index': 0,
'uuid': volume_id,
'source_type': 'volume',
'destination_type': 'volume'}],
}
server_response = self.api.post_server({'server': server_request})
server_id = server_response['id']
self._wait_for_state_change(self.api, server_response, 'ACTIVE')
attached_volume_ids = self.cinder.volume_ids_for_instance(server_id)
self.assertIn(volume_id, attached_volume_ids)
self.assertEqual(1, len(self.cinder.volume_to_attachment))
# There should only be one attachment record for the volume and
# instance because the original would have been deleted before
# rescheduling off the first host.
self.assertEqual(1, len(self.cinder.volume_to_attachment[volume_id]))
|
the-stack_106_15444
|
''' Provider class for RoI binary segmentation task '''
import pickle
import sys
import os
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
import numpy as np
sys.path.append(os.path.join(BASE_DIR, '../sunrgbd_data'))
from utils import roty, load_zipped_pickle
sys.path.append(os.path.join(BASE_DIR, '../../train'))
from box_util import box3d_iou
type2class={'bed':0, 'table':1, 'sofa':2, 'chair':3, 'toilet':4, 'desk':5, 'dresser':6, 'night_stand':7, 'bookshelf':8, 'bathtub':9}
class2type = {type2class[t]:t for t in type2class}
type2onehotclass={'bed':0, 'table':1, 'sofa':2, 'chair':3, 'toilet':4, 'desk':5, 'dresser':6, 'night_stand':7, 'bookshelf':8, 'bathtub':9}
type_mean_size = {'bathtub': np.array([0.765840,1.398258,0.472728]),
'bed': np.array([2.114256,1.620300,0.927272]),
'bookshelf': np.array([0.404671,1.071108,1.688889]),
'chair': np.array([0.591958,0.552978,0.827272]),
'desk': np.array([0.695190,1.346299,0.736364]),
'dresser': np.array([0.528526,1.002642,1.172878]),
'night_stand': np.array([0.500618,0.632163,0.683424]),
'sofa': np.array([0.923508,1.867419,0.845495]),
'table': np.array([0.791118,1.279516,0.718182]),
'toilet': np.array([0.699104,0.454178,0.756250])}
NUM_HEADING_BIN = 12
NUM_SIZE_CLUSTER = 10
NUM_CLASS = 10
def rotate_pc_along_y(pc, rot_angle):
''' Input ps is NxC points with first 3 channels as XYZ
z is facing forward, x is left ward, y is downward
'''
cosval = np.cos(rot_angle)
sinval = np.sin(rot_angle)
rotmat = np.array([[cosval, -sinval],[sinval, cosval]])
pc[:,[0,2]] = np.dot(pc[:,[0,2]], np.transpose(rotmat))
return pc
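# Worked example (a sketch of the convention above): a point on the +z axis,
# rotated by pi/2 about y, lands on the -x axis:
#
#     >>> rotate_pc_along_y(np.array([[0.0, 0.0, 1.0]]), np.pi / 2)
#     array([[-1.,  0.,  0.]])      # up to floating-point rounding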
def angle2class(angle, num_class):
''' Convert continuous angle to discrete class
[optional] also returns a small regression number from
class center angle to current angle.
angle is from 0-2pi (or -pi~pi), class center at 0, 1*(2pi/N), 2*(2pi/N) ... (N-1)*(2pi/N)
return is class of int32 of 0,1,...,N-1 and a number such that
class*(2pi/N) + number = angle
'''
angle = angle%(2*np.pi)
assert(angle>=0 and angle<=2*np.pi)
angle_per_class = 2*np.pi/float(num_class)
shifted_angle = (angle+angle_per_class/2)%(2*np.pi)
class_id = int(shifted_angle/angle_per_class)
residual_angle = shifted_angle - (class_id*angle_per_class+angle_per_class/2)
return class_id, residual_angle
def class2angle(pred_cls, residual, num_class, to_label_format=True):
''' Inverse function to angle2class '''
angle_per_class = 2*np.pi/float(num_class)
angle_center = pred_cls * angle_per_class
angle = angle_center + residual
if to_label_format and angle>np.pi:
angle = angle - 2*np.pi
return angle
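# Round-trip sketch for the two helpers above, using this module's
# NUM_HEADING_BIN = 12 (i.e. 30-degree bins):
#
#     >>> cls, res = angle2class(0.3, NUM_HEADING_BIN)    # -> (1, ~-0.2236)
#     >>> class2angle(cls, res, NUM_HEADING_BIN)          # -> ~0.3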
def size2class(size, type_name):
''' Convert 3D box size (l,w,h) to size class and size residual '''
size_class = type2class[type_name]
size_residual = size - type_mean_size[type_name]
return size_class, size_residual
def class2size(pred_cls, residual):
''' Inverse function to size2class '''
mean_size = type_mean_size[class2type[pred_cls]]
return mean_size + residual
def get_3d_box(box_size, heading_angle, center):
''' box_size is array(l,w,h), heading_angle is in radians, clockwise from the positive x axis, center is xyz of box center
output (8,3) array for 3D box corners
Similar to utils/compute_orientation_3d
'''
R = roty(heading_angle)
l,w,h = box_size
x_corners = [l/2,l/2,-l/2,-l/2,l/2,l/2,-l/2,-l/2];
y_corners = [h/2,h/2,h/2,h/2,-h/2,-h/2,-h/2,-h/2];
z_corners = [w/2,-w/2,-w/2,w/2,w/2,-w/2,-w/2,w/2];
corners_3d = np.dot(R, np.vstack([x_corners,y_corners,z_corners]))
corners_3d[0,:] = corners_3d[0,:] + center[0];
corners_3d[1,:] = corners_3d[1,:] + center[1];
corners_3d[2,:] = corners_3d[2,:] + center[2];
corners_3d = np.transpose(corners_3d)
return corners_3d
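# Example (a sketch, assuming roty(0) is the identity rotation): an
# axis-aligned box (heading 0) of size l=2, w=4, h=6 centred at the origin
# has corners at x = +-1, y = +-3, z = +-2, so the first corner returned is:
#
#     >>> get_3d_box(np.array([2.0, 4.0, 6.0]), 0, np.array([0.0, 0.0, 0.0]))[0]
#     array([1., 3., 2.])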
def compute_box3d_iou(center_pred, heading_logits, heading_residuals, size_logits, size_residuals, center_label, heading_class_label, heading_residual_label, size_class_label, size_residual_label):
''' Used for confidence score supervision..
Inputs:
center_pred: (B,3)
heading_logits: (B,NUM_HEADING_BIN)
heading_residuals: (B,NUM_HEADING_BIN)
size_logits: (B,NUM_SIZE_CLUSTER)
size_residuals: (B,NUM_SIZE_CLUSTER,3)
center_label: (B,3)
heading_class_label: (B,)
heading_residual_label: (B,)
size_class_label: (B,)
size_residual_label: (B,3)
Output:
iou2ds: (B,) birdeye view oriented 2d box ious
iou3ds: (B,) 3d box ious
'''
batch_size = heading_logits.shape[0]
heading_class = np.argmax(heading_logits, 1) # B
heading_residual = np.array([heading_residuals[i,heading_class[i]] for i in range(batch_size)]) # B,
size_class = np.argmax(size_logits, 1) # B
size_residual = np.vstack([size_residuals[i,size_class[i],:] for i in range(batch_size)])
iou2d_list = []
iou3d_list = []
for i in range(batch_size):
heading_angle = class2angle(heading_class[i], heading_residual[i], NUM_HEADING_BIN)
box_size = class2size(size_class[i], size_residual[i])
corners_3d = get_3d_box(box_size, heading_angle, center_pred[i])
heading_angle_label = class2angle(heading_class_label[i], heading_residual_label[i], NUM_HEADING_BIN)
box_size_label = class2size(size_class_label[i], size_residual_label[i])
corners_3d_label = get_3d_box(box_size_label, heading_angle_label, center_label[i])
iou_3d, iou_2d = box3d_iou(corners_3d, corners_3d_label)
iou3d_list.append(iou_3d)
iou2d_list.append(iou_2d)
return np.array(iou2d_list, dtype=np.float32), np.array(iou3d_list, dtype=np.float32)
def compare_with_anchor_boxes(center_label, heading_class_label, heading_residual_label, size_class_label, size_residual_label):
''' Compute IoUs between GT box and anchor boxes.
Compute heading,size,center regression from anchor boxes to GT box: NHxNS of them in the order of
heading0: size0,size1,...
heading1: size0,size1,...
...
Inputs:
center_label: (B,3) -- assume this center is already close to (0,0,0) e.g. subtracted stage1_center
heading_class_label: (B,)
heading_residual_label: (B,)
size_class_label: (B,)
size_residual_label: (B,3)
Output:
iou2ds: (B,K) where K = NH*NS
iou3ds: (B,K)
center_residuals: (B,K,3)
heading_residuals: (B,K)
size_residuals: (B,K,3)
'''
B = len(heading_class_label)
K = NUM_HEADING_BIN*NUM_SIZE_CLUSTER
iou3ds = np.zeros((B,K), dtype=np.float32)
iou2ds = np.zeros((B,K), dtype=np.float32)
center_residuals = np.zeros((B,K,3), dtype=np.float32)
heading_residuals = np.zeros((B,K), dtype=np.float32)
size_residuals = np.zeros((B,K,3), dtype=np.float32)
corners_3d_anchor_list = []
heading_anchor_list = []
box_anchor_list = []
for j in range(NUM_HEADING_BIN):
for k in range(NUM_SIZE_CLUSTER):
heading_angle = class2angle(j,0,NUM_HEADING_BIN)
box_size = class2size(k,np.zeros((3,)))
corners_3d_anchor = get_3d_box(box_size, heading_angle, np.zeros((3,)))
corners_3d_anchor_list.append(corners_3d_anchor)
heading_anchor_list.append(heading_angle)
box_anchor_list.append(box_size)
for i in range(B):
heading_angle_label = class2angle(heading_class_label[i], heading_residual_label[i], NUM_HEADING_BIN)
box_size_label = class2size(size_class_label[i], size_residual_label[i])
corners_3d_label = get_3d_box(box_size_label, heading_angle_label, center_label[i])
for j in range(K):
iou_3d, iou_2d = box3d_iou(corners_3d_anchor_list[j], corners_3d_label)
iou3ds[i,j] = iou_3d
iou2ds[i,j] = iou_2d
center_residuals[i,j,:] = center_label[i]
heading_residuals[i,j] = heading_angle_label - heading_anchor_list[j]
size_residuals[i,j,:] = box_size_label - box_anchor_list[j]
return iou2ds, iou3ds, center_residuals, heading_residuals, size_residuals
class ROISegBoxDataset(object):
def __init__(self, npoints, split, random_flip=False, random_shift=False, rotate_to_center=False, overwritten_data_path=None, from_rgb_detection=False, one_hot=False):
self.npoints = npoints
self.random_flip = random_flip
self.random_shift = random_shift
self.rotate_to_center = rotate_to_center
self.one_hot = one_hot
if overwritten_data_path is None:
overwritten_data_path = os.path.join(BASE_DIR, '%s_1002.zip.pickle'%(split))
self.from_rgb_detection = from_rgb_detection
if from_rgb_detection:
self.id_list, self.box2d_list, self.input_list, self.type_list, self.frustum_angle_list, self.prob_list = load_zipped_pickle(overwritten_data_path)
else:
self.id_list,self.box2d_list,self.box3d_list,self.input_list,self.label_list,self.type_list,self.heading_list,self.size_list,self.frustum_angle_list=load_zipped_pickle(overwritten_data_path)
def __len__(self):
return len(self.input_list)
def __getitem__(self, index):
# ------------------------------ INPUTS ----------------------------
rot_angle = self.get_center_view_rot_angle(index)
# compute one hot vector
if self.one_hot:
cls_type = self.type_list[index]
assert(cls_type in ['bed','table','sofa','chair','toilet','desk','dresser','night_stand','bookshelf','bathtub'])
one_hot_vec = np.zeros((NUM_CLASS))
one_hot_vec[type2onehotclass[cls_type]] = 1
# Get point cloud
if self.rotate_to_center:
point_set = self.get_center_view_point_set(index)
else:
point_set = self.input_list[index]
# Resample
choice = np.random.choice(point_set.shape[0], self.npoints, replace=True)
point_set = point_set[choice, :]
if self.from_rgb_detection:
if self.one_hot:
return point_set, rot_angle, self.prob_list[index], one_hot_vec
else:
return point_set, rot_angle, self.prob_list[index]
# ------------------------------ LABELS ----------------------------
seg = self.label_list[index]
seg = seg[choice]
# Get center point of 3D box
if self.rotate_to_center:
box3d_center = self.get_center_view_box3d_center(index)
else:
box3d_center = self.get_box3d_center(index)
# Heading
if self.rotate_to_center:
heading_angle = self.heading_list[index] - rot_angle
else:
heading_angle = self.heading_list[index]
# Size
size_class, size_residual = size2class(self.size_list[index], self.type_list[index])
# Data Augmentation
if self.random_flip:
if np.random.random()>0.5:
point_set[:,0] *= -1
box3d_center[0] *= -1
heading_angle = np.pi - heading_angle
# NOTE: rot_angle won't be correct if we have random_flip...
if self.random_shift:
dist = np.sqrt(np.sum(box3d_center[0]**2+box3d_center[1]**2))
shift = np.clip(np.random.randn()*dist*0.05, dist*0.8, dist*1.2)
point_set[:,2] += shift
box3d_center[2] += shift
height_shift = np.random.random()*0.4-0.2 # randomly shift +-0.2 meters
point_set[:,1] += height_shift
box3d_center[1] += height_shift
angle_class, angle_residual = angle2class(heading_angle, NUM_HEADING_BIN)
if self.one_hot:
return point_set, seg, box3d_center, angle_class, angle_residual, size_class, size_residual, rot_angle, one_hot_vec
else:
return point_set, seg, box3d_center, angle_class, angle_residual, size_class, size_residual, rot_angle
def get_center_view_rot_angle(self, index):
return np.pi/2.0 + self.frustum_angle_list[index]
def get_box3d_center(self, index):
box3d_center = (self.box3d_list[index][0,:] + self.box3d_list[index][6,:])/2.0
return box3d_center
def get_center_view_box3d_center(self, index):
box3d_center = (self.box3d_list[index][0,:] + self.box3d_list[index][6,:])/2.0
return rotate_pc_along_y(np.expand_dims(box3d_center,0), self.get_center_view_rot_angle(index)).squeeze()
def get_center_view_box3d(self, index):
box3d = self.box3d_list[index]
box3d_center_view = np.copy(box3d)
return rotate_pc_along_y(box3d_center_view, self.get_center_view_rot_angle(index))
def get_center_view_point_set(self, index):
''' Input ps is NxC points with first 3 channels as XYZ
z is facing forward, x is left ward, y is downward
'''
# Use np.copy to avoid corrupting original data
point_set = np.copy(self.input_list[index])
return rotate_pc_along_y(point_set, self.get_center_view_rot_angle(index))
def from_prediction_to_label_format(center, angle_class, angle_res, size_class, size_res, rot_angle):
l,w,h = class2size(size_class, size_res)
ry = class2angle(angle_class, angle_res, NUM_HEADING_BIN) + rot_angle
tx,ty,tz = rotate_pc_along_y(np.expand_dims(center,0),-rot_angle).squeeze()
ty += h/2.0
return h,w,l,tx,ty,tz,ry
if __name__=='__main__':
import mayavi.mlab as mlab
sys.path.append(os.path.join(BASE_DIR, '../../mayavi'))
from viz_util import draw_lidar, draw_gt_boxes3d
median_list = []
dataset = ROISegBoxDataset(2048, split='val', rotate_to_center=True, random_flip=True, random_shift=True, overwritten_data_path='val_1002_mini.zip.pickle')
print(type(dataset.input_list[0][0,0]))
print(dataset.input_list[0].shape)
print(dataset.input_list[2].shape)
input()
for i in range(len(dataset)):
data = dataset[i]
print('Center: ', data[2], 'angle_class: ', data[3], 'angle_res:', data[4], 'size_class: ', data[5], 'size_residual:', data[6], 'real_size:', type_mean_size[class2type[data[5]]]+data[6])
print('Frustum angle: ', dataset.frustum_angle_list[i])
median_list.append(np.median(data[0][:,0]))
print(data[2], dataset.box3d_list[i], median_list[-1])
box3d_from_label = get_3d_box(class2size(data[5],data[6]), class2angle(data[3], data[4],12), data[2])
#input()
## Recover original labels
#rot_angle = dataset.get_center_view_rot_angle(i)
#print dataset.id_list[i]
#print from_prediction_to_label_format(data[2], data[3], data[4], data[5], data[6], rot_angle)
ps = data[0]
seg = data[1]
fig = mlab.figure(figure=None, bgcolor=(0.4,0.4,0.4), fgcolor=None, engine=None, size=(1000, 500))
mlab.points3d(ps[:,0], ps[:,1], ps[:,2], seg, mode='point', colormap='gnuplot', scale_factor=1, figure=fig)
mlab.points3d(0, 0, 0, color=(1,1,1), mode='sphere', scale_factor=0.2, figure=fig)
#draw_gt_boxes3d([dataset.get_center_view_box3d(i)], fig)
draw_gt_boxes3d([box3d_from_label], fig, color=(1,0,0))
mlab.orientation_axes()
print(ps[0:10,:])
input()
print(np.mean(np.abs(median_list)))
|
the-stack_106_15445
|
import string
from tkinter import *
from idlelib.Delegator import Delegator
#$ event <<redo>>
#$ win <Control-y>
#$ unix <Alt-z>
#$ event <<undo>>
#$ win <Control-z>
#$ unix <Control-z>
#$ event <<dump-undo-state>>
#$ win <Control-backslash>
#$ unix <Control-backslash>
class UndoDelegator(Delegator):
max_undo = 1000
def __init__(self):
Delegator.__init__(self)
self.reset_undo()
def setdelegate(self, delegate):
if self.delegate is not None:
self.unbind("<<undo>>")
self.unbind("<<redo>>")
self.unbind("<<dump-undo-state>>")
Delegator.setdelegate(self, delegate)
if delegate is not None:
self.bind("<<undo>>", self.undo_event)
self.bind("<<redo>>", self.redo_event)
self.bind("<<dump-undo-state>>", self.dump_event)
def dump_event(self, event):
from pprint import pprint
pprint(self.undolist[:self.pointer])
print("pointer:", self.pointer, end=' ')
print("saved:", self.saved, end=' ')
print("can_merge:", self.can_merge, end=' ')
print("get_saved():", self.get_saved())
pprint(self.undolist[self.pointer:])
return "break"
def reset_undo(self):
self.was_saved = -1
self.pointer = 0
self.undolist = []
self.undoblock = 0 # or a CommandSequence instance
self.set_saved(1)
def set_saved(self, flag):
if flag:
self.saved = self.pointer
else:
self.saved = -1
self.can_merge = False
self.check_saved()
def get_saved(self):
return self.saved == self.pointer
saved_change_hook = None
def set_saved_change_hook(self, hook):
self.saved_change_hook = hook
was_saved = -1
def check_saved(self):
is_saved = self.get_saved()
if is_saved != self.was_saved:
self.was_saved = is_saved
if self.saved_change_hook:
self.saved_change_hook()
def insert(self, index, chars, tags=None):
self.addcmd(InsertCommand(index, chars, tags))
def delete(self, index1, index2=None):
self.addcmd(DeleteCommand(index1, index2))
# Clients should call undo_block_start() and undo_block_stop()
# around a sequence of editing cmds to be treated as a unit by
# undo & redo. Nested matching calls are OK, and the inner calls
# then act like nops. OK too if no editing cmds, or only one
# editing cmd, is issued in between: if no cmds, the whole
# sequence has no effect; and if only one cmd, that cmd is entered
# directly into the undo list, as if undo_block_xxx hadn't been
# called. The intent of all that is to make this scheme easy
# to use: all the client has to worry about is making sure each
# _start() call is matched by a _stop() call.
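    # Illustrative usage sketch: with `d` an UndoDelegator installed in a
    # Percolator over a Text widget, grouping an insert and a delete makes a
    # single <<undo>> revert both edits at once:
    #
    #     d.undo_block_start()
    #     d.insert('insert', 'foo')
    #     d.delete('insert -3c', 'insert -1c')
    #     d.undo_block_stop()
    #     d.undo_event(None)   # reverts the insert and the delete together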
def undo_block_start(self):
if self.undoblock == 0:
self.undoblock = CommandSequence()
self.undoblock.bump_depth()
def undo_block_stop(self):
if self.undoblock.bump_depth(-1) == 0:
cmd = self.undoblock
self.undoblock = 0
if len(cmd) > 0:
if len(cmd) == 1:
# no need to wrap a single cmd
cmd = cmd.getcmd(0)
# this blk of cmds, or single cmd, has already
# been done, so don't execute it again
self.addcmd(cmd, 0)
def addcmd(self, cmd, execute=True):
if execute:
cmd.do(self.delegate)
if self.undoblock != 0:
self.undoblock.append(cmd)
return
if self.can_merge and self.pointer > 0:
lastcmd = self.undolist[self.pointer-1]
if lastcmd.merge(cmd):
return
self.undolist[self.pointer:] = [cmd]
if self.saved > self.pointer:
self.saved = -1
self.pointer = self.pointer + 1
if len(self.undolist) > self.max_undo:
##print "truncating undo list"
del self.undolist[0]
self.pointer = self.pointer - 1
if self.saved >= 0:
self.saved = self.saved - 1
self.can_merge = True
self.check_saved()
def undo_event(self, event):
if self.pointer == 0:
self.bell()
return "break"
cmd = self.undolist[self.pointer - 1]
cmd.undo(self.delegate)
self.pointer = self.pointer - 1
self.can_merge = False
self.check_saved()
return "break"
def redo_event(self, event):
if self.pointer >= len(self.undolist):
self.bell()
return "break"
cmd = self.undolist[self.pointer]
cmd.redo(self.delegate)
self.pointer = self.pointer + 1
self.can_merge = False
self.check_saved()
return "break"
class Command:
# Base class for Undoable commands
tags = None
def __init__(self, index1, index2, chars, tags=None):
self.marks_before = {}
self.marks_after = {}
self.index1 = index1
self.index2 = index2
self.chars = chars
if tags:
self.tags = tags
def __repr__(self):
s = self.__class__.__name__
t = (self.index1, self.index2, self.chars, self.tags)
if self.tags is None:
t = t[:-1]
return s + repr(t)
def do(self, text):
pass
def redo(self, text):
pass
def undo(self, text):
pass
def merge(self, cmd):
return 0
def save_marks(self, text):
marks = {}
for name in text.mark_names():
if name != "insert" and name != "current":
marks[name] = text.index(name)
return marks
def set_marks(self, text, marks):
for name, index in marks.items():
text.mark_set(name, index)
class InsertCommand(Command):
# Undoable insert command
def __init__(self, index1, chars, tags=None):
Command.__init__(self, index1, None, chars, tags)
def do(self, text):
self.marks_before = self.save_marks(text)
self.index1 = text.index(self.index1)
if text.compare(self.index1, ">", "end-1c"):
# Insert before the final newline
self.index1 = text.index("end-1c")
text.insert(self.index1, self.chars, self.tags)
self.index2 = text.index("%s+%dc" % (self.index1, len(self.chars)))
self.marks_after = self.save_marks(text)
##sys.__stderr__.write("do: %s\n" % self)
def redo(self, text):
text.mark_set('insert', self.index1)
text.insert(self.index1, self.chars, self.tags)
self.set_marks(text, self.marks_after)
text.see('insert')
##sys.__stderr__.write("redo: %s\n" % self)
def undo(self, text):
text.mark_set('insert', self.index1)
text.delete(self.index1, self.index2)
self.set_marks(text, self.marks_before)
text.see('insert')
##sys.__stderr__.write("undo: %s\n" % self)
def merge(self, cmd):
if self.__class__ is not cmd.__class__:
return False
if self.index2 != cmd.index1:
return False
if self.tags != cmd.tags:
return False
if len(cmd.chars) != 1:
return False
if self.chars and \
self.classify(self.chars[-1]) != self.classify(cmd.chars):
return False
self.index2 = cmd.index2
self.chars = self.chars + cmd.chars
return True
alphanumeric = string.ascii_letters + string.digits + "_"
def classify(self, c):
if c in self.alphanumeric:
return "alphanumeric"
if c == "\n":
return "newline"
return "punctuation"
class DeleteCommand(Command):
# Undoable delete command
def __init__(self, index1, index2=None):
Command.__init__(self, index1, index2, None, None)
def do(self, text):
self.marks_before = self.save_marks(text)
self.index1 = text.index(self.index1)
if self.index2:
self.index2 = text.index(self.index2)
else:
self.index2 = text.index(self.index1 + " +1c")
if text.compare(self.index2, ">", "end-1c"):
# Don't delete the final newline
self.index2 = text.index("end-1c")
self.chars = text.get(self.index1, self.index2)
text.delete(self.index1, self.index2)
self.marks_after = self.save_marks(text)
##sys.__stderr__.write("do: %s\n" % self)
def redo(self, text):
text.mark_set('insert', self.index1)
text.delete(self.index1, self.index2)
self.set_marks(text, self.marks_after)
text.see('insert')
##sys.__stderr__.write("redo: %s\n" % self)
def undo(self, text):
text.mark_set('insert', self.index1)
text.insert(self.index1, self.chars)
self.set_marks(text, self.marks_before)
text.see('insert')
##sys.__stderr__.write("undo: %s\n" % self)
class CommandSequence(Command):
# Wrapper for a sequence of undoable cmds to be undone/redone
# as a unit
def __init__(self):
self.cmds = []
self.depth = 0
def __repr__(self):
s = self.__class__.__name__
strs = []
for cmd in self.cmds:
strs.append(" %r" % (cmd,))
return s + "(\n" + ",\n".join(strs) + "\n)"
def __len__(self):
return len(self.cmds)
def append(self, cmd):
self.cmds.append(cmd)
def getcmd(self, i):
return self.cmds[i]
def redo(self, text):
for cmd in self.cmds:
cmd.redo(text)
def undo(self, text):
cmds = self.cmds[:]
cmds.reverse()
for cmd in cmds:
cmd.undo(text)
def bump_depth(self, incr=1):
self.depth = self.depth + incr
return self.depth
def _undo_delegator(parent):
    import re
    from idlelib.Percolator import Percolator
root = Tk()
root.title("Test UndoDelegator")
width, height, x, y = list(map(int, re.split('[x+]', parent.geometry())))
root.geometry("+%d+%d"%(x, y + 150))
text = Text(root)
text.config(height=10)
text.pack()
text.focus_set()
p = Percolator(text)
d = UndoDelegator()
p.insertfilter(d)
undo = Button(root, text="Undo", command=lambda:d.undo_event(None))
undo.pack(side='left')
redo = Button(root, text="Redo", command=lambda:d.redo_event(None))
redo.pack(side='left')
dump = Button(root, text="Dump", command=lambda:d.dump_event(None))
dump.pack(side='left')
root.mainloop()
if __name__ == "__main__":
from idlelib.idle_test.htest import run
run(_undo_delegator)
|
the-stack_106_15446
|
"""Support for Aussie Broadband metric sensors."""
from __future__ import annotations
from typing import Any
from homeassistant.components.sensor import (
SensorEntity,
SensorEntityDescription,
SensorStateClass,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import DATA_KILOBYTES, DATA_MEGABYTES, TIME_DAYS
from homeassistant.core import HomeAssistant
from homeassistant.helpers.device_registry import DeviceEntryType
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from .const import DOMAIN, SERVICE_ID
SENSOR_DESCRIPTIONS: tuple[SensorEntityDescription, ...] = (
# Internet Services sensors
SensorEntityDescription(
key="usedMb",
name="Data Used",
state_class=SensorStateClass.TOTAL_INCREASING,
native_unit_of_measurement=DATA_MEGABYTES,
icon="mdi:network",
),
SensorEntityDescription(
key="downloadedMb",
name="Downloaded",
state_class=SensorStateClass.TOTAL_INCREASING,
native_unit_of_measurement=DATA_MEGABYTES,
icon="mdi:download-network",
),
SensorEntityDescription(
key="uploadedMb",
name="Uploaded",
state_class=SensorStateClass.TOTAL_INCREASING,
native_unit_of_measurement=DATA_MEGABYTES,
icon="mdi:upload-network",
),
# Mobile Phone Services sensors
SensorEntityDescription(
key="national",
name="National Calls",
state_class=SensorStateClass.TOTAL_INCREASING,
icon="mdi:phone",
),
SensorEntityDescription(
key="mobile",
name="Mobile Calls",
state_class=SensorStateClass.TOTAL_INCREASING,
icon="mdi:phone",
),
SensorEntityDescription(
key="international",
name="International Calls",
entity_registry_enabled_default=False,
state_class=SensorStateClass.TOTAL_INCREASING,
icon="mdi:phone-plus",
),
SensorEntityDescription(
key="sms",
name="SMS Sent",
state_class=SensorStateClass.TOTAL_INCREASING,
icon="mdi:message-processing",
),
SensorEntityDescription(
key="internet",
name="Data Used",
state_class=SensorStateClass.TOTAL_INCREASING,
native_unit_of_measurement=DATA_KILOBYTES,
icon="mdi:network",
),
SensorEntityDescription(
key="voicemail",
name="Voicemail Calls",
entity_registry_enabled_default=False,
state_class=SensorStateClass.TOTAL_INCREASING,
icon="mdi:phone",
),
SensorEntityDescription(
key="other",
name="Other Calls",
entity_registry_enabled_default=False,
state_class=SensorStateClass.TOTAL_INCREASING,
icon="mdi:phone",
),
# Generic sensors
SensorEntityDescription(
key="daysTotal",
name="Billing Cycle Length",
native_unit_of_measurement=TIME_DAYS,
icon="mdi:calendar-range",
),
SensorEntityDescription(
key="daysRemaining",
name="Billing Cycle Remaining",
native_unit_of_measurement=TIME_DAYS,
icon="mdi:calendar-clock",
),
)
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
"""Set up the Aussie Broadband sensor platform from a config entry."""
async_add_entities(
[
AussieBroadandSensorEntity(service, description)
for service in hass.data[DOMAIN][entry.entry_id]["services"]
for description in SENSOR_DESCRIPTIONS
if description.key in service["coordinator"].data
]
)
class AussieBroadandSensorEntity(CoordinatorEntity, SensorEntity):
"""Base class for Aussie Broadband metric sensors."""
def __init__(
self, service: dict[str, Any], description: SensorEntityDescription
) -> None:
"""Initialize the sensor."""
super().__init__(service["coordinator"])
self.entity_description = description
self._attr_unique_id = f"{service[SERVICE_ID]}:{description.key}"
self._attr_name = f"{service['name']} {description.name}"
self._attr_device_info = DeviceInfo(
entry_type=DeviceEntryType.SERVICE,
identifiers={(DOMAIN, service[SERVICE_ID])},
manufacturer="Aussie Broadband",
configuration_url=f"https://my.aussiebroadband.com.au/#/{service['name']}/{service[SERVICE_ID]}/",
name=service["description"],
model=service["name"],
)
@property
def native_value(self):
"""Return the state of the sensor."""
if self.entity_description.key == "internet":
return self.coordinator.data[self.entity_description.key].get("kbytes")
if self.entity_description.key in ("national", "mobile", "sms"):
return self.coordinator.data[self.entity_description.key].get("calls")
return self.coordinator.data[self.entity_description.key]
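    # Illustrative shapes of coordinator.data handled above (assumed, not taken
    # from the Aussie Broadband API documentation):
    #   {"usedMb": 12345, "daysRemaining": 12}   -> value returned as-is
    #   {"internet": {"kbytes": 204800}}         -> nested "kbytes" value
    #   {"national": {"calls": 3}}               -> nested "calls" value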
|
the-stack_106_15447
|
# Copyright 2009-2017 Ram Rachum.
# This program is distributed under the MIT license.
import collections
from combi._python_toolbox import caching
# (`PermSpace` exported to here from `perm_space.py` to avoid import loop.)
class _FixedMapManagingMixin(object):
'''
Mixin for `PermSpace` to manage the `fixed_map`. (For fixed perm spaces.)
'''
@caching.CachedProperty
def fixed_indices(self):
'''
The indices of any fixed items in this `PermSpace`.
This'll be different from `self.fixed_map.keys()` for dapplied perm
spaces.
'''
if not self.fixed_map:
return ()
return tuple(map(self.domain.index, self.fixed_map))
free_indices = caching.CachedProperty(
lambda self: tuple(item for item in range(self.sequence_length)
if item not in self._undapplied_fixed_map.keys()),
doc='''Integer indices of free items.'''
)
free_keys = caching.CachedProperty(
lambda self: tuple(item for item in self.domain
if item not in self.fixed_map.keys()),
doc='''Indices (possibly from domain) of free items.'''
)
@caching.CachedProperty
def free_values(self):
'''Items that can change between permutations.'''
# This algorithm is required instead of just a one-liner because in the
# case of recurrent sequences, we don't want to remove all the sequence
# items that are in `self.fixed_map.values()` but only as many as there
# are in `self.fixed_map.values()`.
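        # Illustrative example (assumed call signature): for
        # PermSpace('abcab', fixed_map={0: 'a'}) the Bag holds a single 'a',
        # so only one 'a' is skipped and free_values becomes
        # ('b', 'c', 'a', 'b') instead of losing every 'a'.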
from combi._python_toolbox.nifty_collections import Bag
free_values = []
fixed_counter = Bag(self.fixed_map.values())
for item in self.sequence:
if fixed_counter[item]:
fixed_counter[item] -= 1
else:
free_values.append(item)
return tuple(free_values)
@caching.CachedProperty
def _n_cycles_in_fixed_items_of_just_fixed(self):
'''
The number of cycles in the fixed items of this `PermSpace`.
This is used for degree calculations.
'''
unvisited_items = set(self._undapplied_unrapplied_fixed_map)
n_cycles = 0
while unvisited_items:
starting_item = current_item = next(iter(unvisited_items))
while current_item in unvisited_items:
unvisited_items.remove(current_item)
current_item = \
self._undapplied_unrapplied_fixed_map[current_item]
if current_item == starting_item:
n_cycles += 1
return n_cycles
@caching.CachedProperty
def _undapplied_fixed_map(self):
if self.is_dapplied:
return dict((self.domain.index(key), value) for key, value
in self.fixed_map.items())
else:
return self.fixed_map
@caching.CachedProperty
def _undapplied_unrapplied_fixed_map(self):
if self.is_dapplied or self.is_rapplied:
return dict((self.domain.index(key), self.sequence.index(value))
for key, value in self.fixed_map.items())
else:
return self.fixed_map
@caching.CachedProperty
def _free_values_purified_perm_space(self):
'''
A purified `PermSpace` of the free values in the `PermSpace`.
Non-fixed permutation spaces have this set to `self` in the
constructor.
'''
if self.is_fixed:
return PermSpace(
len(self.free_indices),
n_elements=self.n_elements-len(self.fixed_map)
)
else:
return self.purified
_free_values_unsliced_perm_space = caching.CachedProperty(
lambda self: self._free_values_purified_perm_space.get_degreed(
(degree - self._n_cycles_in_fixed_items_of_just_fixed
for degree in self.degrees)
if self.is_degreed else None).get_rapplied(self.free_values).
get_dapplied(self.free_keys).
get_partialled(self.n_elements - len(self.fixed_map)),
)
|
the-stack_106_15450
|
class Solution:
def numTrees(self, n: int) -> int:
"""
        dp[n] = sum(dp[left] * dp[n - 1 - left] for left in range(n)), with dp[0] = dp[1] = 1
        Catalan numbers: [1, 1, 2, 5, 14, ...]
"""
if n < 2:
return 1
dp = [0 for _ in range(n + 1)]
dp[0] = 1
dp[1] = 1
for nodes in range(2, n + 1):
for left in range(nodes):
dp[nodes] += dp[left] * dp[nodes - 1 - left]
return dp[-1]
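# Quick sanity check (illustrative; assumes the file is executed directly):
# the DP reproduces the Catalan numbers 1, 2, 5, 14, 42 for n = 1..5.
if __name__ == "__main__":
    assert [Solution().numTrees(n) for n in range(1, 6)] == [1, 2, 5, 14, 42]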
|
the-stack_106_15451
|
import sys
from Classes.Config import Config
from Classes.Image import AnnotatedImage, ArtificialAnnotatedImage
#sys.path.append(r'D:\DeepLearning\Kaggle\TileImages')
#from tools import rescaleAndTile,getMeanMaskObjectSize
from Classes.Image import AnnotatedImage
import tifffile
import numpy as np
from tqdm import tqdm
import os
import skimage.transform as ski_transform
from skimage import filters
import scipy.misc
import matplotlib.pyplot as plt
import glob
from scipy.ndimage.interpolation import map_coordinates
from scipy.ndimage.filters import gaussian_filter
from PIL import Image,ImageEnhance
import cv2
from skimage.measure import label
import skimage
from random import randint
from skimage import transform as trf
from random import uniform
INPUT_SHAPE = [1,256,256]
from skimage.measure import label
from skimage import measure
import xml.etree.ElementTree as ET
import PIL.ImageDraw as ImageDraw
import PIL.Image as Image
from skimage.morphology import disk, dilation
class Tools:
MEAN_NUCLEI_SIZE = 0.2
def getLocalDataPath(self,path,content):
config = Config
if (content == 1): # Image
erg = str.replace(path,'/var/www/TisQuant/data/',config.local_data_path)
elif (content==2):
erg = str.replace(path,'/var/www/TisQuant/data/automaticresult/',config.local_data_path + 'automaticresult\\')
elif (content==3):
erg = str.replace(path,'/var/www/TisQuant/data/groundtruth/', config.local_data_path + 'groundtruth\\')
        elif (content==4):
erg = str.replace(path,'/var/www/TisQuant/data/database/',config.local_data_path + 'database\\')
else:
erg=path
return erg
def createAndSaveTiles(self,annotated_nuclei,config):
        images = []
        masks = []
        path_to_img = []
        if config.mode == 'test':
            diagnosis = []
        for i in range(0,annotated_nuclei.images.__len__()):
            images.append(annotated_nuclei.images[i].getRaw())
            masks.append(annotated_nuclei.images[i].getMask())
            path_to_img.append(annotated_nuclei.path_to_imgs[i])
# Get scales from masks
print("Calculate mean object size ...")
scales_for_conv = self.getNormalizedScales(annotated_nuclei.images)
# Rescale and Tile
print("Rescale and tile images and masks to " + config.outputFolder + "...")
[images,masks,img_index,tile_index,tile_scales] = self.rescaleAndTile(images=images,masks=masks,scales=scales_for_conv,overlap = config.overlap,rescale=config.scale,mode=config.mode,path_to_img=path_to_img)
# Create artificial dataset
if (config.diagnosis.__len__() > 1):
img_name = 'combined'
else:
img_name = config.diagnosis[0]
print("Save tiled dataset ...")
for i in range(0, images.__len__()):
scipy.misc.toimage(images[i], cmin=0.0, cmax=1.0).save(config.outputFolder + '\\images\\Img_' + img_name + '_' + str(i) + '.jpg')
tifffile.imsave(config.outputFolder + '\\masks\\Mask_' + img_name + '_' + str(i) + '.tif',(masks[i]).astype(np.uint8))
if config.mode == 'test':
with open(config.resultsfile, 'a') as f:
#f.write(config.outputFolder + ',' + str(img_index[i]) + ',' + str(tile_index[i]) + "\n")
f.write(img_index[i] + ',' + str(tile_scales[i]) + ',' + str(tile_index[i]) + "\n")
def createAndSaveTilesForSampleSegmentation(self,annotated_nuclei,config,scale):
images = []
path_to_img = []
scales_for_conv = []
for i in range(0,annotated_nuclei.images.__len__()):
images.append(annotated_nuclei.images[i].getRaw())
path_to_img.append(annotated_nuclei.path_to_imgs[i])
scales_for_conv.append(scale)
# Rescale and Tile
print("Rescale and tile images and masks to " + config.outputFolder + "...")
[images,img_index,tile_index,tile_scales] = self.rescaleAndTileForSampleSegmentation(images=images,scales=scales_for_conv,overlap = config.overlap,rescale=config.scale,mode=config.mode,path_to_img=path_to_img)
print(images.__len__())
print(img_index.__len__())
print(tile_index.__len__())
print(tile_scales.__len__())
# Create artificial dataset
print("Save tiled dataset ...")
print(config.outputFolder)
for i in range(0, images.__len__()):
cv2.imwrite(config.outputFolder + '/' + os.path.basename(img_index[i]).replace('.'+os.path.basename(img_index[i]).split('.')[1],'_' + self.getNumeration(i) + '.jpg'),images[i])
#scipy.misc.toimage(images[i], cmin=0.0, cmax=1.0).save(config.outputFolder + '/' + os.path.basename(img_index[i]).replace('.'+os.path.basename(img_index[i]).split('.')[1],'_' + self.getNumeration(i) + '.jpg'))
with open(config.resultsfile, 'a') as f:
f.write(img_index[i] + ',' + str(tile_scales[i]) + ',' + str(tile_index[i]) + "\n")
def getNumeration(self,i):
#return ("0x%0.2X" % i).split('x')[1]
#return str(chr(97+i))
#return str(i)
return "{0:0>4}".format(i)
def createPix2pixDataset(self,annotated_nuclei,config):
images = []
masks = []
for i in range(0,annotated_nuclei.images.__len__()):
images.append(annotated_nuclei.images[i].getRaw())
masks.append(annotated_nuclei.images[i].getMask())
# Get scales from masks
print("Calculate mean object size ...")
#scales_for_conv = self.getNormalizedScales(masks)
scales_for_conv = self.getNormalizedScales(annotated_nuclei.images)
# Rescale and Tile
print("Rescale and tile images and masks ...")
[images,masks,t,t,t] = self.rescaleAndTile(images=images,masks=masks,scales=scales_for_conv,overlap = 20,rescale=config.scale)
# Create artificial dataset
if (config.diagnosis.__len__() > 1):
img_name = 'combined'
else:
img_name = config.diagnosis[0]
print("Create artificial dataset ...")
for i in range(0, images.__len__() - 1):
img_nat = AnnotatedImage();
img_nat.createWithArguments(images[i],masks[i])
img_art = ArtificialAnnotatedImage
img_art = img_art.transformToArtificialImage(img_nat,useBorderObjects=config.useBorderObjects)
img_combined = np.zeros((images[0].shape[0], images[0].shape[1] * 2),np.float32)
img_combined[:,0:INPUT_SHAPE[1]] = img_nat.getRaw()
img_combined[:, INPUT_SHAPE[1]:INPUT_SHAPE[1]*2] = img_art.getRaw()
plt.imshow(img_combined,cmap='gray')
img_to_sav = np.zeros((img_combined.shape[0],img_combined.shape[1],3),np.float32)
img_to_sav[:, :, 0] = img_combined
img_to_sav[:, :, 1] = img_combined
img_to_sav[:, :, 2] = img_combined
#scipy.misc.toimage(img_to_sav, cmin=0.0, cmax=1.0).save(config.outputPath + config.outputFolder + '\\Img_' + str(i) + '.jpg')
scipy.misc.toimage(img_to_sav, cmin=0.0, cmax=1.0).save(config.outputFolder + '\\Img_' + img_name + '_' + str(i) + '.jpg')
e=1
#tifffile.imsave('D:\\DeepLearning\\DataGenerator\\Dataset\\Natural\\Natural_img_' + str(i) + '.tif',(annotated_nuclei.images[i].getRaw() * 255.0).astype(np.uint8))
#img = ArtificialAnnotatedImage.transformToArtificialImage(annotated_nuclei.images[i])
#tifffile.imsave('D:\\DeepLearning\\DataGenerator\\Dataset\\Artificial\\Artificial_img_' + str(i) + '.tif',(img.getRaw() * 255.0).astype(np.uint8))
def rescaleAndTile (self,images=None,masks=None,scales=None,rescale=True,overlap=20,mode=None,path_to_img=None):
img_to_return = []
mask_to_return = []
img_path_to_return = []
index_to_return = []
tile_scales = []
nr_images = images.__len__()
for i in tqdm(range(nr_images)):
if (rescale):
#image = np.float32(ski_transform.resize(images[i], (int(images[i].shape[0] * 1 / (scales[i] / MEAN_NUCLEI_SIZE)), int(images[i].shape[1] * 1 / (scales[i] / MEAN_NUCLEI_SIZE))), mode='reflect'))
#mask = self.rescale_mask(masks[i],int(masks[i].shape[0] * 1 / (scales[i] / self.MEAN_NUCLEI_SIZE)), int(masks[i].shape[1] * 1 / (scales[i] / self.MEAN_NUCLEI_SIZE)))
image = self.rescale_image(images[i],(scales[i],scales[i]))
mask = self.rescale_mask(masks[i], (scales[i],scales[i]),make_labels=True)
else:
image = images[i]
mask = masks[i]
x_running = 0
img_new = []
mask_new = []
thresh_img = []
slicesize = [INPUT_SHAPE[1],INPUT_SHAPE[2],INPUT_SHAPE[0]]
thresh_img.append((np.mean(image[np.where(image < filters.threshold_otsu(image))])))
[y, x] = image.shape
running_index = 0
while (x_running <= (x - overlap)):
y_running = 0
while (y_running <= (y - overlap)):
min_x_orig = x_running
min_x_new = 0
min_y_orig = y_running
min_y_new = 0
max_x_orig = x_running + slicesize[1]
max_x_new = slicesize[1]
max_y_orig = y_running + slicesize[0]
max_y_new = slicesize[0]
try:
img_to_save = np.zeros((slicesize[0], slicesize[1]),dtype=np.float32)
mask_to_save = np.zeros((slicesize[0], slicesize[1]), dtype=np.uint8)
img_to_save = img_to_save + thresh_img[0]
if (x_running == 0):
max_x_orig = slicesize[1] - overlap
min_x_new = overlap
if (y_running == 0):
max_y_orig = slicesize[0] - overlap
min_y_new = overlap
if (max_y_orig > y):
max_y_orig = y
max_y_new = y - y_running
if (max_x_orig > x):
max_x_orig = x
max_x_new = x - x_running
if (x < (slicesize[1]-overlap)):
max_x_new = max_x_new + overlap
if (y < (slicesize[0]-overlap)):
max_y_new = max_y_new + overlap
img_to_save[min_y_new:max_y_new, min_x_new:max_x_new] = image[min_y_orig:max_y_orig, min_x_orig:max_x_orig]
mask_to_save[min_y_new:max_y_new, min_x_new:max_x_new] = mask[min_y_orig:max_y_orig, min_x_orig:max_x_orig]
if (((mask_to_save.max()>0) & ((mask_to_save>0).sum() > 100)) | (mode == 'test')):
img_new.append(img_to_save)
mask_new.append(mask_to_save)
try: # change and check which programm calls the function
img_path_to_return.append(path_to_img[i])
index_to_return.append(running_index)
tile_scales.append(scales[i])
except:
print("No pathes given")
running_index = running_index+1
except:
print('failed to tile....')
input("Press Enter to continue...")
y_running = y_running + slicesize[0] - 2 * overlap
del img_to_save
x_running = x_running + slicesize[1] - 2 * overlap
img_to_return.extend(img_new)
mask_to_return.extend(mask_new)
del img_new
del mask_new
return img_to_return,mask_to_return,img_path_to_return,index_to_return,tile_scales
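        # Tiling geometry, as implemented above: each tile is
        # INPUT_SHAPE[1] x INPUT_SHAPE[2] (256 x 256) pixels and the window
        # advances by slicesize - 2*overlap per step, so with the default
        # overlap of 20 px the stride is 216 px and consecutive tiles overlap
        # by 40 px.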
def rescaleAndTileForSampleSegmentation (self,images=None,scales=None,rescale=True,overlap=20,mode=None,path_to_img=None):
img_to_return = []
pathes_to_return = []
img_path_to_return = []
index_to_return = []
tile_scales = []
nr_images = images.__len__()
print("Rescale ...")
print(rescale)
for i in tqdm(range(nr_images)):
if (rescale):
image = self.rescale_image(images[i],(scales[i],scales[i]))
else:
image = images[i]
cv2.imwrite(r"/root/flo/tmp/test_after_rescaling.jpg",image)
x_running = 0
img_new = []
mask_new = []
thresh_img = []
slicesize = [INPUT_SHAPE[1],INPUT_SHAPE[2],INPUT_SHAPE[0]]
thresh_img.append((np.mean(image[np.where(image < filters.threshold_otsu(image))])))
[y, x] = image.shape
running_index = 0
while (x_running <= (x - overlap)):
y_running = 0
while (y_running <= (y - overlap)):
min_x_orig = x_running
min_x_new = 0
min_y_orig = y_running
min_y_new = 0
max_x_orig = x_running + slicesize[1]
max_x_new = slicesize[1]
max_y_orig = y_running + slicesize[0]
max_y_new = slicesize[0]
try:
img_to_save = np.zeros((slicesize[0], slicesize[1]),dtype=np.float32)
img_to_save = img_to_save + thresh_img[0]
print (str(img_to_save.dtype) + "\n")
if (x_running == 0):
max_x_orig = slicesize[1] - overlap
min_x_new = overlap
if (y_running == 0):
max_y_orig = slicesize[0] - overlap
min_y_new = overlap
if (max_y_orig > y):
max_y_orig = y
max_y_new = y - y_running
if (max_x_orig > x):
max_x_orig = x
max_x_new = x - x_running
if (x < (slicesize[1]-overlap)):
max_x_new = max_x_new + overlap
if (y < (slicesize[0]-overlap)):
max_y_new = max_y_new + overlap
img_to_save[min_y_new:max_y_new, min_x_new:max_x_new] = image[min_y_orig:max_y_orig, min_x_orig:max_x_orig]
img_new.append(img_to_save)
try: # change and check which programm calls the function
img_path_to_return.append(path_to_img[i])
index_to_return.append(running_index)
tile_scales.append(scales[i])
except:
print("No pathes given")
running_index = running_index+1
except:
print('failed to tile....')
input("Press Enter to continue...")
y_running = y_running + slicesize[0] - 2 * overlap
del img_to_save
x_running = x_running + slicesize[1] - 2 * overlap
img_to_return.extend(img_new)
del img_new
return img_to_return,img_path_to_return,index_to_return,tile_scales
def reconstruct_images(self,images=None,predictions=None,scales=None,rescale=True,overlap=20,config=None,label_output=False, dilate_objects=False):
print ("Dilate objects? ")
if dilate_objects:
print ("True")
else:
print ("False")
img_to_return = []
mask_to_return = []
nr_images = images.__len__()
running_ind = 0
for i in tqdm(range(nr_images)):
if (rescale):
image = self.rescale_image(images[i],(scales[i],scales[i]))
else:
image = images[i]
x_running = 0
img_new = []
mask_new = []
thresh_img = []
slicesize = [INPUT_SHAPE[1],INPUT_SHAPE[2],INPUT_SHAPE[0]]
[y, x] = image.shape
img_to_save = np.zeros((y, x), dtype=np.float32)
mask_to_save = np.zeros((y, x), dtype=np.float32)
while (x_running <= (x-overlap)):
print ("Step " + str(x_running) + " from " + str(x-overlap) + " ...")
y_running = 0
while (y_running <= (y-overlap)):
min_x_orig = x_running
min_x_new = 0
min_y_orig = y_running
min_y_new = 0
max_x_orig = x_running + slicesize[1]
max_x_new = slicesize[1]
max_y_orig = y_running + slicesize[0]
max_y_new = slicesize[0]
try:
if (x_running == 0):
max_x_orig = slicesize[1] - overlap
min_x_new = overlap
if (y_running == 0):
max_y_orig = slicesize[0] - overlap
min_y_new = overlap
if (max_y_orig > y):
max_y_orig = y
max_y_new = y - y_running
if (max_x_orig > x):
max_x_orig = x
max_x_new = x - x_running
if (x < (slicesize[1]-overlap)):
max_x_new = max_x_new + overlap
if (y < (slicesize[0]-overlap)):
max_y_new = max_y_new + overlap
# New: only use half of the overlap
if (y_running != 0):
min_y_new = min_y_new + int(overlap/2)
min_y_orig = min_y_orig + int(overlap/2)
if (x_running != 0):
min_x_new = min_x_new + int(overlap/2)
min_x_orig = min_x_orig + int(overlap/2)
img_to_save[min_y_orig:max_y_orig, min_x_orig:max_x_orig] = predictions[running_ind][min_y_new:max_y_new, min_x_new:max_x_new]
mask_to_save[min_y_orig:max_y_orig, min_x_orig:max_x_orig] = predictions[running_ind][min_y_new:max_y_new, min_x_new:max_x_new]>0.5
running_ind = running_ind + 1
except:
e=1
y_running = y_running + slicesize[0] - 2 * overlap
x_running = x_running + slicesize[1] - 2 * overlap
if (rescale):
img_to_save = self.upscale_image(img_to_save,(images[i].shape[0],images[i].shape[1]),config=config)
mask_to_save = self.upscale_mask(mask_to_save,(images[i].shape[0],images[i].shape[1]))
img_to_return.append(img_to_save)
if label_output:
print("Labeling output ...")
mask_labeled = label(self.postprocess_mask(mask_to_save).astype(np.uint8))
if dilate_objects:
for i in tqdm(np.unique(mask_labeled)):
if i>0:
#print("Dilate object!")
tmp = mask_labeled == i
tmp = dilation(tmp,disk(dilate_objects))
mask_labeled[np.where(tmp>0)] = 0
mask_labeled += tmp*i
mask_to_return.append(mask_labeled)
else:
mask_tmp = self.postprocess_mask(mask_to_save)
if dilate_objects:
print ("Dilating objects ...")
                    for i in np.unique(mask_tmp):
if i>0:
tmp = mask_tmp == i
tmp = dilation(tmp,disk(dilate_objects))
mask_tmp[np.where(tmp>0)] = 0
mask_tmp += tmp
mask_to_return.append(mask_tmp)
del img_to_save
return img_to_return, mask_to_return
def postprocess_mask(self,mask,threshold=20):
mask = label(mask)
print ("Postprocessing mask ...")
for i in tqdm(np.unique(mask)):
if i>0:
if ((mask==i).sum() < threshold):
mask[mask==i] = 0
return mask>0
def rescale_mask(self, image, scale,make_labels=None):
x_factor = int(image.shape[0] * 1 / (scale[0] / self.MEAN_NUCLEI_SIZE))
y_factor = int(image.shape[1] * 1 / (scale[0] / self.MEAN_NUCLEI_SIZE))
im_new = np.zeros([x_factor, y_factor], dtype=np.uint8)
for i in tqdm(range(1,image.max()+1)):
if make_labels:
im_new = im_new + i * (ski_transform.resize(image==i, (x_factor,y_factor),mode='reflect')>0.5)
else:
im_new = im_new + (ski_transform.resize(image==i, (x_factor,y_factor),mode='reflect')>0.5)
return im_new
def upscale_mask(self,image,scale):
image = scipy.ndimage.label(image)[0]
im_new = np.zeros([scale[0], scale[1]], dtype=np.float32)
for i in tqdm(range(1,image.max()+1)):
im_new = im_new + (ski_transform.resize(image==i, (scale[0],scale[1]),mode='reflect')>0.5)
return im_new
#def rescale_image(self,image,x_factor,y_factor):
def rescale_image(self, image, scale):
x_factor = int(image.shape[0] * 1 / (scale[0] / self.MEAN_NUCLEI_SIZE))
y_factor = int(image.shape[1] * 1 / (scale[0] / self.MEAN_NUCLEI_SIZE))
return np.float32(ski_transform.resize(image, (x_factor,y_factor), mode='reflect'))
def upscale_image(self, image, scale,config=None):
if config.net == 'maskrcnn':
return np.float32(ski_transform.resize(image>0, (scale[0], scale[1]), mode='reflect'))>0
else:
return np.float32(ski_transform.resize(image, (scale[0],scale[1]), mode='reflect'))
def getNormalizedScales(self,masks):
scales = []
for mask in tqdm(masks):
#scales.append(int(self.getMeanMaskObjectSize(mask)))
scales.append(int(mask.getMeanMaskObjectSize()))
# Scale groundtruth to be between 0 and 1
print("Scale grountruth to be between 0 and 1 ...")
max_nucl_size = 170
scales_for_conv = [float(x) / max_nucl_size for x in scales]
for i in range(scales_for_conv.__len__()):
if (scales_for_conv[i] > 1):
scales_for_conv[i] = 1
return scales_for_conv
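        # Worked example (illustrative numbers): a mean nucleus size of 85 px
        # maps to 85 / 170 = 0.5, while any size of 170 px or more is clipped
        # to 1.0 by the loop above.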
def createTisquantLikeDataset(self,path,output):
print(path)
image_list = glob.glob(os.path.join(path,'results','normal','images','*-outputs.png'))
mask_list = glob.glob(os.path.join(path,'ArtToNat','running','normal','masks','*.tif'))
print(image_list)
print(mask_list)
def elastic_transformations(self,alpha, sigma, image_shape, rng=np.random.RandomState(42),
interpolation_order=1):
"""Returns a function to elastically transform multiple images."""
# Good values for:
# alpha: 2000
# sigma: between 40 and 60
"""`images` is a numpy array of shape (K, M, N) of K images of size M*N."""
# Take measurements
# image_shape = images[0].shape
# Make random fields
# random.seed(nbr_seed)
dx = rng.uniform(-1, 1, image_shape) * alpha
dy = rng.uniform(-1, 1, image_shape) * alpha
# Smooth dx and dy
sdx = gaussian_filter(dx, sigma=sigma, mode='reflect')
sdy = gaussian_filter(dy, sigma=sigma, mode='reflect')
# Make meshgrid
x, y = np.meshgrid(np.arange(image_shape[1]), np.arange(image_shape[0]))
def _elastic_transform_2D(image):
# Distort meshgrid indices
distorted_indices = (y + sdy).reshape(-1, 1), \
(x + sdx).reshape(-1, 1)
# Map cooordinates from image to distorted index set
transformed_image = map_coordinates(image, distorted_indices, mode='reflect',
order=interpolation_order).reshape(image_shape)
return transformed_image
return _elastic_transform_2D
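    # Example usage (illustrative): build one warp function and apply it to the
    # image and to each mask label so that the same random field keeps the
    # annotations aligned, e.g.
    #   warp = self.elastic_transformations(2000, 60, img.shape)
    #   img_warped = warp(img)
    #   mask_warped = warp((mask == label_id) * 255)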
def enhanceImage(self,img,flip_left_right=None,flip_up_down=None,deform=None):
img_list = []
img_list.append(img)
try:
xrange
except NameError:
xrange = range
# flipping
if flip_left_right:
for i in xrange(0,img_list.__len__()):
x = img_list[i].getRaw()
y = img_list[i].getMask()
x = np.fliplr(x)
y = np.fliplr(y)
img_new = AnnotatedImage()
img_new.createWithArguments(x,y)
img_list.append(img_new)
if flip_up_down:
for i in xrange(0, img_list.__len__()):
x = img_list[i].getRaw()
y = img_list[i].getMask()
x = np.flipud(x)
y = np.flipud(y)
img_new = AnnotatedImage()
img_new.createWithArguments(x,y)
img_list.append(img_new)
if deform:
for i in xrange(0, img_list.__len__()):
x = img_list[i].getRaw()
y = img_list[i].getMask()
for t in xrange(0,5):
def_func = self.elastic_transformations(2000, 60, x.shape)
x = def_func(x)
y_new = np.zeros((y.shape[0],y.shape[1]),dtype=np.uint16)
for z in xrange(0,y.max()+1):
y_tmp = def_func((y==z)*255)
y_new = y_new + (z * (y_tmp==255)).astype(np.uint16)
y=y_new
img_new = AnnotatedImage()
img_new.createWithArguments(x,y)
img_list.append(img_new)
return img_list
def arbitraryEnhance(self,annotated_image):
x = annotated_image.getRaw()
y = annotated_image.getMask()
try:
xrange
except NameError:
xrange = range
if randint(0,1): # flip horizontally
x = np.fliplr(x)
y = np.fliplr(y)
if randint(0,1): # flipping vertically
x = np.flipud(x)
y = np.flipud(y)
if 0: #randint(0,1): # deform
def_func = self.elastic_transformations(2000, 60, x.shape)
x = def_func(x)
y_new = np.zeros((y.shape[0],y.shape[1]),dtype=np.uint16)
for z in xrange(0,y.max()+1):
y_tmp = def_func((y==z)*255)
y_new = y_new + (z * (y_tmp==255)).astype(np.uint16)
y=y_new
if randint(0,1): # rotate
x_rot = np.zeros_like(x)
y_rot = np.zeros_like(y)
rot_angle = np.random.randint(-90, 90)
x = trf.rotate(x, rot_angle)
y = trf.rotate(y.squeeze(), rot_angle, order=0)
if randint(0, 1): # enhance brightness
x[x<0] = 0.0
x[x>1.0] = 1.0
x = x + uniform(-np.absolute(0.3-x.mean()),np.absolute(0.3-x.mean()))
#img = Image.fromarray(skimage.img_as_ubyte(x))
#contrast = ImageEnhance.Brightness(img)
#contrast = contrast.enhance(np.random.uniform(0.5,1.5))
#x = np.asarray(contrast).astype(np.float32)
x[x<0] = 0
x[x > 1] = 1.0
if randint(0,1): # gaussian
x = x * 255.0
x = x + np.random.normal(0, 2, [x.shape[0], x.shape[1]])
x[x<0] = 0
x[x > 255] = 255
x = x / 255.0
if randint(0,1): #blur
x = x * 255.0
kernel_size = np.random.randint(1,3)
if (kernel_size%2 == 0):
kernel_size = kernel_size+1;
x = cv2.GaussianBlur(x,(kernel_size,kernel_size),0)
x[x<0] = 0
x[x > 255] = 255
x = x / 255.0
if randint(0,1):
range_scale = uniform(0.8,1.2)
x = ski_transform.resize(x, (int(x.shape[0] * range_scale), int(x.shape[1] * range_scale)), mode='reflect')
y = (ski_transform.resize(y, (int(y.shape[0] * range_scale), int(y.shape[1] * range_scale)), mode='reflect')>0.5)
img_new = AnnotatedImage()
img_new.createWithArguments(x,y)
return img_new
class SVGTools:
svg_str = ''
height=None
width=None
samplingrate = None
def __init__(self,samplingrate=10):
self.samplingrate = int(samplingrate)
def openSVG(self,height,width):
self.height=height
self.width=width
self.svg_str = '<svg height="' + str(height) + '" width="' + str(width) + '" x="0px" y="0px">\n'
def closeSVG(self):
self.svg_str = self.svg_str + '</svg>\n'
def writeToPath(self,path):
file = open(path,'w')
file.write(self.svg_str)
file.close()
def addRawImage(self,name=None,img_path=None):
self.svg_str += '<g id="' + name + '">\n'
self.svg_str = self.svg_str + '\t<image xlink:href = "' + img_path + '" x = "0" y = "0" height = "' + str(self.height) + 'px" width = "' + str(self.width) + 'px" />'
self.svg_str += "\n</g>\n"
def addMaskLayer(self,mask,name,color,opacity):
svg_str = ''
contours = []
for i in range (1,mask.max()+1):
if ((mask==i).sum() > 0):
contours.append(measure.find_contours(mask==i, 0.5))
svg_str = '<g id="' + name + '" opacity="' + str(opacity) + '">'
for index, contour in enumerate(contours):
svg_str = svg_str + '\t<polygon fill="' + color + '" stroke="#800080" points="'
for i in range(0,contour[0].__len__(),self.samplingrate):
svg_str = svg_str + str(int(contour[0][i, 1])) + ',' + str(int(contour[0][i, 0])) + ' '
#svg_str = svg_str +'" style="fill:lime;stroke:purple;stroke-width:1" />\n'
svg_str = svg_str + '" style="stroke:purple;stroke-width:1" />\n'
self.svg_str = self.svg_str + svg_str + '</g>\n'
def getSVGMask(self,img_path=None):
contours = []
for i in range (1,self.mask.max()):
if ((self.mask==i).sum() > 0):
contours.append(measure.find_contours(self.mask==i, 0.5))
#contours = measure.find_contours(self.mask, 1)
svg_str = ''
svg_str = svg_str + '<svg height="' + str(self.mask.shape[0]) + '" width="' + str(self.mask.shape[1]) + '">\n'
for index, contour in enumerate(contours):
svg_str = svg_str + '\t<polygon points="'
for i in range(0,contour[0].__len__(),5):
svg_str = svg_str + str(int(contour[0][i, 1])) + ',' + str(int(contour[0][i, 0])) + ' '
svg_str = svg_str +'" style="fill:lime;stroke:purple;stroke-width:1" />\n'
if img_path:
svg_str = svg_str + '<image xlink:href = "' + img_path + '" x = "0" y = "0" height = "' + str(self.mask.shape[0]) + 'px" width = "' + str(self.mask.shape[1]) + 'px" />'
svg_str = svg_str + '</svg>\n'
return svg_str
def transformSVGToMask(self,path):
print(path)
tree = ET.parse(path)
root = tree.getroot()
#img = np.zeros((root.get("width"),root.get("height")),astype=np.uint8)
image = Image.new("L", (int(root.get("width").split('px')[0]),int(root.get("height").split('px')[0])))
draw = ImageDraw.Draw(image)
for i in range(0,root[3].getchildren().__len__()):
points = []
try:
points_tmp = root[3].getchildren()[i].get("points").split(' ')
for t in points_tmp:
try:
x,y = t.split(',')
points.append((round(float(x)),round(float(y))))
except:
None
except:
None
if points:
draw.polygon((points), fill=i+1)
img = np.array(image)
return img
def transformSVGToMaskNew(self,path):
print(path)
tree = ET.parse(path)
root = tree.getroot()
img = np.zeros((int(root.get("height").split('px')[0]),int(root.get("width").split('px')[0])),dtype=np.uint16)
labels = np.zeros((root[3].getchildren().__len__()))
for i in range(0,root[3].getchildren().__len__()):
labels[i] = i+1
np.random.shuffle(labels)
for i in range(0,root[3].getchildren().__len__()):
image = Image.new("L", (int(root.get("width").split('px')[0]), int(root.get("height").split('px')[0])))
draw = ImageDraw.Draw(image)
points = []
try:
points_tmp = root[3].getchildren()[i].get("points").split(' ')
for t in points_tmp:
try:
x,y = t.split(',')
points.append((round(float(x)),round(float(y))))
except:
None
except:
None
if points:
draw.polygon((points), fill=i+1)
img_tmp = np.array(image)
img[np.where((img_tmp>0).astype(np.uint8) == 1)] = 0
img = img + (img_tmp>0).astype(np.uint16) * labels[i]
return img
|
the-stack_106_15454
|
from django.shortcuts import render
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_protect
from psprensa.utilities import toString, infoTableBINs, buscandoNumeroBIN
def index(request):
return render(request,'viewsPrensa/index.html')
@csrf_protect
def consulta_bin(request):
numerobin=""
retorno = " "
if(request.method =="POST"):
numerobin = request.POST["codigobin"]
if(numerobin.isnumeric()):
if(len(numerobin) == 6):
retorno = buscandoNumeroBIN(numerobin)
else:
retorno = "Erro de Validação: Digite um código que contenha apenas 6 digítos númericos!"
else:
retorno = "Erro de Digitação: Digite apenas números!"
print(retorno)
return render(request,'viewsPrensa/index.html',{'mensagemRetorno': retorno})
|
the-stack_106_15455
|
import datetime
import threading
import time
import socket
import random
import struct
import ipaddress
import logging
import json
from collections import defaultdict
import scapy.all as scapyall
import ptf.testutils as testutils
from tests.ptf_runner import ptf_runner
TCP_DST_PORT = 5000
SOCKET_RECV_BUFFER_SIZE = 10 * 1024 * 1024
PTFRUNNER_QLEN = 1000
VLAN_INDEX = 0
VLAN_HOSTS = 100
VLAN_BASE_MAC_PATTERN = "72060001{:04}"
LAG_BASE_MAC_PATTERN = '5c010203{:04}'
logger = logging.getLogger(__name__)
class DualTorIO:
def __init__(self, activehost, standbyhost, ptfhost, ptfadapter, tbinfo,
io_ready, tor_vlan_port=None):
self.tor_port = None
self.tor_vlan_port = tor_vlan_port
self.duthost = activehost
self.ptfadapter = ptfadapter
self.ptfhost = ptfhost
self.tbinfo = tbinfo
self.io_ready_event = io_ready
self.dut_mac = self.duthost.facts["router_mac"]
self.active_mac = self.dut_mac
if standbyhost:
self.standby_mac = standbyhost.facts["router_mac"]
self.mux_cable_table = self.duthost.get_running_config_facts()['MUX_CABLE']
if tor_vlan_port:
if tor_vlan_port in self.mux_cable_table:
self.downstream_dst_ip = self.mux_cable_table[tor_vlan_port]['server_ipv4'].split("/")[0]
else:
logger.error("Port {} not found in MUX cable table".format(tor_vlan_port))
else:
self.downstream_dst_ip = None
self.time_to_listen = 180.0
self.sniff_time_incr = 60
self.send_interval = 0.0035 # Inter-packet interval
# How many packets to be sent by sender thread
self.packets_to_send = min(int(self.time_to_listen /
(self.send_interval + 0.0015)), 45000)
self.dataplane = self.ptfadapter.dataplane
self.dataplane.flush()
self.total_disrupt_time = None
self.disrupts_count = None
self.total_disrupt_packets = None
self.max_lost_id = None
self.max_disrupt_time = None
self.received_counter = int()
self.lost_packets = dict()
self.duplicated_packets_count = int()
self.total_lost_packets = None
# This list will contain all unique Payload ID, to filter out received floods.
self.unique_id = set()
mg_facts = self.duthost.get_extended_minigraph_facts(self.tbinfo)
prefix_len = mg_facts['minigraph_vlan_interfaces'][VLAN_INDEX]['prefixlen'] - 3
test_network = ipaddress.ip_address(
mg_facts['minigraph_vlan_interfaces'][VLAN_INDEX]['addr']) +\
(1 << (32 - prefix_len))
self.default_ip_range = str(ipaddress.ip_interface(unicode(
str(test_network) + '/{0}'.format(prefix_len))).network)
self.src_addr, mask = self.default_ip_range.split('/')
self.n_hosts = 2**(32 - int(mask))
self.port_indices = mg_facts['minigraph_ptf_indices']
portchannel_info = mg_facts['minigraph_portchannels']
self.port_channel_ports = dict()
for pc in portchannel_info.values():
for member in pc['members']:
self.port_channel_ports.update({member: self.port_indices[member]})
self.server_ip_list = list()
self.vlan_interfaces = mg_facts["minigraph_vlan_interfaces"][VLAN_INDEX]
self.vlan_network = self.vlan_interfaces["subnet"]
self.vlan_ports = dict()
for ifname in mg_facts["minigraph_vlans"].values()[VLAN_INDEX]["members"]:
self.vlan_ports.update({ifname: self.port_indices[ifname]})
self.vlan_host_map = self._generate_vlan_servers()
self.__configure_arp_responder()
vlan_table = self.duthost.get_running_config_facts()['VLAN']
vlan_name = list(vlan_table.keys())[0]
self.vlan_mac = vlan_table[vlan_name]['mac']
logger.info("VLAN ports: {}".format(str(self.vlan_ports.keys())))
logger.info("PORTCHANNEL ports: {}".format(str(self.port_channel_ports.keys())))
def _generate_vlan_servers(self):
"""
@summary: Generates physical port maps which is a set of IP address and
their associated MAC addresses
- MACs are generated sequentially as offsets from VLAN_BASE_MAC_PATTERN
- IP addresses are randomly selected from the given VLAN network
- "Hosts" (IP/MAC pairs) are distributed evenly amongst the ports in the VLAN
"""
for _, config in self.mux_cable_table.items():
self.server_ip_list.append(str(config['server_ipv4'].split("/")[0]))
logger.info("ALL server address:\n {}".format(self.server_ip_list))
vlan_host_map = defaultdict(dict)
addr_list = list(self.server_ip_list)
for _, i in enumerate(range(2, len(self.server_ip_list) + 2)):
port = self.vlan_ports.values()[i % len(self.vlan_ports.values())]
addr = random.choice(addr_list)
# Ensure that we won't get a duplicate ip address
addr_list.remove(addr)
vlan_host_map[port] = [str(addr)]
return vlan_host_map
def __configure_arp_responder(self):
"""
@summary: Generate ARP responder configuration using vlan_host_map.
Copy this configuration to PTF and restart arp_responder
"""
arp_responder_conf = {}
for port in self.vlan_host_map:
arp_responder_conf['eth{}'.format(port)] = self.vlan_host_map[port]
with open("/tmp/from_t1.json", "w") as fp:
json.dump(arp_responder_conf, fp)
self.ptfhost.copy(src="/tmp/from_t1.json", dest="/tmp/from_t1.json")
self.ptfhost.shell("supervisorctl reread && supervisorctl update")
self.ptfhost.shell("supervisorctl restart arp_responder")
logger.info("arp_responder restarted")
def start_io_test(self, traffic_generator=None):
"""
@summary: The entry point to start the TOR dataplane I/O test.
Args:
traffic_generator (function): A callback function to decide the
traffic direction (T1 to server / server to T1)
Allowed values: self.generate_from_t1_to_server or
self.generate_from_server_to_t1
"""
# Check in a conditional for better readability
if traffic_generator == self.generate_from_t1_to_server:
self.generate_from_t1_to_server()
elif traffic_generator == self.generate_from_server_to_t1:
self.generate_from_server_to_t1()
else:
logger.error("Traffic generator not provided or invalid")
return
# start and later join the sender and sniffer threads
self.send_and_sniff(sender=self.traffic_sender_thread,
sniffer=self.traffic_sniffer_thread)
# Sender and sniffer have finished the job. Start examining the collected flow
self.examine_flow()
if self.lost_packets:
self.no_routing_stop, self.no_routing_start =\
datetime.datetime.fromtimestamp(self.no_routing_stop),\
datetime.datetime.fromtimestamp(self.no_routing_start)
logger.error("The longest disruption lasted %.3f seconds."\
"%d packet(s) lost." % (self.max_disrupt_time, self.max_lost_id))
logger.error("Total disruptions count is %d. All disruptions lasted "\
"%.3f seconds. Total %d packet(s) lost" % \
(self.disrupts_count, self.total_disrupt_time, self.total_disrupt_packets))
def generate_from_t1_to_server(self):
"""
@summary: Generate (not send) the packets to be sent from T1 to server
"""
eth_dst = self.dut_mac
eth_src = self.ptfadapter.dataplane.get_mac(0, 0)
ip_ttl = 255
tcp_dport = TCP_DST_PORT
if self.tor_port:
from_tor_src_port = self.tor_port
else:
from_tor_src_port = random.choice(self.port_channel_ports.keys())
from_tor_src_port_index = None
for port_name, ptf_port_index in self.port_channel_ports.items():
if port_name == from_tor_src_port:
from_tor_src_port_index = ptf_port_index
break
if from_tor_src_port_index is None:
logger.error("Port index {} not found in the list of port channel ports {}"\
.format(from_tor_src_port, self.port_channel_ports.values()))
logger.info("-"*20 + "T1 to server packet" + "-"*20)
logger.info("Source port: {}".format(from_tor_src_port))
logger.info("Ethernet address: dst: {} src: {}".format(eth_dst, eth_src))
if self.downstream_dst_ip:
server_ip_list = [self.downstream_dst_ip]
logger.info("IP address: dst: {} src: random".format(self.downstream_dst_ip))
else:
server_ip_list = self.server_ip_list
logger.info("IP address: dst: random src: random")
logger.info("TCP port: dst: {}".format(tcp_dport))
logger.info("DUT mac: {}".format(self.dut_mac))
logger.info("VLAN mac: {}".format(self.vlan_mac))
logger.info("-"*50)
self.packets_list = []
for i in range(self.packets_to_send):
tcp_tx_packet = testutils.simple_tcp_packet(
eth_dst=eth_dst,
eth_src=eth_src,
ip_dst=random.choice(server_ip_list),
ip_src=self.random_host_ip(),
ip_ttl=ip_ttl,
tcp_dport=tcp_dport)
payload = str(i) + 'X' * 60
packet = scapyall.Ether(str(tcp_tx_packet))
packet.load = payload
self.packets_list.append((from_tor_src_port_index, str(packet)))
self.sent_pkt_dst_mac = self.dut_mac
self.received_pkt_src_mac = [self.vlan_mac]
def generate_from_server_to_t1(self):
"""
@summary: Generate (not send) the packets to be sent from server to T1
"""
eth_src = self.ptfadapter.dataplane.get_mac(0, 0)
if self.tor_vlan_port:
from_server_src_port = self.tor_vlan_port
else:
from_server_src_port = random.choice(self.vlan_ports.values())
self.from_server_src_addr = random.choice(
self.vlan_host_map[from_server_src_port])
self.from_server_dst_addr = self.random_host_ip()
tcp_dport = TCP_DST_PORT
tcp_tx_packet = testutils.simple_tcp_packet(
eth_dst=self.vlan_mac,
eth_src=eth_src,
ip_src=self.from_server_src_addr,
ip_dst=self.from_server_dst_addr,
tcp_dport=tcp_dport
)
logger.info("-"*20 + "Server to T1 packet" + "-"*20)
logger.info("Source port: {}".format(from_server_src_port))
logger.info("Ethernet address: dst: {} src: {}".format(self.vlan_mac, eth_src))
logger.info("IP address: dst: {} src: {}".format(self.from_server_dst_addr,
self.from_server_src_addr))
logger.info("TCP port: dst: {} src: 1234".format(tcp_dport))
logger.info("Active ToR MAC: {}, Standby ToR MAC: {}".format(self.active_mac,
self.standby_mac))
logger.info("VLAN MAC: {}".format(self.vlan_mac))
logger.info("-"*50)
self.packets_list = []
for i in range(self.packets_to_send):
payload = str(i) + 'X' * 60
packet = scapyall.Ether(str(tcp_tx_packet))
packet.load = payload
self.packets_list.append((from_server_src_port, str(packet)))
self.sent_pkt_dst_mac = self.vlan_mac
self.received_pkt_src_mac = [self.active_mac, self.standby_mac]
def random_host_ip(self):
"""
@summary: Helper method to find a random host IP for generating a random src/dst IP address
Returns:
host_ip (str): Random IP address
"""
host_number = random.randint(2, self.n_hosts - 2)
if host_number > (self.n_hosts - 2):
raise Exception("host number {} is greater than number of hosts {}\
in the network {}".format(
host_number, self.n_hosts - 2, self.default_ip_range))
src_addr_n = struct.unpack(">I", socket.inet_aton(self.src_addr))[0]
net_addr_n = src_addr_n & (2**32 - self.n_hosts)
host_addr_n = net_addr_n + host_number
host_ip = socket.inet_ntoa(struct.pack(">I", host_addr_n))
return host_ip
def send_and_sniff(self, sender, sniffer):
"""
@summary: This method starts and joins two background threads in parallel: sender and sniffer
"""
self.sender_thr = threading.Thread(target=sender)
self.sniff_thr = threading.Thread(target=sniffer)
self.sniffer_started = threading.Event()
self.sniff_thr.start()
self.sender_thr.start()
self.sniff_thr.join()
self.sender_thr.join()
def traffic_sender_thread(self):
"""
@summary: Generalized Sender thread (to be used for traffic in both directions)
Waits for a signal from the `traffic_sniffer_thread` before actually starting.
This is to make sure that that packets are not sent before they are ready to be captured.
"""
logger.info("Sender waiting to send {} packets".format(len(self.packets_list)))
self.sniffer_started.wait(timeout=10)
sender_start = datetime.datetime.now()
logger.info("Sender started at {}".format(str(sender_start)))
# Signal data_plane_utils that sender and sniffer threads have begun
self.io_ready_event.set()
for entry in self.packets_list:
time.sleep(self.send_interval)
testutils.send_packet(self.ptfadapter, *entry)
logger.info("Sender has been running for {}".format(
str(datetime.datetime.now() - sender_start)))
def traffic_sniffer_thread(self):
"""
@summary: Generalized sniffer thread (to be used for traffic in both directions)
Starts `scapy_sniff` thread, and waits for its setup before signalling the sender thread to start
"""
wait = self.time_to_listen + self.sniff_time_incr
sniffer_start = datetime.datetime.now()
logger.info("Sniffer started at {}".format(str(sniffer_start)))
sniff_filter = "tcp and tcp dst port {} and tcp src port 1234 and not icmp".format(TCP_DST_PORT)
scapy_sniffer = threading.Thread(target=self.scapy_sniff, kwargs={'sniff_timeout': wait,
'sniff_filter': sniff_filter})
scapy_sniffer.start()
time.sleep(2) # Let the scapy sniff initialize completely.
self.sniffer_started.set() # Unblock waiter for the send_in_background.
scapy_sniffer.join()
logger.info("Sniffer has been running for {}".format(str(datetime.datetime.now() - sniffer_start)))
self.sniffer_started.clear()
def scapy_sniff(self, sniff_timeout=180, sniff_filter=''):
"""
@summary: PTF runner - runs a sniffer in PTF container.
Running sniffer in sonic-mgmt container has missing SOCKET problem
and permission issues (scapy and tcpdump require root user)
The remote function listens on all ports. Once found, all packets
are dumped to local pcap file, and all packets are saved to
self.all_packets as scapy type.
Args:
sniff_timeout (int): Duration in seconds to sniff the traffic
sniff_filter (str): Filter that Scapy will use to collect only relevant packets
"""
capture_pcap = '/tmp/capture.pcap'
sniffer_log = '/tmp/dualtor-sniffer.log'
result = ptf_runner(
self.ptfhost,
"ptftests",
"dualtor_sniffer.Sniff",
qlen=PTFRUNNER_QLEN,
platform_dir="ptftests",
platform="remote",
params={
"sniff_timeout" : sniff_timeout,
"sniff_filter" : sniff_filter,
"capture_pcap": capture_pcap,
"sniffer_log": sniffer_log,
"port_filter_expression": 'not (arp and ether src {})\
and not tcp'.format(self.dut_mac)
},
log_file=sniffer_log,
module_ignore_errors=False
)
logger.debug("Ptf_runner result: {}".format(result))
logger.info('Fetching log files from ptf and dut hosts')
logs_list = [
{'src': sniffer_log, 'dest': '/tmp/', 'flat': True, 'fail_on_missing': False},
{'src': capture_pcap, 'dest': '/tmp/', 'flat': True, 'fail_on_missing': False}
]
for log_item in logs_list:
self.ptfhost.fetch(**log_item)
self.all_packets = scapyall.rdpcap(capture_pcap)
logger.info("Number of all packets captured: {}".format(len(self.all_packets)))
def get_total_disruptions(self):
return self.disrupts_count
def get_longest_disruption(self):
return self.max_disrupt_time
def get_total_disrupted_packets(self):
return self.total_disrupt_packets
def get_total_sent_packets(self):
return len(self.packets_list)
def get_total_received_packets(self):
return self.received_counter
def get_total_lost_packets(self):
return self.total_lost_packets
def get_total_disrupt_time(self):
return self.total_disrupt_time
def get_duplicated_packets_count(self):
return self.duplicated_packets_count
def no_flood(self, packet):
"""
@summary: This method filters packets which are unique (i.e. no floods).
"""
if (not int(str(packet[scapyall.TCP].payload).replace('X',''))in self.unique_id)\
and (packet[scapyall.Ether].src in self.received_pkt_src_mac):
# This is a unique (no flooded) received packet.
self.unique_id.add(int(str(packet[scapyall.TCP].payload).replace('X','')))
return True
elif packet[scapyall.Ether].dst == self.sent_pkt_dst_mac:
# This is a sent packet.
return True
else:
return False
def examine_flow(self):
"""
@summary: This method examines packets collected by sniffer thread
The method compares TCP payloads of the packets one by one (assuming all
payloads are consecutive integers), and the losses if found - are treated
as disruptions in Dataplane forwarding. All disruptions are saved to
self.lost_packets dictionary, in format:
disrupt_start_id = (missing_packets_count, disrupt_time,
disrupt_start_timestamp, disrupt_stop_timestamp)
"""
examine_start = datetime.datetime.now()
logger.info("Packet flow examine started {}".format(str(examine_start)))
if not self.all_packets:
logger.error("self.all_packets not defined.")
return None
# Filter out packets and remove floods:
filtered_packets = [ pkt for pkt in self.all_packets if
scapyall.TCP in pkt and
not scapyall.ICMP in pkt and
pkt[scapyall.TCP].sport == 1234 and
pkt[scapyall.TCP].dport == TCP_DST_PORT and
self.check_tcp_payload(pkt) and
self.no_flood(pkt)
]
logger.info("Number of filtered packets captured: {}".format(len(filtered_packets)))
# Re-arrange packets, if delayed, by Payload ID and Timestamp:
packets = sorted(filtered_packets, key = lambda packet: (
int(str(packet[scapyall.TCP].payload).replace('X','')), packet.time ))
self.max_disrupt, self.total_disruption = 0, 0
if not packets or len(packets) == 0:
logger.error("Sniffer failed to capture any traffic")
return
else:
logger.info("Measuring traffic disruptions..")
filename = '/tmp/capture_filtered.pcap'
scapyall.wrpcap(filename, packets)
logger.info("Filtered pcap dumped to {}".format(filename))
self.examine_each_packet(packets)
self.disrupts_count = len(self.lost_packets) # Total disrupt counter.
if self.lost_packets:
# Find the longest loss with the longest time:
_, (self.max_lost_id, self.max_disrupt_time, self.no_routing_start,
self.no_routing_stop) = \
max(self.lost_packets.items(), key = lambda item:item[1][0:2])
self.total_disrupt_packets = sum([item[0] for item in self.lost_packets.values()])
self.total_disrupt_time = sum([item[1] for item in self.lost_packets.values()])
elif self.total_lost_packets == 0:
self.max_lost_id = 0
self.max_disrupt_time = 0
self.total_disrupt_packets = 0
self.total_disrupt_time = 0
logger.info("Gaps in forwarding not found.")
logger.info("Packet flow examine finished after {}".format(
str(datetime.datetime.now() - examine_start)))
logger.info("Total number of filtered incoming packets captured {}".format(
self.received_counter))
logger.info("Number of duplicated packets received: {}".format(
self.duplicated_packets_count))
logger.info("Number of packets lost: {}".format(self.total_lost_packets))
def examine_each_packet(self, packets):
lost_packets = dict()
sent_packets = dict()
duplicated_packets_count = 0
prev_payload, prev_time = None, None
sent_payload = 0
disruption_start, disruption_stop = None, None
received_counter = 0 # Counts packets from dut.
for packet in packets:
if packet[scapyall.Ether].dst == self.sent_pkt_dst_mac:
# This is a sent packet - keep track of it as payload_id:timestamp.
sent_payload = int(str(packet[scapyall.TCP].payload).replace('X',''))
sent_packets[sent_payload] = packet.time
continue
if packet[scapyall.Ether].src in self.received_pkt_src_mac:
# This is a received packet.
received_time = packet.time
received_payload = int(str(packet[scapyall.TCP].payload).replace('X',''))
if received_payload == prev_payload:
                    # Account for packet duplication and keep looking for a new,
                    # unique received packet.
duplicated_packets_count = duplicated_packets_count + 1
continue
received_counter += 1
                if prev_payload is None:
                    # This is the first valid received packet.
prev_payload = received_payload
prev_time = received_time
continue
if received_payload - prev_payload > 1:
# Packets in a row are missing, a disruption.
lost_id = (received_payload - 1) - prev_payload # How many packets lost in a row.
# How long disrupt lasted.
disrupt = (sent_packets[received_payload] - sent_packets[prev_payload + 1])
# Add disruption to the lost_packets dict:
lost_packets[prev_payload] = (lost_id, disrupt, received_time - disrupt, received_time)
logger.info("Disruption between packet ID %d and %d. For %.4f " % (
prev_payload, received_payload, disrupt))
if not disruption_start:
disruption_start = datetime.datetime.fromtimestamp(prev_time)
disruption_stop = datetime.datetime.fromtimestamp(received_time)
prev_payload = received_payload
prev_time = received_time
self.total_lost_packets = len(sent_packets) - received_counter
self.received_counter = received_counter
self.lost_packets = lost_packets
self.duplicated_packets_count = duplicated_packets_count
if self.received_counter == 0:
logger.error("Sniffer failed to filter any traffic from DUT")
if self.lost_packets:
logger.info("Disruptions happen between {} and {}.".format(
str(disruption_start), str(disruption_stop)))
    def check_tcp_payload(self, packet):
        """
        @summary: Helper method
            Returns: Bool: True if the packet is not corrupted and carries a valid,
            sequential TCP payload ID (within the range of packets sent).
        """
        try:
            payload_id = int(str(packet[scapyall.TCP].payload).replace('X', ''))
            return payload_id in range(self.packets_to_send)
        except Exception:
            return False
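# A minimal sketch (illustrative, not part of the test) of the payload convention the
# parsing above relies on: each probe carries an integer ID padded with 'X' characters,
# which is why every extraction strips 'X' before casting to int.
#
#   raw_payload = "73" + "X" * 58          # probe #73, padded to a fixed length
#   assert int(raw_payload.replace("X", "")) == 73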
|
the-stack_106_15456
|
from IPython.core.display import HTML
from IPython.display import display
import numpy as np
import random
from collections import OrderedDict
import pyqtgraph as pg
from qtpy.QtWidgets import QMainWindow, QProgressBar, QVBoxLayout, QTableWidgetSelectionRange, QTableWidgetItem
from qtpy import QtGui
from __code import load_ui
import os
from NeuNorm.normalization import Normalization
from __code.ui_roi_selection import Ui_MainWindow as UiMainWindow
from __code.config import percentage_of_images_to_use_for_roi_selection, \
minimum_number_of_images_to_use_for_roi_selection
class Interface(QMainWindow):
roi_width = 0.01
roi_selected = {} #nice formatting of list_roi for outside access
list_of_files = None
live_data = []
o_norm = None
roi_column_width = 70
integrated_image = None
integrated_image_size = {'width': -1, 'height': -1}
    list_roi = {}  # 'row': {'x0': None, 'y0': None, 'x1': None, 'y1': None}
default_roi = {'x0': 0, 'y0': 0, 'x1': 50, 'y1': 50, 'id': None}
def __init__(self, parent=None,
o_norm=None,
list_of_files=None,
percentage_of_data_to_use=None,
callback=None,
display_info_message=True):
if display_info_message:
display(HTML('<span style="font-size: 20px; color:blue">Check UI that popped up \
(maybe hidden behind this browser!)</span>'))
if o_norm:
self.o_norm = o_norm
if list_of_files:
self.list_of_files = list_of_files
if percentage_of_data_to_use is None:
percentage_of_data_to_use = percentage_of_images_to_use_for_roi_selection
self.percentage_of_data_to_use = percentage_of_data_to_use
# method called when leaving the application, if any
self.callback = callback
super(QMainWindow, self).__init__(parent)
ui_full_path = os.path.join(os.path.dirname(os.path.dirname(__file__)),
os.path.join('ui', 'ui_roi_selection.ui'))
self.ui = load_ui(ui_full_path, baseinstance=self)
self.init_statusbar()
self.setWindowTitle("Background ROI Selection Tool")
self.ui.image_view = pg.ImageView()
self.ui.image_view.ui.roiBtn.hide()
self.ui.image_view.ui.menuBtn.hide()
top_layout = QVBoxLayout()
top_layout.addWidget(self.ui.image_view)
self.ui.widget.setLayout(top_layout)
self.init_widgets()
self.integrate_images()
self.display_image()
def init_widgets(self):
nbr_columns = self.ui.table_roi.columnCount()
for _col in range(nbr_columns):
self.ui.table_roi.setColumnWidth(_col, self.roi_column_width)
def init_statusbar(self):
self.eventProgress = QProgressBar(self.ui.statusbar)
self.eventProgress.setMinimumSize(20, 14)
self.eventProgress.setMaximumSize(540, 100)
self.eventProgress.setVisible(False)
self.ui.statusbar.addPermanentWidget(self.eventProgress)
# self.parent.eventProgress = QtGui.QProgressBar(self.ui.statusbar)
# self.parent.eventProgress.setMinimumSize(20, 14)
# self.parent.eventProgress.setMaximumSize(540, 100)
# self.parent.eventProgress.setVisible(False)
# self.ui.statusbar.addPermanentWidget(self.parent.eventProgress)
def __get_recap(self, data_array):
if data_array:
[height, width] = np.shape(data_array[0])
nbr_sample = len(data_array)
else:
nbr_sample = '0'
[height, width] = ['N/A', 'N/A']
return [nbr_sample, height, width]
def __built_html_table_row_3_columns(self, name, nbr, height, width):
_html = '<tr><td>' + str(name) + '</td><td>' + str(nbr) + '</td><td>' + str(height) + \
'*' + str(width) + '</td></tr>'
return _html
def recap(self):
"""Display nbr of files loaded and size. This can be used to figure why a normalization failed"""
[nbr_sample, height_sample, width_sample] = self.__get_recap(self.o_norm.data['sample']['data'])
[nbr_ob, height_ob, width_ob] = self.__get_recap(self.o_norm.data['ob']['data'])
[nbr_df, height_df, width_df] = self.__get_recap(self.o_norm.data['df']['data'])
html = '<table><tr><td width="30%"><strong>Type</strong></td><td><strong>Number</strong></td><td>' + \
'<strong>Size (height*width)</strong></td></tr>'
html += self.__built_html_table_row_3_columns('sample', nbr_sample, height_sample, width_sample)
html += self.__built_html_table_row_3_columns('ob', nbr_ob, height_ob, width_ob)
html += self.__built_html_table_row_3_columns('df', nbr_df, height_df, width_df)
html += '</table>'
display(HTML(html))
def integrate_images(self):
percentage_of_data_to_use = self.percentage_of_data_to_use
if self.o_norm:
nbr_files = len(self.o_norm.data['sample']['data'])
else:
nbr_files = len(self.list_of_files)
if nbr_files < minimum_number_of_images_to_use_for_roi_selection:
nbr_files_to_use = nbr_files
else:
nbr_files_to_use = np.int(percentage_of_data_to_use * nbr_files)
if nbr_files_to_use < minimum_number_of_images_to_use_for_roi_selection:
nbr_files_to_use = minimum_number_of_images_to_use_for_roi_selection
random_list = random.sample(range(0, nbr_files), nbr_files_to_use)
if self.o_norm:
list_data_to_use = [self.o_norm.data['sample']['data'][_index] for _index in random_list]
else:
o_norm = Normalization()
list_of_files = np.array(self.list_of_files)
list_of_files = list(list_of_files[random_list])
o_norm.load(file=list_of_files, notebook=True)
list_data_to_use = o_norm.data['sample']['data']
self.integrated_image = np.mean(list_data_to_use, axis=0)
[_height, _width] = np.shape(self.integrated_image)
self.integrated_image_size['height'] = _height
self.integrated_image_size['width'] = _width
def _clean_image(self, image):
_result_inf = np.where(np.isinf(image))
image[_result_inf] = np.NaN
return image
def display_image(self):
_image = np.transpose(self.integrated_image)
_image = self._clean_image(_image)
self.ui.image_view.setImage(_image)
def remove_row_entry(self, row):
_roi_id = self.list_roi[row]['id']
self.ui.image_view.removeItem(_roi_id)
del self.list_roi[row]
#rename row
new_list_roi = {}
new_row_index = 0
for _previous_row_index in self.list_roi.keys():
new_list_roi[new_row_index] = self.list_roi[_previous_row_index]
new_row_index += 1
self.list_roi = new_list_roi
def remove_roi_button_clicked(self):
self.ui.table_roi.blockSignals(True)
_selection = self.ui.table_roi.selectedRanges()
row = _selection[0].topRow()
old_nbr_row = self.ui.table_roi.rowCount()
# remove entry from list of roi
self.remove_row_entry(row)
# update table of rois
self.update_table_roi_ui()
self.ui.table_roi.blockSignals(False)
self.check_add_remove_button_widgets_status()
# update selection
new_nbr_row = self.ui.table_roi.rowCount()
if new_nbr_row == 0:
return
if row == (old_nbr_row-1):
row = new_nbr_row - 1
_new_selection = QTableWidgetSelectionRange(row, 0, row, 3)
self.ui.table_roi.setRangeSelected(_new_selection, True)
def clear_table(self):
nbr_row = self.ui.table_roi.rowCount()
for _row in np.arange(nbr_row):
self.ui.table_roi.removeRow(0)
def update_table_roi_ui(self):
"""Using list_roi as reference, repopulate the table_roi_ui"""
self.ui.table_roi.blockSignals(True)
list_roi = self.list_roi
self.clear_table()
_index_row = 0
for _roi_key in list_roi.keys():
_roi = list_roi[_roi_key]
self.ui.table_roi.insertRow(_index_row)
self._set_item_value(_index_row, 0, _roi['x0'])
# _item = QtGui.QTableWidgetItem(str(_roi['x0']))
# self.ui.table_roi.setItem(_index_row, 0, _item)
self._set_item_value(_index_row, 1, _roi['y0'])
# _item = QtGui.QTableWidgetItem(str(_roi['y0']))
# self.ui.table_roi.setItem(_index_row, 1, _item)
self._set_item_value(_index_row, 2, _roi['x1'])
# _item = QtGui.QTableWidgetItem(str(_roi['x1']))
# self.ui.table_roi.setItem(_index_row, 2, _item)
self._set_item_value(_index_row, 3, _roi['y1'])
# _item = QtGui.QTableWidgetItem(str(_roi['y1']))
# self.ui.table_roi.setItem(_index_row, 3, _item)
_index_row += 1
self.ui.table_roi.blockSignals(False)
#self.ui.table_roi.itemChanged['QTableWidgetItem*'].connect(self.update_table_roi)
def _set_item_value(self, row=0, column=0, value=-1):
_item = QTableWidgetItem(str(value))
self.ui.table_roi.setItem(row, column, _item)
def check_roi_validity(self, value, x_axis=True):
"""Make sure the ROI selected or defined stays within the image size"""
min_value = 0
value = np.int(value)
if x_axis:
max_value = self.integrated_image_size['width']
else:
max_value = self.integrated_image_size['height']
if value < 0:
return min_value
if value > max_value:
return max_value
return value
def update_table_roi(self, item):
"""Using the table_roi_ui as reference, will update the list_roi dictionary"""
self.ui.table_roi.blockSignals(True)
nbr_row = self.ui.table_roi.rowCount()
new_list_roi = OrderedDict()
old_list_roi = self.list_roi
for _row in np.arange(nbr_row):
_roi = {}
# checking that x0, y0, x1 and y1 stay within the range of the image
_x0 = self.check_roi_validity(self._get_item_value(_row, 0))
_y0 = self.check_roi_validity(self._get_item_value(_row, 1), x_axis=False)
_x1 = self.check_roi_validity(self._get_item_value(_row, 2))
_y1 = self.check_roi_validity(self._get_item_value(_row, 3), x_axis=False)
            # updating table content (in case some of the ROIs were out of scope)
self._set_item_value(_row, 0, _x0)
self._set_item_value(_row, 1, _y0)
self._set_item_value(_row, 2, _x1)
self._set_item_value(_row, 3, _y1)
_roi['x0'] = _x0
_roi['y0'] = _y0
_roi['x1'] = _x1
_roi['y1'] = _y1
_roi['id'] = old_list_roi[_row]['id']
new_list_roi[_row] = _roi
self.list_roi = new_list_roi
self.update_image_view_item()
self.ui.table_roi.blockSignals(False)
def update_image_view_item(self):
self.clear_roi_on_image_view()
list_roi = self.list_roi
for _row in list_roi.keys():
_roi = list_roi[_row]
_x0 = np.int(_roi['x0'])
_y0 = np.int(_roi['y0'])
_x1 = np.int(_roi['x1'])
_y1 = np.int(_roi['y1'])
_width = np.abs(_x1 - _x0)
_height = np.abs(_y1 - _y0)
_roi_id = self.init_roi(x0=_x0, y0=_y0,
width=_width, height=_height)
_roi['id'] = _roi_id
list_roi[_row] = _roi
self.list_roi = list_roi
def _get_item_value(self, row, column):
_item = self.ui.table_roi.item(row, column)
if _item:
return str(_item.text())
else:
return ''
def roi_manually_moved(self):
list_roi = self.list_roi
for _row in list_roi.keys():
_roi = list_roi[_row]
roi_id = _roi['id']
region = roi_id.getArraySlice(self.integrated_image, self.ui.image_view.imageItem)
x0 = region[0][0].start
x1 = region[0][0].stop
y0 = region[0][1].start
y1 = region[0][1].stop
_roi['x0'] = x0
_roi['x1'] = x1
_roi['y0'] = y0
_roi['y1'] = y1
list_roi[_row] = _roi
self.list_roi = list_roi
self.update_table_roi_ui()
def clear_roi_on_image_view(self):
list_roi = self.list_roi
for _row in list_roi.keys():
_roi = list_roi[_row]
roi_id = _roi['id']
self.ui.image_view.removeItem(roi_id)
def add_roi_button_clicked(self):
self.clear_roi_on_image_view()
self.ui.table_roi.blockSignals(True)
_selection = self.ui.table_roi.selectedRanges()
if _selection:
row = _selection[0].topRow()
else:
row = 0
# init new row with default value
self.ui.table_roi.insertRow(row)
_default_roi = self.default_roi
_item = QTableWidgetItem(str(_default_roi['x0']))
self.ui.table_roi.setItem(row, 0, _item)
_item = QTableWidgetItem(str(_default_roi['y0']))
self.ui.table_roi.setItem(row, 1, _item)
_item = QTableWidgetItem(str(_default_roi['x1']))
self.ui.table_roi.setItem(row, 2, _item)
_item = QTableWidgetItem(str(_default_roi['y1']))
self.ui.table_roi.setItem(row, 3, _item)
# save new list_roi dictionary
nbr_row = self.ui.table_roi.rowCount()
list_roi = OrderedDict()
for _row in np.arange(nbr_row):
_roi = {}
_x0 = self._get_item_value(_row, 0)
_roi['x0'] = np.int(_x0)
_y0 = self._get_item_value(_row, 1)
_roi['y0'] = np.int(_y0)
_x1 = self._get_item_value(_row, 2)
_roi['x1'] = np.int(_x1)
_y1 = self._get_item_value(_row, 3)
_roi['y1'] = np.int(_y1)
x0_int = int(_x0)
y0_int = int(_y0)
width_int = np.abs(x0_int - int(_x1))
height_int = np.abs(y0_int - int(_y1))
_roi_id = self.init_roi(x0=x0_int, y0=y0_int,
width=width_int, height=height_int)
_roi['id'] = _roi_id
list_roi[_row] = _roi
self.list_roi = list_roi
self.ui.table_roi.blockSignals(False)
self.check_add_remove_button_widgets_status()
if not _selection:
_new_selection = QTableWidgetSelectionRange(0, 0, 0, 3)
self.ui.table_roi.setRangeSelected(_new_selection, True)
def init_roi(self, x0=0, y0=0, width=0, height=0):
_color = QtGui.QColor(62, 13, 244)
_pen = QtGui.QPen()
_pen.setColor(_color)
_pen.setWidthF(self.roi_width)
_roi_id = pg.ROI([x0, y0], [width, height], pen=_pen, scaleSnap=True)
_roi_id.addScaleHandle([1, 1], [0, 0])
_roi_id.addScaleHandle([0, 0], [1, 1])
self.ui.image_view.addItem(_roi_id)
# add connection to roi
_roi_id.sigRegionChanged.connect(self.roi_manually_moved)
return _roi_id
def check_add_remove_button_widgets_status(self):
nbr_row = self.ui.table_roi.rowCount()
if nbr_row > 0:
self.ui.remove_roi_button.setEnabled(True)
else:
self.ui.remove_roi_button.setEnabled(False)
def format_roi(self):
roi_selected = {}
for _key in self.list_roi.keys():
_roi = self.list_roi[_key]
x0 = _roi['x0']
y0 = _roi['y0']
x1 = _roi['x1']
y1 = _roi['y1']
new_entry = {'x0': x0, 'y0': y0, 'x1': x1, 'y1': y1}
roi_selected[_key] = new_entry
self.roi_selected = roi_selected
def apply_clicked(self):
self.update_table_roi(None) #check ROI before leaving application
self.format_roi()
self.close()
if self.callback:
self.callback(self.roi_selected)
def cancel_clicked(self):
self.close()
def closeEvent(self, eventhere=None):
pass
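# Minimal usage sketch (illustrative; assumes `o_norm` is an already-loaded NeuNorm
# Normalization object and `collect_roi` is a user-supplied callback):
#
#   def collect_roi(roi_selected):
#       # e.g. {0: {'x0': 0, 'y0': 0, 'x1': 50, 'y1': 50}, ...}
#       print(roi_selected)
#
#   o_gui = Interface(o_norm=o_norm, callback=collect_roi)
#   o_gui.show()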
|
the-stack_106_15457
|
#!/usr/bin/env python
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from termcolor import colored
import ipdb
from .graspNet import model as grasp_net
# from graspNet import model as grasp_net
import gc
class grasp_obj2:
def __init__(self, checkpoint_path='./models/shake/checkpoint.ckpt-2000', gpu_id=-1, num_samples = 128):
self.checkpoint = checkpoint_path
tf.reset_default_graph()
if gpu_id==-1:
self.dev_name = "/cpu:0"
else:
self.dev_name = "/gpu:{}".format(gpu_id)
self.IMAGE_SIZE = 224
self.NUM_CHANNELS = 3
self.GRASP_ACTION_SIZE = 18
self.SEED = 48 # Set to None for random seed.
self.BATCH_SIZE = num_samples
#CONFIG PARAMS
self.INTRA_OP_THREADS = 1
self.INTER_OP_THREADS = 1
self.SOFT_PLACEMENT = True
tf.set_random_seed(self.SEED)
self.config = tf.ConfigProto(allow_soft_placement=self.SOFT_PLACEMENT,
intra_op_parallelism_threads=self.INTRA_OP_THREADS,
inter_op_parallelism_threads=self.INTER_OP_THREADS)
self.config.gpu_options.allow_growth = True
def sigmoid_array(self,x):
return 1 / (1 + np.exp(-x))
def test_init(self, lr_rate):
with tf.device(self.dev_name):
with tf.name_scope('Grasp_training_data'):
# input
self.Grasp_patches = tf.placeholder(tf.float32, shape=[None, self.IMAGE_SIZE, self.IMAGE_SIZE, self.NUM_CHANNELS])
# groundtruth, debug
self.y = tf.placeholder(tf.float32, shape=[None, 1])
with tf.name_scope('Grasp'):
self.M = grasp_net()
self.M.initial_weights(weight_file=None)
self.grasp_pred = self.M.gen_model(self.Grasp_patches)
with tf.device("/cpu:0"):
grasp_variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Grasp')
grasp_saver = tf.train.Saver(grasp_variables, max_to_keep=100)
with tf.name_scope('fc8_norm_vals'):
# debug, simulate fc8_norm_vals
padding = tf.constant([[0, 0], [1, 1]])
grasp_pred = tf.pad(self.grasp_pred, padding, "REFLECT")
grasp_in = tf.expand_dims(tf.expand_dims(grasp_pred, axis=1), axis=3) # NHWC [128,1,18,1]
filter = tf.constant([[0.25, 0.5, 0.25]]) # [1, 3]
filter = tf.expand_dims(tf.expand_dims(filter, axis=2), axis=3) # H, W, IN, OUT, [1, 3, 1, 1]
self.fc8_norm_vals = tf.nn.conv2d(grasp_in, filter, strides=[1, 1, 1, 1], padding='VALID', data_format='NHWC') # shape=(?, 1, 18, 1)
# ipdb.set_trace()
self.fc8_norm_vals = tf.squeeze(self.fc8_norm_vals, axis=(1, 3)) #[128,18] or [None, 18]
# training op, Jiali
with tf.device(self.dev_name):
self.pred = tf.reduce_max(self.fc8_norm_vals, axis=1, keepdims=True) # [None, 1]
self.probs = tf.sigmoid(self.pred)
self.loss_vec = tf.nn.sigmoid_cross_entropy_with_logits(logits=self.pred, labels=self.y)
self.loss = tf.reduce_mean(self.loss_vec)
optimizer = tf.train.RMSPropOptimizer(lr_rate)
pro_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='Grasp')
self.train_op = optimizer.minimize(self.loss, var_list=pro_vars)
# debug, Jiali
with tf.name_scope('init'):
init = tf.global_variables_initializer()
self.saver = tf.train.Saver()
with tf.device(self.dev_name):
self.sess = tf.Session(config = self.config)
# add init, Jiali
self.sess.run(init)
print(colored('init pro model with: {}'.format(self.checkpoint), 'magenta'))
grasp_saver.restore(self.sess, self.checkpoint)
#return fc8
def test_one_batch(self,Is):
with tf.device(self.dev_name):
print('debug test one batch, Grasp_patches: ', Is.shape)
grasp_feed_dict = {self.Grasp_patches : Is, self.M.dropfc6: 1.0, self.M.dropfc7: 1.0}
pred, g_pred_norms = self.sess.run([self.pred, self.fc8_norm_vals], feed_dict=grasp_feed_dict)
return pred, g_pred_norms
# training, y_batch: groundtruth, y_pred_batch: predictions
def train_batch(self, Is, y_batch, save_num, save_name, batch_update):
n_epochs =1
n_iteration_per_epoch = 1
dropfc6 = np.ones((batch_update-1,1))
dropfc7 = np.ones((batch_update-1, 1))
train_loss_val = None
for epoch in range(n_epochs):
print(colored('Epoch: {}'.format(epoch),'magenta'))
for iteration in range(n_iteration_per_epoch):
with tf.device(self.dev_name):
train_feed_dict = {self.Grasp_patches: Is, self.y: y_batch, self.M.dropfc6 : dropfc6, self.M.dropfc7 : dropfc7}
probs, train_loss_vec, train_loss_val, _ = self.sess.run([self.probs, self.loss_vec, self.loss, self.train_op], feed_dict = train_feed_dict)
print(colored('probs: {}, pro_train_loss_vec: {}, pro train loss: {}'.format(probs, train_loss_vec,train_loss_val),'magenta'))
# if save_num % 5 ==0:
# self.saver.save(self.sess, save_name)
# print(colored('pro model saved at: {}'.format(save_name),'cyan'))
gc.collect()
return train_loss_val
def test_close(self):
self.sess.close()
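# Intended calling sequence, as a sketch (values are illustrative; `patches` stands for
# a float32 array of shape [num_samples, 224, 224, 3], per the class constants above):
#
#   g = grasp_obj2(checkpoint_path='./models/shake/checkpoint.ckpt-2000', gpu_id=0)
#   g.test_init(lr_rate=1e-4)
#   pred, angle_scores = g.test_one_batch(patches)  # [N, 1] logits, [N, 18] smoothed angle scores
#   g.test_close()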
|
the-stack_106_15458
|
import csv
import logging
import os
from datetime import datetime
import pytest
from pytest import approx
from brainscore.submission.database import connect_db
from brainscore.submission.evaluation import run_evaluation
from brainscore.submission.models import Score, Model, Submission
from tests.test_submission.test_db import clear_schema, init_user, init_benchmark_parents
logger = logging.getLogger(__name__)
#
# Integration tests for the submission systems, executing 4 submissions:
# 1: ID:33 Working submission, executing one benchmark on Alexnet (zip + json)
# 2: ID:34 Rerunning Alexnet on another benchmark (only json)
# 3: ID:35 Failing installation submission (zip + json)
# 4: ID:36 Submission is installable, but model (Alexnet) is not scoreable (zip + json)
#
@pytest.mark.memory_intense
@pytest.mark.private_access
class TestIntegration:
database = 'brainscore-ohio-test' # test database
@classmethod
def setup_class(cls):
logger.info('Connect to database')
connect_db(TestIntegration.database)
clear_schema()
def setup_method(self):
logger.info('Initialize database')
init_user()
init_benchmark_parents()
def teardown_method(self):
logger.info('Clean database')
clear_schema()
def test_competition_field(self, tmpdir):
working_dir = str(tmpdir.mkdir('sub'))
config_dir = str(os.path.join(os.path.dirname(__file__), 'configs/'))
run_evaluation(config_dir, working_dir, 33, TestIntegration.database, models=['alexnet'],
benchmarks=['dicarlo.MajajHong2015.IT-pls'])
model = Model.get()
assert model.competition == "cosyne2022"
def test_competition_field_none(self, tmpdir):
working_dir = str(tmpdir.mkdir('sub'))
config_dir = str(os.path.join(os.path.dirname(__file__), 'configs/'))
submission = Submission.create(id=33, submitter=1, timestamp=datetime.now(),
model_type='BaseModel', status='running')
model = Model.create(name='alexnet', owner=submission.submitter, public=False,
submission=submission)
with open(f'{config_dir}submission_40.json', 'w') as rerun:
rerun.write(f"""{{
"model_ids": [{model.id}], "user_id": 1, "competition": null}}""")
run_evaluation(config_dir, working_dir, 40, TestIntegration.database,
benchmarks=['dicarlo.Rajalingham2018-i2n'])
model = Model.get()
assert model.competition is None
def test_evaluation(self, tmpdir):
working_dir = str(tmpdir.mkdir('sub'))
config_dir = str(os.path.join(os.path.dirname(__file__), 'configs/'))
run_evaluation(config_dir, working_dir, 33, TestIntegration.database, models=['alexnet'],
benchmarks=['dicarlo.MajajHong2015.IT-pls'])
with open('result_33.csv') as results:
csv_reader = csv.reader(results, delimiter=',')
next(csv_reader) # header row
result_row = next(csv_reader)
assert result_row[0] == 'alexnet'
assert result_row[1] == 'dicarlo.MajajHong2015.IT-pls'
assert float(result_row[2]) == approx(0.5857491098187586, abs=0.0001)
assert float(result_row[3]) == approx(0.5079816726934638, abs=0.0001)
assert float(result_row[4]) == approx(0.003155449372125895, abs=0.0001)
scores = Score.select()
assert len(scores) == 1
# successful score comment should inform about which layers were used for which regions
assert scores[0].comment.startswith("layers:")
def test_rerun_evaluation(self, tmpdir):
working_dir = str(tmpdir.mkdir('sub'))
config_dir = str(os.path.join(os.path.dirname(__file__), 'configs/'))
submission = Submission.create(id=33, submitter=1, timestamp=datetime.now(),
model_type='BaseModel', status='running')
model = Model.create(name='alexnet', owner=submission.submitter, public=False,
submission=submission)
with open(f'{config_dir}submission_34.json', 'w') as rerun:
rerun.write(f"""{{
"model_ids": [{model.id}], "user_id": 1}}""")
run_evaluation(config_dir, working_dir, 34, TestIntegration.database,
benchmarks=['dicarlo.Rajalingham2018-i2n'])
with open('result_34.csv') as results:
csv_reader = csv.reader(results, delimiter=',')
next(csv_reader) # header row
result_row = next(csv_reader)
assert result_row[0] == 'alexnet'
assert result_row[1] == 'dicarlo.Rajalingham2018-i2n'
assert float(result_row[2]) == approx(0.25771746331458695, abs=0.0001)
assert float(result_row[3]) == approx(0.3701702418190641, abs=0.0001)
assert float(result_row[4]) == approx(0.011129032024657565, abs=0.0001)
def test_failure_evaluation(self, tmpdir):
working_dir = str(tmpdir.mkdir('sub'))
config_dir = str(os.path.join(os.path.dirname(__file__), 'configs/'))
with pytest.raises(Exception):
run_evaluation(config_dir, working_dir, 35, TestIntegration.database, models=['alexnet'],
benchmarks=['dicarlo.Rajalingham2018-i2n'])
def test_model_failure_evaluation(self, tmpdir):
# os.environ['RESULTCACHING_DISABLE'] = 'brainscore.score_model,model_tools'
working_dir = str(tmpdir.mkdir('sub'))
config_dir = str(os.path.join(os.path.dirname(__file__), 'configs/'))
run_evaluation(config_dir, working_dir, 36, TestIntegration.database, models=['alexnet'],
benchmarks=['movshon.FreemanZiemba2013.V1-pls'])
with open('result_36.csv') as results:
csv_reader = csv.reader(results, delimiter=',')
next(csv_reader) # header row
result_row = next(csv_reader)
assert result_row[0] == 'alexnet'
assert result_row[1] == 'movshon.FreemanZiemba2013.V1-pls'
assert result_row[2] == '0'
assert result_row[3] == '0'
model = Model.get()
score = Score.get(model=model)
assert score.comment is not None # When there's a problem, the comment field contains an error message
# os.environ['RESULTCACHING_DISABLE'] = '0'
|
the-stack_106_15460
|
# encoding: utf-8
"""
Test suite for the docxx.parts.numbering module
"""
from __future__ import absolute_import, print_function, unicode_literals
import pytest
from docxx.oxml.numbering import CT_Numbering
from docxx.parts.numbering import NumberingPart, _NumberingDefinitions
from ..oxml.unitdata.numbering import a_num, a_numbering
from ..unitutil.mock import class_mock, instance_mock
class DescribeNumberingPart(object):
def it_provides_access_to_the_numbering_definitions(
self, num_defs_fixture):
(numbering_part, _NumberingDefinitions_, numbering_elm_,
numbering_definitions_) = num_defs_fixture
numbering_definitions = numbering_part.numbering_definitions
_NumberingDefinitions_.assert_called_once_with(numbering_elm_)
assert numbering_definitions is numbering_definitions_
# fixtures -------------------------------------------------------
@pytest.fixture
def num_defs_fixture(
self, _NumberingDefinitions_, numbering_elm_,
numbering_definitions_):
numbering_part = NumberingPart(None, None, numbering_elm_, None)
return (
numbering_part, _NumberingDefinitions_, numbering_elm_,
numbering_definitions_
)
# fixture components ---------------------------------------------
@pytest.fixture
def _NumberingDefinitions_(self, request, numbering_definitions_):
return class_mock(
request, 'docxx.parts.numbering._NumberingDefinitions',
return_value=numbering_definitions_
)
@pytest.fixture
def numbering_definitions_(self, request):
return instance_mock(request, _NumberingDefinitions)
@pytest.fixture
def numbering_elm_(self, request):
return instance_mock(request, CT_Numbering)
class Describe_NumberingDefinitions(object):
def it_knows_how_many_numbering_definitions_it_contains(
self, len_fixture):
numbering_definitions, numbering_definition_count = len_fixture
assert len(numbering_definitions) == numbering_definition_count
# fixtures -------------------------------------------------------
@pytest.fixture(params=[0, 1, 2, 3])
def len_fixture(self, request):
numbering_definition_count = request.param
numbering_bldr = a_numbering().with_nsdecls()
for idx in range(numbering_definition_count):
numbering_bldr.with_child(a_num())
numbering_elm = numbering_bldr.element
numbering_definitions = _NumberingDefinitions(numbering_elm)
return numbering_definitions, numbering_definition_count
|
the-stack_106_15463
|
from collections import defaultdict
#import MySQLdb # this is not available on windows for anaconda and python 3.4
from bs4 import BeautifulSoup
import operator
import os
from tornado.httpclient import AsyncHTTPClient
import tornado.ioloop
import tornado.web
from tornado.options import define, options
define("port", default=8888, help="run on the given port", type=int)
class MainHandler(tornado.web.RequestHandler):
def get(self):
self.render("templates/form.html", title="OLEx App")
@tornado.web.asynchronous
def post(self):
self.write("Your URL is: " + self.get_argument('url', ''))
http_client = AsyncHTTPClient()
http_client.fetch(self.get_argument('url', ''),
callback=self.on_fetch)
def on_fetch(self, response):
if response.error:
print("Error:", response.error)
self.render("templates/error.html", title="OLEx App", message = response.error)
else:
            soup = BeautifulSoup(response.body, "html.parser")
for script in soup(["script", "style"]):
script.extract()
wordmap = self.generate_wordmap(soup.get_text())
top100 = sorted(wordmap.items(), key=operator.itemgetter(1), reverse=True)[:100]
self.render("templates/result.html", title="OLEx App", content = top100)
def generate_wordmap(self, text):
words = text.split()
counts = defaultdict(int)
for word in words:
counts[word] += 1
return counts
def make_app():
settings = {'debug': True,
'static_path': os.path.join(os.path.dirname(__file__), "static")
}
return tornado.web.Application([
(r"/", MainHandler),
], **settings)
if __name__ == "__main__":
    tornado.options.parse_command_line()
    app = make_app()
    app.listen(options.port)
    tornado.ioloop.IOLoop.current().start()
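# Usage sketch: start the server and submit a URL through the form served at
# http://localhost:8888/. The port can be overridden on the command line, e.g.
#   python olex_app.py --port=9000
# (the script name is illustrative).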
|
the-stack_106_15467
|
types_of_people = 10 #sets types_of_people to 10
x = f"There are {types_of_people} types of people." # takes the value of variable types_of_people and inserts in string
binary = "binary" # sets binary to string "binary"
do_not = "don't" # sets do_not to string "don't"
y = f"Those who know {binary} and those who {do_not}." # Stríng input: binary and do_not
print(x) # prints x
print(y) # prints y
print(f"I said: {x}") # string input x
print(f"I also said: '{y}'") # string input y
hilarious = False # sets hilarious to the boolean value False
joke_evaluation = "Isn't that joke so funny?! {}" # joke_evaluation is set to a question; {} lets us insert a value with .format()
print(joke_evaluation.format(hilarious))
w = "This is a left side of ...."
e = "a string with a right side"
print(w + e) # prints out first w and then e
# w and e each hold text; adding them concatenates the two strings, which print in the order given.
|
the-stack_106_15468
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
import logging
import sys
import os
import torch
GEOSCORER_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "geoscorer/")
sys.path.append(GEOSCORER_DIR)
from geoscorer_wrapper import ContextSegmentMergerWrapper
from spatial_utils import shift_sparse_voxel_to_origin, densify
class Geoscorer(object):
"""
A model class that provides geoscorer functionality.
This is distinct from the wrapper itself because I see the wrapper
becoming more specialized as we add more functionality and this object
    possibly becoming a process or holding multiple wrappers.
"""
def __init__(self, merger_model_path=None):
if merger_model_path is not None:
logging.info("Geoscorer using merger_model_path={}".format(merger_model_path))
self.merger_model = ContextSegmentMergerWrapper(merger_model_path)
else:
raise Exception("specify a geoscorer model")
self.radius = self.merger_model.context_sl // 2
self.seg_sl = self.merger_model.seg_sl
self.blacklist = ["BETWEEN", "INSIDE", "AWAY", "NEAR"]
# Define the circumstances where we can use geoscorer
def use(self, steps, repeat_num, rel_dir):
if repeat_num > 1 or steps is not None:
return False
if rel_dir is None or rel_dir in self.blacklist:
return False
return True
def produce_segment_pos_in_context(self, segment, context, brc):
# Offset puts us right outside of the bottom right corner
# c_offset = [sum(x) for x in zip(brc, (-1, -1, -1))]
c_offset = brc
context_p = self._process_context(context)
segment_p = self._process_segment(segment)
bottom_right_coord = self._seg_context_processed_to_coord(segment_p, context_p, c_offset)
return bottom_right_coord
def _seg_context_processed_to_coord(self, segment, context, context_off):
local_coord = self.merger_model.segment_context_to_pos(segment, context)
global_coord = [sum(x) for x in zip(local_coord, context_off)]
return global_coord
def _process_context(self, context):
c_tensor = torch.from_numpy(context[:, :, :, 0]).long().to(device="cuda")
return c_tensor
def _process_segment(self, segment):
"""
Takes a segment, described as a list of tuples of the form:
((x, y, z), (block_id, ?))
Returns an 8x8x8 block with the segment shifted to the origin its bounds.
"""
shifted_seg, _ = shift_sparse_voxel_to_origin(segment)
sl = self.seg_sl
c = self.seg_sl // 2
p, _ = densify(shifted_seg, [sl, sl, sl], center=[c, c, c], useid=True)
s_tensor = torch.from_numpy(p).long().to(device="cuda")
return s_tensor
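# Input sketch (shapes and layout inferred from the methods above, not authoritative):
#   segment: sparse voxel list such as [((0, 0, 0), (1, 0)), ((1, 0, 0), (1, 0))]
#   context: dense integer array of shape (side, side, side, C); only channel 0
#            (the block id) is used.
#   brc:     global (x, y, z) offset of the context region.
#
#   g = Geoscorer(merger_model_path="path/to/geoscorer_model.pt")  # path is illustrative
#   target_coord = g.produce_segment_pos_in_context(segment, context, brc)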
|
the-stack_106_15470
|
from django import forms
from .models import Product
class ProductForm(forms.ModelForm):
title = forms.CharField(
max_length=120,
label="Item Title:",
widget=forms.TextInput(attrs={
"placeholder": "Title",
"autofocus": True,
})
)
class Meta:
model = Product
fields = [
"title",
# "description",
"price",
"summary",
# "featured",
]
def clean_title(self, *arg, **kwargs):
title = self.cleaned_data.get("title")
if not "item" in title:
raise forms.ValidationError("Title requires 'item' prefix")
return title
class RawProductForm(forms.Form):
title = forms.CharField(
max_length=120, label="Item Title:", widget=forms.TextInput(attrs={
"placeholder": "Title",
})
)
# description = forms.CharField(required=False, widget=forms.Textarea())
price = forms.DecimalField(max_digits=16, decimal_places=2)
summary = forms.CharField(
widget=forms.Textarea(
attrs={
"id": "summary-textarea",
"class": "bg-grey-100",
"rows": 5,
"cols": 25,
}
)
)
# featured = forms.BooleanField(required=False)
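# Usage sketch for ProductForm in a function-based view (view and template names are
# illustrative, not part of this app):
#
#   from django.shortcuts import render
#
#   def product_create_view(request):
#       form = ProductForm(request.POST or None)
#       if form.is_valid():
#           form.save()
#           form = ProductForm()
#       return render(request, "products/product_create.html", {"form": form})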
|
the-stack_106_15472
|
#!/usr/bin/env python3
import socket
import sys
import time
#define address & buffer size
HOST = ""
PORT = 8001
BUFFER_SIZE = 1024
def get_remote_ip(host):
print(f'Getting IP for {host}')
try:
remote_ip = socket.gethostbyname( host )
except socket.gaierror:
print('Hostname could not be resolved. Exiting')
sys.exit()
print(f'Ip address of {host} is {remote_ip}')
return remote_ip
def main():
host = "www.google.com"
port = 80
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
print("Starting proxy server")
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
#bind socket to address
s.bind((HOST, PORT))
#set to listening mode
s.listen(1)
#continuously listen for connections
while True:
conn, addr = s.accept()
print("Connected by", addr)
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as end:
print("Connecting to Google")
remote_ip = get_remote_ip(host)
end.connect((remote_ip, port))
                # receive the client's request and forward it to the remote host
                send_full_data = conn.recv(BUFFER_SIZE)
                print(f"Sending received data {send_full_data} to Google")
end.sendall(send_full_data)
end.shutdown(socket.SHUT_WR)
data = end.recv(BUFFER_SIZE)
print(f"Sending recieved data {data} to client")
conn.send(data)
conn.close()
if __name__ == "__main__":
main()
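# Manual test sketch: with the proxy running, send a plain HTTP request to it, e.g.
#   curl -s http://localhost:8001/
# Note that only the first BUFFER_SIZE bytes of the remote response are relayed back,
# since recv() is called exactly once in each direction.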
|
the-stack_106_15474
|
import functools
import weakref
import jax.numpy as np
import scipy as oscipy
def to_float(value):
    """Convert a scalar array value to a Python float."""
    # jax.numpy has no asscalar(); float() handles 0-d arrays directly.
    return float(value)
def memoized_method(*lru_args, **lru_kwargs):
def decorator(func):
@functools.wraps(func)
def wrapped_func(self, *args, **kwargs):
# We're storing the wrapped method inside the instance. If we had
# a strong reference to self the instance would never die.
self_weak = weakref.ref(self)
@functools.wraps(func)
@functools.lru_cache(*lru_args, **lru_kwargs)
def cached_method(*args, **kwargs):
return func(self_weak(), *args, **kwargs)
setattr(self, func.__name__, cached_method)
return cached_method(*args, **kwargs)
return wrapped_func
return decorator
def minimize_random(fun, init, tries=100):
best_x = None
best_loss = float("+inf")
while tries > 0:
x = init()
loss = fun(x)
if best_x is None or loss < best_loss:
best_x = x
best_loss = loss
tries -= 1
return best_x
def minimize(fun, *args, init=None, init_tries=1, opt_tries=1, verbose=False, **kwargs):
"""
Wrapper around scipy.optimize.minimize that supports retries
"""
if "x0" in kwargs:
raise ValueError("Provide initialization function (init), not x0")
best_results = None
best_loss = float("+inf")
while opt_tries > 0:
init_params = minimize_random(fun, init, tries=init_tries)
results = oscipy.optimize.minimize(fun, *args, x0=init_params, **kwargs)
opt_tries -= 1
if best_results is None or results.fun < best_loss:
best_results = results
best_loss = results.fun
if opt_tries == 0:
break
return best_results
def shift(xs, k, fill_value):
return np.concatenate((np.full(k, fill_value), xs[:-k]))
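# Usage sketch for the retry wrapper (toy quadratic loss; values are illustrative):
#
#   import numpy as onp
#
#   loss = lambda x: float(np.sum((np.asarray(x) - 3.0) ** 2))
#   result = minimize(loss, init=lambda: onp.random.randn(2),
#                     init_tries=5, opt_tries=3, method="BFGS")
#   # result.x should land close to [3., 3.]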
|
the-stack_106_15475
|
# -*- coding: utf-8 -*-
"""This module implements all CAD database manipulations using skill commands.
"""
from typing import List, Dict, Optional, Any, Tuple
import os
import shutil
import yaml
import bag
from ..io.common import get_encoding, fix_string
from ..io.file import open_temp
from .database import DbAccess
try:
import cybagoa
except ImportError:
cybagoa = None
def _dict_to_pcell_params(table):
"""Convert given parameter dictionary to pcell parameter list format.
Parameters
----------
table : dict[str, any]
the parameter dictionary.
Returns
-------
param_list : list[any]
the Pcell parameter list
"""
param_list = []
for key, val in table.items():
# python 2/3 compatibility: convert raw bytes to string.
val = fix_string(val)
        if isinstance(val, bool):
            # bool must be checked before int, since bool is a subclass of int.
            param_list.append([key, "bool", val])
        elif isinstance(val, float):
            param_list.append([key, "float", val])
        elif isinstance(val, str):
            # unicode string
            param_list.append([key, "string", val])
        elif isinstance(val, int):
            param_list.append([key, "int", val])
else:
raise Exception('Unsupported parameter %s with type: %s' % (key, type(val)))
return param_list
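# For example (sketch; parameter names are illustrative):
#   _dict_to_pcell_params({'w': 1.0e-6, 'nf': 4, 'dummy': True})
#   -> [['w', 'float', 1e-06], ['nf', 'int', 4], ['dummy', 'bool', True]]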
def to_skill_list_str(pylist):
"""Convert given python list to a skill list string.
Parameters
----------
pylist : list[str]
a list of string.
Returns
-------
ans : str
a string representation of the equivalent skill list.
"""
content = ' '.join(('"%s"' % val for val in pylist))
return "'( %s )" % content
def _handle_reply(reply):
"""Process the given reply."""
if isinstance(reply, dict):
print("reply",reply)
if reply.get('type') == 'error':
if 'data' not in reply:
raise Exception('Unknown reply format: %s' % reply)
raise VirtuosoException(reply['data'])
else:
try:
return reply['data']
except Exception:
raise Exception('Unknown reply format: %s' % reply)
else:
raise Exception('Unknown reply format: %s' % reply)
class VirtuosoException(Exception):
"""Exception raised when Virtuoso returns an error."""
def __init__(self, *args, **kwargs):
# noinspection PyArgumentList
Exception.__init__(self, *args, **kwargs)
class SkillInterface(DbAccess):
"""Skill interface between bag and Virtuoso.
This class sends all bag's database and simulation operations to
an external Virtuoso process, then get the result from it.
Parameters
----------
dealer : :class:`bag.interface.ZMQDealer`
the socket used to communicate with :class:`~bag.interface.SkillOceanServer`.
tmp_dir : string
temporary file directory for DbAccess.
db_config : dict[str, any]
the database configuration dictionary.
"""
def __init__(self, dealer, tmp_dir, db_config):
"""Initialize a new SkillInterface object.
"""
DbAccess.__init__(self, tmp_dir, db_config)
self.handler = dealer
self._rcx_jobs = {}
def close(self):
"""Terminate the database server gracefully.
"""
self.handler.send_obj(dict(type='exit'))
self.handler.close()
def _eval_skill(self, expr, input_files=None, out_file=None):
# type: (str, Optional[Dict[str, Any]], Optional[str]) -> str
"""Send a request to evaluate the given skill expression.
Because Virtuoso has a limit on the input/output data (< 4096 bytes),
if your input is large, you need to write it to a file and have
Virtuoso open the file to parse it. Similarly, if you expect a
large output, you need to make Virtuoso write the result to the
file, then read it yourself. The parameters input_files and
out_file help you achieve this functionality.
For example, if you need to evaluate "skill_fun(arg fname)", where
arg is a file containing the list [1 2 3], and fname is the output
file name, you will call this function with:
expr = "skill_fun({arg} {fname})"
input_files = { "arg": [1 2 3] }
out_file = "fname"
        the bag server will then create a temporary file for arg and fname, write
        the list [1 2 3] into the file for arg, call Virtuoso, then read
        the output file fname and return the result.
Parameters
----------
expr : string
the skill expression to evaluate.
input_files : dict[string, any] or None
A dictionary of input files content.
out_file : string or None
the output file name argument in expr.
Returns
-------
result : str
a string representation of the result.
Raises
------
:class: `.VirtuosoException` :
if virtuoso encounters errors while evaluating the expression.
"""
request = dict(
type='skill',
expr=expr,
input_files=input_files,
out_file=out_file,
)
self.handler.send_obj(request)
reply = self.handler.recv_obj()
#print('reply',reply)
return _handle_reply(reply)
def parse_schematic_template(self, lib_name, cell_name):
"""Parse the given schematic template.
Parameters
----------
lib_name : str
name of the library.
cell_name : str
name of the cell.
Returns
-------
template : str
the content of the netlist structure file.
"""
cmd = 'parse_cad_sch( "%s" "%s" {netlist_info} )' % (lib_name, cell_name)
return self._eval_skill(cmd, out_file='netlist_info')
def get_cells_in_library(self, lib_name):
"""Get a list of cells in the given library.
Returns an empty list if the given library does not exist.
Parameters
----------
lib_name : str
the library name.
Returns
-------
cell_list : list[str]
a list of cells in the library
"""
cmd = 'get_cells_in_library_file( "%s" {cell_file} )' % lib_name
return self._eval_skill(cmd, out_file='cell_file').split()
def create_library(self, lib_name, lib_path=''):
"""Create a new library if one does not exist yet.
Parameters
----------
lib_name : string
the library name.
lib_path : string
directory to create the library in. If Empty, use default location.
"""
lib_path = lib_path or self.default_lib_path
tech_lib = self.db_config['schematic']['tech_lib']
return self._eval_skill('create_or_erase_library('
'"{}" "{}" "{}" nil)'.format(lib_name, tech_lib, lib_path))
def create_implementation(self, lib_name, template_list, change_list, lib_path=''):
"""Create implementation of a design in the CAD database.
Parameters
----------
lib_name : str
implementation library name.
template_list : list
a list of schematic templates to copy to the new library.
change_list :
a list of changes to be performed on each copied templates.
lib_path : str
directory to create the library in. If Empty, use default location.
"""
lib_path = lib_path or self.default_lib_path
tech_lib = self.db_config['schematic']['tech_lib']
if cybagoa is not None and self.db_config['schematic'].get('use_cybagoa', False):
cds_lib_path = os.environ.get('CDS_LIB_PATH', './cds.lib')
sch_name = 'schematic'
sym_name = 'symbol'
encoding = get_encoding()
# release write locks
cell_view_list = []
for _, _, cell_name in template_list:
cell_view_list.append((cell_name, sch_name))
cell_view_list.append((cell_name, sym_name))
self.release_write_locks(lib_name, cell_view_list)
# create library in case it doesn't exist
self.create_library(lib_name, lib_path)
# write schematic
with cybagoa.PyOASchematicWriter(cds_lib_path, lib_name, encoding) as writer:
for temp_info, change_info in zip(template_list, change_list):
sch_cell = cybagoa.PySchCell(temp_info[0], temp_info[1], temp_info[2], encoding)
for old_pin, new_pin in change_info['pin_map']:
sch_cell.rename_pin(old_pin, new_pin)
for inst_name, rinst_list in change_info['inst_list']:
sch_cell.add_inst(inst_name, lib_name, rinst_list)
writer.add_sch_cell(sch_cell)
writer.create_schematics(sch_name, sym_name)
copy = 'nil'
else:
copy = "'t"
in_files = {'template_list': template_list,
'change_list': change_list}
sympin = to_skill_list_str(self.db_config['schematic']['sympin'])
ipin = to_skill_list_str(self.db_config['schematic']['ipin'])
opin = to_skill_list_str(self.db_config['schematic']['opin'])
iopin = to_skill_list_str(self.db_config['schematic']['iopin'])
simulators = to_skill_list_str(self.db_config['schematic']['simulators'])
cmd = ('create_concrete_schematic( "%s" "%s" "%s" {template_list} '
'{change_list} %s %s %s %s %s %s)' % (lib_name, tech_lib, lib_path,
sympin, ipin, opin, iopin, simulators, copy))
#print('inputfiles',in_files)
return self._eval_skill(cmd, input_files=in_files)
def configure_testbench(self, tb_lib, tb_cell):
"""Update testbench state for the given testbench.
This method fill in process-specific information for the given testbench.
Parameters
----------
tb_lib : str
testbench library name.
tb_cell : str
testbench cell name.
Returns
-------
cur_env : str
the current simulation environment.
envs : list[str]
a list of available simulation environments.
parameters : dict[str, str]
a list of testbench parameter values, represented as string.
"""
tb_config = self.db_config['testbench']
print("this is db_config:",self.db_config)
cmd = ('instantiate_testbench("{tb_cell}" "{targ_lib}" ' +
'"{config_libs}" "{config_views}" "{config_stops}" ' +
'"{default_corner}" "{corner_file}" {def_files} ' +
'"{tech_lib}" {result_file})')
cmd = cmd.format(tb_cell=tb_cell,
targ_lib=tb_lib,
config_libs=tb_config['config_libs'],
config_views=tb_config['config_views'],
config_stops=tb_config['config_stops'],
default_corner=tb_config['default_env'],
corner_file=tb_config['env_file'],
def_files=to_skill_list_str(tb_config['def_files']),
tech_lib=self.db_config['schematic']['tech_lib'],
result_file='{result_file}')
output = yaml.load(self._eval_skill(cmd, out_file='result_file'))
        return tb_config['default_env'], output['corners'], output['parameters'], output['outputs']
def get_testbench_info(self, tb_lib, tb_cell):
"""Returns information about an existing testbench.
Parameters
----------
tb_lib : str
testbench library.
tb_cell : str
testbench cell.
Returns
-------
cur_envs : list[str]
the current simulation environments.
envs : list[str]
a list of available simulation environments.
parameters : dict[str, str]
a list of testbench parameter values, represented as string.
outputs : dict[str, str]
a list of testbench output expressions.
"""
cmd = 'get_testbench_info("{tb_lib}" "{tb_cell}" {result_file})'
cmd = cmd.format(tb_lib=tb_lib,
tb_cell=tb_cell,
result_file='{result_file}')
output = yaml.load(self._eval_skill(cmd, out_file='result_file'))
return output['enabled_corners'], output['corners'], output['parameters'], output['outputs']
def update_testbench(self,
lib, # type: str
cell, # type: str
parameters, # type: Dict[str, str]
sim_envs, # type: List[str]
config_rules, # type: List[List[str]]
env_parameters # type: List[List[Tuple[str, str]]]
):
# type: (...) -> None
"""Update the given testbench configuration.
Parameters
----------
lib : str
testbench library.
cell : str
testbench cell.
parameters : Dict[str, str]
testbench parameters.
sim_envs : List[str]
list of enabled simulation environments.
config_rules : List[List[str]]
config view mapping rules, list of (lib, cell, view) rules.
env_parameters : List[List[Tuple[str, str]]]
list of param/value list for each simulation environment.
"""
cmd = ('modify_testbench("%s" "%s" {conf_rules} {run_opts} '
'{sim_envs} {params} {env_params})' % (lib, cell))
in_files = {'conf_rules': config_rules,
'run_opts': [],
'sim_envs': sim_envs,
'params': list(parameters.items()),
'env_params': list(zip(sim_envs, env_parameters)),
}
self._eval_skill(cmd, input_files=in_files)
def instantiate_layout_pcell(self, lib_name, cell_name, view_name,
inst_lib, inst_cell, params, pin_mapping):
"""Create a layout cell with a single pcell instance.
Parameters
----------
lib_name : str
layout library name.
cell_name : str
layout cell name.
view_name : str
layout view name, default is "layout".
inst_lib : str
pcell library name.
inst_cell : str
pcell cell name.
params : dict[str, any]
the parameter dictionary.
pin_mapping: dict[str, str]
the pin mapping dictionary.
"""
# create library in case it doesn't exist
self.create_library(lib_name)
# convert parameter dictionary to pcell params list format
param_list = _dict_to_pcell_params(params)
cmd = ('create_layout_with_pcell( "%s" "%s" "%s" "%s" "%s"'
'{params} {pin_mapping} )' % (lib_name, cell_name,
view_name, inst_lib, inst_cell))
in_files = {'params': param_list, 'pin_mapping': list(pin_mapping.items())}
return self._eval_skill(cmd, input_files=in_files)
def instantiate_layout(self, lib_name, view_name, via_tech, layout_list):
"""Create a batch of layouts.
Parameters
----------
lib_name : str
layout library name.
view_name : str
layout view name.
via_tech : str
via technology library name.
layout_list : list[any]
a list of layouts to create
"""
# create library in case it doesn't exist
#print('libname_inskill',lib_name)
self.create_library(lib_name)
# convert parameter dictionary to pcell params list format
new_layout_list = []
for info_list in layout_list:
new_inst_list = []
for inst in info_list[1]:
if 'master_key' in inst:
                    # The SKILL interface cannot handle master_key info, so we remove
                    # it from InstanceInfo if we find it.
inst.pop('master_key')
if 'params' in inst:
inst = inst.copy()
inst['params'] = _dict_to_pcell_params(inst['params'])
new_inst_list.append(inst)
new_info_list = info_list[:]
new_info_list[1] = new_inst_list
new_layout_list.append(new_info_list)
cmd = 'create_layout( "%s" "%s" "%s" {layout_list} )' % (lib_name, view_name, via_tech)
in_files = {'layout_list': new_layout_list}
return self._eval_skill(cmd, input_files=in_files)
def release_write_locks(self, lib_name, cell_view_list):
"""Release write locks from all the given cells.
Parameters
----------
lib_name : string
the library name.
cell_view_list : List[(string, string)]
list of cell/view name tuples.
"""
cmd = 'release_write_locks( "%s" {cell_view_list} )' % lib_name
in_files = {'cell_view_list': cell_view_list}
return self._eval_skill(cmd, input_files=in_files)
def create_schematic_from_netlist(self, netlist, lib_name, cell_name,
sch_view=None, **kwargs):
# type: (str, str, str, Optional[str], **Any) -> None
"""Create a schematic from a netlist.
This is mainly used to create extracted schematic from an extracted netlist.
Parameters
----------
netlist : str
the netlist file name.
lib_name : str
library name.
cell_name : str
cell_name
sch_view : Optional[str]
            schematic view name. The default value is implementation dependent.
**kwargs : Any
additional implementation-dependent arguments.
"""
calview_config = self.db_config.get('calibreview', None)
use_calibreview = self.db_config.get('use_calibreview', True)
if calview_config is not None and use_calibreview:
# create calibre view from extraction netlist
cell_map = calview_config['cell_map']
sch_view = sch_view or calview_config['view_name']
# create calibre view config file
tmp_params = dict(
netlist_file=netlist,
lib_name=lib_name,
cell_name=cell_name,
calibre_cellmap=cell_map,
view_name=sch_view,
)
content = self.render_file_template('calibreview_setup.txt', tmp_params)
with open_temp(prefix='calview', dir=self.tmp_dir, delete=False) as f:
fname = f.name
f.write(content)
# delete old calibre view
cmd = 'delete_cellview( "%s" "%s" "%s" )' % (lib_name, cell_name, sch_view)
self._eval_skill(cmd)
# make extracted schematic
cmd = 'mgc_rve_load_setup_file( "%s" )' % fname
self._eval_skill(cmd)
else:
# get netlists to copy
netlist_dir = os.path.dirname(netlist)
netlist_files = self.checker.get_rcx_netlists(lib_name, cell_name)
if not netlist_files:
# some error checking. Shouldn't be needed but just in case
raise ValueError('RCX did not generate any netlists')
# copy netlists to a "netlist" subfolder in the CAD database
cell_dir = self.get_cell_directory(lib_name, cell_name)
targ_dir = os.path.join(cell_dir, 'netlist')
os.makedirs(targ_dir, exist_ok=True)
for fname in netlist_files:
shutil.copy(os.path.join(netlist_dir, fname), targ_dir)
# create symbolic link as aliases
symlink = os.path.join(targ_dir, 'netlist')
try:
os.remove(symlink)
except FileNotFoundError:
pass
os.symlink(netlist_files[0], symlink)
def get_cell_directory(self, lib_name, cell_name):
# type: (str, str) -> str
"""Returns the directory name of the given cell.
Parameters
----------
lib_name : str
library name.
cell_name : str
cell name.
Returns
-------
cell_dir : str
path to the cell directory.
"""
# use yaml.load to remove outermost quotation marks
lib_dir = yaml.load(self._eval_skill('get_lib_directory( "%s" )' % lib_name))
if not lib_dir:
raise ValueError('Library %s not found.' % lib_name)
return os.path.join(lib_dir, cell_name)
def create_verilog_view(self, verilog_file, lib_name, cell_name, **kwargs):
# type: (str, str, str, **Any) -> None
"""Create a verilog view for mix-signal simulation.
Parameters
----------
verilog_file : str
the verilog file name.
lib_name : str
library name.
cell_name : str
cell name.
**kwargs : Any
additional implementation-dependent arguments.
"""
# delete old verilog view
cmd = 'delete_cellview( "%s" "%s" "verilog" )' % (lib_name, cell_name)
self._eval_skill(cmd)
cmd = 'schInstallHDL("%s" "%s" "verilog" "%s" t)' % (lib_name, cell_name, verilog_file)
self._eval_skill(cmd)
|
the-stack_106_15476
|
import asyncio
import json
import ssl
from contextlib import asynccontextmanager
from pathlib import Path
from typing import Any, Dict, Optional
import websockets
from shamrock.types.blockchain_format.sized_bytes import bytes32
from shamrock.util.config import load_config
from shamrock.util.json_util import dict_to_json_str
from shamrock.util.ws_message import WsRpcMessage, create_payload_dict
class DaemonProxy:
def __init__(self, uri: str, ssl_context: Optional[ssl.SSLContext]):
self._uri = uri
self._request_dict: Dict[bytes32, asyncio.Event] = {}
self.response_dict: Dict[bytes32, Any] = {}
self.ssl_context = ssl_context
def format_request(self, command: str, data: Dict[str, Any]) -> WsRpcMessage:
request = create_payload_dict(command, data, "client", "daemon")
return request
async def start(self):
self.websocket = await websockets.connect(self._uri, max_size=None, ssl=self.ssl_context)
async def listener():
while True:
try:
message = await self.websocket.recv()
except websockets.exceptions.ConnectionClosedOK:
return None
decoded = json.loads(message)
id = decoded["request_id"]
if id in self._request_dict:
self.response_dict[id] = decoded
self._request_dict[id].set()
asyncio.create_task(listener())
await asyncio.sleep(1)
async def _get(self, request: WsRpcMessage) -> WsRpcMessage:
request_id = request["request_id"]
self._request_dict[request_id] = asyncio.Event()
string = dict_to_json_str(request)
asyncio.create_task(self.websocket.send(string))
async def timeout():
await asyncio.sleep(30)
if request_id in self._request_dict:
print("Error, timeout.")
self._request_dict[request_id].set()
asyncio.create_task(timeout())
await self._request_dict[request_id].wait()
if request_id in self.response_dict:
response = self.response_dict[request_id]
self.response_dict.pop(request_id)
else:
response = None
self._request_dict.pop(request_id)
return response
async def get_version(self) -> WsRpcMessage:
data: Dict[str, Any] = {}
request = self.format_request("get_version", data)
response = await self._get(request)
return response
async def start_service(self, service_name: str) -> WsRpcMessage:
data = {"service": service_name}
request = self.format_request("start_service", data)
response = await self._get(request)
return response
async def stop_service(self, service_name: str, delay_before_kill: int = 15) -> WsRpcMessage:
data = {"service": service_name}
request = self.format_request("stop_service", data)
response = await self._get(request)
return response
async def is_running(self, service_name: str) -> bool:
data = {"service": service_name}
request = self.format_request("is_running", data)
response = await self._get(request)
if "is_running" in response["data"]:
return bool(response["data"]["is_running"])
return False
async def is_keyring_locked(self) -> bool:
data: Dict[str, Any] = {}
request = self.format_request("is_keyring_locked", data)
response = await self._get(request)
if "is_keyring_locked" in response["data"]:
return bool(response["data"]["is_keyring_locked"])
return False
async def unlock_keyring(self, passphrase: str) -> WsRpcMessage:
data = {"key": passphrase}
request = self.format_request("unlock_keyring", data)
response = await self._get(request)
return response
async def notify_keyring_migration_completed(self, passphrase: Optional[str]) -> WsRpcMessage:
data: Dict[str, Any] = {"key": passphrase}
request: WsRpcMessage = self.format_request("notify_keyring_migration_completed", data)
response: WsRpcMessage = await self._get(request)
return response
async def ping(self) -> WsRpcMessage:
request = self.format_request("ping", {})
response = await self._get(request)
return response
async def close(self) -> None:
await self.websocket.close()
async def exit(self) -> WsRpcMessage:
request = self.format_request("exit", {})
return await self._get(request)
async def connect_to_daemon(self_hostname: str, daemon_port: int, ssl_context: Optional[ssl.SSLContext]) -> DaemonProxy:
"""
Connect to the local daemon.
"""
client = DaemonProxy(f"wss://{self_hostname}:{daemon_port}", ssl_context)
await client.start()
return client
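# Minimal usage sketch (hypothetical host/port; assumes a daemon is listening and
# that `ssl_context` was built the same way connect_to_daemon_and_validate builds it):
#
#     async def _demo(ssl_context):
#         proxy = await connect_to_daemon("localhost", 55400, ssl_context)
#         print(await proxy.get_version())
#         await proxy.close()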
async def connect_to_daemon_and_validate(root_path: Path, quiet: bool = False) -> Optional[DaemonProxy]:
"""
Connect to the local daemon and do a ping to ensure that something is really
there and running.
"""
from shamrock.server.server import ssl_context_for_client
try:
net_config = load_config(root_path, "config.yaml")
crt_path = root_path / net_config["daemon_ssl"]["private_crt"]
key_path = root_path / net_config["daemon_ssl"]["private_key"]
ca_crt_path = root_path / net_config["private_ssl_ca"]["crt"]
ca_key_path = root_path / net_config["private_ssl_ca"]["key"]
ssl_context = ssl_context_for_client(ca_crt_path, ca_key_path, crt_path, key_path)
connection = await connect_to_daemon(net_config["self_hostname"], net_config["daemon_port"], ssl_context)
r = await connection.ping()
if "value" in r["data"] and r["data"]["value"] == "pong":
return connection
except Exception:
if not quiet:
print("Daemon not started yet")
return None
return None
@asynccontextmanager
async def acquire_connection_to_daemon(root_path: Path, quiet: bool = False):
"""
Asynchronous context manager which attempts to create a connection to the daemon.
The connection object (DaemonProxy) is yielded to the caller. After the caller's
block exits scope, execution resumes in this function, wherein the connection is
closed.
"""
from shamrock.daemon.client import connect_to_daemon_and_validate
daemon: Optional[DaemonProxy] = None
try:
daemon = await connect_to_daemon_and_validate(root_path, quiet=quiet)
yield daemon # <----
except Exception as e:
print(f"Exception occurred while communicating with the daemon: {e}")
if daemon is not None:
await daemon.close()
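# Intended calling pattern for the context manager above (sketch; `root_path` is
# assumed to point at an initialized shamrock configuration directory):
#
#     async def _ping_daemon(root_path: Path) -> None:
#         async with acquire_connection_to_daemon(root_path, quiet=True) as daemon:
#             if daemon is not None:
#                 print(await daemon.ping())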
|
the-stack_106_15477
|
import os
import time
from datetime import datetime, timedelta
from django.conf import settings
from django.db import connections
from django.db.models import Q, F, Avg
from django.utils.encoding import force_text
import multidb
from celery.task.sets import TaskSet
import waffle
import olympia.core.logger
from olympia import amo
from olympia.amo.celery import task
from olympia.amo.decorators import write
from olympia.amo.utils import chunked, walkfiles
from olympia.addons.models import Addon, AppSupport, FrozenAddon
from olympia.files.models import File
from olympia.lib.es.utils import raise_if_reindex_in_progress
from olympia.stats.models import UpdateCount
log = olympia.core.logger.getLogger('z.cron')
task_log = olympia.core.logger.getLogger('z.task')
def update_addon_average_daily_users():
"""Update add-ons ADU totals."""
if not waffle.switch_is_active('local-statistics-processing'):
return False
raise_if_reindex_in_progress('amo')
cursor = connections[multidb.get_slave()].cursor()
q = """SELECT addon_id, AVG(`count`)
FROM update_counts
WHERE `date` > DATE_SUB(CURDATE(), INTERVAL 13 DAY)
GROUP BY addon_id
ORDER BY addon_id"""
cursor.execute(q)
d = cursor.fetchall()
cursor.close()
ts = [_update_addon_average_daily_users.subtask(args=[chunk])
for chunk in chunked(d, 250)]
TaskSet(ts).apply_async()
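# The raw SQL above averages each add-on's update counts over the last 13 days;
# the resulting (addon_id, average) rows are chunked into groups of 250 and
# fanned out to the celery task below.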
@task
def _update_addon_average_daily_users(data, **kw):
task_log.info("[%s] Updating add-ons ADU totals." % (len(data)))
if not waffle.switch_is_active('local-statistics-processing'):
return False
for pk, count in data:
try:
addon = Addon.objects.get(pk=pk)
except Addon.DoesNotExist:
# The processing input comes from metrics which might be out of
# date in regards to currently existing add-ons
m = "Got an ADU update (%s) but the add-on doesn't exist (%s)"
task_log.debug(m % (count, pk))
continue
if (count - addon.total_downloads) > 10000:
# Adjust ADU to equal total downloads so bundled add-ons don't
# skew the results when sorting by users.
task_log.info('Readjusted ADU count for addon %s' % addon.slug)
addon.update(average_daily_users=addon.total_downloads)
else:
addon.update(average_daily_users=count)
def update_addon_download_totals():
"""Update add-on total and average downloads."""
if not waffle.switch_is_active('local-statistics-processing'):
return False
cursor = connections[multidb.get_slave()].cursor()
# We need to use SQL for this until
# http://code.djangoproject.com/ticket/11003 is resolved
q = """SELECT addon_id, AVG(count), SUM(count)
FROM download_counts
USE KEY (`addon_and_count`)
JOIN addons ON download_counts.addon_id=addons.id
WHERE addons.status != %s
GROUP BY addon_id
ORDER BY addon_id"""
cursor.execute(q, [amo.STATUS_DELETED])
d = cursor.fetchall()
cursor.close()
ts = [_update_addon_download_totals.subtask(args=[chunk])
for chunk in chunked(d, 250)]
TaskSet(ts).apply_async()
@task
def _update_addon_download_totals(data, **kw):
task_log.info('[%s] Updating add-ons download+average totals.' %
(len(data)))
if not waffle.switch_is_active('local-statistics-processing'):
return False
for pk, avg, sum in data:
try:
addon = Addon.objects.get(pk=pk)
# Don't trigger a save unless we have to. Since the query that
# sends us data doesn't filter out deleted addons, or the addon may
# be unpopular, this can reduce a lot of unnecessary save queries.
if (addon.average_daily_downloads != avg or
addon.total_downloads != sum):
addon.update(average_daily_downloads=avg, total_downloads=sum)
except Addon.DoesNotExist:
# The processing input comes from metrics which might be out of
# date in regards to currently existing add-ons.
m = ("Got new download totals (total=%s,avg=%s) but the add-on"
"doesn't exist (%s)" % (sum, avg, pk))
task_log.debug(m)
def _change_last_updated(next):
# We jump through some hoops here to make sure we only change the add-ons
# that really need it, and to invalidate properly.
current = dict(Addon.objects.values_list('id', 'last_updated'))
changes = {}
for addon, last_updated in next.items():
try:
if current[addon] != last_updated:
changes[addon] = last_updated
except KeyError:
pass
if not changes:
return
log.debug('Updating %s add-ons' % len(changes))
# Update + invalidate.
qs = Addon.objects.no_cache().filter(id__in=changes).no_transforms()
for addon in qs:
addon.last_updated = changes[addon.id]
addon.save()
@write
def addon_last_updated():
next = {}
for q in Addon._last_updated_queries().values():
for addon, last_updated in q.values_list('id', 'last_updated'):
next[addon] = last_updated
_change_last_updated(next)
# Get anything that didn't match above.
other = (Addon.objects.no_cache().filter(last_updated__isnull=True)
.values_list('id', 'created'))
_change_last_updated(dict(other))
def update_addon_appsupport():
# Find all the add-ons that need their app support details updated.
newish = (Q(last_updated__gte=F('appsupport__created')) |
Q(appsupport__created__isnull=True))
# Search providers don't list supported apps.
has_app = Q(versions__apps__isnull=False) | Q(type=amo.ADDON_SEARCH)
has_file = Q(versions__files__status__in=amo.VALID_FILE_STATUSES)
good = Q(has_app, has_file) | Q(type=amo.ADDON_PERSONA)
ids = (Addon.objects.valid().distinct()
.filter(newish, good).values_list('id', flat=True))
task_log.info('Updating appsupport for %d new-ish addons.' % len(ids))
ts = [_update_appsupport.subtask(args=[chunk])
for chunk in chunked(ids, 20)]
TaskSet(ts).apply_async()
def update_all_appsupport():
from .tasks import update_appsupport
ids = sorted(set(AppSupport.objects.values_list('addon', flat=True)))
task_log.info('Updating appsupport for %s addons.' % len(ids))
for idx, chunk in enumerate(chunked(ids, 100)):
if idx % 10 == 0:
task_log.info('[%s/%s] Updating appsupport.'
% (idx * 100, len(ids)))
update_appsupport(chunk)
@task
def _update_appsupport(ids, **kw):
from .tasks import update_appsupport
task_log.info('Updating appsupport for %d of new-ish addons.' % len(ids))
update_appsupport(ids)
def hide_disabled_files():
# If an add-on or a file is disabled, it should be moved to
# GUARDED_ADDONS_PATH so it's not publicly visible.
q = (Q(version__addon__status=amo.STATUS_DISABLED) |
Q(version__addon__disabled_by_user=True))
ids = (File.objects.filter(q | Q(status=amo.STATUS_DISABLED))
.values_list('id', flat=True))
for chunk in chunked(ids, 300):
qs = File.objects.no_cache().filter(id__in=chunk)
qs = qs.select_related('version')
for f in qs:
f.hide_disabled_file()
def unhide_disabled_files():
# Files are getting stuck in /guarded-addons for some reason. This job
# makes sure guarded add-ons are supposed to be disabled.
log = olympia.core.logger.getLogger('z.files.disabled')
q = (Q(version__addon__status=amo.STATUS_DISABLED) |
Q(version__addon__disabled_by_user=True))
files = set(File.objects.filter(q | Q(status=amo.STATUS_DISABLED))
.values_list('version__addon', 'filename'))
for filepath in walkfiles(settings.GUARDED_ADDONS_PATH):
filepath = force_text(filepath)
addon, filename = filepath.split('/')[-2:]
if tuple([int(addon), filename]) not in files:
log.warning(u'File that should not be guarded: %s.', filepath)
try:
file_ = (File.objects.select_related('version__addon')
.get(version__addon=addon, filename=filename))
file_.unhide_disabled_file()
except File.DoesNotExist:
log.warning(u'File object does not exist for: %s.' % filepath)
except Exception:
log.error(u'Could not unhide file: %s.' % filepath,
exc_info=True)
def deliver_hotness():
"""
Calculate hotness of all add-ons.
a = avg(users this week)
b = avg(users three weeks before this week)
hotness = (a-b) / b if a > 1000 and b > 1 else 0
"""
frozen = set(f.id for f in FrozenAddon.objects.all())
all_ids = list((Addon.objects.exclude(type=amo.ADDON_PERSONA)
.filter(status__in=amo.VALID_ADDON_STATUSES)
.values_list('id', flat=True)))
now = datetime.now()
one_week = now - timedelta(days=7)
four_weeks = now - timedelta(days=28)
for ids in chunked(all_ids, 300):
addons = Addon.objects.no_cache().filter(id__in=ids).no_transforms()
ids = [a.id for a in addons if a.id not in frozen]
qs = (UpdateCount.objects.filter(addon__in=ids)
.values_list('addon').annotate(Avg('count')))
thisweek = dict(qs.filter(date__gte=one_week))
threeweek = dict(qs.filter(date__range=(four_weeks, one_week)))
for addon in addons:
this, three = thisweek.get(addon.id, 0), threeweek.get(addon.id, 0)
if this > 1000 and three > 1:
addon.update(hotness=(this - three) / float(three))
else:
addon.update(hotness=0)
# Let the database catch its breath.
time.sleep(10)
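# Worked example of the hotness formula above (illustrative numbers only): with
# a = 1500 average users this week and b = 1200 over the three preceding weeks,
# hotness = (1500 - 1200) / 1200 = 0.25; if either threshold (a > 1000, b > 1)
# is not met, hotness is reset to 0.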
def reindex_addons(index=None, addon_type=None):
from . import tasks
ids = Addon.unfiltered.values_list('id', flat=True)
if addon_type:
ids = ids.filter(type=addon_type)
ts = [tasks.index_addons.subtask(args=[chunk], kwargs=dict(index=index))
for chunk in chunked(sorted(list(ids)), 150)]
TaskSet(ts).apply_async()
def cleanup_image_files():
"""
Clean up all header and footer images files for themes.
    We use these images to asynchronously generate thumbnails with
tasks, here we delete images that are older than one day.
"""
log.info('Removing one day old temporary image files for themes.')
for folder in ('persona_footer', 'persona_header'):
root = os.path.join(settings.TMP_PATH, folder)
if not os.path.exists(root):
continue
for path in os.listdir(root):
full_path = os.path.join(root, path)
age = time.time() - os.stat(full_path).st_atime
if age > 60 * 60 * 24: # One day.
log.debug('Removing image file: %s, %dsecs old.' %
(full_path, age))
os.unlink(full_path)
|
the-stack_106_15479
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (c) 2020 Amirreza Shaban (from MMTM github https://github.com/haamoon/mmtm)
# Copyright (c) 2020 Anita Hu and Kevin Su
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import torch
import torch.nn as nn
import torch.nn.functional as F
from mfas.models.auxiliary.resnet.resnet import transform_input
import sys
sys.path.append('..')
from MSAF import MSAF
class MSAFNet(nn.Module):
def __init__(self, args):
super(MSAFNet, self).__init__()
self.visual = None
self.rgb_net_name = args.rgb_net
self.skeleton = None
self.final_pred = None
if args.rgb_net == 'resnet':
self.msaf1 = MSAF(in_channels=[1024, 128, 128], block_channel=64, block_dropout=0, lowest_atten=0.5,
reduction_factor=4)
self.msaf2 = MSAF(in_channels=[2048, 512], block_channel=256, block_dropout=0, lowest_atten=0.5,
reduction_factor=4)
elif args.rgb_net == 'i3d':
self.msaf1 = MSAF(in_channels=[832, 128, 128], block_channel=64, block_dropout=0, lowest_atten=0.5,
reduction_factor=4)
self.msaf2 = MSAF(in_channels=[1024, 512], block_channel=256, block_dropout=0, lowest_atten=0.5,
reduction_factor=4)
else:
print("RGB net not resnet or i3d")
raise NotImplementedError
self.return_interm_feas = False
self.return_both = False
if hasattr(args, 'fc_final_preds') and args.fc_final_preds:
print("Using fc final prediction")
self.final_pred = nn.Linear(60 * 2, 60)
def get_msaf_params(self):
parameters = []
if hasattr(self, "msaf1"):
parameters.append({'params': self.msaf1.parameters()})
if hasattr(self, "msaf2"):
parameters.append({'params': self.msaf2.parameters()})
return parameters
def get_visual_params(self):
parameters = [{'params': self.visual.parameters()}]
if hasattr(self, "msaf1"):
parameters.append({'params': self.msaf1.parameters()})
if hasattr(self, "msaf2"):
parameters.append({'params': self.msaf2.parameters()})
return parameters
def get_skeleton_params(self):
parameters = [{'params': self.skeleton.parameters()}]
if hasattr(self, "msaf1"):
parameters.append({'params': self.msaf1.parameters()})
if hasattr(self, "msaf2"):
parameters.append({'params': self.msaf2.parameters()})
return parameters
def set_visual_skeleton_nets(self, visual, skeleton, return_interm_feas=False):
self.visual = visual
self.skeleton = skeleton
self.return_interm_feas = return_interm_feas
def set_return_both(self, p):
self.return_both = p
def forward(self, tensor_tuple):
frames, skeleton = tensor_tuple[:2]
############################################## SKELETON INIT BLOCK
N, C, T, V, M = skeleton.size() # N0, C1, T2, V3, M4
motion = skeleton[:, :, 1::, :, :] - skeleton[:, :, 0:-1, :, :]
motion = motion.permute(0, 1, 4, 2, 3).contiguous().view(N, C * M, T - 1, V)
motion = F.interpolate(motion, size=(T, V), mode='bilinear',
align_corners=False).contiguous().view(N, C, M, T, V).permute(0, 1, 3, 4, 2)
# sk_logits = []
sk_hidden = []
for i in range(self.skeleton.num_person):
# position
# N0,C1,T2,V3 point-level
out1 = self.skeleton.conv1(skeleton[:, :, :, :, i])
out2 = self.skeleton.conv2(out1)
# N0,V1,T2,C3, global level
out2 = out2.permute(0, 3, 2, 1).contiguous()
out3 = self.skeleton.conv3(out2)
out_p = self.skeleton.conv4(out3)
# motion
# N0,T1,V2,C3 point-level
out1m = self.skeleton.conv1m(motion[:, :, :, :, i])
out2m = self.skeleton.conv2m(out1m)
# N0,V1,T2,C3, global level
out2m = out2m.permute(0, 3, 2, 1).contiguous()
out3m = self.skeleton.conv3m(out2m)
out_m = self.skeleton.conv4m(out3m)
# concat
out4 = torch.cat((out_p, out_m), dim=1)
sk_hidden.append([out1, out2, out3, out4])
# clean hidden representations
new_sk_hidden = []
for h1, h2 in zip(sk_hidden[0], sk_hidden[1]):
new_sk_hidden.append(torch.max(h1, h2))
out4_p0 = sk_hidden[0][-1]
out4_p1 = sk_hidden[1][-1]
out5_p0 = self.skeleton.conv5(out4_p0)
sk_hidden[0].append(out5_p0)
out5_p1 = self.skeleton.conv5(out4_p1)
sk_hidden[1].append(out5_p1)
out5_max = torch.max(out5_p0, out5_p1)
################################################ VISUAL INIT BLOCK
# Changing temporal and channel dim to fit the inflated resnet input requirements
B, T, W, H, C = frames.size()
frames = frames.view(B, 1, T, W, H, C)
frames = frames.transpose(1, -1)
frames = frames.view(B, C, T, W, H)
frames = frames.contiguous()
if self.rgb_net_name == 'resnet':
rgb_resnet = self.visual.cnn
# 5D -> 4D if 2D conv at the beginning
frames = transform_input(frames, rgb_resnet.input_dim, T=T)
# 1st conv
frames = rgb_resnet.conv1(frames)
frames = rgb_resnet.bn1(frames)
frames = rgb_resnet.relu(frames)
frames = rgb_resnet.maxpool(frames)
# 1st residual block
frames = transform_input(frames, rgb_resnet.layer1[0].input_dim, T=T)
frames = rgb_resnet.layer1(frames)
fm1 = frames
# 2nd residual block
frames = transform_input(frames, rgb_resnet.layer2[0].input_dim, T=T)
frames = rgb_resnet.layer2(frames)
fm2 = frames
# 3rd residual block
frames = transform_input(frames, rgb_resnet.layer3[0].input_dim, T=T)
frames = rgb_resnet.layer3(frames)
fm3 = frames
else:
fm2 = self.visual.features[:13](frames)
fm3 = self.visual.features[13:15](fm2)
###################################### FIRST msaf
# fm3, out5_p0 (first person), out5_p1 (second person) => fm3, out5_p0, out5_p1
fm3, out5_p0, out5_p1 = self.msaf1([fm3, out5_p0, out5_p1])
######################################
# skeleton
out6_p0 = self.skeleton.conv6(out5_p0)
sk_hidden[0].append(out6_p0)
out6_p1 = self.skeleton.conv6(out5_p1)
        sk_hidden[1].append(out6_p1)
out6_max = torch.max(out6_p0, out6_p1)
out7 = out6_max
# max out logits
out7 = out7.view(out7.size(0), -1)
out8 = self.skeleton.fc7(out7)
# visual
if self.rgb_net_name == 'resnet':
# 4th residual block
frames = transform_input(frames, rgb_resnet.layer4[0].input_dim, T=T)
frames = rgb_resnet.layer4(frames)
final_fm = transform_input(frames, rgb_resnet.out_dim, T=T)
else:
final_fm = self.visual.features[15](fm3)
########################################## SECOND msaf
# final_fm, out8 => final_fm, out8
final_fm, out8 = self.msaf2([final_fm, out8])
##########################################
# skeleton
outf = self.skeleton.fc8(out8)
new_sk_hidden.append(out5_max)
new_sk_hidden.append(out6_max)
new_sk_hidden.append(out7)
new_sk_hidden.append(out8)
t = outf
assert not (torch.isnan(t).any()) # find out nan in tensor
skeleton_features = [new_sk_hidden, outf]
# visual
if self.rgb_net_name == 'resnet':
# Temporal pooling
vis_out5 = self.visual.temporal_pooling(final_fm)
vis_out6 = self.visual.classifier(vis_out5)
visual_features = [fm1, fm2, fm3, final_fm, vis_out5, vis_out6]
else:
vis_out5 = self.visual.features[16:](final_fm)
if self.visual.spatial_squeeze:
vis_out5 = vis_out5.squeeze(3)
vis_out5 = vis_out5.squeeze(3)
vis_out6 = torch.mean(vis_out5, 2)
visual_features = [fm2, fm3, final_fm, vis_out5, vis_out6]
if self.return_interm_feas:
return visual_features, skeleton_features
### LATE FUSION
vis_pred = vis_out6
skeleton_pred = outf
if self.final_pred is None:
pred = (skeleton_pred + vis_pred) / 2
else:
pred = self.final_pred(torch.cat([skeleton_pred, vis_pred], dim=-1))
if self.return_both:
return vis_pred, skeleton_pred
return pred
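# Wiring sketch (hypothetical argument object and backbone networks; the real
# visual/skeleton models are constructed elsewhere in the training code):
#
#     args = argparse.Namespace(rgb_net='resnet')
#     model = MSAFNet(args)
#     model.set_visual_skeleton_nets(visual_net, skeleton_net)
#     pred = model((frames, skeleton))  # frames: BxTxWxHxC, skeleton: NxCxTxVxM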
|
the-stack_106_15483
|
"""
==========================================
Using cloudknot to run pyAFQ on AWS batch:
==========================================
One of the purposes of ``pyAFQ`` is to analyze large-scale openly-available datasets,
such as those in the `Human Connectome Project <https://www.humanconnectome.org/>`_.
To analyze these datasets, large amounts of compute are needed. One way to gain access
to massive computational power is by using cloud computing. Here, we will demonstrate
how to use ``pyAFQ`` in the Amazon Web Services cloud.
We will rely on the `AWS Batch Service <https://aws.amazon.com/batch/>`_ , and we will
submit work into AWS Batch using software that our group developed called
`Cloudknot <https://nrdg.github.io/cloudknot/>`_.
"""
##########################################################################
# Import cloudknot and set the AWS region within which computations will take place. Setting a
# region is important, because if the data that you are analyzing is stored in
# `AWS S3 <https://aws.amazon.com/s3/>`_ in a particular region, it is best to run the computation
# in that region as well. That is because AWS charges for inter-region transfer of data.
import cloudknot as ck
ck.set_region('us-east-1')
##########################################################################
# Define the function to use
# --------------------------
# ``Cloudknot`` uses the single program multiple data paradigm of computing. This means that the same
# function will be run on multiple different inputs. For example, a ``pyAFQ`` processing function run
# on multiple different subjects in a dataset.
# Below, we define the function that we will use. Notice that ``Cloudknot`` functions include the
# import statements of the dependencies used. This is necessary so that ``Cloudknot`` knows
# what dependencies to install into AWS Batch to run this function.
def afq_process_subject(subject):
# define a function that each job will run
# In this case, each process does a single subject
import logging
import s3fs
# all imports must be at the top of the function
# cloudknot installs the appropriate packages from pip
import AFQ.data as afqd
import AFQ.api as api
import AFQ.mask as afm
# set logging level to your choice
logging.basicConfig(level=logging.INFO)
log = logging.getLogger(__name__)
# Download the given subject to your local machine from s3
study_ixi = afqd.S3BIDSStudy(
"my_study",
"my_study_bucket",
"my_study_prefix",
subjects=[subject],
anon=False)
study_ixi.download(
"local_bids_dir",
include_derivs=["pipeline_name"])
# you can optionally provide your own segmentation file
# in this case, we look for a file with suffix 'seg'
# in the 'pipeline_name' pipeline,
# and we consider all non-zero labels to be a part of the brain
brain_mask = afm.LabelledMaskFile(
'seg', {'scope': 'pipeline_name'}, exclusive_labels=[0])
# define the api AFQ object
myafq = api.AFQ(
        "local_bids_dir",
dmriprep="pipeline_name",
brain_mask=brain_mask,
viz_backend='plotly', # this will generate both interactive html and GIFs
scalars=["dki_fa", "dki_md"])
# export_all runs the entire pipeline and creates many useful derivates
myafq.export_all()
# upload the results to some location on s3
myafq.upload_to_s3(
s3fs.S3FileSystem(),
f"my_study_bucket/my_study_prefix/derivatives/afq")
##########################################################################
# Here we provide an explicit list of subjects that we have selected to process.
# To instead randomly select 3 subjects without replacement, do:
# subjects = [[1], [2], [3]]
# see the docstring for S3BIDSStudy.__init__ for more information
subjects = [123456, 123457, 123458]
##########################################################################
# Defining a ``Knot`` instance
# ---------------------------------
# We instantiate a class instance of the :class:`ck.Knot` class. This object will be used to run your jobs.
# The object is instantiated with the `'AmazonS3FullAccess'` policy, so that it can write the results
# out to S3, into a bucket that you have write permissions on.
# Setting the `bid_percentage` key-word makes AWS Batch use
# `spot EC2 instances <https://aws.amazon.com/ec2/spot/>`_ for the computation.
# This can result in substantial cost-savings, as spot compute instances can cost
# much less than on-demand instances. However, note that spot instances can also
# be evicted, so if completing all of the work is very time-sensitive, do not set this
# key-word argument. Using the `image_github_installs` key-word argument will
# install pyAFQ from GitHub. You can also specify other forks and branches to
# install from.
knot = ck.Knot(
name='afq_process_subject-201009-0',
func=afq_process_subject,
base_image='python:3.8',
image_github_installs="https://github.com/yeatmanlab/pyAFQ.git",
pars_policies=('AmazonS3FullAccess',),
bid_percentage=100)
##########################################################################
# Launching the computation
# --------------------------------
# The :meth:`map` method of the :class:`Knot` object maps each of the inputs provided
# as a sequence onto the function and executes the function on each one of them in
# parallel.
result_futures = knot.map(subjects)
##########################################################################
# Once computations have started, you can call the following
# function to view the progress of jobs::
#
# knot.view_jobs()
#
# You can also view the status of a specific job::
#
# knot.jobs[0].status
##########################################################################
# When all jobs are finished, remember to use the :meth:`clobber` method to
# destroy all of the AWS resources created by the :class:`Knot`
result_futures.result()
knot.clobber(clobber_pars=True, clobber_repo=True, clobber_image=True)
##########################################################################
# In a second :class:`Knot` object, we use a function that takes the resulting profiles of each subject
# and combines them into one csv file.
def afq_combine_profiles(dummy_argument):
from AFQ.api import download_and_combine_afq_profiles
download_and_combine_afq_profiles(
"temp", "my_study_bucket", "my_study_prefix/derivatives/afq")
knot2 = ck.Knot(
name='afq_combine_subjects-201009-0',
func=afq_combine_profiles,
base_image='python:3.8',
image_github_installs="https://github.com/yeatmanlab/pyAFQ.git",
pars_policies=('AmazonS3FullAccess',),
bid_percentage=100)
##########################################################################
# This knot is called with a dummy argument, which is not used within the function itself. The
# `job_type` key-word argument is used to signal to ``Cloudknot`` that only one job is submitted
# rather than the default array of jobs.
result_futures2 = knot2.map(["dummy_argument"], job_type="independent")
result_futures2.result()
knot2.clobber(clobber_pars=True, clobber_repo=True, clobber_image=True)
|
the-stack_106_15487
|
from __future__ import print_function
import matplotlib
matplotlib.use('tkAgg')
import matplotlib.pyplot as plt
from scipy.sparse import csr_matrix
from dolfin import *
import scipy
import numpy as np
import healpy as hp
from deepsphere import utils
# Test for PETSc and SLEPc
if not has_linear_algebra_backend("PETSc"):
print("DOLFIN has not been configured with PETSc. Exiting.")
exit()
if not has_slepc():
print("DOLFIN has not been configured with SLEPc. Exiting.")
exit()
spectral_content = dict()
nsides = [8]
for nside in nsides:
lmax = 3 * nside - 1
N = np.cumsum(np.arange(1, 2*lmax+2, 2))[-1]
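    # N = (lmax + 1)**2 is the number of spherical harmonics with degree l <= lmax
    # (the sum of 2*l + 1 over l). Assuming the HEALPix mesh approximates the unit
    # sphere, the generalized eigenproblem below approximates the Laplace-Beltrami
    # spectrum, whose eigenvalues are l*(l+1) with multiplicity 2*l + 1.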
# Define mesh, function space
mesh = Mesh("09_meshes/HEALPix_{}.xml".format(nside))
global_normal = Expression(("x[0]", "x[1]", "x[2]"), degree=1)
mesh.init_cell_orientations(global_normal)
V = FunctionSpace(mesh, "Lagrange", 1)
# Define basis and bilinear form
u = TrialFunction(V)
v = TestFunction(V)
a = dot(grad(u), grad(v))*dx
b = dot(u, v)*dx
# Assemble stiffness form
A = PETScMatrix()
B = PETScMatrix()
assemble(a, tensor=A)
assemble(b, tensor=B)
# Create eigensolver
eigensolver = SLEPcEigenSolver(A, B)
eigensolver.parameters['spectrum'] = 'target real'
eigensolver.parameters['tolerance'] = 1.e-3
eigensolver.parameters['maximum_iterations'] = 100
# Compute all eigenvalues of A x = \lambda x
print("Computing eigenvalues. This can take a minute.")
eigensolver.solve(N)
print('Done. Extracting results...')
eig_vectors = np.ndarray((12*nside**2, N), dtype='float')
eig_values = np.ndarray(N, dtype='float')
ne = 16
for i in range(ne):
# Extract largest (first) eigenpair
r, c, rx, cx = eigensolver.get_eigenpair(i)
# ----- keeping the dof ordering -----
eig_vectors[:, i] = np.asarray(rx)
eig_values[i] = r
for ind in range(ne):
hp.mollview(eig_vectors[:, ind],
title='Eigenvector {}'.format(ind),
nest=False,
sub=(ne//4, 4, ind+1),
max=np.max(np.abs(eig_vectors[:, :ne])),
min=-np.max(np.abs(eig_vectors[:, :ne])),
cbar=False,
rot=(0,0,0))
with utils.HiddenPrints():
hp.graticule();
plt.show()
# ---------- reordering ----------
reordered_mask = np.load('15_reordering_masks/reordering_mask_{}.npy'.format(nside))
eig_vectors = eig_vectors[reordered_mask]
# --------------------------------
ne = 16
for ind in range(ne):
hp.mollview(eig_vectors[:, ind],
title='Eigenvector {}'.format(ind),
nest=False,
sub=(ne//4, 4, ind+1),
max=np.max(np.abs(eig_vectors[:, :ne])),
min=-np.max(np.abs(eig_vectors[:, :ne])),
cbar=False,
rot=(0,0,0))
with utils.HiddenPrints():
hp.graticule();
plt.show()
|
the-stack_106_15490
|
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import absolute_import
import os
import shutil
import uuid
import pytest
from ..... import invoke_sm_helper_function
from .recordio_utils import build_record_file, build_single_record_file
from sagemaker import TrainingInput
from sagemaker.tensorflow import TensorFlow
from ...integration.utils import processor, py_version, unique_name_from_base # noqa: F401
from .timeout import timeout
DIMENSION = 5
def make_test_data(directory, name, num_files, num_records, dimension, sagemaker_session):
if not os.path.exists('test-data'):
os.makedirs('test-data')
for i in range(num_files):
if num_records > 1:
build_record_file(os.path.join(directory, name + str(i)),
num_records=num_records, dimension=dimension)
else:
build_single_record_file(os.path.join(directory, name + str(i)),
dimension=dimension)
return sagemaker_session.upload_data(path=os.path.join(directory),
key_prefix='pipemode-{}-files'.format(name))
def multi_records_test_data(sagemaker_session):
test_data = 'test-data-' + str(uuid.uuid4())
os.makedirs(test_data)
s3_url = make_test_data(
directory=test_data,
name='multi',
num_files=1,
num_records=1000,
dimension=DIMENSION,
sagemaker_session=sagemaker_session)
shutil.rmtree(test_data)
return s3_url
def single_record_test_data(sagemaker_session):
test_data = 'test-data-' + str(uuid.uuid4())
os.makedirs(test_data)
s3_url = make_test_data(
directory=test_data,
name='single',
num_files=100,
num_records=1,
dimension=DIMENSION,
sagemaker_session=sagemaker_session)
shutil.rmtree(test_data)
return s3_url
def run_test(ecr_image, sagemaker_session, instance_type, framework_version, test_data,
record_wrapper_type=None):
source_path = os.path.join(os.path.dirname(__file__), '..', '..', 'resources', 'pipemode')
script = os.path.join(source_path, 'pipemode.py')
estimator = TensorFlow(entry_point=script,
role='SageMakerRole',
instance_type=instance_type,
instance_count=1,
sagemaker_session=sagemaker_session,
image_uri=ecr_image,
framework_version=framework_version,
input_mode='Pipe',
hyperparameters={'dimension': DIMENSION})
input = TrainingInput(s3_data=test_data(sagemaker_session),
distribution='FullyReplicated',
record_wrapping=record_wrapper_type,
input_mode='Pipe')
with timeout(minutes=20):
estimator.fit({'elizabeth': input},
job_name=unique_name_from_base('test-sagemaker-pipemode'))
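# The two test cases below exercise both data layouts produced above: many files
# holding a single record each (wrapped in RecordIO by the first test) and a
# single file holding many records.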
@pytest.mark.integration("pipemode")
@pytest.mark.model("N/A")
def test_single_record(ecr_image, sagemaker_regions, instance_type, framework_version):
invoke_sm_helper_function(ecr_image, sagemaker_regions, run_test,
instance_type,
framework_version,
single_record_test_data,
'RecordIO'
)
@pytest.mark.integration("pipemode")
@pytest.mark.model("N/A")
def test_multi_records(ecr_image, sagemaker_regions, instance_type, framework_version):
invoke_sm_helper_function(ecr_image, sagemaker_regions, run_test,
instance_type,
framework_version,
multi_records_test_data
)
|
the-stack_106_15492
|
# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
# ex: set sts=4 ts=4 sw=4 noet:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
""""""
from datalad.ui.utils import get_console_width
__docformat__ = 'restructuredtext'
import logging
lgr = logging.getLogger('datalad.cmdline')
lgr.log(5, "Importing cmdline.main")
import argparse
from collections import defaultdict
import sys
import textwrap
import os
from six import text_type
import datalad
from datalad.cmdline import helpers
from datalad.support.exceptions import InsufficientArgumentsError
from datalad.support.exceptions import IncompleteResultsError
from datalad.support.exceptions import CommandError
from .helpers import strip_arg_from_argv
from ..utils import setup_exceptionhook, chpwd
from ..utils import assure_unicode
from ..utils import on_msys_tainted_paths
from ..dochelpers import exc_str
def _license_info():
return """\
Copyright (c) 2013-2018 DataLad developers
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
class ArgumentParserDisableAbbrev(argparse.ArgumentParser):
# Don't accept abbreviations for long options. With py3.5 and above, we
# could just use allow_abbrev=False.
#
# Modified from the solution posted at
# https://bugs.python.org/issue14910#msg204678
def _get_option_tuples(self, option_string):
chars = self.prefix_chars
if option_string[0] in chars and option_string[1] in chars:
# option_string is a long flag. Disable abbreviation.
return []
return super(ArgumentParserDisableAbbrev, self)._get_option_tuples(
option_string)
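# Behavioural sketch (illustrative): with a long option '--output-format'
# registered, a stock ArgumentParser would accept the abbreviation '--output',
# whereas this subclass returns no option tuples for long flags, so argparse
# reports the abbreviation as an unrecognized argument instead.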
# TODO: OPT look into making setup_parser smarter to become faster
# Now it seems to take up to 200ms to do all the parser setup
# even though it might not be necessary to know about all the commands etc.
# I wondered if it could somehow decide on what commands to worry about etc
# by going through sys.args first
def setup_parser(
cmdlineargs,
formatter_class=argparse.RawDescriptionHelpFormatter,
return_subparsers=False):
lgr.log(5, "Starting to setup_parser")
# delay since it can be a heavy import
from ..interface.base import dedent_docstring, get_interface_groups, \
get_cmdline_command_name, alter_interface_docs_for_cmdline, \
load_interface, get_cmd_doc
# setup cmdline args parser
parts = {}
# main parser
parser = ArgumentParserDisableAbbrev(
fromfile_prefix_chars=None,
# usage="%(prog)s ...",
description=dedent_docstring("""\
Comprehensive data management solution
            DataLad provides a unified data distribution system built on Git
            and Git-annex. DataLad command line tools allow one to manipulate (obtain,
create, update, publish, etc.) datasets and provide a comprehensive
toolbox for joint management of data and code. Compared to Git/annex
            it primarily extends their functionality to transparently and
simultaneously work with multiple inter-related repositories."""),
epilog='"Be happy!"',
formatter_class=formatter_class,
add_help=False)
# common options
helpers.parser_add_common_opt(parser, 'log_level')
helpers.parser_add_common_opt(parser, 'pbs_runner')
helpers.parser_add_common_opt(parser, 'change_path')
helpers.parser_add_common_opt(
parser,
'version',
version='datalad %s\n' % datalad.__version__)
if __debug__:
parser.add_argument(
'--dbg', action='store_true', dest='common_debug',
help="enter Python debugger when uncaught exception happens")
parser.add_argument(
'--idbg', action='store_true', dest='common_idebug',
help="enter IPython debugger when uncaught exception happens")
parser.add_argument(
'-c', action='append', dest='cfg_overrides', metavar='KEY=VALUE',
help="""configuration variable setting. Overrides any configuration
read from a file, but is potentially overridden itself by configuration
variables in the process environment.""")
parser.add_argument(
'-f', '--output-format', dest='common_output_format',
default='default',
type=assure_unicode,
metavar="{default,json,json_pp,tailored,'<template>'}",
help="""select format for returned command results. 'default' give one line
per result reporting action, status, path and an optional message;
'json' renders a JSON object with all properties for each result (one per
line); 'json_pp' pretty-prints JSON spanning multiple lines; 'tailored'
enables a command-specific rendering style that is typically
tailored to human consumption (no result output otherwise),
'<template>' reports any value(s) of any result properties in any format
indicated by the template (e.g. '{path}'; compare with JSON
output for all key-value choices). The template syntax follows the Python
"format() language". It is possible to report individual
dictionary values, e.g. '{metadata[name]}'. If a 2nd-level key contains
a colon, e.g. 'music:Genre', ':' must be substituted by '#' in the template,
like so: '{metadata[music#Genre]}'.""")
parser.add_argument(
'--report-status', dest='common_report_status',
choices=['success', 'failure', 'ok', 'notneeded', 'impossible', 'error'],
help="""constrain command result report to records matching the given
status. 'success' is a synonym for 'ok' OR 'notneeded', 'failure' stands
for 'impossible' OR 'error'.""")
parser.add_argument(
'--report-type', dest='common_report_type',
choices=['dataset', 'file'],
action='append',
help="""constrain command result report to records matching the given
type. Can be given more than once to match multiple types.""")
parser.add_argument(
'--on-failure', dest='common_on_failure',
choices=['ignore', 'continue', 'stop'],
# no default: better be configure per-command
help="""when an operation fails: 'ignore' and continue with remaining
operations, the error is logged but does not lead to a non-zero exit code
of the command; 'continue' works like 'ignore', but an error causes a
non-zero exit code; 'stop' halts on first failure and yields non-zero exit
code. A failure is any result with status 'impossible' or 'error'.""")
parser.add_argument(
'--proc-pre', dest='common_proc_pre',
nargs='+',
action='append',
metavar=('<PROCEDURE NAME>', 'ARGS'),
help="""Dataset procedure to run before the main command (see run-procedure
command for details). This option can be given more than once to run
multiple procedures in the order in which they were given.
It is important to specify the target dataset via the --dataset argument
of the main command."""),
parser.add_argument(
'--proc-post', dest='common_proc_post',
nargs='+',
action='append',
metavar=('<PROCEDURE NAME>', 'ARGS'),
help="""Like --proc-pre, but procedures are executed after the main command
has finished."""),
parser.add_argument(
'--cmd', dest='_', action='store_true',
help="""syntactical helper that can be used to end the list of global
command line options before the subcommand label. Options like
--proc-pre can take an arbitrary number of arguments and may require
to be followed by a single --cmd in order to enable identification
of the subcommand.""")
# yoh: atm we only dump to console. Might adopt the same separation later on
# and for consistency will call it --verbose-level as well for now
# log-level is set via common_opts ATM
# parser.add_argument('--log-level',
# choices=('critical', 'error', 'warning', 'info', 'debug'),
# dest='common_log_level',
# help="""level of verbosity in log files. By default
# everything, including debug messages is logged.""")
#parser.add_argument('-l', '--verbose-level',
# choices=('critical', 'error', 'warning', 'info', 'debug'),
# dest='common_verbose_level',
# help="""level of verbosity of console output. By default
# only warnings and errors are printed.""")
# Before doing anything additional and possibly expensive see may be that
# we have got the command already
need_single_subparser = False if return_subparsers else None
fail_handler = (lambda *a, **kw: True) \
if return_subparsers else fail_with_short_help
try:
parsed_args, unparsed_args = parser._parse_known_args(
cmdlineargs[1:], argparse.Namespace())
if not unparsed_args:
fail_handler(parser, msg="too few arguments", exit_code=2)
lgr.debug("Command line args 1st pass. Parsed: %s Unparsed: %s",
parsed_args, unparsed_args)
except Exception as exc:
lgr.debug("Early parsing failed with %s", exc_str(exc))
need_single_subparser = False
unparsed_args = cmdlineargs[1:] # referenced before assignment otherwise
interface_groups = get_interface_groups(include_plugins=True)
# First unparsed could be either unknown option to top level "datalad"
# or a command. Among unknown could be --help/--help-np which would
# need to be dealt with
unparsed_arg = unparsed_args[0] if unparsed_args else None
if need_single_subparser is not None \
or unparsed_arg in ('--help', '--help-np', '-h'):
need_single_subparser = False
add_entrypoints_to_interface_groups(interface_groups)
elif unparsed_arg.startswith('-'): # unknown option
fail_with_short_help(parser,
msg="unrecognized argument %s" % unparsed_arg,
exit_code=2)
# if we could get a list of options known to parser,
# we could suggest them
# known=get_all_options(parser), provided=unparsed_arg)
else: # the command to handle
known_commands = get_commands_from_groups(interface_groups)
if unparsed_arg not in known_commands:
# need to load all the extensions and try again
add_entrypoints_to_interface_groups(interface_groups)
known_commands = get_commands_from_groups(interface_groups)
if unparsed_arg not in known_commands:
# check if might be coming from known extensions
from ..interface import _known_extension_commands
extension_commands = {
c: e
for e, commands in _known_extension_commands.items()
for c in commands
}
hint = None
if unparsed_arg in extension_commands:
hint = "Command %s is provided by (not installed) extension %s." \
% (unparsed_arg, extension_commands[unparsed_arg])
fail_with_short_help(
parser,
hint=hint,
provided=unparsed_arg,
known=list(known_commands.keys()) + list(extension_commands.keys())
)
if need_single_subparser is None:
need_single_subparser = unparsed_arg
# --help specification was delayed since it causes immediate printout of
# --help output before we setup --help for each command
helpers.parser_add_common_opt(parser, 'help')
grp_short_descriptions = defaultdict(list)
# create subparser, use module suffix as cmd name
subparsers = parser.add_subparsers()
for group_name, _, _interfaces \
in sorted(interface_groups, key=lambda x: x[1]):
for _intfspec in _interfaces:
cmd_name = get_cmdline_command_name(_intfspec)
if need_single_subparser and cmd_name != need_single_subparser:
continue
_intf = load_interface(_intfspec)
if _intf is None:
# TODO(yoh): add doc why we could skip this one... makes this
# loop harder to extract into a dedicated function
continue
# deal with optional parser args
if hasattr(_intf, 'parser_args'):
parser_args = _intf.parser_args
else:
parser_args = dict(formatter_class=formatter_class)
# use class description, if no explicit description is available
intf_doc = get_cmd_doc(_intf)
parser_args['description'] = alter_interface_docs_for_cmdline(
intf_doc)
subparser = subparsers.add_parser(cmd_name, add_help=False, **parser_args)
# our own custom help for all commands
helpers.parser_add_common_opt(subparser, 'help')
# let module configure the parser
_intf.setup_parser(subparser)
# logger for command
# configure 'run' function for this command
plumbing_args = dict(
func=_intf.call_from_parser,
logger=logging.getLogger(_intf.__module__),
subparser=subparser)
if hasattr(_intf, 'result_renderer_cmdline'):
plumbing_args['result_renderer'] = _intf.result_renderer_cmdline
subparser.set_defaults(**plumbing_args)
parts[cmd_name] = subparser
# store short description for later
sdescr = getattr(_intf, 'short_description',
parser_args['description'].split('\n')[0])
grp_short_descriptions[group_name].append((cmd_name, sdescr))
# create command summary
if '--help' in cmdlineargs or '--help-np' in cmdlineargs:
parser.description = get_description_with_cmd_summary(
grp_short_descriptions,
interface_groups,
parser.description)
parts['datalad'] = parser
lgr.log(5, "Finished setup_parser")
if return_subparsers:
return parts
else:
return parser
def fail_with_short_help(parser=None,
msg=None,
known=None, provided=None,
hint=None,
exit_code=1,
what="command",
out=None):
"""Generic helper to fail
with short help possibly hinting on what was intended if `known`
were provided
"""
out = out or sys.stderr
if msg:
out.write("error: %s\n" % msg)
if not known:
if parser:
# just to appear in print_usage also consistent with --help output
parser.add_argument("command [command-opts]")
parser.print_usage(file=out)
else:
out.write(
"datalad: Unknown %s %r. See 'datalad --help'.\n\n"
% (what, provided,))
if provided not in known:
import difflib
suggestions = difflib.get_close_matches(provided, known)
if suggestions:
out.write(
"Did you mean one of these?\n %s\n"
% "\n ".join(suggestions))
# Too noisy
# sys.stderr.write(" All known %ss: %s\n"
# % (what, ", ".join(sorted(known))))
if hint:
out.write("Hint: %s\n" % hint)
raise SystemExit(exit_code)
def get_description_with_cmd_summary(grp_short_descriptions, interface_groups,
parser_description):
from ..interface.base import dedent_docstring
from ..interface.base import get_cmd_summaries
lgr.debug("Generating detailed description for the parser")
console_width = get_console_width()
cmd_summary = get_cmd_summaries(grp_short_descriptions, interface_groups,
width=console_width)
# we need one last formal section to not have the trailed be
# confused with the last command group
cmd_summary.append('\n*General information*\n')
detailed_description = '%s\n%s\n\n%s' \
% (parser_description,
'\n'.join(cmd_summary),
textwrap.fill(dedent_docstring("""\
Detailed usage information for individual commands is
available via command-specific --help, i.e.:
datalad <command> --help"""),
console_width - 5,
initial_indent='',
subsequent_indent=''))
return detailed_description
def get_commands_from_groups(groups):
"""Get a dictionary of command: interface_spec"""
from ..interface.base import get_cmdline_command_name
return {
get_cmdline_command_name(_intfspec): _intfspec
for _, _, _interfaces in groups
for _intfspec in _interfaces
}
def add_entrypoints_to_interface_groups(interface_groups):
lgr.debug("Loading entrypoints")
from pkg_resources import iter_entry_points # delay expensive import
for ep in iter_entry_points('datalad.extensions'):
lgr.debug(
'Loading entrypoint %s from datalad.extensions for docs building',
ep.name)
try:
spec = ep.load()
interface_groups.append((ep.name, spec[0], spec[1]))
lgr.debug('Loaded entrypoint %s', ep.name)
except Exception as e:
lgr.warning('Failed to load entrypoint %s: %s', ep.name, exc_str(e))
continue
def _fix_datalad_ri(s):
"""Fixup argument if it was a DataLadRI and had leading / removed
See gh-2643
"""
if s.startswith('//') and (len(s) == 2 or (len(s) > 2 and s[2] != '/')):
lgr.info(
"Changing %s back to /%s as it was probably changed by MINGW/MSYS, "
"see http://www.mingw.org/wiki/Posix_path_conversion", s, s)
return "/" + s
return s
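# Illustrative examples of the fix-up: '//data.example.org/ds' is restored to
# '///data.example.org/ds', while inputs such as '///already/fine' or
# 'http://example.org' are returned unchanged.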
def main(args=None):
lgr.log(5, "Starting main(%r)", args)
args = args or sys.argv
if on_msys_tainted_paths:
# Possibly present DataLadRIs were stripped of a leading /
args = [_fix_datalad_ri(s) for s in args]
# PYTHON_ARGCOMPLETE_OK
parser = setup_parser(args)
try:
import argcomplete
argcomplete.autocomplete(parser)
except ImportError:
pass
# parse cmd args
lgr.debug("Parsing known args among %s", repr(args))
cmdlineargs, unparsed_args = parser.parse_known_args(args[1:])
has_func = hasattr(cmdlineargs, 'func') and cmdlineargs.func is not None
if unparsed_args:
if has_func and cmdlineargs.func.__self__.__name__ != 'Export':
lgr.error('unknown argument{}: {}'.format(
's' if len(unparsed_args) > 1 else '',
unparsed_args if len(unparsed_args) > 1 else unparsed_args[0]))
cmdlineargs.subparser.print_usage()
sys.exit(1)
else:
# store all unparsed arguments
cmdlineargs.datalad_unparsed_args = unparsed_args
# to possibly be passed into PBS scheduled call
args_ = args or sys.argv
if cmdlineargs.cfg_overrides is not None:
overrides = dict([
(o.split('=')[0], '='.join(o.split('=')[1:]))
for o in cmdlineargs.cfg_overrides])
datalad.cfg.overrides.update(overrides)
# enable overrides
datalad.cfg.reload(force=True)
if cmdlineargs.change_path is not None:
from .common_args import change_path as change_path_opt
for path in cmdlineargs.change_path:
chpwd(path)
args_ = strip_arg_from_argv(args_, path, change_path_opt[1])
ret = None
if cmdlineargs.pbs_runner:
from .helpers import run_via_pbs
from .common_args import pbs_runner as pbs_runner_opt
args_ = strip_arg_from_argv(args_, cmdlineargs.pbs_runner, pbs_runner_opt[1])
# run the function associated with the selected command
run_via_pbs(args_, cmdlineargs.pbs_runner)
elif has_func:
if cmdlineargs.common_debug or cmdlineargs.common_idebug:
# so we could see/stop clearly at the point of failure
setup_exceptionhook(ipython=cmdlineargs.common_idebug)
from datalad.interface.base import Interface
Interface._interrupted_exit_code = None
ret = cmdlineargs.func(cmdlineargs)
else:
# otherwise - guard and only log the summary. Postmortem is not
# as convenient if being caught in this ultimate except
try:
ret = cmdlineargs.func(cmdlineargs)
except InsufficientArgumentsError as exc:
# if the func reports inappropriate usage, give help output
lgr.error('%s (%s)' % (exc_str(exc), exc.__class__.__name__))
cmdlineargs.subparser.print_usage(sys.stderr)
sys.exit(2)
except IncompleteResultsError as exc:
# rendering for almost all commands now happens 'online'
# hence we are no longer attempting to render the actual
                # results in an IncompleteResultsError, but rather trust that
# this happened before
# in general we do not want to see the error again, but
# present in debug output
lgr.debug('could not perform all requested actions: %s',
exc_str(exc))
sys.exit(1)
except CommandError as exc:
# behave as if the command ran directly, importantly pass
# exit code as is
if exc.msg:
msg = exc.msg.encode() if isinstance(exc.msg, text_type) else exc.msg
os.write(2, msg + b"\n")
if exc.stdout:
os.write(1, exc.stdout.encode() if isinstance(exc.stdout, text_type) else exc.stdout)
if exc.stderr:
os.write(2, exc.stderr.encode() if isinstance(exc.stderr, text_type) else exc.stderr)
# We must not exit with 0 code if any exception got here but
# had no code defined
sys.exit(exc.code if exc.code is not None else 1)
except Exception as exc:
lgr.error('%s (%s)' % (exc_str(exc), exc.__class__.__name__))
sys.exit(1)
else:
# just let argparser spit out its error, since there is smth wrong
parser.parse_args(args)
# if that one didn't puke -- we should
parser.print_usage()
lgr.error("Please specify the command")
sys.exit(2)
try:
if hasattr(cmdlineargs, 'result_renderer'):
cmdlineargs.result_renderer(ret, cmdlineargs)
except Exception as exc:
lgr.error("Failed to render results due to %s", exc_str(exc))
sys.exit(1)
lgr.log(5, "Done importing cmdline.main")
|
the-stack_106_15493
|
"""Basic functional stuff, I guess.
"""
import logging
import os
LOG = logging.getLogger(__name__)
def getIpdModelFilename(ipdModel, majorityChem, paramsPath):
"""
ipdModel: str
majorityChem: str
"""
# In order of precedence they are:
# 1. Explicit path passed to --ipdModel
# 2. In-order through each directory listed in --paramsPath
if ipdModel:
LOG.info("Using passed-in kinetics model: {!r}".format(ipdModel))
return ipdModel
if majorityChem == 'unknown':
majorityChem = "P6-C4"
#msg = "Chemistry cannot be identified---cannot perform kinetic analysis"
#LOG.error(msg)
#raise Exception(msg)
# Route any pre-Kiwi / pre-SSB Sequel chemistries to Seabiscuit training
if majorityChem.startswith("S/P1") or majorityChem.startswith("S/P2"):
majorityChem = "SP2-C2"
# Route any post-SSB Sequel chemistries to Kiwi training (for now)
elif majorityChem.startswith("S/"):
majorityChem = "SP3-C3"
# '/' is not a valid character in a file, unescaped--remove it
majorityChem = majorityChem.replace("/", "")
# go through each paramsPath in-order, checking if the model exists there
    # or not
for paramPath in paramsPath:
ipdModel = os.path.join(paramPath, majorityChem + ".npz.gz")
if os.path.isfile(ipdModel):
LOG.info(
"Using chemistry-matched kinetics model: {!r}".format(ipdModel))
return ipdModel
msg = "No kinetics model available for this chemistry ({!r}) on paramsPath {!r}".format(
ipdModel, paramsPath)
LOG.error(msg)
raise Exception(msg)
def getResourcePathSpec(default_dir):
"""Create list of [${SMRT_CHEMISTRY_BUNDLE_DIR}/kineticsTools, {default_dir}].
Return colon-separated string.
"""
pths = []
smrtChemBundlePath = os.environ.get("SMRT_CHEMISTRY_BUNDLE_DIR", None)
if smrtChemBundlePath:
LOG.info("found SMRT_CHEMISTRY_BUNDLE_DIR, prepending to default paramsPath")
pths.append(os.path.join(smrtChemBundlePath, "kineticsTools"))
pths.append(default_dir)
return ':'.join(pths)
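# Example (hypothetical environment): with SMRT_CHEMISTRY_BUNDLE_DIR=/opt/bundle,
# getResourcePathSpec('/usr/share/kineticsTools') returns
# '/opt/bundle/kineticsTools:/usr/share/kineticsTools'; without the variable it
# returns just '/usr/share/kineticsTools'.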
|
the-stack_106_15494
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import re
import pytest
import asdf
from asdf.tests import helpers
def make_complex_asdf(string):
yaml = """
a: !core/complex-1.0.0
{}
""".format(string)
return helpers.yaml_to_asdf(yaml)
@pytest.mark.parametrize('invalid', [
    '3 + 4i', '3+-4i', '3-+4i', '3i+4i', 'X3+4iX', '3+X4i', '3+4', '3i+4',
    '3+4z', '3.+4i', '3+4.i', '3e-4.0+4i', '3+4e4.0i', ''
])
def test_invalid_complex(invalid):
with pytest.raises(asdf.ValidationError):
with asdf.open(make_complex_asdf(invalid)):
pass
@pytest.mark.parametrize('valid', [
'3+4j', '(3+4j)', '.3+4j', '3+.4j', '3e10+4j', '3e-10+4j', '3+4e10j',
'3.0+4j', '3+4.0j', '3.0+4.0j', '3+4e-10j', '3+4J', '3+4i', '3+4I', 'inf',
'inf+infj', 'inf+infi', 'infj', 'infi', 'INFi', 'INFI', '3+infj', 'inf+4j',
])
def test_valid_complex(valid):
with asdf.open(make_complex_asdf(valid)) as af:
assert af.tree['a'] == complex(re.sub(r'[iI]$', r'j', valid))
@pytest.mark.parametrize('valid', [
'nan', 'nan+nanj', 'nan+nani', 'nanj', 'nani', 'NANi', 'NANI', '3+nanj',
'nan+4j'
])
def test_valid_nan_complex(valid):
with asdf.open(make_complex_asdf(valid)) as af:
# Don't compare values since NANs are never equal
pass
def test_roundtrip(tmpdir):
tree = {
'a': 0+0j,
'b': 1+1j,
'c': -1+1j,
'd': -1-1j
}
helpers.assert_roundtrip_tree(tree, tmpdir)
|
the-stack_106_15495
|
'''
The script does inference (semantic segmentation) on videostream from camera.
Just run the script and watch output in cv2.namedWindow.
Make sure you have trained model and set an existing checkpoint filename as a model_filename
To stop the script press the "q" button.
Created on Sun Sep 15 19:53:37 2019
@author: Pinaxe
'''
from os import path as osp
import numpy as np
import cv2
from model import DeepLab
from utils import ( save_load_means, subtract_channel_means, label_to_color_image)
if __name__ == '__main__':
cap = cv2.VideoCapture(0)
cv2.namedWindow('frame', cv2.WINDOW_NORMAL)
cv2.namedWindow('reslt', cv2.WINDOW_NORMAL)
model_filename = 'data/models/deeplab/resnet_101_voc2012/resnet_101_0.3685.ckpt'
channel_means = save_load_means(means_filename='channel_means.npz',image_filenames=None, recalculate=False)
deeplab = DeepLab('resnet_101', training=False)
deeplab.load(model_filename)
while(True):
_, frame = cap.read()
cv2.imshow('frame', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
image=frame
image_input = subtract_channel_means(image=image, channel_means=channel_means)
output = deeplab.test(inputs=[image_input], target_height=image.shape[0], target_width=image.shape[1])[0]
img=label_to_color_image(np.argmax(output, axis=-1))
img=img.astype(np.uint8)
cv2.imshow('reslt', img)
cap.release()
cv2.destroyAllWindows()
deeplab.close()
|
the-stack_106_15497
|
# Time: O(n + l)
# Space: O(h + l)
# Definition for singly-linked list.
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
# Definition for a binary tree node.
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
# kmp solution
class Solution(object):
def isSubPath(self, head, root):
"""
:type head: ListNode
:type root: TreeNode
:rtype: bool
"""
def getPrefix(head):
pattern, prefix = [head.val], [-1]
j = -1
node = head.next
while node:
while j+1 and pattern[j+1] != node.val:
j = prefix[j]
if pattern[j+1] == node.val:
j += 1
pattern.append(node.val)
prefix.append(j)
node = node.next
return pattern, prefix
def dfs(pattern, prefix, root, j):
if not root:
return False
while j+1 and pattern[j+1] != root.val:
j = prefix[j]
if pattern[j+1] == root.val:
j += 1
if j+1 == len(pattern):
return True
return dfs(pattern, prefix, root.left, j) or \
dfs(pattern, prefix, root.right, j)
if not head:
return True
pattern, prefix = getPrefix(head)
return dfs(pattern, prefix, root, -1)
# Time: O(n * min(h, l))
# Space: O(h)
# dfs solution
class Solution2(object):
def isSubPath(self, head, root):
"""
:type head: ListNode
:type root: TreeNode
:rtype: bool
"""
def dfs(head, root):
if not head:
return True
if not root:
return False
return root.val == head.val and \
(dfs(head.next, root.left) or
dfs(head.next, root.right))
if not head:
return True
if not root:
return False
return dfs(head, root) or \
self.isSubPath(head, root.left) or \
self.isSubPath(head, root.right)
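# Illustrative usage sketch (not part of the original solutions): the list 1->4 is a downward
# path 1 -> 4 in the tree below, so both solutions return True.
# head = ListNode(1); head.next = ListNode(4)
# root = TreeNode(1); root.left = TreeNode(4)
# assert Solution().isSubPath(head, root) and Solution2().isSubPath(head, root)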
|
the-stack_106_15498
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
import logging
log = logging.getLogger(__name__)
import numpy as np
from ...utils import print_progress
from ..matrix import CostMatrix
from ..cost_function import AbstractCostFunction
from ..cost_function.brownian import BrownianLinkCostFunction
from ..cost_function.diagonal import DiagonalCostFunction
from ..cost_function.directed import BasicDirectedLinkCostFunction
from . import AbstractSolver
__all__ = []
class ByFrameSolver(AbstractSolver):
"""
Parameters
----------
trajs : :class:`pandas.DataFrame`
    cost_functions : dict of :class:`AbstractCostFunction` (keys 'link', 'birth', 'death')
"""
def __init__(self, trajs, cost_functions, coords=['x', 'y', 'z']):
super(self.__class__, self).__init__(trajs)
self.t_in = 0
self.t_out = 0
self.coords = coords
self.trajs.check_trajs_df_structure(index=['t_stamp', 'label'],
columns=['t'] + coords)
self.link_cf = cost_functions['link']
self.check_cost_function_type(self.link_cf, AbstractCostFunction)
self.birth_cf = cost_functions['birth']
self.check_cost_function_type(self.birth_cf, AbstractCostFunction)
self.death_cf = cost_functions['death']
self.check_cost_function_type(self.death_cf, AbstractCostFunction)
self.max_assigned_cost = self.death_cf.context['cost']
@classmethod
def for_brownian_motion(cls, trajs,
max_speed,
penalty=1.05,
coords=['x', 'y', 'z']):
"""
Parameters
----------
trajs : :class:`sktracker.trajectories.Trajectories`
max_speed : float
Maximum objects velocity
penalty : float
coords : list
Which columns to choose in trajs when computing distances.
Examples
--------
>>> true_trajs = Trajectories(data.brownian_trajectories_generator())
>>>
>>> # Remove labels
>>> true_trajs.relabel(np.arange(len(true_trajs)))
>>>
>>> solver = ByFrameSolver.for_brownian_motion(true_trajs, max_speed=5, penalty=2.)
>>> new_trajs = solver.track()
2014:INFO:by_frame_solver: Initiating frame by frame tracking.
2014:INFO:by_frame_solver: Frame by frame tracking done. 5 segments found (500 before).
"""
        guessed_cost = float(max_speed ** 2) * penalty
diag_context = {'cost': guessed_cost}
diag_params = {'penalty': penalty, 'coords': coords}
link_cost_func = BrownianLinkCostFunction(parameters={'max_speed': max_speed,
'coords': coords})
birth_cost_func = DiagonalCostFunction(context=diag_context,
parameters=diag_params)
death_cost_func = DiagonalCostFunction(context=diag_context,
parameters=diag_params)
cost_functions = {'link': link_cost_func,
'birth': birth_cost_func,
'death': death_cost_func}
return cls(trajs, cost_functions, coords=coords)
@classmethod
def for_directed_motion(cls, trajs,
max_speed,
penalty=1.05,
past_traj_time=10,
smooth_factor=0,
interpolation_order=1,
coords=['x', 'y', 'z']):
"""Link objects according to their distance found in trajectories frame by frame.
Parameters
----------
trajs : :class:`sktracker.trajectories.Trajectories`
max_speed : float
Maximum objects velocity
penalty : float
past_traj_time : float
Time during which the tracker can make a gap close. Above this time all gap
close event will discarded.
smooth_factor : float
Smoothing condition used in :func:`scipy.interpolate.splrep`
interpolation_order : int
The order of the spline fit. See :func:`scipy.interpolate.splrep`
coords : list
Which columns to choose in trajs when computing distances.
"""
parameters = {'max_speed': max_speed,
'past_traj_time': past_traj_time,
'smooth_factor': smooth_factor,
'interpolation_order': interpolation_order,
'coords': coords}
guessed_cost = 20 * penalty
diag_context = {'cost': guessed_cost}
diag_params = {'penalty': penalty}
link_context = {'trajs': trajs}
link_cost_func = BasicDirectedLinkCostFunction(parameters=parameters,
context=link_context)
birth_cost_func = DiagonalCostFunction(context=diag_context,
parameters=diag_params)
death_cost_func = DiagonalCostFunction(context=diag_context,
parameters=diag_params)
cost_functions = {'link': link_cost_func,
'birth': birth_cost_func,
'death': death_cost_func}
return cls(trajs, cost_functions, coords=coords)
@property
def blocks_structure(self):
return [[self.link_cf.mat, self.death_cf.mat],
[self.birth_cf.mat, None]]
@property
def pos_in(self):
return self.trajs.loc[self.t_in]
@property
def pos_out(self):
return self.trajs.loc[self.t_out]
def track(self, progress_bar=False, progress_bar_out=None):
"""
Returns
-------
self.trajs : :class:`pandas.DataFrame`
progress_bar : bool
Display progress bar
progress_bar_out : OutStream
For testing purpose only
"""
log.info('Initiating frame by frame tracking.')
old_labels = self.trajs.index.get_level_values('label').values
        self.trajs['new_label'] = old_labels.astype(float)
ts_in = self.trajs.t_stamps[:-1]
ts_out = self.trajs.t_stamps[1:]
n_labels_before = len(self.trajs.labels)
n = len(ts_in)
for i, (t_in, t_out) in enumerate(zip(ts_in, ts_out)):
if progress_bar:
progress = i / n * 100
message = "t_in : {} | t_out {}".format(t_in, t_out)
print_progress(progress, message=message, out=progress_bar_out)
self.one_frame(t_in, t_out)
if progress_bar:
print_progress(-1)
self.relabel_trajs()
n_labels_after = len(self.trajs.labels)
mess = 'Frame by frame tracking done. {} segments found ({} before).'
log.info(mess.format(n_labels_after, n_labels_before))
return self.trajs
def one_frame(self, t_in, t_out):
"""
Parameters
----------
t_in : int
t_out : int
"""
self.t_in = t_in
self.t_out = t_out
pos_in = self.pos_in
pos_out = self.pos_out
self.link_cf.context['pos_in'] = pos_in
self.link_cf.context['pos_out'] = pos_out
self.link_cf.get_block()
self.birth_cf.context['objects'] = pos_out
self.birth_cf.get_block()
self.death_cf.context['objects'] = pos_in
self.death_cf.get_block()
self.cm = CostMatrix(self.blocks_structure)
self.cm.solve()
self.assign()
def assign(self):
"""
"""
row_shapes, col_shapes = self.cm.get_shapes()
last_in_link = row_shapes[0]
last_out_link = col_shapes[0]
new_labels_in = self.trajs.loc[self.t_in]['new_label'].values
new_labels_out = np.arange(last_out_link)
for idx_out, idx_in in enumerate(self.cm.out_links[:last_out_link]):
if idx_in >= last_in_link:
# new segment
new_label = self.trajs['new_label'].max() + 1.
else:
# assignment
new_label = new_labels_in[idx_in]
self._update_max_assign_cost(self.cm.mat[idx_in, idx_out])
new_labels_out[idx_out] = new_label
self.trajs.loc[self.t_out, 'new_label'] = new_labels_out
# The line below looks much slower than the two lines above
# self.trajs.loc[self.t_out, 'new_label'].iloc[idx_out] = new_label
def _update_max_assign_cost(self, cost):
"""
"""
if cost > self.max_assigned_cost:
self.max_assigned_cost = cost
new_b_cost = self.max_assigned_cost * self.birth_cf.parameters['penalty']
new_d_cost = self.max_assigned_cost * self.death_cf.parameters['penalty']
self.birth_cf.context['cost'] = new_b_cost
self.death_cf.context['cost'] = new_d_cost
|
the-stack_106_15499
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import warnings
from typing import Any, Iterable, Optional, Union, cast
from bson import json_util
from airflow.models import BaseOperator
from airflow.providers.amazon.aws.hooks.s3 import S3Hook
from airflow.providers.mongo.hooks.mongo import MongoHook
_DEPRECATION_MSG = (
"The s3_conn_id parameter has been deprecated. You should pass instead the aws_conn_id parameter."
)
class MongoToS3Operator(BaseOperator):
"""Operator meant to move data from mongo via pymongo to s3 via boto.
:param mongo_conn_id: reference to a specific mongo connection
:type mongo_conn_id: str
:param aws_conn_id: reference to a specific S3 connection
:type aws_conn_id: str
:param mongo_collection: reference to a specific collection in your mongo db
:type mongo_collection: str
:param mongo_query: query to execute. A list including a dict of the query
:type mongo_query: Union[list, dict]
:param mongo_projection: optional parameter to filter the returned fields by
the query. It can be a list of fields names to include or a dictionary
for excluding fields (e.g ``projection={"_id": 0}`` )
:type mongo_projection: Union[list, dict]
:param s3_bucket: reference to a specific S3 bucket to store the data
:type s3_bucket: str
:param s3_key: in which S3 key the file will be stored
:type s3_key: str
:param mongo_db: reference to a specific mongo database
:type mongo_db: str
:param replace: whether or not to replace the file in S3 if it previously existed
:type replace: bool
:param allow_disk_use: enables writing to temporary files in the case you are handling large dataset.
This only takes effect when `mongo_query` is a list - running an aggregate pipeline
:type allow_disk_use: bool
:param compression: type of compression to use for output file in S3. Currently only gzip is supported.
:type compression: str
"""
template_fields = ('s3_bucket', 's3_key', 'mongo_query', 'mongo_collection')
ui_color = '#589636'
template_fields_renderers = {"mongo_query": "json"}
def __init__(
self,
*,
s3_conn_id: Optional[str] = None,
mongo_conn_id: str = 'mongo_default',
aws_conn_id: str = 'aws_default',
mongo_collection: str,
mongo_query: Union[list, dict],
s3_bucket: str,
s3_key: str,
mongo_db: Optional[str] = None,
mongo_projection: Optional[Union[list, dict]] = None,
replace: bool = False,
allow_disk_use: bool = False,
compression: Optional[str] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
if s3_conn_id:
warnings.warn(_DEPRECATION_MSG, DeprecationWarning, stacklevel=3)
aws_conn_id = s3_conn_id
self.mongo_conn_id = mongo_conn_id
self.aws_conn_id = aws_conn_id
self.mongo_db = mongo_db
self.mongo_collection = mongo_collection
# Grab query and determine if we need to run an aggregate pipeline
self.mongo_query = mongo_query
self.is_pipeline = isinstance(self.mongo_query, list)
self.mongo_projection = mongo_projection
self.s3_bucket = s3_bucket
self.s3_key = s3_key
self.replace = replace
self.allow_disk_use = allow_disk_use
self.compression = compression
def execute(self, context):
"""Is written to depend on transform method"""
s3_conn = S3Hook(self.aws_conn_id)
# Grab collection and execute query according to whether or not it is a pipeline
if self.is_pipeline:
results = MongoHook(self.mongo_conn_id).aggregate(
mongo_collection=self.mongo_collection,
aggregate_query=cast(list, self.mongo_query),
mongo_db=self.mongo_db,
allowDiskUse=self.allow_disk_use,
)
else:
results = MongoHook(self.mongo_conn_id).find(
mongo_collection=self.mongo_collection,
query=cast(dict, self.mongo_query),
projection=self.mongo_projection,
mongo_db=self.mongo_db,
)
# Performs transform then stringifies the docs results into json format
docs_str = self._stringify(self.transform(results))
s3_conn.load_string(
string_data=docs_str,
key=self.s3_key,
bucket_name=self.s3_bucket,
replace=self.replace,
compression=self.compression,
)
@staticmethod
def _stringify(iterable: Iterable, joinable: str = '\n') -> str:
"""
Takes an iterable (pymongo Cursor or Array) containing dictionaries and
returns a stringified version using python join
"""
return joinable.join([json.dumps(doc, default=json_util.default) for doc in iterable])
@staticmethod
def transform(docs: Any) -> Any:
"""This method is meant to be extended by child classes
        to perform transformations unique to those operators' needs.
Processes pyMongo cursor and returns an iterable with each element being
a JSON serializable dictionary
Base transform() assumes no processing is needed
ie. docs is a pyMongo cursor of documents and cursor just
needs to be passed through
Override this method for custom transformations
"""
return docs
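# Illustrative sketch (not part of the provider module): a hypothetical subclass that strips
# the Mongo "_id" field from every document before the results are stringified and uploaded.
# class MongoToS3NoIdOperator(MongoToS3Operator):
#     @staticmethod
#     def transform(docs):
#         return ({k: v for k, v in doc.items() if k != "_id"} for doc in docs)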
|
the-stack_106_15500
|
"""Interactive registry management tool"""
import getpass
import logging
import sys
import click
from dsgrid.common import REMOTE_REGISTRY, LOCAL_REGISTRY
from dsgrid.loggers import setup_logging
from dsgrid.registry.registry_manager import RegistryManager
@click.command()
@click.option(
"--path",
default=LOCAL_REGISTRY,
show_default=True,
envvar="DSGRID_REGISTRY_PATH",
help="path to dsgrid registry. Override with the environment variable DSGRID_REGISTRY_PATH",
)
@click.option(
"--remote-path",
default=REMOTE_REGISTRY,
show_default=True,
help="path to dsgrid remote registry",
)
@click.option(
"--offline",
"-o",
is_flag=True,
help="run in registry commands in offline mode. WARNING: any commands you perform in offline "
"mode run the risk of being out-of-sync with the latest dsgrid registry, and any write "
"commands will not be officially synced with the remote registry",
)
@click.option(
"--verbose", is_flag=True, default=False, show_default=True, help="Enable verbose log output."
)
def load(path, remote_path, offline, verbose):
level = logging.DEBUG if verbose else logging.INFO
setup_logging("dsgrid", "dsgrid.log", console_level=level, file_level=level, mode="a")
return RegistryManager.load(path, remote_path, offline_mode=offline)
if __name__ == "__main__":
manager = load(standalone_mode=False)
if isinstance(manager, int):
# The user likely invoked --help
sys.exit(manager)
project_manager = manager.project_manager
dataset_manager = manager.dataset_manager
dimension_manager = manager.dimension_manager
dimension_mapping_manager = manager.dimension_mapping_manager
submitter = getpass.getuser()
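# Illustrative sketch (not part of the original tool): from another script the same manager can
# be created non-interactively, mirroring what load() does above:
# manager = RegistryManager.load(LOCAL_REGISTRY, REMOTE_REGISTRY, offline_mode=True)
# dimension_manager = manager.dimension_manager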
|
the-stack_106_15502
|
import sqlite3
from sqlite3 import Error
def Connect(db_file):
""" create a database connection to a SQLite database """
try:
conn = sqlite3.connect(db_file)
print(sqlite3.version)
return conn
except Error as e:
print(e)
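# Illustrative usage sketch (not part of the original module); "example.db" is a hypothetical file:
# conn = Connect("example.db")
# if conn is not None:
#     cur = conn.cursor()
#     cur.execute("SELECT sqlite_version()")
#     print(cur.fetchone())
#     conn.close()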
|
the-stack_106_15504
|
import click
import platform
import subprocess
import sys
import pipes
import shutil
import os
import getpass
# What is the container runtime for this platform?
if platform.system() == "Darwin":
container_tech = "docker"
container_runtime = "/usr/local/bin/docker"
elif platform.system() == "Windows":
container_tech = "docker"
container_runtime = shutil.which("docker.exe")
elif platform.system() == "Linux":
container_tech = "podman"
container_runtime = shutil.which("podman")
else:
print("Unknown operating system, defaulting to Docker")
container_tech = "docker"
container_runtime = shutil.which("docker")
# Define startupinfo for subprocesses
if platform.system() == "Windows":
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
else:
startupinfo = None
def exec_container(args):
args = [container_runtime] + args
args_str = " ".join(pipes.quote(s) for s in args)
print("> " + args_str)
sys.stdout.flush()
# In Tails, tell the container runtime to download over Tor
if (
platform.system() == "Linux"
and getpass.getuser() == "amnesia"
and os.getuid() == 1000
):
env = os.environ.copy()
env["HTTP_PROXY"] = "socks5://127.0.0.1:9050"
else:
env = None
with subprocess.Popen(
args,
stdin=None,
stdout=sys.stdout,
stderr=sys.stderr,
bufsize=1,
universal_newlines=True,
startupinfo=startupinfo,
env=env,
) as p:
p.communicate()
return p.returncode
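# Illustrative note (not part of the original module): the click commands below are thin
# wrappers around exec_container, e.g. the `pull` command effectively runs
#   exec_container(["pull", "docker.io/flmcode/dangerzone"])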
@click.group()
def container_main():
"""
Dangerzone container commands with elevated privileges.
Humans don't need to run this command by themselves.
"""
pass
@container_main.command()
@click.option("--container-name", default="docker.io/flmcode/dangerzone")
def ls(container_name):
"""docker image ls [container_name]"""
sys.exit(exec_container(["image", "ls", container_name]))
@container_main.command()
def pull():
"""docker pull flmcode/dangerzone"""
sys.exit(exec_container(["pull", "docker.io/flmcode/dangerzone"]))
@container_main.command()
@click.option("--document-filename", required=True)
@click.option("--pixel-dir", required=True)
@click.option("--container-name", default="docker.io/flmcode/dangerzone")
def documenttopixels(document_filename, pixel_dir, container_name):
"""docker run --network none -v [document_filename]:/tmp/input_file -v [pixel_dir]:/dangerzone [container_name] document-to-pixels"""
args = ["run", "--network", "none"]
# docker uses --security-opt, podman doesn't
if container_tech == "docker":
args += ["--security-opt=no-new-privileges:true"]
# :Z tells SELinux to relabel the volume content to match the container label
args += [
"-v",
f"{document_filename}:/tmp/input_file:Z",
"-v",
f"{pixel_dir}:/dangerzone:Z",
container_name,
"document-to-pixels",
]
sys.exit(exec_container(args))
@container_main.command()
@click.option("--pixel-dir", required=True)
@click.option("--safe-dir", required=True)
@click.option("--container-name", default="docker.io/flmcode/dangerzone")
@click.option("--ocr", required=True)
@click.option("--ocr-lang", required=True)
def pixelstopdf(pixel_dir, safe_dir, container_name, ocr, ocr_lang):
"""docker run --network none -v [pixel_dir]:/dangerzone -v [safe_dir]:/safezone [container_name] -e OCR=[ocr] -e OCR_LANGUAGE=[ocr_lang] pixels-to-pdf"""
sys.exit(
exec_container(
[
"run",
"--network",
"none",
"-v",
f"{pixel_dir}:/dangerzone:Z",
"-v",
f"{safe_dir}:/safezone:Z",
"-e",
f"OCR={ocr}",
"-e",
f"OCR_LANGUAGE={ocr_lang}",
container_name,
"pixels-to-pdf",
]
)
)
|
the-stack_106_15505
|
import random
import importlib
from functools import reduce
import numpy as np
import os
import re
import subprocess
import sys
from collections import defaultdict
import PIL
import psutil
import torch
import torch.nn as nn  # needed by collate_func (nn.ConstantPad2d)
import torchvision
from tabulate import tabulate
from datetime import datetime
import torch.distributed as dist
from torchvision import transforms
from thop import profile
from copy import deepcopy
def collate_func(batch_dic):
"""
    Custom collate_fn: pads each sample in the batch to the largest height/width in the batch.
:param batch_dic:
:return:
>>> dataloader = torch.utils.data.DataLoader(dataset,batch_size=batch_size, num_workers=nw,pin_memory=isTrain,shuffle=isTrain,collate_fn=collate_func)
"""
def get_pad_lrtb(pad_tb):
if pad_tb % 2 == 0:
pad_tb = pad_tb / 2
pad_t = pad_tb
pad_b = pad_tb
else:
pad_tb = (pad_tb-1) / 2
pad_t = pad_tb
pad_b = pad_tb + 1
return int(pad_t),int(pad_b)
resize_h,resize_w=cuda2cpu(torch.max(torch.tensor([dic[0].shape[1:] for dic in batch_dic]),0).values) # h,w
batch_dic_list0 = []
batch_dic_list1 = []
batch_dic_list2 = []
batch_dic_list3 = []
for dic0, dic1, dic2, dic3 in batch_dic:
dic_c, dic_h, dic_w = dic0.shape
pad_t,pad_b = get_pad_lrtb(resize_h - dic_h)
pad_l,pad_r = get_pad_lrtb(resize_w - dic_w)
pad = nn.ConstantPad2d(padding=(pad_l, pad_r, pad_t, pad_b),value=-1)
batch_dic_list0.append(pad(dic0))
batch_dic_list1.append(dic1)
batch_dic_list2.append(dic2)
batch_dic_list3.append(dic3)
res=[]
res.append(torch.stack(batch_dic_list0))
res.append(torch.tensor(batch_dic_list1))
res.append(batch_dic_list2)
res.append(torch.tensor(batch_dic_list3))
return res
def delete_tensor_one(tmp_x):
    # Drop the values equal to -1 from the tensor and reshape what remains
indexx = torch.where(tmp_x != -1)
shape_list = [index.unique().size()[0] for index in indexx]
shape_list[-1] = int(tmp_x[indexx].shape[0] / shape_list[0] / shape_list[1])
shape_max_index = reduce(lambda x, y: x * y, shape_list)
tmp_x2 = torch.reshape(tmp_x[indexx][:shape_max_index], shape_list)
return tmp_x2
def get_model_info(model, tsize=(640,640)): # h,w
"""计算模型的参数量和计算一张图片的计算量"""
stride = 64
img = torch.zeros((1, 3, stride, stride), device=next(model.parameters()).device)
flops, params = profile(deepcopy(model), inputs=(img,), verbose=False)
params /= 1e6
flops /= 1e9
flops *= tsize[0] * tsize[1] / stride / stride * 2 # Gflops
info = "Params: {:.6f}M, Gflops: {:.6f}".format(params, flops)
return info
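# Illustrative usage sketch (not part of the original module), assuming `model` is any
# torch.nn.Module that accepts a (1, 3, H, W) input:
# print(get_model_info(model, tsize=(640, 640)))  # -> "Params: ...M, Gflops: ..."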
def recursive_to(input, device):
"""将输入的值转到设备cpu或者gpu中"""
if isinstance(input, torch.Tensor):
return input.to(device)
if isinstance(input, dict):
for name in input:
if isinstance(input[name], torch.Tensor):
input[name] = input[name].to(device)
return input
if isinstance(input, list):
for i, item in enumerate(input):
input[i] = recursive_to(item, device)
return input
assert False
def init_seeds(seed=0):
"""eg:init_seeds(seed=0)"""
if seed is None:
seed = (
os.getpid()
+ int(datetime.now().strftime("%S%f"))
+ int.from_bytes(os.urandom(2), "big")
)
np.random.seed(seed)
random.seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(seed) # gpu
    # Set the random seed (rank = -1)
    # Network parameters are randomly initialized by default. Without a fixed seed the
    # initialization differs on every training run, so results are not deterministic;
    # with a fixed seed the initialization is the same every time.
torch.manual_seed(seed) # cpu
def init_cudnn(reproducibility_training_speed=True):
# https://blog.csdn.net/weixin_42587961/article/details/109363698
    # If torch.backends.cudnn.deterministic is set to True, the convolution algorithm returned
    # each time is deterministic, i.e. the default algorithm.
"""
    reproducibility_training_speed=True means faster training and reproducibility; False means
    results cannot be reproduced but network performance may improve.
    https://blog.csdn.net/byron123456sfsfsfa/article/details/96003317
    Setting torch.backends.cudnn.benchmark = True makes the program spend a little extra time at
    startup searching, for each convolution layer of the network, the convolution implementation
    that suits it best, which then speeds up the whole network. This is appropriate when the
    network structure is fixed (not dynamic) and the input shape (batch size, image size, input
    channels) does not change, i.e. the common case. Conversely, if the convolution settings keep
    changing, the program keeps re-optimizing and ends up spending more time instead.
    A convolution can be implemented in many ways. The simplest is nested loops: for each input
    image, each output channel and each input channel, take a region, convolve it with the given
    kernel and slide row by row until the whole image is processed; this is usually called the
    "direct" method and, given all those loops, it is generally not very efficient. Besides it
    there are GEMM (General Matrix Multiply) based, FFT based and Winograd based algorithms, each
    with its own variants; the open-source C++ library triNNity implements close to 80 forward
    convolution algorithms. Each algorithm has its own strengths, e.g. some are fast for large
    kernels and some use less memory in certain cases.
    Given a network (e.g. ResNet-101), an input image size and a hardware platform, the simplest
    choice is to use the same algorithm (e.g. direct) for every convolution layer, but that is
    certainly not optimal. A better approach is to run some quick benchmarks beforehand and pick
    the fastest algorithm for each convolution layer, then run the whole network with those
    choices, which improves efficiency considerably.
"""
if reproducibility_training_speed:
        # Therefore, to make runs reproducible and speed up training:
torch.backends.cudnn.benchmark = False
        # Although torch.backends.cudnn.benchmark = False removes the nondeterminism of the
        # algorithm selection, the algorithms themselves can also be nondeterministic, so set:
torch.backends.cudnn.deterministic = True # consistent results on the cpu and gpu
else:
        # If results do not need to be reproduced and you want the best possible performance:
torch.backends.cudnn.benchmark = True
def tensor_to_PIL(tensor):
    # Input: a torch tensor
    # Output: a PIL image
unloader = transforms.ToPILImage()
image = tensor.cpu().clone()
image = image.squeeze(0)
image = unloader(image)
return image
def tensor_to_np(tensor):
    # Convert a tensor to a numpy image (scale to 0-255, drop the batch dim, CHW -> HWC)
img = tensor.mul(255).byte()
img = img.cpu().numpy().squeeze(0).transpose((1, 2, 0))
return img
def cuda2cpu(pred):
"""
    Convert a torch tensor on CUDA (or CPU) to a numpy array on the CPU.
eg:cuda2cpu(pred)"""
if not hasattr(pred, 'is_cuda'):
return pred
if pred.is_cuda:
pred_cpu = pred.cpu()
if not hasattr(pred_cpu, 'detach'):
pred_cpu = pred_cpu.numpy()
else:
pred_cpu = pred_cpu.detach().numpy()
else:
pred_cpu = pred.numpy()
return pred_cpu
# ------------- collect environment / version info: start -------------#
def collect_torch_env():
try:
import torch.__config__
return torch.__config__.show()
except ImportError:
# compatible with older versions of pytorch
from torch.utils.collect_env import get_pretty_env_info
return get_pretty_env_info()
def getMemCpu():
data = psutil.virtual_memory()
    total = data.total  # total memory, in bytes
    print('total', total)
    free = data.available  # available memory
    print('free', free)
    memory = "Memory usage:%d" % (int(round(data.percent))) + "%" + " "  # memory usage
    print('memory', memory)
    cpu = "CPU:%0.2f" % psutil.cpu_percent(interval=1) + "%"  # CPU utilization
print('cpu', cpu)
def get_env_module():
var_name = "DETECTRON2_ENV_MODULE"
return var_name, os.environ.get(var_name, "<not set>")
def detect_compute_compatibility(CUDA_HOME, so_file):
try:
cuobjdump = os.path.join(CUDA_HOME, "bin", "cuobjdump")
if os.path.isfile(cuobjdump):
output = subprocess.check_output(
"'{}' --list-elf '{}'".format(cuobjdump, so_file), shell=True
)
output = output.decode("utf-8").strip().split("\n")
arch = []
for line in output:
line = re.findall(r"\.sm_([0-9]*)\.", line)[0]
arch.append(".".join(line))
arch = sorted(set(arch))
return ", ".join(arch)
else:
return so_file + "; cannot find cuobjdump"
except Exception:
# unhandled failure
return so_file
def collect_env_info():
"""查看cuda cudnn torch 等版本是多少"""
has_gpu = torch.cuda.is_available() # true for both CUDA & ROCM
torch_version = torch.__version__
# NOTE that CUDA_HOME/ROCM_HOME could be None even when CUDA runtime libs are functional
from torch.utils.cpp_extension import CUDA_HOME, ROCM_HOME
has_rocm = False
if (getattr(torch.version, "hip", None) is not None) and (ROCM_HOME is not None):
has_rocm = True
has_cuda = has_gpu and (not has_rocm)
data = []
data.append(("sys.platform", sys.platform)) # check-template.yml depends on it
data.append(("Python", sys.version.replace("\n", "")))
data.append(("numpy", np.__version__))
try:
import detectron2 # noqa
data.append(
("detectron2", detectron2.__version__ + " @" + os.path.dirname(detectron2.__file__))
)
except ImportError:
data.append(("detectron2", "failed to import"))
try:
import detectron2._C as _C
except ImportError as e:
data.append(("detectron2._C", f"not built correctly: {e}"))
# print system compilers when extension fails to build
if sys.platform != "win32": # don't know what to do for windows
try:
# this is how torch/utils/cpp_extensions.py choose compiler
cxx = os.environ.get("CXX", "c++")
cxx = subprocess.check_output("'{}' --version".format(cxx), shell=True)
cxx = cxx.decode("utf-8").strip().split("\n")[0]
except subprocess.SubprocessError:
cxx = "Not found"
data.append(("Compiler ($CXX)", cxx))
if has_cuda and CUDA_HOME is not None:
try:
nvcc = os.path.join(CUDA_HOME, "bin", "nvcc")
nvcc = subprocess.check_output("'{}' -V".format(nvcc), shell=True)
nvcc = nvcc.decode("utf-8").strip().split("\n")[-1]
except subprocess.SubprocessError:
nvcc = "Not found"
data.append(("CUDA compiler", nvcc))
else:
# print compilers that are used to build extension
data.append(("Compiler", _C.get_compiler_version()))
data.append(("CUDA compiler", _C.get_cuda_version())) # cuda or hip
if has_cuda and getattr(_C, "has_cuda", lambda: True)():
data.append(
("detectron2 arch flags", detect_compute_compatibility(CUDA_HOME, _C.__file__))
)
data.append(get_env_module())
data.append(("PyTorch", torch_version + " @" + os.path.dirname(torch.__file__)))
data.append(("PyTorch debug build", torch.version.debug))
data.append(("GPU available", has_gpu))
if has_gpu:
devices = defaultdict(list)
for k in range(torch.cuda.device_count()):
cap = ".".join((str(x) for x in torch.cuda.get_device_capability(k)))
name = torch.cuda.get_device_name(k) + f" (arch={cap})"
devices[name].append(str(k))
for name, devids in devices.items():
data.append(("GPU " + ",".join(devids), name))
if has_rocm:
msg = " - invalid!" if not (ROCM_HOME and os.path.isdir(ROCM_HOME)) else ""
data.append(("ROCM_HOME", str(ROCM_HOME) + msg))
else:
msg = " - invalid!" if not (CUDA_HOME and os.path.isdir(CUDA_HOME)) else ""
data.append(("CUDA_HOME", str(CUDA_HOME) + msg))
cuda_arch_list = os.environ.get("TORCH_CUDA_ARCH_LIST", None)
if cuda_arch_list:
data.append(("TORCH_CUDA_ARCH_LIST", cuda_arch_list))
data.append(("Pillow", PIL.__version__))
try:
data.append(
(
"torchvision",
str(torchvision.__version__) + " @" + os.path.dirname(torchvision.__file__),
)
)
if has_cuda:
try:
torchvision_C = importlib.util.find_spec("torchvision._C").origin
msg = detect_compute_compatibility(CUDA_HOME, torchvision_C)
data.append(("torchvision arch flags", msg))
except ImportError:
data.append(("torchvision._C", "Not found"))
except AttributeError:
data.append(("torchvision", "unknown"))
try:
import fvcore
data.append(("fvcore", fvcore.__version__))
except ImportError:
pass
try:
import cv2
data.append(("cv2", cv2.__version__))
except ImportError:
data.append(("cv2", "Not found"))
env_str = tabulate(data) + "\n"
env_str += collect_torch_env()
return env_str
# ------------- collect environment / version info: end -------------#
def view_version_cuda_torch():
os.system('cat /usr/local/cuda/version.txt')
os.system('cat /etc/issue')
os.system('cat /proc/cpuinfo | grep name | sort | uniq')
# os.system('whereis cudnn')
try:
head_file = open('/usr/local/cuda/include/cudnn.h')
except:
head_file = open('/usr/include/cudnn.h')
lines = head_file.readlines()
for line in lines:
line = line.strip()
if line.startswith('#define CUDNN_MAJOR'):
line = line.split('#define CUDNN_MAJOR')
n1 = int(line[1])
continue
if line.startswith('#define CUDNN_MINOR'):
line = line.split('#define CUDNN_MINOR')
n2 = int(line[1])
continue
if line.startswith('#define CUDNN_PATCHLEVEL'):
line = line.split('#define CUDNN_PATCHLEVEL')
n3 = int(line[1])
break
print("torch version", torch.__version__)
print("torchvision version", torchvision.__version__)
print("CUDA version", torch.version.cuda)
print("CUDNN version", torch.backends.cudnn.version())
print('CUDNN Version ', str(n1) + '.' + str(n2) + '.' + str(n3))
def select_device(device='',batch_size=None):
"""选择训练设备
eg:select_device("0")"""
# device = 'cpu' or '0' or '0,1,2,3'
cpu_request = device.lower() == 'cpu'
if device and not cpu_request: # if device requested other than 'cpu'
os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable
assert torch.cuda.is_available(), 'CUDA unavailable, invalid device %s requested' % device # check availablity
cuda = False if cpu_request else torch.cuda.is_available()
if cuda:
c = 1024 ** 2 # bytes to MB
ng = torch.cuda.device_count()
if ng > 1 and batch_size: # check that batch_size is compatible with device_count
assert batch_size % ng == 0, 'batch-size %g not multiple of GPU count %g' % (batch_size, ng)
x = [torch.cuda.get_device_properties(i) for i in range(ng)]
s = f'Using torch {torch.__version__} '
for i in range(0, ng):
if i == 1:
s = ' ' * len(s)
print("%sCUDA:%g (%s, %dMB)" % (s, i, x[i].name, x[i].total_memory / c))
else:
print(f'Using torch {torch.__version__} CPU')
if cuda:
if "," in device:
            return torch.device('cuda:0')  # multiple GPUs: return the first card
        else:
            return torch.device('cuda:{}'.format(device))  # a single specified GPU: return it directly
else:
return torch.device("cpu")
"""
torch.distributed.get_backend(group=group)     # group is optional; returns the backend as a string (group is a ProcessGroup)
torch.distributed.get_rank(group=group)        # group is optional; returns the int rank of the process running this script
torch.distributed.get_world_size(group=group)  # group is optional; returns the total number of processes
torch.distributed.is_initialized()             # whether this process has been initialized
torch.distributed.is_mpi_available()           # whether MPI is available
torch.distributed.is_nccl_available()          # whether NCCL is available
"""
def get_world_size() -> int:
if not dist.is_available():
return 1
if not dist.is_initialized():
return 1
return dist.get_world_size()
def get_rank() -> int:
if not dist.is_available():
return 0
if not dist.is_initialized():
return 0
return dist.get_rank()
def labels_to_class_weights(labels, nc=80):
# Get class weights (inverse frequency) from training labels
if labels[0] is None: # no labels loaded
return torch.Tensor()
classes = labels # labels = [class xywh]
    weights = np.bincount(classes, minlength=nc)  # occurrences per class
weights = 1 / weights # number of targets per class
weights /= weights.sum() # normalize
return torch.from_numpy(weights)
def labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)):
# Produces image weights based on class mAPs
n = len(labels)
class_counts = np.array([np.bincount([labels[i]], minlength=nc) for i in range(n)])
image_weights = (class_weights.reshape(1, nc) * class_counts).sum(1)
# index = random.choices(range(n), weights=image_weights, k=1) # weight image sample
return image_weights
if __name__ == '__main__':
# from python_developer_tools.cv.utils.torch_utils import collect_env_info
print(collect_env_info())
getMemCpu()
|
the-stack_106_15506
|
from flask import g
import uuid
import hashlib
from expungeservice.database.db_util import rollback_errors
from expungeservice.models.expungement_result import EligibilityStatus
from flask_login import current_user
def save_result(request_data, record):
user_id = current_user.user_id
search_param_string = (
user_id +
request_data["first_name"] +
request_data["last_name"] +
request_data["middle_name"] +
request_data["birth_date"])
hashed_search_params = hashlib.sha256(search_param_string.encode()).hexdigest()
num_charges = len(record.charges)
num_eligible_charges = len([ c for c in record.charges if
c.expungement_result.type_eligibility.status == EligibilityStatus.ELIGIBLE])
_db_insert_result(
g.database, user_id, hashed_search_params, num_charges, num_eligible_charges)
@rollback_errors
def _db_insert_result(database, user_id, hashed_search_params, num_charges, num_eligible_charges):
database.cursor.execute(
"""
INSERT INTO SEARCH_RESULTS(search_result_id, user_id, hashed_search_params, num_charges, num_eligible_charges )
VALUES ( uuid_generate_v4(), %(user_id)s, %(params)s, %(num_charges)s, %(num_eligible_charges)s);
""", {'user_id': uuid.UUID(user_id).hex, 'params': hashed_search_params, 'num_charges': num_charges,
'num_eligible_charges': num_eligible_charges})
|
the-stack_106_15507
|
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Conjugate Gradient algorithm."""
import logging
from scipy.optimize import minimize
from qiskit.aqua.components.optimizers import Optimizer
logger = logging.getLogger(__name__)
class CG(Optimizer):
"""Conjugate Gradient algorithm.
Uses scipy.optimize.minimize CG
See https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html
"""
CONFIGURATION = {
'name': 'CG',
'description': 'CG Optimizer',
'input_schema': {
'$schema': 'http://json-schema.org/draft-07/schema#',
'id': 'cg_schema',
'type': 'object',
'properties': {
'maxiter': {
'type': 'integer',
'default': 20
},
'disp': {
'type': 'boolean',
'default': False
},
'gtol': {
'type': 'number',
'default': 1e-05
},
'tol': {
'type': ['number', 'null'],
'default': None
},
'eps': {
'type': 'number',
'default': 1.4901161193847656e-08
}
},
'additionalProperties': False
},
'support_level': {
'gradient': Optimizer.SupportLevel.supported,
'bounds': Optimizer.SupportLevel.ignored,
'initial_point': Optimizer.SupportLevel.required
},
'options': ['maxiter', 'disp', 'gtol', 'eps'],
'optimizer': ['local']
}
# pylint: disable=unused-argument
def __init__(self, maxiter=20, disp=False, gtol=1e-5, tol=None, eps=1.4901161193847656e-08):
"""
Constructor.
For details, please refer to
https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html.
Args:
maxiter (int): Maximum number of iterations to perform.
disp (bool): Set to True to print convergence messages.
gtol (float): Gradient norm must be less than gtol before successful termination.
tol (float or None): Tolerance for termination.
eps (float): If jac is approximated, use this value for the step size.
"""
self.validate(locals())
super().__init__()
for k, v in locals().items():
if k in self._configuration['options']:
self._options[k] = v
self._tol = tol
def optimize(self, num_vars, objective_function, gradient_function=None,
variable_bounds=None, initial_point=None):
super().optimize(num_vars, objective_function, gradient_function,
variable_bounds, initial_point)
if gradient_function is None and self._max_evals_grouped > 1:
epsilon = self._options['eps']
gradient_function = Optimizer.wrap_function(Optimizer.gradient_num_diff,
(objective_function, epsilon,
self._max_evals_grouped))
res = minimize(objective_function, initial_point, jac=gradient_function,
tol=self._tol, method="CG", options=self._options)
return res.x, res.fun, res.nfev
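# Illustrative usage sketch (not part of the original module): minimizing a simple quadratic
# through the Optimizer interface shown above; the objective and start point are arbitrary examples.
# import numpy as np
# optimizer = CG(maxiter=100)
# sol, value, nfev = optimizer.optimize(
#     num_vars=2,
#     objective_function=lambda x: (x[0] - 1) ** 2 + (x[1] + 2) ** 2,
#     initial_point=np.zeros(2))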
|
the-stack_106_15508
|
# coding=utf-8
"""
Module to build backbone modules based on passed inputs.
"""
import tensorflow as tf
import deepreg.model.loss.deform as deform_loss
import deepreg.model.loss.image as image_loss
import deepreg.model.loss.label as label_loss
from deepreg.model.backbone.global_net import GlobalNet
from deepreg.model.backbone.local_net import LocalNet
from deepreg.model.backbone.u_net import UNet
def build_backbone(
image_size: tuple, out_channels: int, model_config: dict, method_name: str
) -> tf.keras.Model:
"""
Backbone model accepts a single input of shape (batch, dim1, dim2, dim3, ch_in)
and returns a single output of shape (batch, dim1, dim2, dim3, ch_out)
:param image_size: tuple, dims of image, (dim1, dim2, dim3)
:param out_channels: int, number of out channels, ch_out
:param method_name: str, one of ddf | dvf | conditional
:param model_config: dict, model configuration, returned from parser.yaml.load
:return: tf.keras.Model
"""
if not (
(isinstance(image_size, tuple) or isinstance(image_size, list))
and len(image_size) == 3
):
raise ValueError(f"image_size must be tuple of length 3, got {image_size}")
if not (isinstance(out_channels, int) and out_channels >= 1):
raise ValueError(f"out_channels must be int >=1, got {out_channels}")
if not (isinstance(model_config, dict) and "backbone" in model_config.keys()):
raise ValueError(
f"model_config must be a dict having key 'backbone', got{model_config}"
)
if method_name not in ["ddf", "dvf", "conditional", "affine"]:
raise ValueError(
"method name has to be one of ddf/dvf/conditional/affine in build_backbone, "
"got {}".format(method_name)
)
if method_name in ["ddf", "dvf"]:
out_activation = None
# TODO try random init with smaller number
out_kernel_initializer = "zeros" # to ensure small ddf and dvf
elif method_name in ["conditional"]:
out_activation = "sigmoid" # output is probability
out_kernel_initializer = "glorot_uniform"
elif method_name in ["affine"]:
out_activation = None
out_kernel_initializer = "zeros"
else:
raise ValueError("Unknown method name {}".format(method_name))
if model_config["backbone"] == "local":
return LocalNet(
image_size=image_size,
out_channels=out_channels,
out_kernel_initializer=out_kernel_initializer,
out_activation=out_activation,
**model_config["local"],
)
elif model_config["backbone"] == "global":
return GlobalNet(
image_size=image_size,
out_channels=out_channels,
out_kernel_initializer=out_kernel_initializer,
out_activation=out_activation,
**model_config["global"],
)
elif model_config["backbone"] == "unet":
return UNet(
image_size=image_size,
out_channels=out_channels,
out_kernel_initializer=out_kernel_initializer,
out_activation=out_activation,
**model_config["unet"],
)
else:
raise ValueError("Unknown model name")
def build_inputs(
moving_image_size: tuple,
fixed_image_size: tuple,
index_size: int,
batch_size: int,
labeled: bool,
) -> [tf.keras.Input, tf.keras.Input, tf.keras.Input, tf.keras.Input, tf.keras.Input]:
"""
Configure a pair of moving and fixed images and a pair of
moving and fixed labels as model input
and returns model input tf.keras.Input
:param moving_image_size: tuple, dims of moving images, [m_dim1, m_dim2, m_dim3]
:param fixed_image_size: tuple, dims of fixed images, [f_dim1, f_dim2, f_dim3]
:param index_size: int, dataset size (number of images)
:param batch_size: int, mini-batch size
:param labeled: Boolean, true if we have label data
:return: 5 (if labeled=True) or 3 (if labeled=False) tf.keras.Input objects
"""
moving_image = tf.keras.Input(
shape=(*moving_image_size,), batch_size=batch_size, name="moving_image"
) # (batch, m_dim1, m_dim2, m_dim3)
fixed_image = tf.keras.Input(
shape=(*fixed_image_size,), batch_size=batch_size, name="fixed_image"
) # (batch, f_dim1, f_dim2, f_dim3)
moving_label = (
tf.keras.Input(
shape=(*moving_image_size,), batch_size=batch_size, name="moving_label"
)
if labeled
else None
) # (batch, m_dim1, m_dim2, m_dim3)
fixed_label = (
tf.keras.Input(
shape=(*fixed_image_size,), batch_size=batch_size, name="fixed_label"
)
if labeled
else None
    )  # (batch, f_dim1, f_dim2, f_dim3)
indices = tf.keras.Input(
shape=(index_size,), batch_size=batch_size, name="indices"
) # (batch, 2)
return moving_image, fixed_image, moving_label, fixed_label, indices
def add_ddf_loss(
model: tf.keras.Model, ddf: tf.Tensor, loss_config: dict
) -> tf.keras.Model:
"""
add regularization loss of ddf into model
:param model: tf.keras.Model
:param ddf: tensor of shape (batch, m_dim1, m_dim2, m_dim3, 3)
:param loss_config: config for loss
"""
loss_reg = tf.reduce_mean(
deform_loss.local_displacement_energy(ddf, **loss_config["regularization"])
)
weighted_loss_reg = loss_reg * loss_config["regularization"]["weight"]
model.add_loss(weighted_loss_reg)
model.add_metric(loss_reg, name="loss/regularization", aggregation="mean")
model.add_metric(
weighted_loss_reg, name="loss/weighted_regularization", aggregation="mean"
)
return model
def add_image_loss(
model: tf.keras.Model,
fixed_image: tf.Tensor,
pred_fixed_image: tf.Tensor,
loss_config: dict,
) -> tf.keras.Model:
"""
add image dissimilarity loss of ddf into model
:param model: tf.keras.Model
:param fixed_image: tensor of shape (batch, f_dim1, f_dim2, f_dim3)
:param pred_fixed_image: tensor of shape (batch, f_dim1, f_dim2, f_dim3)
:param loss_config: config for loss
"""
if loss_config["dissimilarity"]["image"]["weight"] > 0:
loss_image = tf.reduce_mean(
image_loss.dissimilarity_fn(
y_true=fixed_image,
y_pred=pred_fixed_image,
**loss_config["dissimilarity"]["image"],
)
)
weighted_loss_image = (
loss_image * loss_config["dissimilarity"]["image"]["weight"]
)
model.add_loss(weighted_loss_image)
model.add_metric(
loss_image, name="loss/image_dissimilarity", aggregation="mean"
)
model.add_metric(
weighted_loss_image,
name="loss/weighted_image_dissimilarity",
aggregation="mean",
)
return model
def add_label_loss(
model: tf.keras.Model,
grid_fixed: tf.Tensor,
fixed_label: (tf.Tensor, None),
pred_fixed_label: (tf.Tensor, None),
loss_config: dict,
) -> tf.keras.Model:
"""
add label dissimilarity loss of ddf into model
:param model: tf.keras.Model
:param grid_fixed: tensor of shape (f_dim1, f_dim2, f_dim3, 3)
:param fixed_label: tensor of shape (batch, f_dim1, f_dim2, f_dim3)
:param pred_fixed_label: tensor of shape (batch, f_dim1, f_dim2, f_dim3)
:param loss_config: config for loss
"""
if fixed_label is not None:
loss_label = tf.reduce_mean(
label_loss.get_dissimilarity_fn(
config=loss_config["dissimilarity"]["label"]
)(y_true=fixed_label, y_pred=pred_fixed_label)
)
weighted_loss_label = (
loss_label * loss_config["dissimilarity"]["label"]["weight"]
)
model.add_loss(weighted_loss_label)
model.add_metric(
loss_label, name="loss/label_dissimilarity", aggregation="mean"
)
model.add_metric(
weighted_loss_label,
name="loss/weighted_label_dissimilarity",
aggregation="mean",
)
# metrics
dice_binary = label_loss.dice_score(
y_true=fixed_label, y_pred=pred_fixed_label, binary=True
)
dice_float = label_loss.dice_score(
y_true=fixed_label, y_pred=pred_fixed_label, binary=False
)
tre = label_loss.compute_centroid_distance(
y_true=fixed_label, y_pred=pred_fixed_label, grid=grid_fixed
)
foreground_label = label_loss.foreground_proportion(y=fixed_label)
foreground_pred = label_loss.foreground_proportion(y=pred_fixed_label)
model.add_metric(dice_binary, name="metric/dice_binary", aggregation="mean")
model.add_metric(dice_float, name="metric/dice_float", aggregation="mean")
model.add_metric(tre, name="metric/tre", aggregation="mean")
model.add_metric(
foreground_label, name="metric/foreground_label", aggregation="mean"
)
model.add_metric(
foreground_pred, name="metric/foreground_pred", aggregation="mean"
)
return model
|
the-stack_106_15509
|
import pirel.pcells as pc
import pirel.tools as pt
import phidl.geometry as pg
from phidl.device_layout import Port,CellArray,Device,DeviceReference
from pirel.tools import *
import pandas as pd
import warnings
from copy import copy
def Scaled(cls):
    ''' Class decorator that accepts normalized parameters for resonator designs.
Descaling rules:
IDT gap (d) = IDT gap (n) * pitch
Bus length (d) = Bus length (n) * pitch
    Etch pit width (d) = Etch pit width (n) * active region width
Anchor width (d) = Anchor width (n) * active region width
Anchor length (d) = Anchor length (n) * pitch
Anchor Margin Y (d) = Anchor Margin Y (n) * Anchor length
Anchor Margin X (d) = Anchor Margin X (n) * Anchor width.
Parameters
----------
cls : class
pass class of resonator to be decorated.
(i.e. Scaled(LFE)(name="normalized LFE")).
'''
class Scaled(cls):
def __init__(self,*args,**kwargs):
cls.__init__(self,*args,**kwargs)
self._normalized=False
def import_params(self,df):
self._normalize()
cls.import_params(self,df)
self._denormalize()
def export_params(self):
self._normalize()
df=cls.export_params(self)
self._denormalize()
return df
def _normalize(self):
if self._normalized==True:
raise ValueError("Already normalized")
p=self.idt.pitch
active_area_x=self.idt.active_area.x
anchor_x=self.anchor.size.x
anchor_y=self.anchor.size.y
self.idt.y_offset=self.idt.y_offset/p
# self.idt.length=self.idt.length/p
self.bus.size=Point(self.bus.size.x,self.bus.size.y/p)
self.etchpit.x=self.etchpit.x/active_area_x
self.anchor.size=Point(\
self.anchor.size.x/active_area_x,\
self.anchor.size.y/p)
self._normalized=True
def _denormalize(self):
if self._normalized==False:
raise ValueError("Already denormalized")
p=self.idt.pitch
self.idt.y_offset=self.idt.y_offset*p
self.bus.size=Point(self.bus.size.x,self.bus.size.y*p)
active_area_x=self.idt.active_area.x
self.etchpit.x=self.etchpit.x*active_area_x
self.anchor.size=Point(\
self.anchor.size.x*active_area_x,\
self.anchor.size.y*p)
self._normalized=False
return self
Scaled.__name__=" ".join(["Scaled",cls.__name__])
# Scaled.draw=cached(Scaled)(Scaled.draw)
return Scaled
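# Illustrative sketch (not part of the original module), following the docstring above and
# assuming an LFE resonator class is available in pirel:
# NormalizedLFE = Scaled(LFE)
# device = NormalizedLFE(name="normalized LFE")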
def addVia(cls,side='top',bottom_conn=False):
''' Class decorator to add vias to resonators.
    you can select the side (top, bottom or both) and whether to keep the bottom pad of the via.
Parameters
----------
cls : class
the class that needs vias
side : 'top','bottom', or iterable of both
decides the port to attach vias to.
bottom_conn : boolean
as the vias are drawn by default with top-bottom pads, you can decide here to
remove the second pad by default from all the designs.
Attributes
----------
via : PyResLayout.Via
instance of a PyResLayout.Via class
    pad_layers : length 2 iterable of int
        top/bottom layers to draw the via pads
over_via : float
ratio pad size / via size
via_distance : float
y distance between connecting port and via center
via_area : PyResLayout.Point
size (in coordinates) to be filled with vias.
'''
if isinstance(side,str):
side=[side]
side=[(_).lower() for _ in side]
class addVia(cls):
over_via=LayoutParamInterface()
via_distance=LayoutParamInterface()
via_area=LayoutParamInterface()
pad_layers=LayoutParamInterface()
def __init__(self,*args,**kwargs):
cls.__init__(self,*args,**kwargs)
self.pad_layers=[LayoutDefault.layerTop,LayoutDefault.layerBottom]
self.over_via=LayoutDefault.addVia_over_via
self.via_distance=LayoutDefault.addVia_via_distance
self.via_area=LayoutDefault.addVia_via_area
def draw(self):
cell=Device(name=self.name)
super_ref=cell.add_ref(cls.draw(self))
nvias_x,nvias_y=self.n_vias
unit_cell=self._draw_padded_via()
viacell=join(CellArray(unit_cell,\
columns=nvias_x,rows=nvias_y,\
spacing=(unit_cell.xsize,unit_cell.ysize)))
viacell.add_port(Port(name='conn',\
midpoint=(viacell.x,viacell.ymax),\
width=viacell.xsize,\
orientation=90))
for sides in side:
for p_name in super_ref.ports.keys():
if re.search(sides,p_name):
p=super_ref.ports[p_name]
pad=pg.compass(size=(p.width,self.via_distance),layer=self.pad_layers[0])
if sides=='top':
self._attach_instance(cell, pad, pad.ports['S'], viacell,p)
if sides=='bottom':
self._attach_instance(cell, pad, pad.ports['N'], viacell,p)
for p_name,p_value in super_ref.ports.items():
cell.add_port(p_value)
return cell
def export_params(self):
t=cls.export_params(self)
pop_all_dict(t,['ViaName'])
return t
def _bbox_mod(self,bbox):
LayoutPart._bbox_mod(self,bbox)
ll=Point(bbox[0])
ur=Point(bbox[1])
nvias_x,nvias_y=self.n_vias
if any([_=='top' for _ in side]):
ur=ur-Point(0,float(self.via.size*self.over_via*nvias_y+self.via_distance))
if any([_=='bottom' for _ in side]):
ll=ll+Point(0,float(self.via.size*self.over_via*nvias_y+self.via_distance))
return (ll.coord,ur.coord)
def _draw_padded_via(self):
viaref=DeviceReference(self.via.draw())
size=float(self.via.size*self.over_via)
port=viaref.ports['conn']
trace=pg.rectangle(size=(size,size),layer=self.pad_layers[0])
trace.move(origin=trace.center,\
destination=viaref.center)
trace2=pg.copy_layer(trace,layer=self.pad_layers[0],new_layer=self.pad_layers[1])
cell=Device(self.name)
cell.absorb(cell<<trace)
cell.absorb(cell<<trace2)
cell.add(viaref)
port.midpoint=(port.midpoint[0],cell.ymax)
port.width=size
cell.add_port(port)
if bottom_conn==False:
cell.remove_layers(layers=[self.pad_layers[1]])
return cell
def _attach_instance(self,cell,padcell,padport,viacell,port):
padref=cell<<padcell
padref.connect(padport,\
destination=port)
cell.absorb(padref)
viaref=cell.add_ref(viacell,alias=self.name+'Via'+port.name)
viaref.connect(viacell.ports["conn"],\
destination=port,\
overlap=-self.via_distance)
return port
@staticmethod
def get_components():
supercomp=copy(cls.get_components())
supercomp.update({"Via":pc.Via})
return supercomp
@property
def n_vias(self):
import numpy as np
nvias_x=max(1,int(np.floor(self.via_area.x/self.via.size/self.over_via)))
nvias_y=max(1,int(np.floor(self.via_area.y/self.via.size/self.over_via)))
return nvias_x,nvias_y
addVia.__name__=" ".join([cls.__name__,"w Via"])
# addVia.draw=cached(addVia)(addVia.draw)
return addVia
def addPad(cls):
''' Class decorator to add probing pads to existing cells.
Parameters
----------
cls : PyResLayout.LayoutPart
design where pads have to be added
Attributes
----------
pad : PyResLayout.Pad
pad design for the cell
The pad design needs a port to attach to the existing cell,
see help for more info.
'''
class addPad(cls):
def __init__(self,*args,**kwargs):
cls.__init__(self,*args,**kwargs)
def draw(self):
cell=Device()
cell.name=self.name
d_ref=cell.add_ref(cls.draw(self))
for name,port in d_ref.ports.items():
self.pad.port=port
pad_ref=cell.add_ref(self.pad.draw())
pad_ref.connect(pad_ref.ports['conn'],
destination=port)
cell.absorb(pad_ref)
cell.add_port(port,name)
return cell
@staticmethod
def get_components():
supercomp=copy(cls.get_components())
supercomp.update({"Pad":pc.Pad})
return supercomp
@property
def resistance_squares(self):
r0=super().resistance_squares
for port in cls.draw(self).get_ports():
self.pad.port=port
r0=r0+self.pad.resistance_squares
return r0
# addPad.draw=cached(addPad)(addPad.draw)
addPad.__name__=" ".join([cls.__name__,"w Pad"])
return addPad
def addPartialEtch(cls):
class addPartialEtch(cls):
@staticmethod
def get_components():
original_comp=copy(cls.get_components())
for compname,comp_value in original_comp.items():
if comp_value==pc.IDT:
original_comp[compname]=pc.PartialEtchIDT
break
else:
raise TypeError(f" no IDT to modify in {cls.__name__}")
return original_comp
    addPartialEtch.__name__ = cls.__name__ + ' w PartialEtching'
    # addPartialEtch.draw=cached(addPartialEtch)(addPartialEtch.draw)
    return addPartialEtch
def addProbe(cls,probe=pc.GSGProbe):
class addProbe(cls):
gnd_routing_width=LayoutParamInterface()
def __init__(self,*args,**kwargs):
cls.__init__(self,*args,**kwargs)
self.gnd_routing_width=100.0
self._setup_routings()
def draw(self):
self._setup_routings()
device_cell=cls.draw(self)
probe_cell=self.probe.draw()
cell=Device(name=self.name)
cell.add_ref(device_cell, alias=self.name+"Device")
probe_ref=cell.add_ref(probe_cell, alias=self.name+"Probe")
self._move_probe_ref(probe_ref)
cell.add_ref(self._draw_probe_routing(),alias=self.name+"GndTrace")
return cell
def export_params(self):
t=cls.export_params(self)
pop_all_dict(t, ["ProbeName"])
pop_all_dict(t, [k for k in t if re.search('SigTrace',k) ])
pop_all_dict(t, [k for k in t if re.search('GndLeftTrace',k) ])
pop_all_dict(t, [k for k in t if re.search('GndRightTrace',k) ])
return t
def export_all(self):
df=super().export_all()
df["DUTResistance"]=super().resistance_squares
df["ProbeResistance"]=self.probe_resistance_squares
return df
@property
def resistance_squares(self):
return super().resistance_squares
@staticmethod
def get_components():
supercomp=copy(cls.get_components())
if issubclass(probe,pc.GSGProbe):
supercomp.update({
"Probe":probe,
"SigTrace":pc.ParasiticAwareMultiRouting,
"GndLeftTrace":pc.MultiRouting,
"GndRightTrace":pc.MultiRouting})
else:
raise ValueError("To be implemented")
return supercomp
@property
def probe_resistance_squares(self):
return 0
@property
def probe_dut_distance(self):
return Point(0,self.idt.active_area.x/2)
def _move_probe_ref(self,probe_ref):
probe_dut_distance=self.probe_dut_distance
device_cell=cls.draw(self)
for p_name in device_cell.ports.keys():
if re.search('bottom',p_name):
probe_ref.move(origin=(probe_ref.center[0],probe_ref.ymax),\
destination=(\
device_cell.center[0],\
device_cell.ports[p_name].midpoint[1]-probe_dut_distance.y))
break
else:
raise ValueError(f"no bottom port in {cls.__name__} cell")
return probe_ref
def _setup_routings(self):
device_cell=cls.draw(self)
probe_cell=self.probe.draw()
probe_ref=self._move_probe_ref(DeviceReference(probe_cell))
bbox=super()._bbox_mod(device_cell.bbox)
if isinstance(self.probe,pc.GSGProbe):
for index,groundroute in enumerate([self.gndlefttrace,self.gndrighttrace]):
groundroute.layer=self.probe.layer
groundroute.clearance=bbox
groundroute.trace_width=self.gnd_routing_width
if index==0:
groundroute.side='left'
groundroute.source=(probe_ref.ports['gnd_left'],)
elif index==1:
groundroute.side='right'
groundroute.source=(probe_ref.ports['gnd_right'],)
device_ports=device_cell.ports
dut_port_top=[]
for port_name in device_ports.keys():
if re.search('top',port_name):
dut_port_top.append(device_ports[port_name])
groundroute.destination=tuple(dut_port_top)
#signal routing
signalroute=self.sigtrace
for p_name in device_cell.ports.keys():
if re.search('bottom',p_name):
signalroute.trace_width=device_cell.ports[p_name].width
break
else:
raise ValueError(f"no bottom port in {cls.__name__} cell")
bottom_ports=[]
for port_name in device_ports.keys():
if re.search('bottom',port_name):
bottom_ports.append(device_ports[port_name])
signalroute.layer=self.probe.layer
signalroute.clearance=bbox
signalroute.source=(probe_ref.ports['sig'],)
signalroute.destination=tuple(bottom_ports)
elif isinstance(self.probe,pc.GSProbe):
raise ValueError("addProbe with GSprobe to be implemented ")
else:
raise ValueError("addProbe without GSG/GSprobe to be implemented ")
def _draw_probe_routing(self):
if isinstance(self.probe,pc.GSGProbe):
routing_cell=Device()
routing_cell<<self.gndlefttrace.draw()
routing_cell<<self.gndrighttrace.draw()
routing_cell<<self.sigtrace.draw()
return routing_cell
else :
raise ValueError("To be implemented")
addProbe.__name__=" ".join([cls.__name__,"w Probe"])
# addProbe.draw=cached(addProbe)(addProbe.draw)
return addProbe
def addLargeGnd(probe):
class addLargeGnd(probe):
ground_size=LayoutParamInterface()
pad_position=LayoutParamInterface('top','side')
def __init__(self,*args,**kwargs):
probe.__init__(self,*args,**kwargs)
self.ground_size=LayoutDefault.GSGProbe_LargePadground_size
self.pad_position='side'
def draw(self):
cell=pt.Device(name=self.name)
oldprobe=cell<<probe.draw(self)
cell.absorb(oldprobe)
groundpad=pg.compass(size=(self.ground_size,self.ground_size),\
layer=self.layer)
[_,_,ul,ur]=get_corners(groundpad)
for name,p in oldprobe.ports.items():
name=p.name
if 'gnd' in name:
groundref=cell<<groundpad
if 'left' in name:
groundref.move(origin=ur.coord,\
destination=p.endpoints[1])
left_port=groundref.ports['N']
elif 'right' in name:
groundref.move(origin=ul.coord,\
destination=p.endpoints[0])
right_port=groundref.ports['N']
cell.absorb(groundref)
else :
cell.add_port(p)
for name,port in oldprobe.ports.items():
if 'gnd' in name:
if 'left' in name:
if self.pad_position=='side':
left_port=Port(name=name,\
midpoint=(left_port.midpoint[0]+self.ground_size/2,\
left_port.midpoint[1]-self.ground_size/2),\
orientation=180,\
width=self.ground_size)
elif self.pad_position=='top':
left_port=Port(name=name,\
midpoint=(left_port.midpoint[0],\
left_port.midpoint[1]),\
orientation=90,\
width=self.ground_size)
else :
raise ValueError(f"New pad position is {self.pad_position} : not acceptable")
cell.add_port(left_port)
elif 'right' in name:
if self.pad_position=='side':
right_port=Port(name=name,\
midpoint=(right_port.midpoint[0]-self.ground_size/2,\
right_port.midpoint[1]-self.ground_size/2),\
orientation=0,\
width=self.ground_size)
elif self.pad_position=='top':
right_port=Port(name=name,\
midpoint=(right_port.midpoint[0],\
right_port.midpoint[1]),\
orientation=90,\
width=self.ground_size)
else :
raise ValueError(f"New pad position is {self.pad_position} : not acceptable")
cell.add_port(right_port)
return cell
# addLargeGnd.draw=pt.cached(addLargeGnd)(addLargeGnd.draw)
addLargeGnd.__name__=" ".join([probe.__name__,"w Large Ground"])
return addLargeGnd
def array(cls,n=2):
if not isinstance(n,int):
raise ValueError(" n needs to be integer")
class array(cls):
n_blocks=LayoutParamInterface()
def __init__(self,*args,**kwargs):
cls.__init__(self,*args,**kwargs)
self.n_blocks=n
def draw(self):
unit_cell=cls.draw(self)
port_names=list(unit_cell.ports.keys())
cell=draw_array(unit_cell,\
self.n_blocks,1)
cell.name=self.name
return cell
@property
def resistance_squares(self):
r=super().resistance_squares
cell=cls.draw(self)
for p in cell.get_ports():
if 'bottom' in p.name:
p_bot=p
break
w=p_bot.width
l=w
n_blocks=self.n_blocks
if n_blocks==1:
return r+l/w
else:
x_dist=self.idt.active_area.x+self.etchpit.x*2
if n_blocks%2==1 :
return parallel_res(r+l/w,(r+2*x_dist/l)/(n_blocks-1))
if n_blocks%2==0 :
if n_blocks==2:
return (r+x_dist/l)/2
else:
return parallel_res((r+x_dist/l)/2,(r+2*x_dist/l)/(n_blocks-2))
def export_all(self):
df=super().export_all()
df["SingleDeviceResistance"]=super().resistance_squares
return df
array.__name__= " ".join([f"{n} array of",cls.__name__])
return array
def fixture(cls,style='open'):
class fixture(cls):
style=LayoutParamInterface('short','open')
def __init__(self,*a,**k):
super().__init__(*a,**k)
self.style=style
def draw(self):
supercell=cls.draw(self)
cell=pg.deepcopy(supercell)
style=self.style
ports=supercell.ports
for subcell in cell.get_dependencies(recursive=True):
if 'IDT' in subcell.aliases:
idt_parent=subcell
idt_cell=subcell['IDT']
for alias in cell.aliases.keys():
if 'IDT' in alias:
idt_parent=cell
idt_cell=cell['IDT']
if style=='open':
idt_parent.remove(idt_cell)
if style=='short':
top_port=idt_cell.ports['top']
bottom_port=idt_cell.ports['bottom']
short=pg.taper(length=top_port.y-bottom_port.y,\
width1=top_port.width,\
width2=bottom_port.width,layer=self.idt.layer)
s_ref=cell<<short
s_ref.connect(short.ports[1],\
destination=top_port)
s_ref.rotate(center=top_port.center,\
angle=180)
cell.absorb(s_ref)
return cell
@property
def resistance_squares(self):
style=self.style
if style=='open':
                # effectively an open circuit; return a large finite value so downstream math stays finite
                return 1e9
elif style=='short':
cell=cls.draw(self)
ports=cell.get_ports()
top_port=cell.ports['top']
bottom_port=cell.ports['bottom']
l=top_port.y-bottom_port.y
w=(top_port.width+bottom_port.width)/2
return l/w
fixture.__name__=f"fixture for {cls.__name__}"
return fixture
def bondstack(cls,n=4,sharedpad=False):
if not isinstance(n,int):
raise ValueError(" n needs to be integer")
padded_cls=addPad(cls)
class bondstack(padded_cls):
n_copies=LayoutParamInterface()
sharedpad=LayoutParamInterface(True,False)
pitch=LayoutParamInterface()
def __init__(self,*a,**k):
padded_cls.__init__(self,*a,**k)
self.n_copies=n
self.sharedpad=sharedpad
self.pitch=150.0
def draw(self):
cell=padded_cls.draw(self)
return pt.draw_array(cell,n,1,0.0,self.pitch)
bondstack.__name__=" ".join([f"Bondstack of {n}",padded_cls.__name__])
return bondstack
_allmodifiers=(Scaled,addVia,addPad,addPartialEtch,addProbe,addLargeGnd,array,fixture,bondstack)
|
the-stack_106_15510
|
#!/usr/bin/env python3
# Copyright (C) 2021, RTE (http://www.rte-france.com)
# SPDX-License-Identifier: CC-BY-4.0
"""
Script to test RbdManager module: rollback RBD image to a specific
image snapshot and group snapshot
"""
import time
from vm_manager.helpers.rbd_manager import RbdManager
CEPH_CONF = "/etc/ceph/ceph.conf"
POOL_NAME = "rbd"
IMG_SIZE = "4M"
IMG_NAME = "img1"
SNAP = "snap1"
GROUP = "group1"
TEXT1 = "Hello world"
TEXT2 = "XXXXXXXXXXX"
def write_to_image(rbd, img_name, text):
"""
Write and read data on an RBD image.
"""
# Write TEXT to image
print("Write text '" + text + "' to image " + img_name)
rbd.write_to_image(img_name, bytes(text, "utf-8"), 0)
# Verify data read
data = rbd.read_from_image(img_name, 0, len(text)).decode()
print("Read from image " + img_name + ": " + data)
if data != text:
raise Exception(
"Data read from " + img_name + " is not correct: " + data
)
def cleanup(rbd):
"""
Remove group and image.
"""
if rbd.group_exists(GROUP):
print("Remove group " + GROUP)
rbd.remove_group(GROUP)
print("Group list: " + str(rbd.list_groups()))
if rbd.image_exists(IMG_NAME):
print("Remove image " + IMG_NAME)
rbd.remove_image(IMG_NAME) # remove forces purge
print("Image list: " + str(rbd.list_images()))
if __name__ == "__main__":
with RbdManager(CEPH_CONF, POOL_NAME) as rbd:
# Create image
print("Create image " + IMG_NAME)
rbd.create_image(IMG_NAME, IMG_SIZE)
img_list = rbd.list_images()
print("Image list: " + str(img_list))
if IMG_NAME not in img_list:
raise Exception("Could not create image " + IMG_NAME)
try:
# Write text on image
write_to_image(rbd, IMG_NAME, TEXT1)
# Create image snapshot
print("Create snapshot " + SNAP + " from image " + IMG_NAME)
rbd.create_image_snapshot(IMG_NAME, SNAP)
# Verify snapshot creation
snap_list = rbd.list_image_snapshots(IMG_NAME)
print("Snaps from " + IMG_NAME + ": " + str(snap_list))
if SNAP not in snap_list:
raise Exception("Could not create snapshot " + SNAP)
# Check snapshot timestamp
ts = rbd.get_image_snapshot_timestamp(IMG_NAME, SNAP)
print("Snapshot " + SNAP + " timestamp: " + str(ts))
if (
int(ts.timestamp()) > int(time.time()) + 5
): # Compare with 5 sec delay
raise Exception(
"Incorrect snapshot " + SNAP + " timestamp: " + str(ts)
)
# Overwrite data on image
write_to_image(rbd, IMG_NAME, TEXT2)
# Rollback to snapshot
print("Rollback " + IMG_NAME + " to " + SNAP)
rbd.rollback_image(IMG_NAME, SNAP)
# Verify data rollback
data = rbd.read_from_image(IMG_NAME, 0, len(TEXT1)).decode()
print("Read from image " + IMG_NAME + ": " + data)
if data != TEXT1:
raise Exception(
"Data read from " + IMG_NAME + " is not correct: " + data
)
# Repeat process for group snapshot
# Create group
print("Create group " + GROUP)
rbd.create_group(GROUP)
groups = rbd.list_groups()
print("Group list: ", str(groups))
if GROUP not in groups:
raise Exception("Could not create group " + GROUP)
# Add image to group
print("Add image " + IMG_NAME + " to group " + GROUP)
rbd.add_image_to_group(IMG_NAME, GROUP)
print(
"Group "
+ GROUP
+ " image list: "
+ str(rbd.list_group_images(GROUP))
)
if not rbd.is_image_in_group(IMG_NAME, GROUP):
raise Exception(
"Could not add image " + IMG_NAME + " to group " + GROUP
)
# Write data to image
write_to_image(rbd, IMG_NAME, TEXT1)
# Create group snapshot
print("Create group snapshot")
rbd.create_group_snapshot(GROUP, SNAP)
if SNAP not in rbd.list_group_snapshots(GROUP):
raise Exception(
"Could not create snapshot " + SNAP + " on group " + GROUP
)
# Overwrite data on image
write_to_image(rbd, IMG_NAME, TEXT2)
# Rollback to snap
print("Rollback group " + GROUP + " to snap " + SNAP)
rbd.rollback_group(GROUP, SNAP)
# Verify data rollback
data = rbd.read_from_image(IMG_NAME, 0, len(TEXT1)).decode()
print("Read from image " + IMG_NAME + ": " + data)
if data != TEXT1:
raise Exception(
"Data read from " + IMG_NAME + " is not correct: " + data
)
# Cleanup
cleanup(rbd)
if rbd.group_exists(GROUP):
raise Exception("Could not remove group " + GROUP)
if rbd.image_exists(IMG_NAME):
raise Exception("Could not remove image " + IMG_NAME)
print("Test finished")
finally:
cleanup(rbd)
|
the-stack_106_15511
|
import inspect
import sys
import torch
from .line_records import LineRecords
# Seaborn's `muted` color cycle
DEFAULT_COLUMNS = ['active_bytes.all.peak', 'reserved_bytes.all.peak']
class LineProfiler:
"""Profile the CUDA memory usage info for each line in pytorch
This class registers callbacks for added functions to profiling them line
by line, and collects all the statistics in CUDA memory. Usually you may
want to use simpler wrapper below `profile` or `profile_every`.
The CUDA memory is collected only on the **current** cuda device.
Usage:
        ```python
        with LineProfiler(func) as lp:
            func()
        lp.display()
        ```
        ```python
        lp = LineProfiler(func)
        lp.enable()
        func()
        lp.disable()
        lp.display()
        ```
"""
def __init__(self, *functions, target_gpu=0, **kwargs):
self.target_gpu = target_gpu
self._code_infos = {}
self._raw_line_records = []
self.enabled = False
for func in functions:
self.add_function(func)
def add_function(self, func):
""" Record line profiling information for the given Python function.
"""
try:
# We need to use the hash here because pandas will later expect something
# orderable for its index
code_hash = hash(func.__code__)
except AttributeError:
import warnings
warnings.warn(
"Could not extract a code object for the object %r" % (func,))
return
if code_hash not in self._code_infos:
first_line = inspect.getsourcelines(func)[1]
self._code_infos[code_hash] = {
'func': func,
'first_line': first_line,
'prev_line': first_line,
'prev_record': -1,
}
# re-register the newer trace_callback
if self.enabled:
self.register_callback()
def __enter__(self):
self.enable()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.disable()
def register_callback(self):
"""Register the trace_callback only on demand"""
if self._code_infos:
sys.settrace(self._trace_callback)
def _reset_cuda_stats(self):
torch.cuda.reset_peak_memory_stats()
torch.cuda.reset_accumulated_memory_stats()
def enable(self):
self.enabled = True
try:
torch.cuda.empty_cache()
self._reset_cuda_stats()
except AssertionError as e:
print('Could not reset CUDA stats and cache: ' + str(e))
self.register_callback()
def disable(self):
self.enabled = False
sys.settrace(None)
def clear(self):
"""Clear the state of the line profiler"""
self._code_infos = {}
self._raw_line_records = []
def _trace_callback(self, frame, event, arg):
"""Trace the execution of python line-by-line"""
if event == 'call':
return self._trace_callback
code_hash = hash(frame.f_code)
if event in ['line', 'return'] and code_hash in self._code_infos:
code_info = self._code_infos[code_hash]
with torch.cuda.device(self.target_gpu):
self._raw_line_records.append({
'code_hash': code_hash,
'line': code_info['prev_line'],
'prev_record_idx': code_info['prev_record'],
**torch.cuda.memory_stats()})
self._reset_cuda_stats()
if event == 'line':
code_info['prev_line'] = frame.f_lineno
code_info['prev_record'] = len(self._raw_line_records)-1
elif event == 'return':
code_info['prev_line'] = code_info['first_line']
code_info['prev_record'] = -1
def display(self, func=None, columns=DEFAULT_COLUMNS):
"""Display the profiling results on either IPython or CLI
The columns are explained in the PyTorch documentation:
https://pytorch.org/docs/stable/cuda.html#torch.cuda.memory_stats
.. note:: To work, this needs to be the last thing returned in the IPython statement or cell.
Args:
func (str): the function name of interest, None for all registered function
columns (list of str): the column names of interest, See PyTorch's doc for available names.
Returns:
RecordsDisplay: Returns an object that'll display the recorded stats in the IPython console
"""
return LineRecords(self._raw_line_records, self._code_infos).display(func, columns)
def print_stats(self, func=None, columns=DEFAULT_COLUMNS, stream=sys.stdout):
"""Print the text profiling results to stream
The columns are explained in the PyTorch documentation:
https://pytorch.org/docs/stable/cuda.html#torch.cuda.memory_stats
Args:
func (str): the function name of interest, None for all registered function
columns (list of str): the column names of interest, See PyTorch's doc for available names
stream (IO-like object): the stream to write to
"""
stream.write(str(self.display(func, columns)))
|
the-stack_106_15512
|
import math
import torch
from torch.optim.optimizer import Optimizer
import numpy as np
# What we want to do is have the update be
# sgd_update  = - mu * grad
# adam_update = - mu * grad / sqrt(avg(grad**2))
# new update  = - mu * grad * (beta2 + beta1) / sqrt(grad**2 * beta1 + avg(grad**2) * beta2)
# where beta1 is gradually fading in (basically the reverse of the adam
# correction for early times) and beta2 is some fixed factor, probably around 2.
# beta2 -> infinity is sgd (with an oddly scaled learning rate), beta2 -> 0 is adam.
# Goal is to suppress high-eigenvalue eigenvectors while leaving the rest alone.
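# A sketch of the limiting behaviour implemented below (using the code's own names:
# fade_in plays the role of beta1 above, group['eta'] the role of beta2):
#   denom ~ sqrt(exp_avg_sq * fade_in + mean(exp_avg_sq) * eta + eps**2)
#   eta -> infinity : denom -> sqrt(mean(grad**2)) for every parameter, i.e. SGD up to a global rescale
#   eta -> 0, fade_in = 1 : denom -> sqrt(exp_avg_sq), i.e. the usual per-parameter Adam scaling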
class SmoothAdam(Optimizer):
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
eta=2.0, weight_decay=0, nesterov=True):
defaults = dict(lr=lr, betas=betas, eps=eps, eta=eta,
weight_decay=weight_decay,
nesterov=nesterov)
super(SmoothAdam, self).__init__(params, defaults)
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
nesterov = group['nesterov']
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p.data)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['betas']
state['step'] += 1
# beta1_hat = beta1 #min(beta1,1.0-1.0/state['step'])
beta2_hat = min(beta2,1.0-1.0/state['step'])
fade_in = min(1.0,(1-beta2)*(1+state['step']))
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(1 - beta1, grad)
exp_avg_sq.mul_(beta2_hat).addcmul_(1 - beta2_hat, grad, grad)
denom_smooth = torch.mean(exp_avg_sq) * group['eta'] + group['eps']*group['eps']
denom = exp_avg_sq.mul(fade_in).add_(denom_smooth).sqrt()
num_coeff = group['eta'] + fade_in + group['eps']
wd = group['weight_decay']*group['lr']
p.data.add_(-wd, p.data)
if nesterov:
p.data.addcdiv_(-group['lr']*beta1*num_coeff, exp_avg, denom)
p.data.addcdiv_(-group['lr']*(1-beta1)*num_coeff, grad, denom)
else:
p.data.addcdiv_(-group['lr']*num_coeff, exp_avg, denom)
return loss
|
the-stack_106_15513
|
import torch
import torchvision
import torchvision.transforms as transforms
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from pytorch_lightning import LightningModule, Trainer
from pytorch_lightning.callbacks import EarlyStopping
from torchmetrics.functional import accuracy
import hydra
from omegaconf import DictConfig, OmegaConf
import os
from high_order_layers_torch.layers import *
transformStandard = transforms.Compose(
[transforms.ToTensor(), transforms.Normalize((0.5,), (0.5,))]
)
transformPoly = transforms.Compose(
[transforms.ToTensor(), transforms.Normalize((0.0,), (1.0,))]
)
# transformPoly = transformStandard
classes = ("0", "1", "2", "3", "4", "5", "6", "7", "8", "9")
class Net(LightningModule):
def __init__(self, cfg: DictConfig):
super().__init__()
self.save_hyperparameters(cfg)
self._cfg = cfg
try:
self._data_dir = f"{hydra.utils.get_original_cwd()}/data"
except:
self._data_dir = "../data"
n = cfg.n
self._batch_size = cfg.batch_size
self._layer_type = cfg.layer_type
self._train_fraction = cfg.train_fraction
layer_type = cfg.layer_type
segments = cfg.segments
self._transform = transformPoly
in_channels = 1
if self._cfg.add_pos == True:
in_channels = 3
if self._layer_type == "standard":
self.conv1 = torch.nn.Conv2d(
in_channels=in_channels,
out_channels=6 * ((n - 1) * segments + 1),
kernel_size=5,
)
self.conv2 = torch.nn.Conv2d(
in_channels=6 * ((n - 1) * segments + 1), out_channels=16, kernel_size=5
)
else:
self.conv1 = high_order_convolution_layers(
layer_type=self._layer_type,
n=n,
in_channels=in_channels,
out_channels=6,
kernel_size=5,
segments=cfg.segments,
)
self.conv2 = high_order_convolution_layers(
layer_type=self._layer_type,
n=n,
in_channels=6,
out_channels=16,
kernel_size=5,
segments=cfg.segments,
)
w1 = 28 - 4
w2 = (w1 // 2) - 4
c1 = 6
c2 = 16
self.pool = nn.MaxPool2d(2, 2)
self.fc1 = nn.Linear(16 * 4 * 4, 10)
# Create xy objects
if self._cfg.add_pos == True:
xm = torch.linspace(-1, 1, 28, device=self.device)
ym = torch.linspace(-1, 1, 28, device=self.device)
xv, yv = torch.meshgrid(xm, ym)
xv = torch.stack(self._batch_size * [xv], dim=0)
yv = torch.stack(self._batch_size * [yv], dim=0)
# This is a hack. Apparently self.device is not on cuda.
self._pos = torch.stack([xv, yv], dim=1).cuda()
def forward(self, xin):
if self._cfg.add_pos == True:
x = torch.cat([xin, self._pos], dim=1)
else:
x = xin
if self._layer_type == "standard":
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = x.reshape(-1, 16 * 4 * 4)
x = self.fc1(x)
else:
x = self.pool(self.conv1(x))
x = self.pool(self.conv2(x))
x = x.reshape(-1, 16 * 4 * 4)
x = self.fc1(x)
return x
def setup(self, stage):
num_train = int(self._train_fraction * 50000)
num_val = 10000
num_extra = 50000 - num_train
train = torchvision.datasets.MNIST(
root=self._data_dir, train=True, download=True, transform=self._transform
)
self._train_subset, self._val_subset, extra = torch.utils.data.random_split(
train,
[num_train, 10000, num_extra],
generator=torch.Generator().manual_seed(1),
)
def training_step(self, batch, batch_idx):
return self.eval_step(batch, batch_idx, "train")
def train_dataloader(self):
return torch.utils.data.DataLoader(
self._train_subset,
batch_size=self._batch_size,
shuffle=True,
num_workers=10,
)
def val_dataloader(self):
return torch.utils.data.DataLoader(
self._val_subset, batch_size=self._batch_size, shuffle=False, num_workers=10
)
def test_dataloader(self):
testset = torchvision.datasets.MNIST(
root=self._data_dir, train=False, download=True, transform=self._transform
)
return torch.utils.data.DataLoader(
testset, batch_size=self._batch_size, shuffle=False, num_workers=10
)
def validation_step(self, batch, batch_idx):
return self.eval_step(batch, batch_idx, "val")
def eval_step(self, batch, batch_idx, name):
x, y = batch
logits = self(x)
loss = F.cross_entropy(logits, y)
preds = torch.argmax(logits, dim=1)
acc = accuracy(preds, y)
# Calling self.log will surface up scalars for you in TensorBoard
self.log(f"{name}_loss", loss, prog_bar=True)
self.log(f"{name}_acc", acc, prog_bar=True)
return loss
def test_step(self, batch, batch_idx):
# Here we just reuse the validation_step for testing
return self.eval_step(batch, batch_idx, "test")
def configure_optimizers(self):
return optim.Adam(self.parameters(), lr=0.001)
def mnist(cfg: DictConfig):
print(OmegaConf.to_yaml(cfg))
print("Working directory : {}".format(os.getcwd()))
try:
print(f"Orig working directory : {hydra.utils.get_original_cwd()}")
except:
pass
early_stop_callback = EarlyStopping(
monitor="val_loss", min_delta=0.00, patience=3, verbose=False, mode="min"
)
trainer = Trainer(
max_epochs=cfg.max_epochs, gpus=cfg.gpus, callbacks=[early_stop_callback]
)
model = Net(cfg)
trainer.fit(model)
print("testing")
results = trainer.test(model)
print("finished testing")
return results
@hydra.main(config_path="../config", config_name="mnist_config")
def run(cfg: DictConfig):
mnist(cfg)
if __name__ == "__main__":
run()
|
the-stack_106_15514
|
import logging
import time
from ...widgets.rules import RulesEngine, RulesDispatcher
from ...widgets.label import PyDMLabel
def test_rules_dispatcher(qapp, caplog):
"""
Test the dispatcher to ensure that it is a singleton.
Parameters
----------
qapp : QApplication
Reference to the QApplication
caplog : fixture
To capture the log messages
"""
disp1 = RulesDispatcher()
disp2 = RulesDispatcher()
assert disp1 is disp2
assert disp1.rules_engine.isRunning()
payload = {"foo": "bar"}
disp1.dispatch(payload)
for record in caplog.records:
assert record.levelno == logging.ERROR
assert "Error at RulesDispatcher" in caplog.text
def test_unregister(qtbot):
"""
Test the dispatcher for registering and unregistering of widgets.
Parameters
----------
qtbot : fixture
Parent of all the widgets
"""
widget = PyDMLabel()
qtbot.addWidget(widget)
rules = [{'name': 'Rule #1', 'property': 'Visible',
'expression': 'ch[0] < 1',
'channels': [{'channel': 'ca://MTEST:Float', 'trigger': True}]}]
dispatcher = RulesDispatcher()
dispatcher.register(widget, rules)
assert widget in dispatcher.rules_engine.widget_map
dispatcher.unregister(widget)
assert widget not in dispatcher.rules_engine.widget_map
def test_rules_full(qtbot, caplog):
"""
Test the rules mechanism.
Parameters
----------
qtbot : fixture
Parent of all the widgets
caplog : fixture
To capture the log messages
"""
widget = PyDMLabel()
qtbot.addWidget(widget)
widget.show()
assert widget.isVisible()
rules = [{'name': 'Rule #1', 'property': 'Visible',
'expression': 'ch[0] < 1',
'channels': [{'channel': 'ca://MTEST:Float', 'trigger': True}]}]
dispatcher = RulesDispatcher()
dispatcher.register(widget, rules)
re = dispatcher.rules_engine
assert widget in re.widget_map
assert len(re.widget_map[widget]) == 1
assert re.widget_map[widget][0]['rule'] == rules[0]
re.callback_value(widget, 0, 0, trigger=True, value=1)
for record in caplog.records:
assert record.levelno == logging.ERROR
assert "Not all channels are connected" in caplog.text
blocker = qtbot.waitSignal(re.rule_signal, timeout=1000)
re.callback_conn(widget, 0, 0, value=True)
re.callback_value(widget, 0, 0, trigger=True, value=5)
assert re.widget_map[widget][0]['calculate'] is True
time.sleep(2)
assert re.widget_map[widget][0]['calculate'] is False
blocker.wait()
assert not widget.isVisible()
caplog.clear()
rules[0]['expression'] = 'foo'
dispatcher.register(widget, rules)
assert len(re.widget_map[widget]) == 1
re.callback_conn(widget, 0, 0, value=True)
re.callback_value(widget, 0, 0, trigger=True, value='a')
time.sleep(2)
for record in caplog.records:
assert record.levelno == logging.ERROR
assert "Error while evaluating Rule" in caplog.text
dispatcher.unregister(widget)
assert widget not in re.widget_map
|
the-stack_106_15515
|
import sys
from pathlib import Path
from textwrap import dedent
sys.path.append(str(Path(__file__).parent.resolve().parent))
from texlive.github_handler import *
from texlive.utils import find_checksum_from_url
template = dedent(
"""\
# MSYS2 TexLive {version}
See https://github.com/msys2/msys2-texlive for more information.
## Checksums
```
{checksums_string}
```
"""
)
repo = get_repo()
release = repo.get_release(environ["tag_act"].split("/")[-1])
release_assets = get_release_assets(release)
checksums = {}
for asset in release_assets:
url = asset.browser_download_url
checksums[asset.name] = find_checksum_from_url(url, "sha256")
checksums_string = "\n".join(
[f"{checksum} {name}" for name, checksum in checksums.items()]
)
release.update_release(
name=f"MSYS2 TexLive {release.tag_name}",
message=template.format(
version=release.tag_name, checksums_string=checksums_string
),
)
|
the-stack_106_15516
|
#!/usr/bin/env python
# coding: utf-8
"""
Test panel class
Copyright 2017 MicaSense, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in the
Software without restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import pytest
import os, glob
import math
import micasense.image as image
import micasense.panel as panel
# plain helpers (the tests below call these directly, so they must not be pytest fixtures)
def panel_image_name():
    image_path = os.path.join('data', '0000SET', '000')
    return os.path.join(image_path, 'IMG_0000_1.tif')
def flight_image_name():
    image_path = os.path.join('data', '0000SET', '000')
    return os.path.join(image_path, 'IMG_0001_1.tif')
def test_qr_corners():
img = image.Image(panel_image_name())
pan = panel.Panel(img)
qr_corners = pan.qr_corners()
good_qr_corners = [[460, 599], [583, 599], [584, 478], [462, 477]]
assert qr_corners is not None
assert len(qr_corners) == len(good_qr_corners)
assert pan.serial == b"RP02-1603036-SC"
for i, pt in enumerate(qr_corners):
# different opencv/zbar versions round differently it seems
assert pt[0] == pytest.approx(good_qr_corners[i][0], abs=3)
assert pt[1] == pytest.approx(good_qr_corners[i][1], abs=3)
def test_panel_corners():
img = image.Image(panel_image_name())
pan = panel.Panel(img)
panel_pts = pan.panel_corners()
good_pts = [[809, 613], [648, 615], [646, 454], [808, 452]]
assert panel_pts is not None
assert len(panel_pts) == len(good_pts)
assert pan.serial == b"RP02-1603036-SC"
for i, pt in enumerate(panel_pts):
# different opencv/zbar versions round differently it seems
assert pt[0] == pytest.approx(good_pts[i][0], abs=3)
assert pt[1] == pytest.approx(good_pts[i][1], abs=3)
# test manually providing bad corners - in this case the corners of the qr code itself
def test_raw_panel_bad_corners():
img = image.Image(panel_image_name())
pan = panel.Panel(img,panelCorners=[[460, 599], [583, 599], [584, 478], [462, 477]])
mean, std, num, sat = pan.raw()
assert mean == pytest.approx(26965, rel=0.01)
assert std == pytest.approx(15396.0, rel=0.05)
assert num == pytest.approx(14824, rel=0.01)
assert sat == pytest.approx(0, abs=2)
# test manually providing good corners
def test_raw_panel_manual():
img = image.Image(panel_image_name())
pan = panel.Panel(img,panelCorners=[[809, 613], [648, 615], [646, 454], [808, 452]])
mean, std, num, sat = pan.raw()
assert mean == pytest.approx(45406, rel=0.01)
assert std == pytest.approx(738.0, rel=0.05)
assert num == pytest.approx(26005, rel=0.001)
assert sat == pytest.approx(0, abs=2)
def test_raw_panel():
img = image.Image(panel_image_name())
pan = panel.Panel(img)
mean, std, num, sat = pan.raw()
assert mean == pytest.approx(45406.0, rel=0.01)
assert std == pytest.approx(738.0, rel=0.05)
assert num == pytest.approx(26005, rel=0.02)
assert sat == pytest.approx(0, abs=2)
def test_intensity_panel():
img = image.Image(panel_image_name())
pan = panel.Panel(img)
mean, std, num, sat = pan.intensity()
assert mean == pytest.approx(1162, rel=0.01)
assert std == pytest.approx(23, rel=0.03)
assert num == pytest.approx(26005, rel=0.02)
assert sat == pytest.approx(0, abs=2)
def test_radiance_panel():
img = image.Image(panel_image_name())
pan = panel.Panel(img)
mean, std, num, sat = pan.radiance()
assert mean == pytest.approx(0.170284, rel=0.01)
assert std == pytest.approx(0.0033872969661854742, rel=0.02)
assert num == pytest.approx(26005, rel=0.02)
assert sat == pytest.approx(0, abs=2)
def test_irradiance_mean():
img = image.Image(panel_image_name())
pan = panel.Panel(img)
panel_reflectance = 0.67
mean = pan.irradiance_mean(panel_reflectance)
assert mean == pytest.approx(0.7984, rel=0.001)
def test_panel_detected():
img = image.Image(panel_image_name())
pan = panel.Panel(img)
assert pan.panel_detected() == True
def test_panel_not_detected():
img = image.Image(flight_image_name())
pan = panel.Panel(img)
assert pan.panel_detected() == False
|
the-stack_106_15521
|
from drf_spectacular.extensions import OpenApiSerializerExtension
from drf_spectacular.plumbing import force_instance, warn
class PolymorphicProxySerializerExtension(OpenApiSerializerExtension):
target_class = 'drf_spectacular.utils.PolymorphicProxySerializer'
priority = -1
def get_name(self):
return self.target.component_name
def map_serializer(self, auto_schema, direction):
""" custom handling for @extend_schema's injection of PolymorphicProxySerializer """
if isinstance(self.target.serializers, dict):
sub_components = self._get_explicit_sub_components(auto_schema, direction)
else:
sub_components = self._get_implicit_sub_components(auto_schema, direction)
return {
'oneOf': [ref for _, ref in sub_components],
'discriminator': {
'propertyName': self.target.resource_type_field_name,
'mapping': {resource_type: ref['$ref'] for resource_type, ref in sub_components}
}
}
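    # Illustrative shape of the emitted component (serializer and field names are hypothetical):
    #   {"oneOf": [{"$ref": "#/components/schemas/Cat"}, {"$ref": "#/components/schemas/Dog"}],
    #    "discriminator": {"propertyName": "resource_type",
    #                      "mapping": {"cat": "#/components/schemas/Cat",
    #                                  "dog": "#/components/schemas/Dog"}}}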
def _get_implicit_sub_components(self, auto_schema, direction):
sub_components = []
for sub_serializer in self.target.serializers:
sub_serializer = force_instance(sub_serializer)
sub_serializer.partial = self.target.partial
resolved_sub_serializer = auto_schema.resolve_serializer(sub_serializer, direction)
try:
discriminator_field = sub_serializer.fields[self.target.resource_type_field_name]
resource_type = discriminator_field.to_representation(None)
except: # noqa: E722
warn(
f'sub-serializer {resolved_sub_serializer.name} of {self.target.component_name} '
f'must contain the discriminator field "{self.target.resource_type_field_name}". '
f'defaulting to sub-serializer name, but schema will likely not match the API.'
)
resource_type = resolved_sub_serializer.name
sub_components.append((resource_type, resolved_sub_serializer.ref))
return sub_components
def _get_explicit_sub_components(self, auto_schema, direction):
sub_components = []
for resource_type, sub_serializer in self.target.serializers.items():
sub_serializer = force_instance(sub_serializer)
sub_serializer.partial = self.target.partial
resolved_sub_serializer = auto_schema.resolve_serializer(sub_serializer, direction)
sub_components.append((resource_type, resolved_sub_serializer.ref))
return sub_components
|
the-stack_106_15522
|
#!/usr/bin/python2
# /*
# * Copyright (c) 2017, Doug Smith, KEBA Corp
# * All rights reserved.
# *
# * Redistribution and use in source and binary forms, with or without
# * modification, are permitted provided that the following conditions are met:
# *
# * 1. Redistributions of source code must retain the above copyright notice, this
# * list of conditions and the following disclaimer.
# * 2. Redistributions in binary form must reproduce the above copyright notice,
# * this list of conditions and the following disclaimer in the documentation
# * and/or other materials provided with the distribution.
# *
# * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *
# *
# * Created on: October 4, 2017
# * Author: Doug Smith
# */
# This started as an implementation of the robodk processor but it wasn't flexible enough.
# See the global define section at the bottom for Kairo compatibility
# Todo: Stuff. This was made in a couple hours.
from robot_movement_interface import msg as rmi_msg
import rospy
import functools
# ----------------------------------------------------
# Positions
class RmiFrame(object):
def __init__(self, frame):
'''
:param frame: The frame
:type frame: list[float]
'''
assert(len(frame) == 6)
self.frame = frame
class RmiPos(object):
'''
'''
def __init__(self, pose, pose_type='', aux_values=None):
'''
A position
:param pose: Position values
:type pose: list[float]
:param pose_type:
:type pose_type: str
:param aux_values:
:type aux_values: list[str]
'''
self.pose = pose
self.pose_type = pose_type
self.aux_values = []
if aux_values is not None:
self.aux_values = aux_values
def SetCmd(self, cmd):
'''
Update cmd with the data in this pose
:param cmd: The Command to update
:type cmd: rmi_msg.Command
'''
# assert isinstance(cmd, Command)
cmd.pose = self.pose
cmd.pose_type = self.pose_type
for aux in self.aux_values:
cmd.additional_parameters.append(aux)
class RmiPosJoints(RmiPos):
def __init__(self, pose):
# Joint positions don't have aux values
super(RmiPosJoints, self).__init__(pose, 'JOINTS')
class RmiPosQuaternion(RmiPos):
def __init__(self, pose, aux_values=None):
super(RmiPosQuaternion, self).__init__(pose, 'QUATERNION', aux_values)
class RmiPosEulerZyx(RmiPos):
def __init__(self, pose, aux_values=None):
super(RmiPosEulerZyx, self).__init__(pose, 'EULER_INTRINSIC_ZYX', aux_values)
# Dynamics
class RmiVelo(object):
def __init__(self, velocity, velocity_type):
self.velocity = velocity
self.velocity_type = velocity_type
def SetCmd(self, cmd):
':param rmi_msg.Command cmd:'
#assert(isinstance(cmd, rmi_msg.Command))
class RmiDyn(RmiVelo):
def __init__(self, dynamic):
super(RmiDyn, self).__init__(dynamic, 'DYN')
def SetCmd(self, cmd):
RmiVelo.SetCmd(self, cmd)
cmd.velocity_type = self.velocity_type
cmd.velocity = self.velocity
# Overlapping
class RmiBlending(object):
def __init__(self, blending, blending_type):
self.blending_type = blending_type
self.blending = blending
def SetCmd(self, cmd):
':param rmi_msg.Command cmd:'
cmd.blending_type = self.blending_type
cmd.blending = self.blending
class RmiOvlRel(RmiBlending):
def __init__(self, percent):
':type percent: int'
RmiBlending.__init__(self, [percent], 'OVLREL')
class RmiOvlSuppos(RmiBlending):
def __init__(self, percent):
':type percent: int'
RmiBlending.__init__(self, [percent], 'OVLSUPPOS')
class RmiOvlAbs(RmiBlending):
def __init__(self, blending):
'type blending: list[float]'
RmiBlending.__init__(self, blending, 'OVLABS')
# ----------------------------------------------------
# Object class that handles the robot instructions/syntax
class RobotPost(object):
def __init__(self, topic_command='command_list', topic_result='command_result'):
# Incremented at the start of every method that adds a command. If it
# doesn't match the cmd_list.commands len there was a problem.
self.num_commands = 0
self.num_results = 0
self.cmd_list = rmi_msg.CommandList()
self.pub = rospy.Publisher(topic_command, rmi_msg.CommandList, latch=True, queue_size=10)
self.sub = rospy.Subscriber(topic_result, rmi_msg.Result, callback=self.CommandResultCb)
def CommandResultCb(self, data):
'''
Subscriber callback
:param data: The result
:type data: rmi_msg.Result
'''
res_str = 'OK(0)'
log_func = rospy.logout # info by default
if data.result_code != 0: # some kind of error
res_str = 'ERROR(' + str(data.result_code) + '), ' + data.additional_information
log_func = rospy.logerr # log to error
self.num_results = self.num_commands # Bail out immediately
log_func('Result ' + str(data.command_id) + ': ' + res_str)
self.num_results += 1
def ProgStart(self):
self.cmd_list = rmi_msg.CommandList()
self.num_commands = 0
def ProgRun(self):
"""Publish the command list"""
self.num_results = 0
assert(self.num_commands == len(self.cmd_list.commands))
rospy.logout('Publishing a CommandList with ' + str(len(self.cmd_list.commands)) + ' commands')
for cmd_num, cmd in enumerate(self.cmd_list.commands): # : :type cmd: rmi_msg.Command
rospy.logout('Command ' + str(cmd_num) + ': ' + cmd.command_type)
self.pub.publish(self.cmd_list)
while(self.num_results < self.num_commands and not rospy.is_shutdown()):
rospy.sleep(0.5)
rospy.logout('ProgRun exiting')
def AddCommand(self, cmd):
'''
Add a command. It will set command_id to self.num_commands and then increment self.num_commands.
:param cmd: The command to add
:type cmd: rmi_msg.Command
'''
cmd.command_id = self.num_commands
self.cmd_list.commands.append(cmd)
self.num_commands += 1
def MoveJ(self, pose, dynamic=None, overlap=None):
'''
PTP moves
:param pose: A position (joints, quaternion, euler)
:type pose: RmiPos
:param dynamic: Dynamic containing velo/accel
:type dynamic: RmiVelo
:param overlap: Blending
:type overlap: RmiBlending
'''
#self.num_commands += 1
cmd = rmi_msg.Command()
cmd.command_type = "PTP"
assert isinstance(pose, RmiPos)
pose.SetCmd(cmd)
if(isinstance(dynamic, RmiVelo)):
dynamic.SetCmd(cmd)
if isinstance(overlap, RmiBlending):
overlap.SetCmd(cmd)
if(len(cmd.pose_type) < 1):
assert(False)
else:
self.AddCommand(cmd)
# self.cmd_list.commands.append(cmd)
def MoveL(self, pose, dynamic=None, overlap=None):
'''
Linear moves
:param pose: A position (joints, quaternion, euler)
:type pose: RmiPos
:param dynamic: Dynamic containing velo/accel
:type dynamic: RmiVelo
:param overlap: Blending
:type overlap: RmiBlending
'''
#self.num_commands += 1
cmd = rmi_msg.Command()
cmd.command_type = "LIN"
assert isinstance(pose, RmiPos)
pose.SetCmd(cmd)
if isinstance(dynamic, RmiVelo):
dynamic.SetCmd(cmd)
if isinstance(overlap, RmiBlending):
overlap.SetCmd(cmd)
if(len(cmd.pose_type) < 1):
pass
else:
self.AddCommand(cmd)
# self.cmd_list.commands.append(cmd)
def Settings(self, dynamic=None, overlap=None):
'@type dynamic: RmiVelo'
'@type overlap: RmiBlending'
#self.num_commands += 1
# Give me something
assert(dynamic is not None or overlap is not None)
cmd = rmi_msg.Command()
cmd.command_type = "SETTING"
if(isinstance(dynamic, RmiVelo)):
dynamic.SetCmd(cmd)
if isinstance(overlap, RmiBlending):
overlap.SetCmd(cmd)
self.AddCommand(cmd)
# self.cmd_list.commands.append(cmd)
def WaitIsFinished(self):
'''
Call WaitIsFinished()
'''
#self.num_commands += 1
cmd = rmi_msg.Command()
cmd.command_type = 'WAIT'
cmd.pose_type = 'IS_FINISHED'
self.AddCommand(cmd)
# self.cmd_list.commands.append(cmd)
def Abort(self):
'''
Immediately sends an ABORT message. It will call ProgStart, add the ABORT command, then call ProgRun
'''
self.ProgStart()
self.cmd_list.replace_previous_commands = True
cmd = rmi_msg.Command()
cmd.command_type = 'ABORT'
self.AddCommand(cmd)
self.ProgRun()
def Tool(self, frame):
'''
:param frame: the tool frame
:type frame: RmiFrame
'''
cmd = rmi_msg.Command()
cmd.command_type = 'FRAME'
cmd.pose_reference = 'TOOL'
cmd.pose = frame.frame
self.AddCommand(cmd)
def IoOut(self, value):
cmd = rmi_msg.Command()
cmd.command_type = 'IO_OUT'
cmd.pose_reference = 'DO'
cmd.pose = value
self.AddCommand(cmd)
#===============================================================================
# Global defines for Kairo look and feel
#===============================================================================
default_rob = RobotPost()
# Positions
CARTPOS = RmiPosEulerZyx # ([x, y, z, a, b, c], aux_values=None)
QUATPOS = RmiPosQuaternion # ([x, y, z, rw, rx, ry, rz], aux_values=None)
AXISPOS = RmiPosJoints # ([a1, a2, ...], aux_values=None)
#Dynamics and Overlaps
OVLABS = RmiOvlAbs # ([posDist, oriDist, linAxDist, rotAxDist, vConst(boolean)])
OVLSUPPOS = RmiOvlSuppos # (ovl %)
OVLREL = RmiOvlRel # (ovl %)
# ([velAxis(0..100->), accAxis, decAxis, jerkAxis, vel(mm/s->), acc, dec, jerk, velOri(deg/s->), accOri, decOri, jerkOri])
DYNAMIC = RmiDyn
# Commands
Lin = default_rob.MoveL # (pose, dynamic=None, overlap=None)
PTP = default_rob.MoveJ # (pose, dynamic=None, overlap=None)
Settings = default_rob.Settings # (dynamic=None, overlap=None)
WaitIsFinished = default_rob.WaitIsFinished
def Dyn(dynamic):
'''
calls Dyn
:param dynamic: the dynamic
:type dynamic: RmiDyn
'''
default_rob.Settings(dynamic=dynamic)
def Ovl(overlap):
'''
calls Ovl
:param overlap: OVERLAP_
:type overlap: RmiBlending
'''
default_rob.Settings(overlap=overlap)
#------------------------------------------------------------------------------
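# Hedged usage sketch of the Kairo-style aliases above (all values are purely illustrative;
# joint counts, dynamics and overlaps depend on the actual robot):
#   default_rob.ProgStart()
#   Dyn(DYNAMIC([100, 100, 100, 100, 250, 1000, 1000, 10000, 90, 360, 360, 3600]))
#   Ovl(OVLREL(50))
#   PTP(AXISPOS([0.0, -1.57, 1.57, 0.0, 1.57, 0.0]))
#   Lin(CARTPOS([300.0, 0.0, 400.0, 0.0, 180.0, 0.0]))
#   WaitIsFinished()
#   default_rob.ProgRun()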
|
the-stack_106_15524
|
from sympy.core.numbers import Float
from sympy.core.singleton import S
from sympy.functions.combinatorial.factorials import factorial
from sympy.functions.elementary.exponential import exp
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.functions.special.polynomials import assoc_laguerre
from sympy.functions.special.spherical_harmonics import Ynm
def R_nl(n, l, r, Z=1):
"""
Returns the Hydrogen radial wavefunction R_{nl}.
Parameters
==========
n : integer
Principal Quantum Number which is
an integer with possible values as 1, 2, 3, 4,...
l : integer
``l`` is the Angular Momentum Quantum Number with
values ranging from 0 to ``n-1``.
r :
Radial coordinate.
Z :
Atomic number (1 for Hydrogen, 2 for Helium, ...)
Everything is in Hartree atomic units.
Examples
========
>>> from sympy.physics.hydrogen import R_nl
>>> from sympy.abc import r, Z
>>> R_nl(1, 0, r, Z)
2*sqrt(Z**3)*exp(-Z*r)
>>> R_nl(2, 0, r, Z)
sqrt(2)*(-Z*r + 2)*sqrt(Z**3)*exp(-Z*r/2)/4
>>> R_nl(2, 1, r, Z)
sqrt(6)*Z*r*sqrt(Z**3)*exp(-Z*r/2)/12
For Hydrogen atom, you can just use the default value of Z=1:
>>> R_nl(1, 0, r)
2*exp(-r)
>>> R_nl(2, 0, r)
sqrt(2)*(2 - r)*exp(-r/2)/4
>>> R_nl(3, 0, r)
2*sqrt(3)*(2*r**2/9 - 2*r + 3)*exp(-r/3)/27
For Silver atom, you would use Z=47:
>>> R_nl(1, 0, r, Z=47)
94*sqrt(47)*exp(-47*r)
>>> R_nl(2, 0, r, Z=47)
47*sqrt(94)*(2 - 47*r)*exp(-47*r/2)/4
>>> R_nl(3, 0, r, Z=47)
94*sqrt(141)*(4418*r**2/9 - 94*r + 3)*exp(-47*r/3)/27
The normalization of the radial wavefunction is:
>>> from sympy import integrate, oo
>>> integrate(R_nl(1, 0, r)**2 * r**2, (r, 0, oo))
1
>>> integrate(R_nl(2, 0, r)**2 * r**2, (r, 0, oo))
1
>>> integrate(R_nl(2, 1, r)**2 * r**2, (r, 0, oo))
1
It holds for any atomic number:
>>> integrate(R_nl(1, 0, r, Z=2)**2 * r**2, (r, 0, oo))
1
>>> integrate(R_nl(2, 0, r, Z=3)**2 * r**2, (r, 0, oo))
1
>>> integrate(R_nl(2, 1, r, Z=4)**2 * r**2, (r, 0, oo))
1
"""
# sympify arguments
n, l, r, Z = map(S, [n, l, r, Z])
# radial quantum number
n_r = n - l - 1
# rescaled "r"
a = 1/Z # Bohr radius
r0 = 2 * r / (n * a)
# normalization coefficient
C = sqrt((S(2)/(n*a))**3 * factorial(n_r) / (2*n*factorial(n + l)))
# This is an equivalent normalization coefficient, that can be found in
# some books. Both coefficients seem to be the same fast:
# C = S(2)/n**2 * sqrt(1/a**3 * factorial(n_r) / (factorial(n+l)))
return C * r0**l * assoc_laguerre(n_r, 2*l + 1, r0).expand() * exp(-r0/2)
def Psi_nlm(n, l, m, r, phi, theta, Z=1):
"""
Returns the Hydrogen wave function psi_{nlm}. It's the product of
the radial wavefunction R_{nl} and the spherical harmonic Y_{l}^{m}.
Parameters
==========
n : integer
Principal Quantum Number which is
an integer with possible values as 1, 2, 3, 4,...
l : integer
``l`` is the Angular Momentum Quantum Number with
values ranging from 0 to ``n-1``.
m : integer
``m`` is the Magnetic Quantum Number with values
ranging from ``-l`` to ``l``.
r :
radial coordinate
phi :
azimuthal angle
theta :
polar angle
Z :
atomic number (1 for Hydrogen, 2 for Helium, ...)
Everything is in Hartree atomic units.
Examples
========
>>> from sympy.physics.hydrogen import Psi_nlm
>>> from sympy import Symbol
>>> r=Symbol("r", positive=True)
>>> phi=Symbol("phi", real=True)
>>> theta=Symbol("theta", real=True)
>>> Z=Symbol("Z", positive=True, integer=True, nonzero=True)
>>> Psi_nlm(1,0,0,r,phi,theta,Z)
Z**(3/2)*exp(-Z*r)/sqrt(pi)
>>> Psi_nlm(2,1,1,r,phi,theta,Z)
-Z**(5/2)*r*exp(I*phi)*exp(-Z*r/2)*sin(theta)/(8*sqrt(pi))
Integrating the absolute square of a hydrogen wavefunction psi_{nlm}
over the whole space leads 1.
The normalization of the hydrogen wavefunctions Psi_nlm is:
>>> from sympy import integrate, conjugate, pi, oo, sin
>>> wf=Psi_nlm(2,1,1,r,phi,theta,Z)
>>> abs_sqrd=wf*conjugate(wf)
>>> jacobi=r**2*sin(theta)
>>> integrate(abs_sqrd*jacobi, (r,0,oo), (phi,0,2*pi), (theta,0,pi))
1
"""
# sympify arguments
n, l, m, r, phi, theta, Z = map(S, [n, l, m, r, phi, theta, Z])
# check if values for n,l,m make physically sense
if n.is_integer and n < 1:
raise ValueError("'n' must be positive integer")
if l.is_integer and not (n > l):
raise ValueError("'n' must be greater than 'l'")
if m.is_integer and not (abs(m) <= l):
raise ValueError("|'m'| must be less or equal 'l'")
# return the hydrogen wave function
return R_nl(n, l, r, Z)*Ynm(l, m, theta, phi).expand(func=True)
def E_nl(n, Z=1):
"""
Returns the energy of the state (n, l) in Hartree atomic units.
The energy doesn't depend on "l".
Parameters
==========
n : integer
Principal Quantum Number which is
an integer with possible values as 1, 2, 3, 4,...
Z :
Atomic number (1 for Hydrogen, 2 for Helium, ...)
Examples
========
>>> from sympy.physics.hydrogen import E_nl
>>> from sympy.abc import n, Z
>>> E_nl(n, Z)
-Z**2/(2*n**2)
>>> E_nl(1)
-1/2
>>> E_nl(2)
-1/8
>>> E_nl(3)
-1/18
>>> E_nl(3, 47)
-2209/18
"""
n, Z = S(n), S(Z)
if n.is_integer and (n < 1):
raise ValueError("'n' must be positive integer")
return -Z**2/(2*n**2)
def E_nl_dirac(n, l, spin_up=True, Z=1, c=Float("137.035999037")):
"""
Returns the relativistic energy of the state (n, l, spin) in Hartree atomic
units.
The energy is calculated from the Dirac equation. The rest mass energy is
*not* included.
Parameters
==========
n : integer
Principal Quantum Number which is
an integer with possible values as 1, 2, 3, 4,...
l : integer
``l`` is the Angular Momentum Quantum Number with
values ranging from 0 to ``n-1``.
spin_up :
True if the electron spin is up (default), otherwise down
Z :
Atomic number (1 for Hydrogen, 2 for Helium, ...)
c :
Speed of light in atomic units. Default value is 137.035999037,
taken from http://arxiv.org/abs/1012.3627
Examples
========
>>> from sympy.physics.hydrogen import E_nl_dirac
>>> E_nl_dirac(1, 0)
-0.500006656595360
>>> E_nl_dirac(2, 0)
-0.125002080189006
>>> E_nl_dirac(2, 1)
-0.125000416028342
>>> E_nl_dirac(2, 1, False)
-0.125002080189006
>>> E_nl_dirac(3, 0)
-0.0555562951740285
>>> E_nl_dirac(3, 1)
-0.0555558020932949
>>> E_nl_dirac(3, 1, False)
-0.0555562951740285
>>> E_nl_dirac(3, 2)
-0.0555556377366884
>>> E_nl_dirac(3, 2, False)
-0.0555558020932949
"""
n, l, Z, c = map(S, [n, l, Z, c])
if not (l >= 0):
raise ValueError("'l' must be positive or zero")
if not (n > l):
raise ValueError("'n' must be greater than 'l'")
if (l == 0 and spin_up is False):
raise ValueError("Spin must be up for l==0.")
# skappa is sign*kappa, where sign contains the correct sign
if spin_up:
skappa = -l - 1
else:
skappa = -l
beta = sqrt(skappa**2 - Z**2/c**2)
return c**2/sqrt(1 + Z**2/(n + skappa + beta)**2/c**2) - c**2
|
the-stack_106_15525
|
"""This file and its contents are licensed under the Apache License 2.0. Please see the included NOTICE for copyright information and LICENSE for a copy of the license.
"""
import logging
import json
import pandas as pd
import numpy as np
import os
import xmljson
import jsonschema
import re
from urllib.parse import urlencode
from lxml import etree
from collections import defaultdict
from django.conf import settings
from label_studio.core.utils.io import find_file
from label_studio.core.utils.exceptions import LabelStudioValidationErrorSentryIgnored
logger = logging.getLogger(__name__)
_DATA_EXAMPLES = None
_LABEL_TAGS = {'Label', 'Choice'}
_NOT_CONTROL_TAGS = {'Filter',}
# TODO: move configs in right place
_LABEL_CONFIG_SCHEMA = find_file('label_config_schema.json')
with open(_LABEL_CONFIG_SCHEMA) as f:
_LABEL_CONFIG_SCHEMA_DATA = json.load(f)
def parse_config(config_string):
"""
:param config_string: Label config string
:return: structured config of the form:
{
"<ControlTag>.name": {
"type": "ControlTag",
"to_name": ["<ObjectTag1>.name", "<ObjectTag2>.name"],
"inputs: [
{"type": "ObjectTag1", "value": "<ObjectTag1>.value"},
{"type": "ObjectTag2", "value": "<ObjectTag2>.value"}
],
"labels": ["Label1", "Label2", "Label3"] // taken from "alias" if exists or "value"
}
"""
if not config_string:
return {}
def _is_input_tag(tag):
return tag.attrib.get('name') and tag.attrib.get('value')
def _is_output_tag(tag):
return tag.attrib.get('name') and tag.attrib.get('toName') and tag.tag not in _NOT_CONTROL_TAGS
def _get_parent_output_tag_name(tag, outputs):
# Find parental <Choices> tag for nested tags like <Choices><View><View><Choice>...
parent = tag
while True:
parent = parent.getparent()
if parent is None:
return
name = parent.attrib.get('name')
if name in outputs:
return name
xml_tree = etree.fromstring(config_string)
inputs, outputs, labels = {}, {}, defaultdict(dict)
for tag in xml_tree.iter():
if _is_output_tag(tag):
tag_info = {'type': tag.tag, 'to_name': tag.attrib['toName'].split(',')}
# Grab conditionals if any
conditionals = {}
if tag.attrib.get('perRegion') == 'true':
if tag.attrib.get('whenTagName'):
conditionals = {'type': 'tag', 'name': tag.attrib['whenTagName']}
elif tag.attrib.get('whenLabelValue'):
conditionals = {'type': 'label', 'name': tag.attrib['whenLabelValue']}
elif tag.attrib.get('whenChoiceValue'):
conditionals = {'type': 'choice', 'name': tag.attrib['whenChoiceValue']}
if conditionals:
tag_info['conditionals'] = conditionals
outputs[tag.attrib['name']] = tag_info
elif _is_input_tag(tag):
inputs[tag.attrib['name']] = {'type': tag.tag, 'value': tag.attrib['value'].lstrip('$')}
if tag.tag not in _LABEL_TAGS:
continue
parent_name = _get_parent_output_tag_name(tag, outputs)
if parent_name is not None:
actual_value = tag.attrib.get('alias') or tag.attrib.get('value')
if not actual_value:
logger.debug(
'Inspecting tag {tag_name}... found no "value" or "alias" attributes.'.format(
tag_name=etree.tostring(tag, encoding='unicode').strip()[:50]))
else:
labels[parent_name][actual_value] = dict(tag.attrib)
for output_tag, tag_info in outputs.items():
tag_info['inputs'] = []
for input_tag_name in tag_info['to_name']:
if input_tag_name not in inputs:
logger.warning(
f'to_name={input_tag_name} is specified for output tag name={output_tag}, '
'but we can\'t find it among input tags')
continue
tag_info['inputs'].append(inputs[input_tag_name])
tag_info['labels'] = list(labels[output_tag])
tag_info['labels_attrs'] = labels[output_tag]
return outputs
def parse_config_to_json(config_string):
parser = etree.XMLParser(recover=False)
xml = etree.fromstring(config_string, parser)
if xml is None:
raise etree.XMLSchemaParseError('xml is empty or incorrect')
config = xmljson.badgerfish.data(xml)
return config
def validate_label_config(config_string):
# xml and schema
try:
config = parse_config_to_json(config_string)
jsonschema.validate(config, _LABEL_CONFIG_SCHEMA_DATA)
except (etree.XMLSyntaxError, etree.XMLSchemaParseError, ValueError) as exc:
raise LabelStudioValidationErrorSentryIgnored(str(exc))
except jsonschema.exceptions.ValidationError as exc:
error_message = exc.context[-1].message if len(exc.context) else exc.message
error_message = 'Validation failed on {}: {}'.format('/'.join(exc.path), error_message.replace('@', ''))
raise LabelStudioValidationErrorSentryIgnored(error_message)
# unique names in config # FIXME: 'name =' (with spaces) won't work
all_names = re.findall(r'name="([^"]*)"', config_string)
if len(set(all_names)) != len(all_names):
raise LabelStudioValidationErrorSentryIgnored('Label config contains non-unique names')
# toName points to existent name
names = set(all_names)
toNames = re.findall(r'toName="([^"]*)"', config_string)
for toName_ in toNames:
for toName in toName_.split(','):
if toName not in names:
raise LabelStudioValidationErrorSentryIgnored(f'toName="{toName}" not found in names: {sorted(names)}')
def extract_data_types(label_config):
# load config
parser = etree.XMLParser()
xml = etree.fromstring(label_config, parser)
if xml is None:
raise etree.XMLSchemaParseError('Project config is empty or incorrect')
# take all tags with values attribute and fit them to tag types
data_type = {}
parent = xml.findall('.//*[@value]')
for match in parent:
if not match.get('name'):
continue
name = match.get('value')
if len(name) > 1 and name[0] == '$':
name = name[1:]
data_type[name] = match.tag
return data_type
def get_all_labels(label_config):
outputs = parse_config(label_config)
labels = defaultdict(list)
for control_name in outputs:
for label in outputs[control_name].get('labels', []):
labels[control_name].append(label)
return labels
def get_annotation_tuple(from_name, to_name, type):
if isinstance(to_name, list):
to_name = ','.join(to_name)
return '|'.join([from_name, to_name, type.lower()])
def get_all_control_tag_tuples(label_config):
outputs = parse_config(label_config)
out = []
for control_name, info in outputs.items():
out.append(get_annotation_tuple(control_name, info['to_name'], info['type']))
return out
def get_all_object_tag_names(label_config):
return set(extract_data_types(label_config))
def config_line_stipped(c):
tree = etree.fromstring(c)
comments = tree.xpath('//comment()')
for c in comments:
p = c.getparent()
if p is not None:
p.remove(c)
c = etree.tostring(tree, method='html').decode("utf-8")
return c.replace('\n', '').replace('\r', '')
def get_task_from_labeling_config(config):
""" Get task, annotations and predictions from labeling config comment,
it must start from "<!-- {" and end as "} -->"
"""
# try to get task data, annotations & predictions from config comment
task_data, annotations, predictions = {}, None, None
start = config.find('<!-- {')
start = start if start >= 0 else config.find('<!--{')
start += 4
end = config[start:].find('-->') if start >= 0 else -1
if 3 < start < start + end:
try:
logger.debug('Parse ' + config[start:start + end])
body = json.loads(config[start:start + end])
except Exception as exc:
logger.error(exc, exc_info=True)
pass
else:
logger.debug(json.dumps(body, indent=2))
dont_use_root = 'predictions' in body or 'annotations' in body
task_data = body['data'] if 'data' in body else (None if dont_use_root else body)
predictions = body['predictions'] if 'predictions' in body else None
annotations = body['annotations'] if 'annotations' in body else None
return task_data, annotations, predictions
def data_examples(mode):
""" Data examples for editor preview and task upload examples
"""
global _DATA_EXAMPLES
if _DATA_EXAMPLES is None:
with open(find_file('data_examples.json'), encoding='utf-8') as f:
_DATA_EXAMPLES = json.load(f)
roots = ['editor_preview', 'upload']
for root in roots:
for key, value in _DATA_EXAMPLES[root].items():
if isinstance(value, str):
_DATA_EXAMPLES[root][key] = value.replace('<HOSTNAME>', settings.HOSTNAME)
return _DATA_EXAMPLES[mode]
def generate_sample_task_without_check(label_config, mode='upload', secure_mode=False):
""" Generate sample task only
"""
# load config
parser = etree.XMLParser()
xml = etree.fromstring(label_config, parser)
if xml is None:
raise etree.XMLSchemaParseError('Project config is empty or incorrect')
# make examples pretty
examples = data_examples(mode=mode)
# iterate over xml tree and find values with '$'
task = {}
parent = xml.findall('.//*[@value]') # take all tags with value attribute
for p in parent:
# Make sure it is a real object tag, extract data placeholder key
value = p.get('value')
if not value or not value.startswith('$'):
continue
value = value[1:]
# detect secured mode - objects served as URLs
value_type = p.get('valueType') or p.get('valuetype')
only_urls = secure_mode or value_type == 'url'
example_from_field_name = examples.get('$' + value)
if example_from_field_name:
# try get example by variable name
task[value] = example_from_field_name
elif value == 'video' and p.tag == 'HyperText':
task[value] = examples.get('$videoHack')
elif p.tag == 'Paragraphs':
# Paragraphs special case - replace nameKey/textKey if presented
name_key = p.get('nameKey') or p.get('namekey') or 'author'
text_key = p.get('textKey') or p.get('textkey') or 'text'
task[value] = []
for item in examples[p.tag]:
task[value].append({name_key: item['author'], text_key: item['text']})
elif p.tag == 'TimeSeries':
# TimeSeries special case - generate signals on-the-fly
time_column = p.get('timeColumn')
value_columns = []
for ts_child in p:
if ts_child.tag != 'Channel':
continue
value_columns.append(ts_child.get('column'))
sep = p.get('sep')
time_format = p.get('timeFormat')
if only_urls:
# data is URL
params = {'time': time_column, 'values': ','.join(value_columns)}
if sep:
params['sep'] = sep
if time_format:
params['tf'] = time_format
task[value] = '/samples/time-series.csv?' + urlencode(params)
else:
# data is JSON
task[value] = generate_time_series_json(time_column, value_columns, time_format)
else:
# patch for valueType="url"
examples['Text'] = examples['TextUrl'] if only_urls else examples['TextRaw']
# not found by name, try get example by type
task[value] = examples.get(p.tag, 'Something')
return task
def _is_strftime_string(s):
# simple way to detect strftime format
return '%' in s
def generate_time_series_json(time_column, value_columns, time_format=None):
""" Generate sample for time series
"""
n = 100
if time_format is not None and not _is_strftime_string(time_format):
time_fmt_map = {
'yyyy-MM-dd': '%Y-%m-%d'
}
time_format = time_fmt_map.get(time_format)
if time_format is None:
times = np.arange(n).tolist()
else:
times = pd.date_range('2020-01-01', periods=n, freq='D').strftime(time_format).tolist()
ts = {time_column: times}
for value_col in value_columns:
ts[value_col] = np.random.randn(n).tolist()
return ts
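# --- Illustrative sketch (not part of the original module) ---
# generate_time_series_json() returns a plain dict: the time column maps to 100
# timestamps (or integer indices when the format is unknown) and every value
# column maps to 100 random floats. Column names below are made up.
#
# >>> ts = generate_time_series_json('time', ['velocity'], 'yyyy-MM-dd')
# >>> sorted(ts.keys())
# ['time', 'velocity']
# >>> len(ts['velocity'])
# 100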
def get_sample_task(label_config, secure_mode=False):
""" Get sample task from labeling config and combine it with generated sample task
"""
predefined_task, annotations, predictions = get_task_from_labeling_config(label_config)
generated_task = generate_sample_task_without_check(label_config, mode='editor_preview', secure_mode=secure_mode)
if predefined_task is not None:
generated_task.update(predefined_task)
return generated_task, annotations, predictions
def config_essential_data_has_changed(new_config_str, old_config_str):
""" Detect essential changes of the labeling config
"""
new_config = parse_config(new_config_str)
old_config = parse_config(old_config_str)
for tag, new_info in new_config.items():
if tag not in old_config:
return True
old_info = old_config[tag]
if new_info['type'] != old_info['type']:
return True
if new_info['inputs'] != old_info['inputs']:
return True
if not set(old_info['labels']).issubset(new_info['labels']):
            return True
    return False
def replace_task_data_undefined_with_config_field(data, project):
# assign undefined key name from data to the first key from config, e.g. for txt loading
if settings.DATA_UNDEFINED_NAME in data and project.data_types.keys():
key = list(project.data_types.keys())[0]
data[key] = data[settings.DATA_UNDEFINED_NAME]
del data[settings.DATA_UNDEFINED_NAME]
|
the-stack_106_15526
|
import asyncio
import random
import time
from gql import gql, Client
from gql.transport.aiohttp import AIOHTTPTransport
import config
transport = AIOHTTPTransport(url="https://api.github.com/graphql", headers={
'Authorization': 'bearer %s' % config.config["token"]})
client = Client(transport=transport,
fetch_schema_from_transport=True)
def execute(query):
try:
result = client.execute(gql(query))
return result
except asyncio.CancelledError:
print("interrupt")
time.sleep(random.randint(1, 5) / 10.)
print("continue")
return execute(query)
except Exception:
return 0
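# --- Illustrative usage sketch (not part of the original module) ---
# Assumes config.config["token"] holds a valid GitHub personal access token;
# the query below is a minimal GraphQL query for the authenticated user.
if __name__ == "__main__":
    result = execute("query { viewer { login } }")
    # execute() returns 0 on failure, so guard before reading the payload.
    if result:
        print(result["viewer"]["login"])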
|
the-stack_106_15527
|
# -*- coding:utf-8 -*-
import json
#import xxhash
import base64
class NodeStatus:
GRAY = 0
EXISTING_TERMINAL = 1
EMPTY_TERMINAL = 2
SURELY_MIXED = 3
class Node:
def __init__(self,value,status=NodeStatus.GRAY):
self.value = value
self.children = [None,None,None,None]
self.parent = None
self.status = status
self.path_cache = []
def update_child(self,child,slot):
if self.children[slot] != None:
self.children[slot].parent = None
self.children[slot] = child
class Tree:
def __init__(self):
self.root = Node(None)
self.found_appending_status = {
'i': 0,
'node': self.root
}
def find_appending_node(self,adding_path):
current_node = self.root
appending_root_node = None
for i in range(len(adding_path)):
slot = adding_path[i]
child_node = current_node.children[slot]
if child_node == None:
appending_root_node = current_node
break
current_node = child_node
if appending_root_node == None:
appending_root_node = current_node
self.found_appending_status['node'] = appending_root_node
self.found_appending_status['i'] = i
def append_rest_of_the_nodes_required(self,adding_path,appending_node,appending_path_index,final_status,value):
assert(appending_node.status != NodeStatus.EXISTING_TERMINAL and appending_node.status != NodeStatus.EMPTY_TERMINAL) # You cannot add nodes to terminal
for i in range(appending_path_index,len(adding_path)):
appending_slot = adding_path[i]
status = final_status
if i < len(adding_path)-1:
status = NodeStatus.GRAY
new_child_node = Node(None,status)
appending_node.update_child(new_child_node,appending_slot)
new_child_node.parent = appending_node
new_path_cache = appending_node.path_cache[:]
new_path_cache.append(appending_slot)
new_child_node.path_cache = new_path_cache
appending_node = new_child_node
appending_node.value = value
return appending_node
@staticmethod
def merge_parents_from_end(end_node):
while True:
parent_node = end_node.parent
if parent_node == None: break
there_are_still_gray_children_left = False
found_existing_leaves = 0
found_empty_leaves = 0
for i in range(4):
child = parent_node.children[i]
if child == None:
there_are_still_gray_children_left = True
else:
status = child.status
if status == NodeStatus.EXISTING_TERMINAL:
found_existing_leaves = found_existing_leaves+1
elif status == NodeStatus.EMPTY_TERMINAL:
found_empty_leaves = found_empty_leaves+1
elif status == NodeStatus.SURELY_MIXED:
found_existing_leaves = found_existing_leaves+1
found_empty_leaves = found_empty_leaves+1
else:
there_are_still_gray_children_left = True
if there_are_still_gray_children_left: break
if there_are_still_gray_children_left: break
merged = False
if found_empty_leaves < 1 and found_existing_leaves > 0:
parent_node.status = NodeStatus.EXISTING_TERMINAL
merged = True
elif found_existing_leaves < 1 and found_empty_leaves > 0:
parent_node.status = NodeStatus.EMPTY_TERMINAL
merged = True
else:
parent_node.status = NodeStatus.SURELY_MIXED
if merged:
for i in range(4):
parent_node.update_child(None,i)
end_node = parent_node
def add_terminal(self,adding_path,is_existing,value):
self.find_appending_node(adding_path)
appending_path_index = self.found_appending_status['i']
appending_node = self.found_appending_status['node']
final_status = NodeStatus.EMPTY_TERMINAL
if is_existing:
final_status = NodeStatus.EXISTING_TERMINAL
end_node = self.append_rest_of_the_nodes_required(adding_path,appending_node,appending_path_index,final_status,value)
Tree.merge_parents_from_end(end_node)
def add_gray(self,adding_path):
self.find_appending_node(adding_path)
appending_path_index = self.found_appending_status['i']
appending_node = self.found_appending_status['node']
end_node = self.append_rest_of_the_nodes_required(adding_path,appending_node,appending_path_index,NodeStatus.GRAY,None)
def for_each_shallow_paths_in_path(self,target_path,callback):
starting_node = self.root
is_invalid_path = False
for starting_index in range(len(target_path)):
is_terminal = False
slot = target_path[starting_index]
found_child = starting_node.children[slot]
if found_child != None:
status = found_child.status
if status == NodeStatus.EXISTING_TERMINAL or status == NodeStatus.EMPTY_TERMINAL:
is_terminal = True
break
else:
is_invalid_path = True
break
starting_node = found_child
if is_terminal: break
if is_invalid_path:
invalid_root_path = starting_node.path_cache[:]
invalid_root_path.append(target_path[starting_index])
callback(invalid_root_path,NodeStatus.GRAY,None)
return
found_nodes0 = [starting_node]
found_nodes1 = []
open_nodes = found_nodes0
closed_nodes = found_nodes1
while True:
while len(open_nodes) > 0:
open_node = open_nodes.pop()
self.for_each_shallow_paths_in_node(open_node,closed_nodes,callback)
for open_node in open_nodes:
closed_nodes.append(open_node)
if len(closed_nodes) < 1: break
t = closed_nodes
closed_nodes = open_nodes
open_nodes = t
closed_nodes = []
def for_each_shallow_paths_in_node(self,node,found_children,callback):
path_cache = node.path_cache
node_status = node.status
if node_status == NodeStatus.EXISTING_TERMINAL or node_status == NodeStatus.EMPTY_TERMINAL:
value = node.value
callback(path_cache,node_status,value)
elif node_status == NodeStatus.SURELY_MIXED:
for i in range(4):
child = node.children[i]
found_children.append(child)
elif node_status == NodeStatus.GRAY:
for i in range(4):
child = node.children[i]
if child == None:
path_cache.append(i)
callback(path_cache,NodeStatus.GRAY,None)
path_cache.pop()
else:
found_children.append(child)
def status_for_path(self,path):
self.find_appending_node(path)
appending_node = self.found_appending_status['node']
if appending_node == None:
return NodeStatus.GRAY
return appending_node.status
def value_for_path(self,path):
self.find_appending_node(path)
appending_node = self.found_appending_status['node']
if appending_node == None:
return None
return appending_node.value
def mark_unknown_leaves(self,is_existing):
Tree.mark_unknown_leaves_under_node(self.root,is_existing)
@staticmethod
def mark_unknown_leaves_under_node(node,is_existing):
if node.status == NodeStatus.EXISTING_TERMINAL or node.status == NodeStatus.EMPTY_TERMINAL or node.status == NodeStatus.SURELY_MIXED:
return
for i in range(4):
child = node.children[i]
if child == None:
new_child_status = NodeStatus.EMPTY_TERMINAL
if is_existing:
new_child_status = NodeStatus.EXISTING_TERMINAL
new_child_node = Node(None, new_child_status)
node.update_child(new_child_node,i)
new_child_node.parent = node
new_path_cache = node.path_cache[:]
new_path_cache.append(i)
new_child_node.path_cache = new_path_cache
Tree.merge_parents_from_end(new_child_node)
else:
Tree.mark_unknown_leaves_under_node(child,is_existing)
def for_each_leaves(self,callback):
Tree.for_each_leaves_in_node(self.root,callback)
@staticmethod
def for_each_leaves_in_node(node,callback):
is_leaf = True
for i in range(4):
child = node.children[i]
if child != None:
is_leaf = False
Tree.for_each_leaves_in_node(child,callback)
if not is_leaf: return
callback(node.path_cache,node.status,node.value)
def for_each_nodes(self,callback):
Tree.for_each_nodes_in_node(self.root,callback)
@staticmethod
def for_each_nodes_in_node(node,callback):
for i in range(4):
child = node.children[i]
if child != None:
Tree.for_each_nodes_in_node(child,callback)
callback(node,node.path_cache,node.status,node.value)
def dump_to_redis(self,prefix,r):
def callback(node,path,status,value):
pathstr = ''.join(map(str,path))
# x = xxhash.xxh64()
# x.update(pathstr)
# pathstr = base64.b64encode(x.digest())
name = prefix+'!'+pathstr
r.hset(name,'status',status)
if value != None:
r.hset(name,'value',json.dumps(value))
children = []
for i in range(4):
if node.children[i] != None:
children.append(i)
r.hset(name,'children',json.dumps(children))
self.for_each_nodes(callback)
def load_from_redis(self,prefix,r):
self.root = Node(None)
self.for_each_found_elements_in_redis([],prefix,r)
def for_each_found_elements_in_redis(self,path,prefix,r):
pathstr = ''.join(map(str,path))
# x = xxhash.xxh64()
# x.update(pathstr)
# pathstr = base64.b64encode(x.digest())
name = prefix+'!'+pathstr
h = r.hgetall(name)
status = int(h['status'])
children = json.loads(h['children'])
if status == NodeStatus.EXISTING_TERMINAL and len(children) == 0:
            if 'value' in h:
value = json.loads(h['value'])
else:
value = None
self.add_terminal(path,True,value)
elif status == NodeStatus.EMPTY_TERMINAL and len(children) == 0:
self.add_terminal(path,False,None)
for child in children:
path.append(child)
self.for_each_found_elements_in_redis(path,prefix,r)
path.pop()
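# --- Illustrative usage sketch (not part of the original module) ---
# Exercises the public API above with made-up quadtree paths (lists of slots 0-3).
if __name__ == '__main__':
    tree = Tree()
    tree.add_terminal([0, 1], True, {'id': 'a'})   # mark an existing leaf
    tree.add_terminal([0, 2], False, None)         # mark an empty leaf
    print(tree.status_for_path([0, 1]))            # -> 1 (NodeStatus.EXISTING_TERMINAL)
    print(tree.value_for_path([0, 1]))             # -> {'id': 'a'}
    # Walk every leaf with its path, status and value
    tree.for_each_leaves(lambda path, status, value: print(path, status, value))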
|
the-stack_106_15528
|
import copy
import logging
from functools import wraps
from typing import Callable, Optional, Type, Union
from great_expectations.core import ExpectationConfiguration
from great_expectations.core.util import nested_update
from great_expectations.exceptions.metric_exceptions import MetricProviderError
from great_expectations.execution_engine import ExecutionEngine
from great_expectations.execution_engine.execution_engine import (
MetricDomainTypes,
MetricFunctionTypes,
MetricPartialFunctionTypes,
)
from great_expectations.expectations.registry import (
get_metric_function_type,
get_metric_provider,
register_metric,
register_renderer,
)
from great_expectations.validator.validation_graph import MetricConfiguration
logger = logging.getLogger(__name__)
def metric_value(
engine: Type[ExecutionEngine],
metric_fn_type: Union[str, MetricFunctionTypes] = MetricFunctionTypes.VALUE,
**kwargs
):
"""The metric decorator annotates a method"""
def wrapper(metric_fn: Callable):
@wraps(metric_fn)
def inner_func(*args, **kwargs):
return metric_fn(*args, **kwargs)
inner_func.metric_engine = engine
inner_func.metric_fn_type = MetricFunctionTypes(metric_fn_type)
inner_func.metric_definition_kwargs = kwargs
return inner_func
return wrapper
def metric_partial(
engine: Type[ExecutionEngine],
partial_fn_type: Union[str, MetricPartialFunctionTypes],
domain_type: Union[str, MetricDomainTypes],
**kwargs
):
"""The metric decorator annotates a method"""
def wrapper(metric_fn: Callable):
@wraps(metric_fn)
def inner_func(*args, **kwargs):
return metric_fn(*args, **kwargs)
inner_func.metric_engine = engine
inner_func.metric_fn_type = MetricPartialFunctionTypes(
partial_fn_type
) # raises ValueError if unknown type
inner_func.domain_type = MetricDomainTypes(domain_type)
inner_func.metric_definition_kwargs = kwargs
return inner_func
return wrapper
class MetaMetricProvider(type):
"""MetaMetricProvider registers metrics as they are defined."""
def __new__(cls, clsname, bases, attrs):
newclass = super().__new__(cls, clsname, bases, attrs)
newclass._register_metric_functions()
return newclass
class MetricProvider(metaclass=MetaMetricProvider):
"""Base class for all metric providers.
MetricProvider classes *must* have the following attributes set:
1. `metric_name`: the name to use. Metric Name must be globally unique in
a great_expectations installation.
1. `domain_keys`: a tuple of the *keys* used to determine the domain of the
metric
2. `value_keys`: a tuple of the *keys* used to determine the value of
the metric.
In some cases, subclasses of Expectation, such as TableMetricProvider will already
have correct values that may simply be inherited.
They *may* optionally override the `default_kwarg_values` attribute.
MetricProvider classes *must* implement the following:
1. `_get_evaluation_dependencies`. Note that often, _get_evaluation_dependencies should
augment dependencies provided by a parent class; consider calling super()._get_evaluation_dependencies
In some cases, subclasses of Expectation, such as MapMetricProvider will already
have correct implementations that may simply be inherited.
Additionally, they *may* provide implementations of:
1. Data Docs rendering methods decorated with the @renderer decorator. See the guide
"How to create renderers for custom expectations" for more information.
"""
domain_keys = tuple()
value_keys = tuple()
default_kwarg_values = dict()
@classmethod
def _register_metric_functions(cls):
metric_name = getattr(cls, "metric_name", None)
metric_domain_keys = cls.domain_keys
metric_value_keys = cls.value_keys
for attr_name in dir(cls):
attr_obj = getattr(cls, attr_name)
if not hasattr(attr_obj, "metric_engine") and not hasattr(
attr_obj, "_renderer_type"
):
# This is not a metric or renderer
continue
elif hasattr(attr_obj, "metric_engine"):
engine = getattr(attr_obj, "metric_engine")
if not issubclass(engine, ExecutionEngine):
raise ValueError(
"metric functions must be defined with an Execution Engine"
)
metric_fn = attr_obj
if metric_name is None:
# No metric name has been defined
continue
metric_definition_kwargs = getattr(
metric_fn, "metric_definition_kwargs", dict()
)
declared_metric_name = metric_name + metric_definition_kwargs.get(
"metric_name_suffix", ""
)
metric_fn_type = getattr(
metric_fn, "metric_fn_type", MetricFunctionTypes.VALUE
)
if metric_fn_type == MetricFunctionTypes.VALUE:
register_metric(
metric_name=declared_metric_name,
metric_domain_keys=metric_domain_keys,
metric_value_keys=metric_value_keys,
execution_engine=engine,
metric_class=cls,
metric_provider=metric_fn,
metric_fn_type=metric_fn_type,
)
else:
register_metric(
metric_name=declared_metric_name
+ "."
+ metric_fn_type.metric_suffix, # this will be a MetricPartial
metric_domain_keys=metric_domain_keys,
metric_value_keys=metric_value_keys,
execution_engine=engine,
metric_class=cls,
metric_provider=metric_fn,
metric_fn_type=metric_fn_type,
)
register_metric(
metric_name=declared_metric_name,
metric_domain_keys=metric_domain_keys,
metric_value_keys=metric_value_keys,
execution_engine=engine,
metric_class=cls,
metric_provider=None,
metric_fn_type=metric_fn_type,
)
elif hasattr(attr_obj, "_renderer_type"):
register_renderer(
object_name=metric_name, parent_class=cls, renderer_fn=attr_obj
)
@classmethod
def get_evaluation_dependencies(
cls,
metric: MetricConfiguration,
configuration: Optional[ExpectationConfiguration] = None,
execution_engine: Optional[ExecutionEngine] = None,
runtime_configuration: Optional[dict] = None,
):
"""This should return a dictionary:
{
"dependency_name": MetricConfiguration,
...
}
"""
return (
cls._get_evaluation_dependencies(
metric=metric,
configuration=configuration,
execution_engine=execution_engine,
runtime_configuration=runtime_configuration,
)
or dict()
)
@classmethod
def _get_evaluation_dependencies(
cls,
metric: MetricConfiguration,
configuration: Optional[ExpectationConfiguration] = None,
execution_engine: Optional[ExecutionEngine] = None,
runtime_configuration: Optional[dict] = None,
):
metric_name = metric.metric_name
dependencies = dict()
for metric_fn_type in MetricPartialFunctionTypes:
metric_suffix = "." + metric_fn_type.metric_suffix
try:
_ = get_metric_provider(metric_name + metric_suffix, execution_engine)
has_aggregate_fn = True
except MetricProviderError:
has_aggregate_fn = False
if has_aggregate_fn:
dependencies["metric_partial_fn"] = MetricConfiguration(
metric_name + metric_suffix,
metric.metric_domain_kwargs,
metric.metric_value_kwargs,
)
return dependencies
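# --- Illustrative sketch (not part of the original module) ---
# Minimal shape of a provider subclass as implied by _register_metric_functions
# above: a `metric_name` class attribute plus a method decorated with
# @metric_value or @metric_partial. The engine import and the hook signature are
# assumptions and may differ between great_expectations versions.
#
# from great_expectations.execution_engine import PandasExecutionEngine
#
# class ColumnNonNullCountExample(MetricProvider):
#     metric_name = "column_values.nonnull_count_example"
#     domain_keys = ("batch_id", "table", "column")
#     value_keys = tuple()
#
#     @metric_value(engine=PandasExecutionEngine)
#     def _pandas(cls, execution_engine, metric_domain_kwargs,
#                 metric_value_kwargs, metrics, runtime_configuration):
#         df, _, accessor = execution_engine.get_compute_domain(
#             metric_domain_kwargs, domain_type=MetricDomainTypes.COLUMN
#         )
#         return int(df[accessor["column"]].notnull().sum())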
|
the-stack_106_15529
|
"""
MIT License
Copyright (c) 2019 UCSF Hu Lab
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import os
import sys
from pathlib import Path
from vitalfilepy import VitalFile
from vitalfilepy import VITALBINARY
def test_vitalfilewriter():
filename = "tmp_test_vitalfilewriter.vital"
valueList = [80, 90, 85, 130, 135]
offsetList = [0, 60, 12, 180, 360]
lowList = [0, 0, 0, 0, 0]
highList = [1000, 1000, 1000, 150, 150]
# remove test file if exist
try:
outfile = Path(filename)
if outfile.exists():
outfile.unlink()
    except OSError:
# ignore error
pass
# dummy test for now
assert(len(filename) > 0)
with VitalFile(filename, "w") as f:
header = VITALBINARY("HR", "Bpm", "T1ICU", "101", 2019, 3, 31, 8, 15, 30.0)
f.setHeader(header)
f.writeHeader()
for i in range(0, 5):
f.writeVitalData(valueList[i], offsetList[i], lowList[i], highList[i])
with VitalFile(filename, "r") as f:
f.readHeader()
print("Start Date/Time: {0}/{1}/{2} {3}:{4}:{5:.0f}".format(f.header.Month, f.header.Day, f.header.Year, f.header.Hour, f.header.Minute, f.header.Second))
assert(f.header.Label == "HR")
assert(f.header.Uom == "Bpm")
assert(f.header.Unit == "T1ICU")
assert(f.header.Bed == "101")
assert(f.header.Year == 2019)
assert(f.header.Month == 3)
assert(f.header.Day == 31)
assert(f.header.Hour == 8)
assert(f.header.Minute == 15)
assert(f.header.Second == 30.0)
for i in range(0, 5):
value, offset, low, high = f.readVitalData()
print("value, offset, low, high: {0}, {1}, {2}, {3}".format(value, offset, low, high))
assert(value == valueList[i])
assert(offset == offsetList[i])
assert(low == lowList[i])
assert(high == highList[i])
# remove temporary file created
try:
outfile = Path(filename)
if outfile.exists():
outfile.unlink()
    except OSError:
# ignore error
pass
return
if __name__ == "__main__":
# execute only if run as a script
test_vitalfilewriter()
|
the-stack_106_15531
|
# ---
# jupyter:
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# This notebook contains outputs of many different types: text, HTML, plots and errors.
# # Text outputs
#
# Using `print`, `sys.stdout` and `sys.stderr`
import sys
print('using print')
sys.stdout.write('using sys.stdout.write')
sys.stderr.write('using sys.stderr.write')
import logging
logging.debug('Debug')
logging.info('Info')
logging.warning('Warning')
logging.error('Error')
# # HTML outputs
#
# Using `pandas`. Here we find two representations: both text and HTML.
import pandas as pd
pd.DataFrame([4])
from IPython.display import display
display(pd.DataFrame([5]))
display(pd.DataFrame([6]))
# # Images
# %matplotlib inline
# First plot
from matplotlib import pyplot as plt
import numpy as np
w, h = 3, 3
data = np.zeros((h, w, 3), dtype=np.uint8)
data[0,:] = [0,255,0]
data[1,:] = [0,0,255]
data[2,:] = [0,255,0]
data[1:3,1:3] = [255, 0, 0]
plt.imshow(data)
plt.axis('off')
plt.show()
# Second plot
data[1:3,1:3] = [255, 255, 0]
plt.imshow(data)
plt.axis('off')
plt.show()
# # Errors
undefined_variable
|
the-stack_106_15532
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .health_evaluation_py3 import HealthEvaluation
class DeployedServicePackagesHealthEvaluation(HealthEvaluation):
"""Represents health evaluation for deployed service packages, containing
health evaluations for each unhealthy deployed service package that
impacted current aggregated health state. Can be returned when evaluating
deployed application health and the aggregated health state is either Error
or Warning.
All required parameters must be populated in order to send to Azure.
:param aggregated_health_state: The health state of a Service Fabric
entity such as Cluster, Node, Application, Service, Partition, Replica
etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error',
'Unknown'
:type aggregated_health_state: str or
~azure.servicefabric.models.HealthState
:param description: Description of the health evaluation, which represents
a summary of the evaluation process.
:type description: str
:param kind: Required. Constant filled by server.
:type kind: str
:param total_count: Total number of deployed service packages of the
deployed application in the health store.
:type total_count: long
:param unhealthy_evaluations: List of unhealthy evaluations that led to
the aggregated health state. Includes all the unhealthy
DeployedServicePackageHealthEvaluation that impacted the aggregated
health.
:type unhealthy_evaluations:
list[~azure.servicefabric.models.HealthEvaluationWrapper]
"""
_validation = {
'kind': {'required': True},
}
_attribute_map = {
'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'},
'description': {'key': 'Description', 'type': 'str'},
'kind': {'key': 'Kind', 'type': 'str'},
'total_count': {'key': 'TotalCount', 'type': 'long'},
'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'},
}
def __init__(self, *, aggregated_health_state=None, description: str=None, total_count: int=None, unhealthy_evaluations=None, **kwargs) -> None:
super(DeployedServicePackagesHealthEvaluation, self).__init__(aggregated_health_state=aggregated_health_state, description=description, **kwargs)
self.total_count = total_count
self.unhealthy_evaluations = unhealthy_evaluations
self.kind = 'DeployedServicePackages'
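# --- Illustrative sketch (not part of the original module) ---
# Constructing the model directly with made-up field values; `kind` is filled
# in by __init__ as a server-side constant.
#
# evaluation = DeployedServicePackagesHealthEvaluation(
#     aggregated_health_state='Warning',
#     description='1 of 3 deployed service packages is unhealthy.',
#     total_count=3,
#     unhealthy_evaluations=[],
# )
# assert evaluation.kind == 'DeployedServicePackages'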
|
the-stack_106_15533
|
from tests.core.helpers import pagination_response
from hamcrest import *
from six.moves import xrange
from trakt import Trakt
import responses
@responses.activate
def test_basic():
responses.add_callback(
responses.GET, 'http://mock/sync/history',
callback=pagination_response(
'fixtures/sync/history.json',
authenticated=True
)
)
Trakt.base_url = 'http://mock'
with Trakt.configuration.auth('mock', 'mock'):
history = Trakt['sync/history'].get(pagination=True, per_page=5)
# Ensure collection is valid
assert_that(history, not_none())
# Resolve all pages
items = list(history)
# Ensure all items have been returned
assert_that(items, has_length(3))
# Verify item identifiers
assert_that(
[item.id for item in items],
equal_to(list(xrange(1, 4)))
)
|
the-stack_106_15534
|
# -*- coding: utf-8 -*-
import base64
import io
import json
import os
from datetime import datetime
from dateutil import parser
from flask import (
Response,
flash,
jsonify,
redirect,
render_template,
request,
send_file,
stream_with_context,
url_for,
)
from notifications_python_client.errors import APIError, HTTPError
from notifications_utils import LETTER_MAX_PAGE_COUNT
from notifications_utils.letter_timings import (
get_letter_timings,
letter_can_be_cancelled,
)
from notifications_utils.pdf import pdf_page_count
from PyPDF2.utils import PdfReadError
from app import (
current_service,
format_date_numeric,
job_api_client,
notification_api_client,
)
from app.main import main
from app.notify_client.api_key_api_client import KEY_TYPE_TEST
from app.template_previews import get_page_count_for_letter
from app.utils import (
DELIVERED_STATUSES,
FAILURE_STATUSES,
get_help_argument,
parse_filter_args,
set_status_filters,
)
from app.utils.csv import generate_notifications_csv
from app.utils.letters import (
get_letter_printing_statement,
get_letter_validation_error,
)
from app.utils.templates import get_template
from app.utils.user import user_has_permissions
@main.route("/services/<uuid:service_id>/notification/<uuid:notification_id>")
@user_has_permissions('view_activity', 'send_messages')
def view_notification(service_id, notification_id):
notification = notification_api_client.get_notification(service_id, str(notification_id))
notification['template'].update({'reply_to_text': notification['reply_to_text']})
personalisation = get_all_personalisation_from_notification(notification)
error_message = None
if notification['template']['is_precompiled_letter']:
try:
file_contents, metadata = get_letter_file_data(
service_id, notification_id, "pdf", with_metadata=True
)
page_count = int(
metadata["page_count"]
) if metadata.get("page_count") else pdf_page_count(io.BytesIO(file_contents))
if notification["status"] == "validation-failed":
invalid_pages = metadata.get("invalid_pages")
invalid_pages = json.loads(invalid_pages) if invalid_pages else invalid_pages
error_message = get_letter_validation_error(
metadata.get("message"), invalid_pages, page_count
)
except PdfReadError:
return render_template(
'views/notifications/invalid_precompiled_letter.html',
created_at=notification['created_at']
)
else:
page_count = get_page_count_for_letter(notification['template'], values=personalisation)
if page_count and page_count > LETTER_MAX_PAGE_COUNT:
# We check page count here to show the right error message for a letter that is too long.
# Another way to do this would be to get the status and error message from letter metadata.
# This would be a significant amount of work though, out of scope for this bug fix.
# This is because currently we do not pull the letter from S3 when showing preview.
# Instead, we generate letter preview based on the letter template and personalisation.
# Additionally, when a templated letter is sent via the api and the personalisation pushes the
# page count over 10 pages, it takes a while for validation status to come through.
# Checking page count here will enable us to show the error message even if the letter is not
# fully processed yet.
error_message = get_letter_validation_error(
"letter-too-long", [1], page_count
)
if notification.get('postage'):
if notification["status"] == "validation-failed":
notification['template']['postage'] = None
else:
notification['template']['postage'] = notification['postage']
template = get_template(
notification['template'],
current_service,
letter_preview_url=url_for(
'.view_letter_notification_as_preview',
service_id=service_id,
notification_id=notification_id,
filetype='png',
),
page_count=page_count,
show_recipient=True,
redact_missing_personalisation=True,
sms_sender=notification['reply_to_text'],
email_reply_to=notification['reply_to_text'],
)
template.values = personalisation
if notification['job']:
job = job_api_client.get_job(service_id, notification['job']['id'])['data']
else:
job = None
letter_print_day = get_letter_printing_statement(notification['status'], notification['created_at'])
notification_created = parser.parse(notification['created_at']).replace(tzinfo=None)
show_cancel_button = notification['notification_type'] == 'letter' and \
letter_can_be_cancelled(notification['status'], notification_created)
if get_help_argument() or request.args.get('help') == '0':
# help=0 is set when you’ve just sent a notification. We
# only want to show the back link when you’ve navigated to a
# notification, not when you’ve just sent it.
back_link = None
elif request.args.get('from_job'):
back_link = url_for(
'main.view_job',
service_id=current_service.id,
job_id=request.args.get('from_job'),
)
elif request.args.get('from_uploaded_letters'):
back_link = url_for(
'main.uploaded_letters',
service_id=current_service.id,
letter_print_day=request.args.get('from_uploaded_letters'),
)
else:
back_link = url_for(
'main.view_notifications',
service_id=current_service.id,
message_type=template.template_type,
status='sending,delivered,failed',
)
if notification['notification_type'] == 'letter':
estimated_letter_delivery_date = get_letter_timings(
notification['created_at'],
postage=notification['postage']
).earliest_delivery
else:
estimated_letter_delivery_date = None
return render_template(
'views/notifications/notification.html',
finished=(notification['status'] in (DELIVERED_STATUSES + FAILURE_STATUSES)),
notification_status=notification['status'],
message=error_message,
uploaded_file_name='Report',
template=template,
job=job,
updates_url=url_for(
".view_notification_updates",
service_id=service_id,
notification_id=notification['id'],
status=request.args.get('status'),
help=get_help_argument()
),
partials=get_single_notification_partials(notification),
created_by=notification.get('created_by'),
created_at=notification['created_at'],
updated_at=notification['updated_at'],
help=get_help_argument(),
estimated_letter_delivery_date=estimated_letter_delivery_date,
notification_id=notification['id'],
postage=notification['postage'],
can_receive_inbound=(current_service.has_permission('inbound_sms')),
is_precompiled_letter=notification['template']['is_precompiled_letter'],
letter_print_day=letter_print_day,
show_cancel_button=show_cancel_button,
sent_with_test_key=(
notification.get('key_type') == KEY_TYPE_TEST
),
back_link=back_link,
)
@main.route("/services/<uuid:service_id>/notification/<uuid:notification_id>/cancel", methods=['GET', 'POST'])
@user_has_permissions('view_activity', 'send_messages')
def cancel_letter(service_id, notification_id):
if request.method == 'POST':
try:
notification_api_client.update_notification_to_cancelled(current_service.id, notification_id)
except HTTPError as e:
message_fragments = ["already been cancelled", "too late to cancel"]
if e.status_code == 400 and any(fragment in e.message for fragment in message_fragments):
flash(e.message)
else:
raise e
return redirect(url_for('main.view_notification', service_id=service_id, notification_id=notification_id))
flash("Are you sure you want to cancel sending this letter?", 'cancel')
return view_notification(service_id, notification_id)
def get_preview_error_image():
path = os.path.join(os.path.dirname(__file__), "..", "..", "static", "images", "preview_error.png")
with open(path, "rb") as file:
return file.read()
@main.route("/services/<uuid:service_id>/notification/<uuid:notification_id>.<letter_file_extension:filetype>")
@user_has_permissions('view_activity', 'send_messages')
def view_letter_notification_as_preview(
service_id, notification_id, filetype, with_metadata=False
):
image_data = get_letter_file_data(service_id, notification_id, filetype, with_metadata)
file = io.BytesIO(image_data)
mimetype = 'image/png' if filetype == 'png' else 'application/pdf'
return send_file(
filename_or_fp=file,
mimetype=mimetype,
)
def get_letter_file_data(service_id, notification_id, filetype, with_metadata=False):
try:
preview = notification_api_client.get_notification_letter_preview(
service_id,
notification_id,
filetype,
page=request.args.get('page')
)
display_file = base64.b64decode(preview['content'])
except APIError:
display_file = get_preview_error_image()
preview = {"metadata": {}}
if with_metadata:
return display_file, preview['metadata']
return display_file
@main.route("/services/<uuid:service_id>/notification/<uuid:notification_id>.json")
@user_has_permissions('view_activity', 'send_messages')
def view_notification_updates(service_id, notification_id):
return jsonify(**get_single_notification_partials(
notification_api_client.get_notification(service_id, notification_id)
))
def get_single_notification_partials(notification):
return {
'status': render_template(
'partials/notifications/status.html',
notification=notification,
sent_with_test_key=(
notification.get('key_type') == KEY_TYPE_TEST
),
),
}
def get_all_personalisation_from_notification(notification):
if notification['template'].get('redact_personalisation'):
notification['personalisation'] = {}
if notification['template']['template_type'] == 'email':
notification['personalisation']['email_address'] = notification['to']
if notification['template']['template_type'] == 'sms':
notification['personalisation']['phone_number'] = notification['to']
return notification['personalisation']
@main.route("/services/<uuid:service_id>/download-notifications.csv")
@user_has_permissions('view_activity')
def download_notifications_csv(service_id):
filter_args = parse_filter_args(request.args)
filter_args['status'] = set_status_filters(filter_args)
service_data_retention_days = current_service.get_days_of_retention(filter_args.get('message_type')[0])
return Response(
stream_with_context(
generate_notifications_csv(
service_id=service_id,
job_id=None,
status=filter_args.get('status'),
page=request.args.get('page', 1),
page_size=10000,
format_for_csv=True,
template_type=filter_args.get('message_type'),
limit_days=service_data_retention_days,
)
),
mimetype='text/csv',
headers={
'Content-Disposition': 'inline; filename="{} - {} - {} report.csv"'.format(
format_date_numeric(datetime.now().strftime("%Y-%m-%dT%H:%M:%S.%fZ")),
filter_args['message_type'][0],
current_service.name)
}
)
|
the-stack_106_15535
|
#!/usr/bin/env python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example gets all labels.
"""
# Import appropriate modules from the client library.
from googleads import ad_manager
def main(client):
# Initialize appropriate service.
label_service = client.GetService('LabelService', version='v202002')
# Create a statement to select labels.
statement = ad_manager.StatementBuilder(version='v202002')
# Retrieve a small amount of labels at a time, paging
# through until all labels have been retrieved.
while True:
response = label_service.getLabelsByStatement(statement.ToStatement())
if 'results' in response and len(response['results']):
for label in response['results']:
# Print out some information for each label.
print('Label with ID "%d" and name "%s" was found.\n' % (label['id'],
label['name']))
statement.offset += statement.limit
else:
break
print('\nNumber of results found: %s' % response['totalResultSetSize'])
if __name__ == '__main__':
# Initialize client object.
ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
main(ad_manager_client)
|
the-stack_106_15539
|
class Tile(object):
def __init__(self, height, width, row=None, col=None, **kw):
self.width = width
self.height = height
self.row = row
self.col = col
self.__dict__.update(kw)
def copy(self):
return self.__class__(**self.__dict__)
class Layout(object):
def __init__(self, width, tile_size=100, padding=0):
self._width = width
self._holes = []
self._tiles = {}
self._tile_size = tile_size
self._padding = padding
@property
def width(self):
return self._width
@property
def height(self):
return len(self._holes)
def css(self, tile):
return {
'top': tile.row * (self._tile_size + self._padding),
'left': tile.col * (self._tile_size + self._padding),
'height': tile.height * (self._tile_size + self._padding) - self._padding,
'width': tile.width * (self._tile_size + self._padding) - self._padding,
}
def add(self, tile=None, **kw):
tile = tile.copy() if tile else Tile(**kw)
if tile.row is None or tile.col is None:
self._find_hole(tile)
self._occupy(tile)
return tile
def linearize(self):
seen = set()
for r in range(len(self._holes)):
for c in range(self._width):
t = self._tiles.get((r, c))
if not t:
continue
if t not in seen:
yield t
seen.add(t)
def _find_hole(self, tile):
for r in range(len(self._holes)):
for c in range(self._width):
if self._holes[r] and self._holes[r][c]:
if self._fits_holes(tile, r, c):
tile.row = r
tile.col = c
return tile
tile.row = len(self._holes)
tile.col = 0
return tile
def _fits_holes(self, tile, r, c):
for R in range(r, r + tile.height):
for C in range(c, c + tile.width):
if C >= self._width:
return False
if R < len(self._holes) and (not self._holes[R] or not self._holes[R][C]):
return False
return True
def _occupy(self, tile):
while len(self._holes) < tile.row + tile.height:
self._holes.append([True] * self._width)
for r in range(tile.row, tile.row + tile.height):
for c in range(tile.col, tile.col + tile.width):
self._holes[r][c] = False
self._tiles[(r,c)] = tile
if not any(self._holes[r]):
self._holes[r] = False
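# --- Illustrative usage sketch (not part of the original module) ---
# Builds a 4-column layout and places two tiles; the sizes are made up.
if __name__ == '__main__':
    layout = Layout(width=4, tile_size=100, padding=10)
    big = layout.add(height=2, width=2)     # auto-placed at row 0, col 0
    small = layout.add(height=1, width=1)   # fills the next free hole (row 0, col 2)
    for tile in layout.linearize():
        print(tile.row, tile.col, layout.css(tile))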
|
the-stack_106_15540
|
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 20 11:22:02 2019
@author: Jason
@e-mail: [email protected]
"""
import json as js
import os
import pandas as pd
import scrapy
from Timetables_Crawler.constants import DIR_VENUES, URL_HOMEPAGE, YEAR
from Timetables_Crawler.constants import DIR_TIMETABLES
class TimetablesSpider(scrapy.Spider):
name = 'timetables'
allowed_domains = ['web.timetable.usyd.edu.au']
custom_settings = {
'ROBOTSTXT_OBEY': False}
try:
with open(DIR_VENUES, 'r') as f:
df_venues = pd.DataFrame(js.load(f), columns=['VenueId', 'Venue'])
except FileNotFoundError as e:
df_venues = {'VenueId': [], 'Venue': []}
print(e)
start_urls = [
URL_HOMEPAGE + r'?vs=&0&mode=Bookings&rangeType=year' +
'&uosyear=' + YEAR + '&venueId=' + str(i)
for i in df_venues['VenueId']]
def __init__(self, *args, **kwargs):
if os.path.isfile(DIR_TIMETABLES):
            self.log('Deleted file {}'.format(DIR_TIMETABLES))
os.remove(DIR_TIMETABLES)
def parse(self, response):
def _estimate_semester(dates):
if ('Feb' in dates or
'Mar' in dates or 'Apr' in dates or 'May' in dates):
return 'Semester 1'
elif ('Aug' in dates or
'Sep' in dates or 'Oct' in dates or 'Nov' in dates):
return 'Semester 2'
else:
return 'Out of Semesters'
venueid = response.url.split('&')[-1].lstrip('venueId=')
venue = self.df_venues.loc[
self.df_venues['VenueId'] == venueid, 'Venue'].iloc[0]
for _tr in response.css(
'body div.section div.content table tr'):
if _tr.css('td'):
yield {
'Bookings': _tr.css(
'td:nth-child(1) a::text').get(),
'ClientOrUOS': _tr.css(
'td:nth-child(2) a::text').get(),
'Day': _tr.css('td:nth-child(3)::text').get(),
'Times': _tr.css('td:nth-child(4)::text').get(),
'Dates': _tr.css('td:nth-child(5)::text').get(),
'Frequency': _tr.css(
'td:nth-child(6)::text').get(),
'Capacity': _tr.css('td:nth-child(7)::text').get(),
'Purpose': _tr.css('td:nth-child(8)::text').get(),
'Venue': venue,
'VenueId': venueid,
'Semester': _estimate_semester(
_tr.css('td:nth-child(5)::text').get())}
else:
pass
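# --- Illustrative note (not part of the original module) ---
# The spider is normally launched through the Scrapy CLI from the project root,
# e.g.:
#   scrapy crawl timetables -o timetables.json
# The output path and format above are an example, not the project's own settings.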
|
the-stack_106_15541
|
"""
Book: Building RESTful Python Web Services
Chapter 9: Developing RESTful APIs with Tornado
Author: Gaston C. Hillar - Twitter.com/gastonhillar
Publisher: Packt Publishing Ltd. - http://www.packtpub.com
"""
from random import randint
from time import sleep
class HexacopterStatus:
def __init__(self, motor_speed, turned_on):
self.motor_speed = motor_speed
self.turned_on = turned_on
class Hexacopter:
MIN_SPEED = 0
MAX_SPEED = 1000
def __init__(self):
self.motor_speed = self.__class__.MIN_SPEED
self.turned_on = False
def get_motor_speed(self):
return self.motor_speed
def set_motor_speed(self, motor_speed):
if motor_speed < self.__class__.MIN_SPEED:
raise ValueError('The minimum speed is {0}'.format(self.__class__.MIN_SPEED))
if motor_speed > self.__class__.MAX_SPEED:
raise ValueError('The maximum speed is {0}'.format(self.__class__.MAX_SPEED))
self.motor_speed = motor_speed
        self.turned_on = (self.motor_speed != 0)
sleep(2)
return HexacopterStatus(self.get_motor_speed(), self.is_turned_on())
def is_turned_on(self):
return self.turned_on
def get_hexacopter_status(self):
sleep(3)
return HexacopterStatus(self.get_motor_speed(), self.is_turned_on())
class LightEmittingDiode:
MIN_BRIGHTNESS_LEVEL = 0
MAX_BRIGHTNESS_LEVEL = 255
def __init__(self, identifier , description):
self.identifier = identifier
self.description = description
self.brightness_level = self.__class__.MIN_BRIGHTNESS_LEVEL
def get_brightness_level(self):
sleep(1)
return self.brightness_level
def set_brightness_level(self, brightness_level):
if brightness_level < self.__class__.MIN_BRIGHTNESS_LEVEL:
raise ValueError('The minimum brightness level is {0}'.format(self.__class__.MIN_BRIGHTNESS_LEVEL))
if brightness_level > self.__class__.MAX_BRIGHTNESS_LEVEL:
raise ValueError('The maximum brightness level is {0}'.format(self.__class__.MAX_BRIGHTNESS_LEVEL))
sleep(2)
self.brightness_level = brightness_level
class Altimeter:
def get_altitude(self):
sleep(1)
return randint(0, 3000)
class Drone:
def __init__(self):
self.hexacopter = Hexacopter()
self.altimeter = Altimeter()
self.blue_led = LightEmittingDiode(1, 'Blue LED')
self.white_led = LightEmittingDiode(2, 'White LED')
self.leds = {
self.blue_led.identifier: self.blue_led,
self.white_led.identifier: self.white_led
}
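# --- Illustrative usage sketch (not part of the original module) ---
# The sleep() calls in the classes above simulate hardware latency, so this
# takes a few seconds to run.
if __name__ == '__main__':
    drone = Drone()
    status = drone.hexacopter.set_motor_speed(500)
    print(status.motor_speed, status.turned_on)      # -> 500 True
    drone.blue_led.set_brightness_level(128)
    print(drone.blue_led.get_brightness_level())     # -> 128
    print(drone.altimeter.get_altitude())            # -> random altitude 0-3000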
|
the-stack_106_15542
|
import logging
import torch
import torch.distributed as dist
from torch import nn
from torch.autograd.function import Function
from torch.nn import functional as F
from FasterRCNN.utils import comm
from .wrappers import BatchNorm2d
class FrozenBatchNorm2d(nn.Module):
"""
BatchNorm2d where the batch statistics and the affine parameters are fixed.
It contains non-trainable buffers called
"weight" and "bias", "running_mean", "running_var",
initialized to perform identity transformation.
The pre-trained backbone models from Caffe2 only contain "weight" and "bias",
which are computed from the original four parameters of BN.
The affine transform `x * weight + bias` will perform the equivalent
computation of `(x - running_mean) / sqrt(running_var) * weight + bias`.
When loading a backbone model from Caffe2, "running_mean" and "running_var"
will be left unchanged as identity transformation.
Other pre-trained backbone models may contain all 4 parameters.
The forward is implemented by `F.batch_norm(..., training=False)`.
"""
_version = 3
def __init__(self, num_features, eps=1e-5):
super().__init__()
self.num_features = num_features
self.eps = eps
self.register_buffer("weight", torch.ones(num_features))
self.register_buffer("bias", torch.zeros(num_features))
self.register_buffer("running_mean", torch.zeros(num_features))
self.register_buffer("running_var", torch.ones(num_features) - eps)
def forward(self, x):
if x.requires_grad:
# When gradients are needed, F.batch_norm will use extra memory
# because its backward op computes gradients for weight/bias as well.
scale = self.weight * (self.running_var + self.eps).rsqrt()
bias = self.bias - self.running_mean * scale
scale = scale.reshape(1, -1, 1, 1)
bias = bias.reshape(1, -1, 1, 1)
return x * scale + bias
else:
# When gradients are not needed, F.batch_norm is a single fused op
# and provide more optimization opportunities.
return F.batch_norm(
x,
self.running_mean,
self.running_var,
self.weight,
self.bias,
training=False,
eps=self.eps,
)
def _load_from_state_dict(
self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
):
version = local_metadata.get("version", None)
if version is None or version < 2:
# No running_mean/var in early versions
# This will silent the warnings
if prefix + "running_mean" not in state_dict:
state_dict[prefix + "running_mean"] = torch.zeros_like(self.running_mean)
if prefix + "running_var" not in state_dict:
state_dict[prefix + "running_var"] = torch.ones_like(self.running_var)
if version is not None and version < 3:
logger = logging.getLogger(__name__)
logger.info("FrozenBatchNorm {} is upgraded to version 3.".format(prefix.rstrip(".")))
# In version < 3, running_var are used without +eps.
state_dict[prefix + "running_var"] -= self.eps
super()._load_from_state_dict(
state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
)
def __repr__(self):
return "FrozenBatchNorm2d(num_features={}, eps={})".format(self.num_features, self.eps)
@classmethod
def convert_frozen_batchnorm(cls, module):
"""
Convert BatchNorm/SyncBatchNorm in module into FrozenBatchNorm.
Args:
module (torch.nn.Module):
Returns:
If module is BatchNorm/SyncBatchNorm, returns a new module.
Otherwise, in-place convert module and return it.
Similar to convert_sync_batchnorm in
https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/batchnorm.py
"""
bn_module = nn.modules.batchnorm
bn_module = (bn_module.BatchNorm2d, bn_module.SyncBatchNorm)
res = module
if isinstance(module, bn_module):
res = cls(module.num_features)
if module.affine:
res.weight.data = module.weight.data.clone().detach()
res.bias.data = module.bias.data.clone().detach()
res.running_mean.data = module.running_mean.data
res.running_var.data = module.running_var.data
res.eps = module.eps
else:
for name, child in module.named_children():
new_child = cls.convert_frozen_batchnorm(child)
if new_child is not child:
res.add_module(name, new_child)
return res
def get_norm(norm, out_channels):
"""
Args:
norm (str or callable):
Returns:
nn.Module or None: the normalization layer
"""
if isinstance(norm, str):
if len(norm) == 0:
return None
norm = {
"BN": BatchNorm2d,
"SyncBN": NaiveSyncBatchNorm,
"FrozenBN": FrozenBatchNorm2d,
"GN": lambda channels: nn.GroupNorm(32, channels),
"nnSyncBN": nn.SyncBatchNorm, # keep for debugging
}[norm]
return norm(out_channels)
class AllReduce(Function):
@staticmethod
def forward(ctx, input):
input_list = [torch.zeros_like(input) for k in range(dist.get_world_size())]
# Use allgather instead of allreduce since I don't trust in-place operations ..
dist.all_gather(input_list, input, async_op=False)
inputs = torch.stack(input_list, dim=0)
return torch.sum(inputs, dim=0)
@staticmethod
def backward(ctx, grad_output):
dist.all_reduce(grad_output, async_op=False)
return grad_output
class NaiveSyncBatchNorm(BatchNorm2d):
"""
`torch.nn.SyncBatchNorm` has known unknown bugs.
It produces significantly worse AP (and sometimes goes NaN)
when the batch size on each worker is quite different
(e.g., when scale augmentation is used, or when it is applied to mask head).
Use this implementation before `nn.SyncBatchNorm` is fixed.
It is slower than `nn.SyncBatchNorm`.
"""
def forward(self, input):
if comm.get_world_size() == 1 or not self.training:
return super().forward(input)
assert input.shape[0] > 0, "SyncBatchNorm does not support empty inputs"
C = input.shape[1]
mean = torch.mean(input, dim=[0, 2, 3])
meansqr = torch.mean(input * input, dim=[0, 2, 3])
vec = torch.cat([mean, meansqr], dim=0)
vec = AllReduce.apply(vec) * (1.0 / dist.get_world_size())
mean, meansqr = torch.split(vec, C)
var = meansqr - mean * mean
self.running_mean += self.momentum * (mean.detach() - self.running_mean)
self.running_var += self.momentum * (var.detach() - self.running_var)
invstd = torch.rsqrt(var + self.eps)
scale = self.weight * invstd
bias = self.bias - mean * scale
scale = scale.reshape(1, -1, 1, 1)
bias = bias.reshape(1, -1, 1, 1)
return input * scale + bias
|
the-stack_106_15543
|
"""
Machine learning module for Python
==================================
sklearn is a Python module integrating classical machine
learning algorithms in the tightly-knit world of scientific Python
packages (numpy, scipy, matplotlib).
It aims to provide simple and efficient solutions to learning problems
that are accessible to everybody and reusable in various contexts:
machine-learning as a versatile tool for science and engineering.
See http://scikit-learn.org for complete documentation.
"""
import sys
import logging
import os
import random
from ._config import get_config, set_config, config_context
logger = logging.getLogger(__name__)
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# Generic release markers:
# X.Y
# X.Y.Z # For bugfix releases
#
# Admissible pre-release markers:
# X.YaN # Alpha release
# X.YbN # Beta release
# X.YrcN # Release Candidate
# X.Y # Final release
#
# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
# 'X.Y.dev0' is the canonical version of 'X.Y.dev'
#
__version__ = '0.24.1'
# On OSX, we can get a runtime error due to multiple OpenMP libraries loaded
# simultaneously. This can happen for instance when calling BLAS inside a
# prange. Setting the following environment variable allows multiple OpenMP
# libraries to be loaded. It should not degrade performances since we manually
# take care of potential over-subscription performance issues, in sections of
# the code where nested OpenMP loops can happen, by dynamically reconfiguring
# the inner OpenMP runtime to temporarily disable it while under the scope of
# the outer OpenMP parallel section.
os.environ.setdefault("KMP_DUPLICATE_LIB_OK", "True")
# Workaround issue discovered in intel-openmp 2019.5:
# https://github.com/ContinuumIO/anaconda-issues/issues/11294
os.environ.setdefault("KMP_INIT_AT_FORK", "FALSE")
try:
# This variable is injected in the __builtins__ by the build
# process. It is used to enable importing subpackages of sklearn when
# the binaries are not built
# mypy error: Cannot determine type of '__SKLEARN_SETUP__'
__SKLEARN_SETUP__ # type: ignore
except NameError:
__SKLEARN_SETUP__ = False
if __SKLEARN_SETUP__:
sys.stderr.write('Partial import of sklearn during the build process.\n')
# We are not importing the rest of scikit-learn during the build
# process, as it may not be compiled yet
else:
# `_distributor_init` allows distributors to run custom init code.
# For instance, for the Windows wheel, this is used to pre-load the
# vcomp shared library runtime for OpenMP embedded in the sklearn/.libs
# sub-folder.
# It is necessary to do this prior to importing show_versions as the
# later is linked to the OpenMP runtime to make it possible to introspect
# it and importing it first would fail if the OpenMP dll cannot be found.
from . import _distributor_init # noqa: F401
from . import __check_build # noqa: F401
from .base import clone
from .utils._show_versions import show_versions
__all__ = ['calibration', 'cluster', 'covariance', 'cross_decomposition',
'datasets', 'decomposition', 'dummy', 'ensemble', 'exceptions',
'experimental', 'externals', 'feature_extraction',
'feature_selection', 'gaussian_process', 'inspection',
'isotonic', 'kernel_approximation', 'kernel_ridge',
'linear_model', 'manifold', 'metrics', 'mixture',
'model_selection', 'multiclass', 'multioutput',
'naive_bayes', 'neighbors', 'neural_network', 'pipeline',
'preprocessing', 'random_projection', 'semi_supervised',
'svm', 'tree', 'discriminant_analysis', 'impute', 'compose',
# Non-modules:
'clone', 'get_config', 'set_config', 'config_context',
'show_versions']
def setup_module(module):
"""Fixture for the tests to assure globally controllable seeding of RNGs"""
import numpy as np
# Check if a random seed exists in the environment, if not create one.
_random_seed = os.environ.get('SKLEARN_SEED', None)
if _random_seed is None:
_random_seed = np.random.uniform() * np.iinfo(np.int32).max
_random_seed = int(_random_seed)
print("I: Seeding RNGs with %r" % _random_seed)
np.random.seed(_random_seed)
random.seed(_random_seed)
|
the-stack_106_15544
|
from fastapi import status, HTTPException, Depends
from fastapi.security import (
OAuth2PasswordBearer,
SecurityScopes,
)
from jose import jwt, JWTError
import app.models as models
import app.settings as settings
import logging
logger = logging.getLogger(__name__)
AUTH_TOKEN_URL = settings.AUTH_TOKEN_URL
AUTH_TOKEN_SIGN_SECRET = settings.AUTH_TOKEN_SIGN_SECRET
AUTH_TOKEN_SIGN_ALGORITHM = settings.AUTH_TOKEN_SIGN_ALGORITHM
SCOPES = settings.SCOPES
CREDENTIAL_EXCEPTION = HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Could not validate credentials",
headers={"WWW-Authenticate": "Bearer"},
)
oauth2_scheme = OAuth2PasswordBearer(tokenUrl=AUTH_TOKEN_URL, scopes=SCOPES)
async def get_current_user(
security_scopes: SecurityScopes, token: str = Depends(oauth2_scheme)
):
if security_scopes.scopes:
authenticate_value = f'Bearer scope="{security_scopes.scope_str}"'
else:
authenticate_value = "Bearer"
try:
logger.debug(f"Decode jwt {token}")
payload = jwt.decode(
token, AUTH_TOKEN_SIGN_SECRET, algorithms=[AUTH_TOKEN_SIGN_ALGORITHM]
)
token_data = models.TokenData(**payload)
user = token_data.user
token_scopes = token_data.scopes
except (JWTError, models.ValidationError):
raise CREDENTIAL_EXCEPTION
logger.debug(f"Checking required security scopes {security_scopes.scopes}")
for scope in security_scopes.scopes:
if scope not in token_scopes:
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
detail="Not enough permissions",
headers={"WWW-Authenticate": authenticate_value},
)
return user
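# --- Usage sketch (hypothetical app and scope names; not part of this module) ---
# A route would typically depend on get_current_user via fastapi.Security, e.g.:
#
#   from fastapi import FastAPI, Security
#
#   app = FastAPI()
#
#   @app.get("/items")
#   async def read_items(user=Security(get_current_user, scopes=["items:read"])):
#       return {"user": user}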
|
the-stack_106_15546
|
# -*- coding: utf-8 -*-
from OpenGL.GL import *
from .. GLGraphicsItem import GLGraphicsItem
from ...Qt import QtGui
import numpy as np
__all__ = ['GLVolumeItem']
class GLVolumeItem(GLGraphicsItem):
"""
**Bases:** :class:`GLGraphicsItem <pyqtgraph.opengl.GLGraphicsItem>`
Displays volumetric data.
"""
def __init__(self, data, sliceDensity=1, smooth=True, glOptions='translucent'):
"""
============== =======================================================================================
**Arguments:**
data Volume data to be rendered. *Must* be 4D numpy array (x, y, z, RGBA) with dtype=ubyte.
sliceDensity Density of slices to render through the volume. A value of 1 means one slice per voxel.
smooth (bool) If True, the volume slices are rendered with linear interpolation
============== =======================================================================================
"""
self.sliceDensity = sliceDensity
self.smooth = smooth
self.data = None
self._needUpload = False
self.texture = None
GLGraphicsItem.__init__(self)
self.setGLOptions(glOptions)
self.setData(data)
def setData(self, data):
self.data = data
self._needUpload = True
self.update()
def _uploadData(self):
glEnable(GL_TEXTURE_3D)
if self.texture is None:
self.texture = glGenTextures(1)
glBindTexture(GL_TEXTURE_3D, self.texture)
if self.smooth:
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
else:
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_BORDER)
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_BORDER)
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_BORDER)
shape = self.data.shape
## Test texture dimensions first
glTexImage3D(GL_PROXY_TEXTURE_3D, 0, GL_RGBA, shape[0], shape[1], shape[2], 0, GL_RGBA, GL_UNSIGNED_BYTE, None)
if glGetTexLevelParameteriv(GL_PROXY_TEXTURE_3D, 0, GL_TEXTURE_WIDTH) == 0:
raise Exception("OpenGL failed to create 3D texture (%dx%dx%d); too large for this hardware." % shape[:3])
glTexImage3D(GL_TEXTURE_3D, 0, GL_RGBA, shape[0], shape[1], shape[2], 0, GL_RGBA, GL_UNSIGNED_BYTE, self.data.transpose((2,1,0,3)))
glDisable(GL_TEXTURE_3D)
self.lists = {}
for ax in [0,1,2]:
for d in [-1, 1]:
l = glGenLists(1)
self.lists[(ax,d)] = l
glNewList(l, GL_COMPILE)
self.drawVolume(ax, d)
glEndList()
self._needUpload = False
def paint(self):
if self.data is None:
return
if self._needUpload:
self._uploadData()
self.setupGLState()
glEnable(GL_TEXTURE_3D)
glBindTexture(GL_TEXTURE_3D, self.texture)
#glEnable(GL_DEPTH_TEST)
#glDisable(GL_CULL_FACE)
#glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
#glEnable( GL_BLEND )
#glEnable( GL_ALPHA_TEST )
glColor4f(1,1,1,1)
view = self.view()
center = QtGui.QVector3D(*[x/2. for x in self.data.shape[:3]])
cam = self.mapFromParent(view.cameraPosition()) - center
#print "center", center, "cam", view.cameraPosition(), self.mapFromParent(view.cameraPosition()), "diff", cam
cam = np.array([cam.x(), cam.y(), cam.z()])
ax = np.argmax(abs(cam))
d = 1 if cam[ax] > 0 else -1
        glCallList(self.lists[(ax,d)])  ## draw the pre-compiled slice stack for this axis/direction
glDisable(GL_TEXTURE_3D)
def drawVolume(self, ax, d):
N = 5
imax = [0,1,2]
imax.remove(ax)
tp = [[0,0,0],[0,0,0],[0,0,0],[0,0,0]]
vp = [[0,0,0],[0,0,0],[0,0,0],[0,0,0]]
nudge = [0.5/x for x in self.data.shape]
tp[0][imax[0]] = 0+nudge[imax[0]]
tp[0][imax[1]] = 0+nudge[imax[1]]
tp[1][imax[0]] = 1-nudge[imax[0]]
tp[1][imax[1]] = 0+nudge[imax[1]]
tp[2][imax[0]] = 1-nudge[imax[0]]
tp[2][imax[1]] = 1-nudge[imax[1]]
tp[3][imax[0]] = 0+nudge[imax[0]]
tp[3][imax[1]] = 1-nudge[imax[1]]
vp[0][imax[0]] = 0
vp[0][imax[1]] = 0
vp[1][imax[0]] = self.data.shape[imax[0]]
vp[1][imax[1]] = 0
vp[2][imax[0]] = self.data.shape[imax[0]]
vp[2][imax[1]] = self.data.shape[imax[1]]
vp[3][imax[0]] = 0
vp[3][imax[1]] = self.data.shape[imax[1]]
slices = self.data.shape[ax] * self.sliceDensity
r = list(range(slices))
if d == -1:
r = r[::-1]
glBegin(GL_QUADS)
tzVals = np.linspace(nudge[ax], 1.0-nudge[ax], slices)
vzVals = np.linspace(0, self.data.shape[ax], slices)
for i in r:
z = tzVals[i]
w = vzVals[i]
tp[0][ax] = z
tp[1][ax] = z
tp[2][ax] = z
tp[3][ax] = z
vp[0][ax] = w
vp[1][ax] = w
vp[2][ax] = w
vp[3][ax] = w
glTexCoord3f(*tp[0])
glVertex3f(*vp[0])
glTexCoord3f(*tp[1])
glVertex3f(*vp[1])
glTexCoord3f(*tp[2])
glVertex3f(*vp[2])
glTexCoord3f(*tp[3])
glVertex3f(*vp[3])
glEnd()
## Interesting idea:
## remove projection/modelview matrixes, recreate in texture coords.
## it _sorta_ works, but needs tweaking.
#mvm = glGetDoublev(GL_MODELVIEW_MATRIX)
#pm = glGetDoublev(GL_PROJECTION_MATRIX)
#m = QtGui.QMatrix4x4(mvm.flatten()).inverted()[0]
#p = QtGui.QMatrix4x4(pm.flatten()).inverted()[0]
#glMatrixMode(GL_PROJECTION)
#glPushMatrix()
#glLoadIdentity()
#N=1
#glOrtho(-N,N,-N,N,-100,100)
#glMatrixMode(GL_MODELVIEW)
#glLoadIdentity()
#glMatrixMode(GL_TEXTURE)
#glLoadIdentity()
#glMultMatrixf(m.copyDataTo())
#view = self.view()
#w = view.width()
#h = view.height()
#dist = view.opts['distance']
#fov = view.opts['fov']
#nearClip = dist * .1
#farClip = dist * 5.
#r = nearClip * np.tan(fov)
#t = r * h / w
#p = QtGui.QMatrix4x4()
#p.frustum( -r, r, -t, t, nearClip, farClip)
#glMultMatrixf(p.inverted()[0].copyDataTo())
#glBegin(GL_QUADS)
#M=1
#for i in range(500):
#z = i/500.
#w = -i/500.
#glTexCoord3f(-M, -M, z)
#glVertex3f(-N, -N, w)
#glTexCoord3f(M, -M, z)
#glVertex3f(N, -N, w)
#glTexCoord3f(M, M, z)
#glVertex3f(N, N, w)
#glTexCoord3f(-M, M, z)
#glVertex3f(-N, N, w)
#glEnd()
#glDisable(GL_TEXTURE_3D)
#glMatrixMode(GL_PROJECTION)
#glPopMatrix()
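# --- Usage sketch (assumes a pyqtgraph GLViewWidget; exact Qt imports vary by version) ---
#
#   import numpy as np
#   import pyqtgraph.opengl as gl
#   from pyqtgraph.Qt import QtWidgets
#
#   app = QtWidgets.QApplication([])
#   view = gl.GLViewWidget()
#   view.show()
#   data = np.zeros((64, 64, 64, 4), dtype=np.ubyte)  # (x, y, z, RGBA)
#   data[..., 0] = 255                                # red channel
#   data[..., 3] = 20                                 # low alpha -> translucent volume
#   view.addItem(gl.GLVolumeItem(data))
#   app.exec_()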
|
the-stack_106_15550
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from pytext.common.utils import eprint
from .pytext_config import LATEST_VERSION, PyTextConfig
ADAPTERS = {}
DOWNGRADE_ADAPTERS = {}
NOT_THERE = (None, None, None)
def register_adapter(from_version):
def decorator(fn):
if from_version in ADAPTERS:
raise Exception(
"Duplicated adapter from_version={}: '{}' and '{}'".format(
from_version, fn.__name__, ADAPTERS[from_version].__name__
)
)
else:
ADAPTERS[from_version] = fn
return fn
return decorator
def register_down_grade_adapter(from_version):
def decorator(fn):
if from_version in DOWNGRADE_ADAPTERS:
raise Exception(
"Duplicated adapter from_version={}: '{}' and '{}'".format(
from_version, fn.__name__, DOWNGRADE_ADAPTERS[from_version].__name__
)
)
else:
DOWNGRADE_ADAPTERS[from_version] = fn
return fn
return decorator
def find_dicts_containing_key(json_config, key):
if key in json_config:
yield json_config
for _, v in json_config.items():
if hasattr(v, "__contains__") and hasattr(v, "items"):
yield from find_dicts_containing_key(v, key)
def rename(json_config, old_name, new_name):
for section in find_dicts_containing_key(json_config, old_name):
value = section.pop(old_name)
if new_name:
section[new_name] = value
def is_type_specifier(json_dict):
"""If a config object is a class, it might have a level which is a type specifier,
with one key corresponding to the name of whichever type it is. These types should
not be explicitly named in the path."""
# heuristic: one key, starting with uppercase character
if len(json_dict) != 1:
return False
key = next(iter(json_dict))
return key[0] == key[0].upper()
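# For example (hypothetical keys), {"Adam": {"lr": 0.001}} is a type specifier wrapping
# an Adam config (single key, uppercase first letter), while {"lr": 0.001, "eps": 1e-8}
# is a plain parameter dict and is not skipped.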
def find_parameter(config, path_str):
# Recursively find path elements, skipping into type specifiers.
# Return the value and its container so the value can be deleted.
path = path_str.split(".")
value = config
container = None
for segment in path:
while is_type_specifier(value):
container, value = value, next(iter(value.values()))
if segment not in value:
return NOT_THERE
container, value = value, value[segment]
return path[-1], container, value
def _create_path(config, path):
# Recursively find path elements, skipping into type specifiers.
# If any container isn't there, create a new empty object for it.
    # This will only be created if the path segment does not already exist.
value = config
for segment in path:
while is_type_specifier(value):
value = next(iter(value.values()))
if segment not in value:
value[segment] = {}
value = value[segment]
while is_type_specifier(value):
value = next(iter(value.values()))
return value
def create_parameter(config, path_str, value):
*path, param = path_str.split(".")
new_container = _create_path(config, path)
new_container[param] = value
def delete_parameter(config, path_str):
param_name, container, _ = find_parameter(config, path_str)
if container:
container.pop(param_name, None)
def rename_parameter(config, old_path, new_path, transform=lambda x: x):
"""A powerful tool for writing config adapters, this allows you to specify
a JSON-style path for an old and new config parameter. For instance
    rename_parameter(config, "task.data.epoch_size", "task.trainer.num_batches_per_epoch")
will look through the config for task.data.epoch_size, including moving through
explicitly specified types. If it's specified, it will delete the value and
set it in task.trainer.num_batches_per_epoch instead, creating trainer as an empty
dictionary if necessary."""
found = find_parameter(config, old_path)
if found is not NOT_THERE:
param_name, container, old_value = found
# Delete old value
container.pop(param_name)
# Update new value
create_parameter(config, new_path, transform(old_value))
return config
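# Illustrative example for rename_parameter (hypothetical config dict):
#
#   cfg = {"task": {"DocumentClassificationTask": {"data": {"epoch_size": 128}}}}
#   rename_parameter(cfg, "task.data.epoch_size", "task.trainer.num_batches_per_epoch")
#   # cfg is now:
#   # {"task": {"DocumentClassificationTask": {"data": {},
#   #                                          "trainer": {"num_batches_per_epoch": 128}}}}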
@register_adapter(from_version=0)
def v0_to_v1(json_config):
# migrate optimizer and random_seed params
[task] = json_config["task"].values()
if (
"optimizer" not in task
or "Adam" in task["optimizer"]
or "SGD" in task["optimizer"]
or "NAG" in task["optimizer"]
) and ("trainer" not in task or "random_seed" not in task["trainer"]):
return json_config
if "trainer" in task and "random_seed" in task["trainer"]:
json_config["random_seed"] = task["trainer"]["random_seed"]
del task["trainer"]["random_seed"]
if "optimizer" in task and not any(
opt in task["optimizer"] for opt in ["Adam", "SGD", "NAG"]
):
op_type = task["optimizer"].get("type", "adam")
if op_type == "adam":
op_config = {"Adam": {}}
for key in ["lr", "weight_decay"]:
if key in task["optimizer"]:
op_config["Adam"][key] = task["optimizer"][key]
elif op_type == "sgd":
op_config = {"SGD": {}}
for key in ["lr", "momentum"]:
if key in task["optimizer"]:
op_config["SGD"][key] = task["optimizer"][key]
elif op_type == "nag":
op_config = {"NAG": {}}
for key in ["lr", "weight_decay", "momentum"]:
if key in task["optimizer"]:
op_config["NAG"][key] = task["optimizer"][key]
else:
raise ValueError("Migration not supported for your optimizer")
task["optimizer"] = op_config
return json_config
@register_adapter(from_version=1)
def v1_to_v2(json_config):
# migrate optimizer params
[task] = json_config["task"].values()
if (
"scheduler" not in task
or task["scheduler"] is None
or task["scheduler"].get("type") is None
):
return json_config
op_type = task["scheduler"].get("type")
if op_type == "step_lr":
op_config = {"StepLR": {}}
for key in ["step_size", "gamma"]:
if key in task["scheduler"]:
op_config["StepLR"][key] = task["scheduler"][key]
task["scheduler"] = op_config
elif op_type == "lm_fine_tuning":
op_config = {"LmFineTuning": {}}
for key in [
"cut_frac",
"ratio",
"non_pretrained_param_groups",
"lm_lr_multiplier",
"lm_use_per_layer_lr",
"lm_gradual_unfreezing",
"last_epoch",
]:
if key in task["scheduler"]:
op_config["LmFineTuning"][key] = task["scheduler"][key]
task["scheduler"] = op_config
elif op_type == "reduce_lr_on_plateau":
op_config = {"ReduceLROnPlateau": {}}
for key in [
"lower_is_better",
"factor",
"patience",
"min_lr",
"threshold",
"threshold_is_absolute",
"cooldown",
]:
if key in task["scheduler"]:
op_config["ReduceLROnPlateau"][key] = task["scheduler"][key]
task["scheduler"] = op_config
elif op_type == "cosine_annealing_lr":
op_config = {"CosineAnnealingLR": {}}
for key in ["t_max", "eta_min"]:
if key in task["scheduler"]:
op_config["CosineAnnealingLR"][key] = task["scheduler"][key]
task["scheduler"] = op_config
elif op_type == "exponential_lr":
op_config = {"ExponentialLR": {}}
for key in ["gamma"]:
if key in task["scheduler"]:
op_config["ExponentialLR"][key] = task["scheduler"][key]
task["scheduler"] = op_config
elif op_type == "none":
del task["scheduler"]
else:
raise ValueError("Migration for your scheduler %s not supported." % op_type)
return json_config
@register_adapter(from_version=2)
def v2_to_v3(json_config):
"""Optimizer and Scheduler configs used to be part of the task config,
they now live in the trainer's config.
"""
[task] = json_config["task"].values()
for section_str in ["optimizer", "scheduler"]:
if section_str in task:
if "trainer" not in task:
task["trainer"] = {}
trainer = task["trainer"]
# a hack to support an older hack:
# some tasks like ensemble have a 'real_trainer' section inside trainer
# that has the actual trainer config
if "real_trainer" in trainer:
real_trainer = trainer["real_trainer"]
real_trainer[section_str] = task[section_str]
else:
trainer[section_str] = task[section_str]
# remove from task config
task.pop(section_str)
return json_config
@register_adapter(from_version=3)
def v3_to_v4(json_config):
"""Key for provding the path for contextual token embedding has changed from
`pretrained_model_embedding` to `contextual_token_embedding. This affects the
`features` section of the config.
"""
[task] = json_config["task"].values()
old_key = "pretrained_model_embedding"
new_key = "contextual_token_embedding"
for section_str in ["features", "labels"]:
if section_str in task:
section = task[section_str]
if section and old_key in section:
section[new_key] = section[old_key]
section.pop(old_key)
return json_config
def deprecate(json_config, t):
for section in find_dicts_containing_key(json_config, t):
section[t + "_Deprecated"] = section.pop(t)
@register_adapter(from_version=4)
def doc_model_deprecated(json_config):
"""Rename DocModel to DocModel_Deprecated."""
deprecate(json_config, "DocModel")
return json_config
@register_adapter(from_version=5)
def old_tasks_deprecated(json_config):
"""
Rename tasks with data_handler config to _Deprecated
"""
deprecate(json_config, "BertClassificationTask")
deprecate(json_config, "BertPairClassificationTask")
deprecate(json_config, "BertPairwiseClassificationTask")
deprecate(json_config, "COLMClassifyTask")
deprecate(json_config, "ContextSCLSTMCompositionalTask")
deprecate(json_config, "DocClassificationTask")
deprecate(json_config, "ElmoDocClassificationTask")
deprecate(json_config, "ElmoFineTunePairwiseClassificationTask")
deprecate(json_config, "EnsembleTask")
deprecate(json_config, "FederatedLearningTaskBase")
deprecate(json_config, "FLDocClassificationTask")
deprecate(json_config, "FLQueryDocumentPairwiseRankingTask")
deprecate(json_config, "KDDocClassificationTask")
deprecate(json_config, "LMTask")
deprecate(json_config, "QueryDocumentPairwiseRankingTask")
deprecate(json_config, "SCLSTMCompositionalTask")
deprecate(json_config, "SCLSTMTask")
deprecate(json_config, "SemanticParsingCppTask")
deprecate(json_config, "SemanticParsingTask")
deprecate(json_config, "Seq2SeqTask")
deprecate(json_config, "Seq2SeqCompositionalMetricReporter")
deprecate(json_config, "Seq2SeqMetricReporter")
deprecate(json_config, "RNNEncoderDecoder")
deprecate(json_config, "SeqNNTask")
deprecate(json_config, "SGNNClassificationTask")
deprecate(json_config, "ShallowClassificationTask")
deprecate(json_config, "ShallowTaggingTask")
deprecate(json_config, "SpanClassificationTask")
deprecate(json_config, "TreeParserTask")
return json_config
@register_adapter(from_version=6)
def v6_to_v7(json_config):
"""
Make `LabelTensorizer` expansible. If the `labels` field should be an instance of
    `LabelTensorizer`, convert it to `{LabelTensorizer: labels}`.
"""
[(task_name, task)] = json_config["task"].items()
if task_name in (
"BertPairRegressionTask",
"NewDocumentRegression",
"NewWordTaggingTask",
):
# Task has a label tensorizer different from LabelTensorizer.
return json_config
model = task.get("model")
if not model:
return json_config
model_name = None
if "inputs" in model:
inputs = model["inputs"]
elif len(model) == 1:
[(model_name, model_val)] = model.items()
inputs = model_val.get("inputs")
else:
inputs = None
if not inputs:
return json_config
if model_name in (
"NewBertRegressionModel",
"DocRegressionModel",
"NewWordTaggingModel",
"ELModel",
"EntitySalienceModel",
"MatchaTwoTowerModel",
):
# Model has a label tensorizer different from LabelTensorizer.
return json_config
labels = inputs.get("labels")
if labels is None:
return json_config
inputs["labels"] = {"LabelTensorizer": labels}
return json_config
@register_adapter(from_version=7)
def lm_model_deprecated(json_config):
"""
Rename LM model to _Deprecated (LMTask is already deprecated in v5)
"""
deprecate(json_config, "LMLSTM")
return json_config
@register_adapter(from_version=8)
def new_tasks_rename(json_config):
"""
Rename tasks with new API consistently
"""
# Deprecated
rename(
json_config,
"QueryDocumentPairwiseRankingModel",
"QueryDocumentPairwiseRankingModel_Deprecated",
)
# New
rename(json_config, "NewDocModel", "DocModel")
rename(json_config, "NewDocRegressionModel", "DocRegressionModel")
rename(json_config, "NewDocumentClassification", "DocumentClassificationTask")
rename(json_config, "NewDocumentRegression", "DocumentRegressionTask")
rename(
json_config,
"NewQueryDocumentPairwiseRankingModel",
"QueryDocPairwiseRankingModel",
)
rename(json_config, "NewWordTaggingModel", "WordTaggingModel")
rename(json_config, "NewWordTaggingTask", "WordTaggingTask")
rename(json_config, "PairwiseClassification", "PairwiseClassificationTask")
rename(
json_config, "QueryDocumentPairwiseRanking", "QueryDocumentPairwiseRankingTask"
)
return json_config
@register_adapter(from_version=9)
def move_epoch_size(json_config):
return rename_parameter(
json_config, "task.data.epoch_size", "task.trainer.num_batches_per_epoch"
)
@register_adapter(from_version=10)
def ensemble_task_deprecated(json_config):
"""
    Deprecate old ensemble trainer and model configs
"""
# Deprecated
deprecate(json_config, "BaggingDocEnsemble")
deprecate(json_config, "BaggingIntentSlotEnsemble")
deprecate(json_config, "EnsembleTrainer")
return json_config
@register_adapter(from_version=11)
def rename_bitransformer_inputs(json_config):
"""
In "BiTransformer" model, rename input "characters" -> "bytes" and update subfields.
"""
[task] = json_config["task"].values()
model = task.get("model")
if model and len(model) == 1 and "BiTransformer" in model:
model_val = list(model.values())[0]
if "inputs" not in model_val:
model_val["inputs"] = {}
inputs = model_val["inputs"]
char_config = inputs.pop("characters", {})
if "max_char_length" in char_config:
char_config["max_byte_len"] = char_config.pop("max_char_length")
char_config["offset_for_non_padding"] = 1
model_val["inputs"]["bytes"] = char_config
return json_config
@register_adapter(from_version=12)
def v12_to_v13(json_config):
"""remove_output_encoded_layers(json_config)"""
rename(json_config, "output_encoded_layers", None)
"""
Make 'ClassificationMetricReporter'
expansible.
If the 'metric_reporter' field should be an instance of
'ClassificationMetricReporter',
convert it to '{ClassificationMetricReporter: metric_reporter}'.
"""
[(task_name, task)] = json_config["task"].items()
if task_name not in (
"EnsembleTask",
"DocClassificationTask_Deprecated",
"DocumentClassificationTask",
"PairwiseClassificationTask",
"SeqNNTask",
"ShallowClassificationTask_Deprecated",
"KDDocClassificationTask_Deprecated",
"XLMDocumentClassification",
"XLMPairClassification",
"NewBertClassificationTask",
"NewBertPairClassificationTask",
"LaserClassificationTask",
):
# Task has a metric reporter different from ClassificationMetricReporter
return json_config
metric_reporter = task.get("metric_reporter")
if metric_reporter is None:
return json_config
keys = list(metric_reporter.keys())
if keys == []:
return json_config
set = {"output_path", "model_select_metric", "target_label", "text_column_names"}
if keys[0] in set:
task["metric_reporter"] = {"ClassificationMetricReporter": metric_reporter}
else:
return json_config
return json_config
@register_adapter(from_version=13)
def rename_tensorizer_vocab_params(json_config):
[(task_name, task)] = json_config["task"].items()
# XLM and Bert models use the `vocab_file` field, but in a custom way. This
# field should not be migrated to `vocab.vocab_files` as for TokenTensorizer.
if "XLM" in task_name or "Bert" in task_name:
return json_config
def resolve_model(model_config):
if len(model_config) == 1 and list(model_config)[0][0].isupper():
[(model_name, model_config)] = model_config.items()
if "XLM" in model_name or "Bert" in model_name:
return {}
return model_config
model = resolve_model(task.get("model", {}))
if not model:
return json_config
def update_model_config(model_config):
model_config = resolve_model(model_config)
tokens = model_config.get("inputs", {}).get("tokens")
if not tokens:
return
vocab = {"build_from_data": tokens.pop("build_vocab", True), "vocab_files": []}
if "vocab_file" in tokens:
vocab["vocab_files"].append(
{
"filepath": tokens.pop("vocab_file"),
"size_limit": tokens.pop("vocab_file_size_limit", 0),
}
            )
        tokens["vocab"] = vocab
if "models" in model:
# ensemble model
for sub_model in model["models"]:
update_model_config(sub_model)
else:
update_model_config(model)
return json_config
@register_adapter(from_version=14)
def flatten_deprecated_ensemble_config(json_config):
# Deprecated ensemble is removed from codebase, so this is now just a no-op
return json_config
def migrate_to_new_data_handler(task, columns):
create_parameter(task, "data.source", {"TSVDataSource": {}})
rename_parameter(task, "data_handler.eval_path", "data.source.eval_filename")
rename_parameter(task, "data_handler.test_path", "data.source.test_filename")
rename_parameter(task, "data_handler.train_path", "data.source.train_filename")
columns_to_read = next(find_dicts_containing_key(task, "columns_to_read"), None)
if columns_to_read:
rename_parameter(
task, "data_handler.columns_to_read", "data.source.field_names"
)
else:
create_parameter(task, "data.source.field_names", columns)
rename_parameter(
task, "data_handler.append_bos", "model.inputs.tokens.add_bos_token"
)
rename_parameter(
task, "data_handler.append_eos", "model.inputs.tokens.add_eos_token"
)
rename_parameter(
task, "data_handler.max_seq_len", "model.inputs.tokens.max_seq_len"
)
rename_parameter(
task, "features.shared_module_key", "model.embedding.shared_module_key"
)
rename_parameter(task, "features.word_feat.embed_dim", "model.embedding.embed_dim")
rename_parameter(task, "features.dense_feat", "model.inputs.dense")
create_parameter(task, "data.batcher", {"PoolingBatcher": {}})
rename_parameter(
task, "data_handler.eval_batch_size", "data.batcher.eval_batch_size"
)
rename_parameter(
task, "data_handler.test_batch_size", "data.batcher.test_batch_size"
)
rename_parameter(
task, "data_handler.train_batch_size", "data.batcher.train_batch_size"
)
rename_parameter(
task,
"features.word_feat.vocab_size",
"model.inputs.tokens.vocab.size_from_data",
)
rename_parameter(
task,
"features.word_feat.vocab_from_train_data",
"model.inputs.tokens.vocab.build_from_data",
)
rename_parameter(
task,
"features.word_feat.vocab_file",
"model.inputs.tokens.vocab.vocab_files",
lambda x: [{"filepath": x}],
)
rename_parameter(task, "labels.label_weights", "model.output_layer.label_weights")
delete_parameter(task, "data_handler")
delete_parameter(task, "exporter")
delete_parameter(task, "features")
delete_parameter(task, "featurizer")
delete_parameter(task, "labels")
@register_adapter(from_version=15)
def remove_lmtask_deprecated(json_config):
for section in find_dicts_containing_key(json_config, "LMTask_Deprecated"):
task = section.pop("LMTask_Deprecated")
migrate_to_new_data_handler(task, ["text"])
section["LMTask"] = task
return json_config
@register_adapter(from_version=16)
def remove_docclassificationtask_deprecated(json_config):
for section in find_dicts_containing_key(
json_config, "DocClassificationTask_Deprecated"
):
task = section.pop("DocClassificationTask_Deprecated")
convert = next(find_dicts_containing_key(task, "convert_to_bytes"), None)
section["DocumentClassificationTask"] = task
migrate_to_new_data_handler(task, ["doc_label", "text"])
create_parameter(task, "model.inputs.labels.column", "doc_label")
# In DocumentClassificationTask.Config:
# model: BaseModel.Config = DocModel.Config()
# It will create a BaseModel if model class is implicit in json.
# We make it explicit to avoid errors.
for model in find_dicts_containing_key(section, "model"):
if next(iter(model["model"]))[0].islower():
model["model"] = {"DocModel": model.pop("model")}
if convert and convert["convert_to_bytes"]:
rename(section, "DocModel", "ByteTokensDocumentModel")
return json_config
@register_adapter(from_version=17)
def rename_fl_task(json_config):
# remove 'NewDoc' from FL task names
for trainer_suffix in ["SyncTrainer", "AsyncTrainer"]:
old_trainer_name = f"FLNewDoc{trainer_suffix}"
new_trainer_name = f"FL{trainer_suffix}"
for section in find_dicts_containing_key(json_config, old_trainer_name):
section[new_trainer_name] = section.pop(old_trainer_name)
return json_config
@register_adapter(from_version=18)
def upgrade_if_xlm(json_config):
"""
Make `XLMModel` Union changes for encoder and tokens config.
Since they are now unions, insert the old class into the config if
no class name is mentioned.
"""
_, _, model = find_parameter(json_config, "task.model")
if model and "XLMModel" in model:
_, inputs, tokens = find_parameter(json_config, "task.model.inputs.tokens")
if tokens and "XLMTensorizer" not in tokens:
inputs["tokens"] = {}
inputs["tokens"]["XLMTensorizer"] = tokens
return json_config
@register_adapter(from_version=19)
def fix_fl_local_optimizer_and_trainer(json_config):
"""a) Change FL local optimizer from optimizer:{SGD:{lr=0.1, momentum=0.2}}
to optimizer:{lr=0.1, momentum=0.2}
b) Replace trainer:{FLSyncTrainer:{foo}} by
trainer:{fl_trainer:{foo, type:SyncTrainer}}
Same for FLAsyncTrainer
"""
# only for tasks that contain FLSyncTrainer or FLAsyncTrainer
_, container, trainer = find_parameter(json_config, "task.trainer")
if not trainer:
return json_config
if "FLSyncTrainer" in trainer:
fl_trainer_name, fl_trainer_type = "FLSyncTrainer", "SyncTrainer"
elif "FLAsyncTrainer" in trainer:
fl_trainer_name, fl_trainer_type = "FLAsyncTrainer", "AsyncTrainer"
else:
return json_config
trainer_section = trainer.pop(fl_trainer_name)
# first, replace optimizer:{SGD:{lr=0.1, momentum=0.2}} by
# optimizer:{lr=0.1, momentum=0.2}
if "optimizer" in trainer_section:
optimizer = trainer_section.pop("optimizer")
sgd_config = optimizer.pop("SGD")
trainer_section["optimizer"] = sgd_config
# rename "global_optimizer" to "aggregator"
if "global_optimizer" in trainer_section:
aggregator = trainer_section.pop("global_optimizer")
trainer_section["aggregator"] = aggregator
trainer_section["type"] = fl_trainer_type
trainer["fl_trainer"] = trainer_section
return json_config
@register_adapter(from_version=20)
def upgrade_padding(json_config):
"""
Upgrade config option padding_control to seq_padding_control.
"""
json_config["seq_padding_control"] = json_config.pop("padding_control", None)
return json_config
@register_adapter(from_version=21)
def upgrade_export_config(json_config):
"""
Upgrade model export related config fields to the new "export" section.
"""
export_config_fields = [
"export_caffe2_path",
"export_onnx_path",
"export_torchscript_path",
"torchscript_quantize",
"accelerate",
"inference_interface",
"seq_padding_control",
"batch_padding_control",
"target",
]
export_config = {}
for f in export_config_fields:
if f in json_config:
export_config[f] = json_config.pop(f, None)
json_config["export"] = export_config
return json_config
@register_adapter(from_version=22)
def v22_to_v23(json_config):
"""
Upgrade by adding read_chunk_size option
"""
if "read_chunk_size" not in json_config:
json_config["read_chunk_size"] = PyTextConfig.read_chunk_size
return json_config
@register_adapter(from_version=23)
def v23_to_v24(json_config):
"""
No-op since export_list is optional
"""
return json_config
@register_adapter(from_version=24)
def v24_to_v25(json_config):
"""
    Upgrade by adding the max_input_text_length option, defaulting to None
"""
for v in get_json_config_iterator(json_config, "SentencePieceTokenizer"):
if "max_input_text_length" not in v:
v["max_input_text_length"] = None
return json_config
@register_down_grade_adapter(from_version=23)
def v23_to_v22(json_config):
"""
    Downgrade by removing the read_chunk_size option
"""
if "read_chunk_size" in json_config:
del json_config["read_chunk_size"]
return json_config
@register_down_grade_adapter(from_version=24)
def v24_to_v23(json_config):
"""
Downgrade by removing export_list option
"""
if "export_list" in json_config:
if len(json_config["export_list"]) > 1:
raise Exception(
"Current version does not support multiple exports in export_list"
)
elif len(json_config["export_list"]) == 0:
raise Exception("Current version does not support empty export_list")
json_config["export"] = json_config["export_list"][0]
del json_config["export_list"]
return json_config
@register_down_grade_adapter(from_version=25)
def v25_to_v24(json_config):
"""
Downgrade by removing max_input_text_length option for SentencePieceTokenizer
"""
for v in get_json_config_iterator(json_config, "SentencePieceTokenizer"):
if "max_input_text_length" in v:
del v["max_input_text_length"]
return json_config
def get_json_config_iterator(json_config, lookup_key):
for key, value in json_config.items():
if key == lookup_key:
yield value
elif isinstance(value, dict):
for v in get_json_config_iterator(value, lookup_key):
yield v
@register_down_grade_adapter(from_version=26)
def v26_to_v25(json_config):
"""
Downgrade by removing target option from all
exports in export_list
"""
if "export" in json_config:
if "target" in json_config["export"]:
json_config["export"].pop("target")
if "export_list" in json_config:
export_list = json_config["export_list"]
for export_cfg in export_list:
if "target" in export_cfg:
export_cfg.pop("target")
json_config["export_list"] = export_list
return json_config
@register_adapter(from_version=25)
def v25_to_v26(json_config):
if "export" in json_config:
export_cfg = json_config["export"]
export_cfg["target"] = get_name_from_options(export_cfg)
if "inference_interface" in export_cfg:
export_cfg.pop("inference_interface")
json_config["export"] = export_cfg
if "export_list" in json_config:
export_list = json_config["export_list"]
for export_cfg in export_list:
export_cfg["target"] = get_name_from_options(export_cfg)
if "inference_interface" in export_cfg:
export_cfg.pop("inference_interface")
json_config["export_list"] = export_list
return json_config
def get_name_from_options(export_config):
"""
Reverse engineer which model is which based on recognized
export configurations. If the export configurations don't adhere
to the set of recognized backends, then set target name to unknown
"""
if "accelerate" in export_config and len(export_config["accelerate"]) != 0:
if export_config["accelerate"][0] == "cuda:half":
tgt = "gpu-fp16"
elif (
export_config["accelerate"][0] == "nnpi"
and "seq_padding_control" in export_config
and "batch_padding_control" in export_config
):
tgt = "nnpi"
        else:
            tgt = "unknown"
elif "seq_padding_control" and "batch_padding_control" in export_config:
tgt = "nnpi"
else:
tgt = "unknown"
return tgt
def upgrade_one_version(json_config):
current_version = json_config.get("version", 0)
adapter = ADAPTERS.get(current_version)
if not adapter:
raise Exception(f"no adapter found for version {current_version}")
json_config = adapter(json_config)
eprint(
f"WARNING - Applying old config adapter for version={current_version}. "
"Please consider migrating your old configs to the latest version."
)
json_config["version"] = current_version + 1
return json_config
def downgrade_one_version(json_config):
current_version = json_config.get("version", 0)
downgrade_adapter = DOWNGRADE_ADAPTERS.get(current_version)
if not downgrade_adapter:
raise Exception(f"no downgrade adapter found for version {current_version}")
json_config = downgrade_adapter(json_config)
eprint(
f"WARNING - Downgrading your current config version={current_version}. "
"Please wait for next pytext pkg release to let new config take effect."
)
json_config["version"] = current_version - 1
return json_config
def upgrade_to_latest(json_config):
current_version = json_config.get("version") or 0
if current_version > LATEST_VERSION:
raise Exception(
f"config version {json_config['version']} shouldn't exceed lastest \
version {LATEST_VERSION}"
)
while current_version != LATEST_VERSION:
print(f"Current Version: {current_version}")
json_config = upgrade_one_version(json_config)
current_version = json_config["version"]
return json_config
def update_to_version(json_config, expected_version=LATEST_VERSION):
current_version = json_config.get("version") or 0
if current_version > expected_version:
while current_version != expected_version:
print(f"Current Version: {current_version}")
json_config = downgrade_one_version(json_config)
current_version = json_config["version"]
while current_version != expected_version:
print(f"Current Version: {current_version}")
json_config = upgrade_one_version(json_config)
current_version = json_config["version"]
return json_config
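# Usage sketch (hypothetical minimal config; adapters are applied one version at a time):
#
#   cfg = {"version": 0, "task": {"DocumentClassificationTask": {}}}
#   cfg = upgrade_one_version(cfg)   # applies v0_to_v1, prints a warning, bumps cfg["version"] to 1
#   cfg = upgrade_to_latest(cfg)     # keeps applying registered adapters up to LATEST_VERSION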
|
the-stack_106_15551
|
"""
Layout dimensions are used to give the minimum, maximum and preferred
dimensions for containers and controls.
"""
from typing import TYPE_CHECKING, Any, Callable, List, Optional, Union
__all__ = [
"Dimension",
"D",
"sum_layout_dimensions",
"max_layout_dimensions",
"AnyDimension",
"to_dimension",
"is_dimension",
]
if TYPE_CHECKING:
from typing_extensions import TypeGuard
class Dimension:
"""
Specified dimension (width/height) of a user control or window.
The layout engine tries to honor the preferred size. If that is not
possible, because the terminal is larger or smaller, it tries to keep in
between min and max.
:param min: Minimum size.
:param max: Maximum size.
:param weight: For a VSplit/HSplit, the actual size will be determined
by taking the proportion of weights from all the children.
E.g. When there are two children, one with a weight of 1,
and the other with a weight of 2, the second will always be
twice as big as the first, if the min/max values allow it.
:param preferred: Preferred size.
"""
def __init__(
self,
min: Optional[int] = None,
max: Optional[int] = None,
weight: Optional[int] = None,
preferred: Optional[int] = None,
) -> None:
if weight is not None:
assert weight >= 0 # Also cannot be a float.
assert min is None or min >= 0
assert max is None or max >= 0
assert preferred is None or preferred >= 0
self.min_specified = min is not None
self.max_specified = max is not None
self.preferred_specified = preferred is not None
self.weight_specified = weight is not None
if min is None:
min = 0 # Smallest possible value.
if max is None: # 0-values are allowed, so use "is None"
max = 1000**10 # Something huge.
if preferred is None:
preferred = min
if weight is None:
weight = 1
self.min = min
self.max = max
self.preferred = preferred
self.weight = weight
# Don't allow situations where max < min. (This would be a bug.)
if max < min:
raise ValueError("Invalid Dimension: max < min.")
# Make sure that the 'preferred' size is always in the min..max range.
if self.preferred < self.min:
self.preferred = self.min
if self.preferred > self.max:
self.preferred = self.max
@classmethod
def exact(cls, amount: int) -> "Dimension":
"""
Return a :class:`.Dimension` with an exact size. (min, max and
preferred set to ``amount``).
"""
return cls(min=amount, max=amount, preferred=amount)
@classmethod
def zero(cls) -> "Dimension":
"""
Create a dimension that represents a zero size. (Used for 'invisible'
controls.)
"""
return cls.exact(amount=0)
def is_zero(self) -> bool:
"True if this `Dimension` represents a zero size."
return self.preferred == 0 or self.max == 0
def __repr__(self) -> str:
fields = []
if self.min_specified:
fields.append("min=%r" % self.min)
if self.max_specified:
fields.append("max=%r" % self.max)
if self.preferred_specified:
fields.append("preferred=%r" % self.preferred)
if self.weight_specified:
fields.append("weight=%r" % self.weight)
return "Dimension(%s)" % ", ".join(fields)
def sum_layout_dimensions(dimensions: List[Dimension]) -> Dimension:
"""
Sum a list of :class:`.Dimension` instances.
"""
min = sum(d.min for d in dimensions)
max = sum(d.max for d in dimensions)
preferred = sum(d.preferred for d in dimensions)
return Dimension(min=min, max=max, preferred=preferred)
def max_layout_dimensions(dimensions: List[Dimension]) -> Dimension:
"""
Take the maximum of a list of :class:`.Dimension` instances.
    Used when we have an HSplit/VSplit, and we want to get the best width/height.
"""
if not len(dimensions):
return Dimension.zero()
# If all dimensions are size zero. Return zero.
# (This is important for HSplit/VSplit, to report the right values to their
# parent when all children are invisible.)
if all(d.is_zero() for d in dimensions):
return dimensions[0]
# Ignore empty dimensions. (They should not reduce the size of others.)
dimensions = [d for d in dimensions if not d.is_zero()]
if dimensions:
# Take the highest minimum dimension.
min_ = max(d.min for d in dimensions)
        # For the maximum, we would prefer not to go larger than the smallest
# 'max' value, unless other dimensions have a bigger preferred value.
# This seems to work best:
# - We don't want that a widget with a small height in a VSplit would
# shrink other widgets in the split.
# If it doesn't work well enough, then it's up to the UI designer to
# explicitly pass dimensions.
max_ = min(d.max for d in dimensions)
max_ = max(max_, max(d.preferred for d in dimensions))
        # Make sure that max >= min. In some scenarios, when certain min..max
        # ranges don't have any overlap, we can end up in such an impossible
        # situation. In that case, give priority to the min value.
# E.g. taking (1..5) and (8..9) would return (8..5). Instead take (8..8).
if min_ > max_:
max_ = min_
preferred = max(d.preferred for d in dimensions)
return Dimension(min=min_, max=max_, preferred=preferred)
else:
return Dimension()
# Anything that can be converted to a dimension.
AnyDimension = Union[
None, # None is a valid dimension that will fit anything.
int,
Dimension,
# Callable[[], 'AnyDimension'] # Recursive definition not supported by mypy.
Callable[[], Any],
]
def to_dimension(value: AnyDimension) -> Dimension:
"""
Turn the given object into a `Dimension` object.
"""
if value is None:
return Dimension()
if isinstance(value, int):
return Dimension.exact(value)
if isinstance(value, Dimension):
return value
if callable(value):
return to_dimension(value())
raise ValueError("Not an integer or Dimension object.")
def is_dimension(value: object) -> "TypeGuard[AnyDimension]":
"""
Test whether the given value could be a valid dimension.
(For usage in an assertion. It's not guaranteed in case of a callable.)
"""
if value is None:
return True
if callable(value):
return True # Assume it's a callable that doesn't take arguments.
if isinstance(value, (int, Dimension)):
return True
return False
# Common alias.
D = Dimension
# For backward-compatibility.
LayoutDimension = Dimension
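# Usage sketch:
#
#   d1 = D(min=10, preferred=20)              # max defaults to a huge value
#   d2 = D.exact(5)                           # min == max == preferred == 5
#   sum_layout_dimensions([d1, d2])           # -> Dimension(min=15, preferred=25, ...)
#   max_layout_dimensions([d1, d2])           # -> best-fit dimension for a split
#   to_dimension(30)                          # -> Dimension.exact(30)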
|
the-stack_106_15556
|
'''
Function:
    Succulent data crawler
Author:
    Charles
WeChat Official Account:
    Charles的皮卡丘
'''
import os
import time
import random
import pickle
import requests
from lxml import etree
'''Succulent data crawler'''
class SucculentCrawler():
def __init__(self, **kwargs):
self.referer_list = ["http://www.google.com/", "http://www.bing.com/", "http://www.baidu.com/", "https://www.360.cn/"]
self.ua_list = ['Mozilla/5.0 (Linux; Android 5.1.1; Z828 Build/LMY47V) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.111 Mobile Safari/537.36',
'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.75 Safari/537.36',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_2) AppleWebKit/537.22 (KHTML, like Gecko) Chrome/25.0.1364.172 Safari/537.22',
'Mozilla/5.0 (iPad; CPU OS 8_3 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) CriOS/47.0.2526.107 Mobile/12F69 Safari/600.1.4',
'Mozilla/5.0 (iPad; CPU OS 11_2_5 like Mac OS X) AppleWebKit/604.1.34 (KHTML, like Gecko) CriOS/64.0.3282.112 Mobile/15D60 Safari/604.1',
'Mozilla/5.0 (Linux; Android 7.1.1; SM-T350 Build/NMF26X) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.111 Safari/537.36',
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.98 Safari/537.36',
'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.124 Safari/537.36',
'Mozilla/5.0 (Linux; Android 6.0.1; SM-G610F Build/MMB29K) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Mobile Safari/537.36',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.98 Safari/537.36',
'Mozilla/5.0 (Linux; Android 5.1.1; 5065N Build/LMY47V; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/46.0.2490.76 Mobile Safari/537.36',
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.79 Safari/537.36',
'Mozilla/5.0 (Windows NT 6.2; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.80 Safari/537.36']
self.page_urls = self.__getAllPageUrls()
self.page_pointer = -1
self.savedir = 'resources/succulents'
    '''Crawl the data on the next page'''
def next(self):
        # Get the URL of the next list page
self.page_pointer += 1
if self.page_pointer >= len(self.page_urls):
return True
page_url = self.page_urls[self.page_pointer]
        # Extract each succulent's title, thumbnail URL and detail-page link from this page
res = requests.get(page_url, headers=self.__randomHeaders())
res.encoding = 'gbk'
html = etree.HTML(res.text)
html = html.xpath('//span[@class="tImgUlImg"]')
succulent_list = []
for item in html:
succulent_list.append([item.xpath('a/@title')[0].replace('/', '-').replace('\\', '-'), item.xpath('a/img/@src')[0], item.xpath('a/@href')[0]])
        # Crawl the detail page of each succulent
for item in succulent_list:
data = [item[0], item[1]]
headers = self.__randomHeaders()
headers.update({'Referer': page_url})
res = requests.get(item[-1], headers=headers)
res.encoding = 'gbk'
html_root = etree.HTML(res.text).xpath('//div[@class="cbRight"]/div[@class="mainBox"]')[0]
html = html_root.xpath('div[2]/table[@class="tTable"]/tr')[1:]
intro = ['繁殖: ', '易活度: ', '季节: ', '温度: ', '日照: ', '浇水量: ',
'日照说明: ', '浇水说明: ', '大类/属: ', '中文种名: ', '英文学名: ']
for idx, tr in enumerate(html):
if idx == 0:
intro[0] = intro[0] + tr.xpath('./td[2]/text()')[0] if tr.xpath('./td[2]/text()') else intro[0] + '未知'
intro[1] = intro[1] + int(tr.xpath('./td[4]/img/@src')[0].split('/')[-1].split('.')[0][1:]) * '⭐'
elif idx == 1:
intro[2] = intro[2] + tr.xpath('./td[2]/text()')[0] if tr.xpath('./td[2]/text()') else intro[2] + '未知'
intro[3] = intro[3] + tr.xpath('./td[4]/text()')[0].strip().replace(' ', '') if tr.xpath('./td[4]/text()') else intro[3]
elif idx == 2:
intro[4] = intro[4] + int(tr.xpath('./td[2]/img/@src')[0].split('/')[-1].split('.')[0]) * '☀'
intro[5] = intro[5] + int(tr.xpath('./td[4]/img/@src')[0].split('/')[-1].split('.')[0][1:]) * '💧'
html = html_root.xpath('div[2]/div')[0].xpath('//div[@class="pt5"]')
for idx, item in enumerate(html):
if idx == 0:
intro[6] = intro[6] + item.xpath('./span/text()')[0]
elif idx == 1:
intro[7] = intro[7] + item.xpath('./span/text()')[0]
elif idx == 3:
intro[8] = intro[8] + item.xpath('text()')[0] if item.xpath('text()') else intro[8] + '未知'
elif idx == 4:
intro[9] = intro[9] + item.xpath('text()')[0] if item.xpath('text()') else intro[9] + '未知'
elif idx == 5:
intro[10] = intro[10] + item.xpath('text()')[0] if item.xpath('text()') else intro[10] + '未知'
data.append(intro)
self.__saveItem(data)
time.sleep(random.random())
return False
    '''Save a crawled item to disk'''
def __saveItem(self, data):
if not os.path.exists(self.savedir):
os.mkdir(self.savedir)
savepath = os.path.join(self.savedir, data[0])
if not os.path.exists(savepath):
os.mkdir(savepath)
f = open(os.path.join(savepath, 'show.jpg'), 'wb')
f.write(requests.get(data[1], headers=self.__randomHeaders()).content)
f.close()
f = open(os.path.join(savepath, 'info.pkl'), 'wb')
pickle.dump(data, f)
f.close()
    '''Collect the URLs of all list pages'''
def __getAllPageUrls(self):
res = requests.get('http://www.mengsang.com/duorou/list_1_1.html', headers=self.__randomHeaders())
res.encoding = 'gbk'
html = etree.HTML(res.text)
num_pages = html.xpath('//span[@class="pageinfo"]/strong')[0].text
page_urls = ['http://www.mengsang.com/duorou/list_1_%s.html' % i for i in range(1, int(num_pages)+1)]
return page_urls
    '''Build random request headers'''
def __randomHeaders(self):
return {'user-agent': random.choice(self.ua_list), 'referer': random.choice(self.referer_list)}
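# Usage sketch (requires network access to mengsang.com):
#
#   crawler = SucculentCrawler()
#   finished = False
#   while not finished:
#       finished = crawler.next()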
|
the-stack_106_15558
|
# python 3.6
"""Inverts given images to latent codes with In-Domain GAN Inversion.
Basically, for a particular image (real or synthesized), this script first
employs the domain-guided encoder to produce a initial point in the latent
space and then performs domain-regularized optimization to refine the latent
code.
"""
import os
import argparse
import pickle
from training.misc import progress as tqdm
import numpy as np
import tensorflow as tf
from dnnlib import tflib
from perceptual_model import PerceptualModel
from training import misc
from utils.logger import setup_logger
from utils.visualizer import adjust_pixel_range
from utils.visualizer import HtmlPageVisualizer
from utils.visualizer import save_image, load_image, resize_image
def parse_args():
"""Parses arguments."""
parser = argparse.ArgumentParser()
parser.add_argument('model_path', type=str,
help='Path to the pre-trained model.')
parser.add_argument('src_dir', type=str,
                        help='Source image directory, which includes original '
                             'images, inverted codes, and image list.')
parser.add_argument('dst_dir', type=str,
help='Image directory, which includes original images, '
'inverted codes, and image list.')
parser.add_argument('-o', '--output_dir', type=str, default='',
help='Directory to save the results. If not specified, '
'`./results/inversion/${IMAGE_LIST}` '
'will be used by default.')
parser.add_argument('--batch_size', type=int, default=1,
help='Batch size. (default: 1)')
parser.add_argument('--viz_size', type=int, default=256,
help='Image size for visualization. (default: 256)')
parser.add_argument('--gpu_id', type=str, default='0',
help='Which GPU(s) to use. (default: `0`)')
parser.add_argument('--src_start', type=int, default=0,
help='decide start from which images')
parser.add_argument('--src_end', type=int, default=1,
help='decide end with which images')
parser.add_argument('--dst_start', type=int, default=0,
help='decide start from which images')
parser.add_argument('--dst_end', type=int, default=1,
help='decide end with which images')
parser.add_argument('--num_iterations', type=int, default=50,
help='Number of optimization iterations. (default: 50)')
parser.add_argument('--loss_weight_feat', type=float, default=1e-3,
help='The perceptual loss scale for optimization. '
'(default: 1e-3)')
parser.add_argument('--loss_weight_pixel', type=float, default=20,
help='The pixel loss scale for optimization. '
'(default: 20)')
parser.add_argument('--d_scale', type=float, default=1,
help='The discriminator loss scale for optimization. '
'(default: 1)')
parser.add_argument('--latent_scale', type=float, default=1,
help='The latent loss scale for optimization. '
'(default: 1)')
parser.add_argument('--learning_rate', type=float, default=0.01,
help='Learning rate for optimization. (default: 0.01)')
parser.add_argument('--num_images', type=int, default=10)
parser.add_argument('--model_name', type=str, default='ffhq')
return parser.parse_args()
def main():
"""Main function."""
args = parse_args()
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_id
src_dir = args.src_dir
src_dir_name = os.path.basename(src_dir.rstrip('/'))
assert os.path.exists(src_dir)
assert os.path.exists(f'{src_dir}/image_list.txt')
assert os.path.exists(f'{src_dir}/inverted_codes.npy')
dst_dir = args.dst_dir
dst_dir_name = os.path.basename(dst_dir.rstrip('/'))
assert os.path.exists(dst_dir)
assert os.path.exists(f'{dst_dir}/image_list.txt')
assert os.path.exists(f'{dst_dir}/inverted_codes.npy')
output_dir = args.output_dir or 'results/interpolation'
job_name = f'{src_dir_name}_TO_{dst_dir_name}'
logger = setup_logger(output_dir, f'{job_name}.log', f'{job_name}_logger')
logger.info(f'Loading model.')
tflib.init_tf({'rnd.np_random_seed': 1000})
assert os.path.exists(args.model_path)
E, _, D, Gs = misc.load_pkl(args.model_path)
# Get input size.
image_size = E.input_shape[2]
assert image_size == E.input_shape[3]
input_shape = E.input_shape
perceptual_model = PerceptualModel([image_size, image_size], False)
num_layers, z_dim = Gs.components.synthesis.input_shape[1:3]
# Build graph.
logger.info(f'Building graph.')
sess = tf.get_default_session()
x = tf.placeholder(tf.float32, shape=input_shape, name='real_image')
latent_src = tf.placeholder(tf.float32, shape=[args.batch_size, num_layers, z_dim], name='latent_src')
latent_dst = tf.placeholder(tf.float32, shape=[args.batch_size, num_layers, z_dim], name='latent_dst')
wp = tf.get_variable(shape=[args.batch_size, num_layers, z_dim], name='latent_code')
x_rec = Gs.components.synthesis.get_output_for(wp, randomize_noise=False)
setter = tf.assign(wp, latent_src)
x_255 = (tf.transpose(x, [0, 2, 3, 1]) + 1) / 2 * 255
x_rec_255 = (tf.transpose(x_rec, [0, 2, 3, 1]) + 1) / 2 * 255
x_feat = perceptual_model(x_255)
x_rec_feat = perceptual_model(x_rec_255)
loss_feat = tf.reduce_mean(tf.square(x_feat - x_rec_feat), axis=1)
loss_feat = args.loss_weight_feat * loss_feat
loss_pixel = tf.reduce_mean(tf.square(x_rec - x), axis=[1, 2, 3])
loss_pixel = args.loss_weight_pixel * loss_pixel
adv_score = D.get_output_for(x_rec, None)
loss_adv = tf.reduce_mean(tf.nn.softplus(-adv_score), axis=1)
loss_adv = args.d_scale * loss_adv
w_loss = args.latent_scale * tf.reduce_mean(tf.square(wp - latent_dst))
loss = loss_feat + loss_pixel + loss_adv + w_loss
optimizer = tf.train.AdamOptimizer(learning_rate=args.learning_rate)
train_op = optimizer.minimize(loss, var_list=[wp])
tflib.init_uninitialized_vars()
# Load image, codes, and boundary.
logger.info(f'Loading images and corresponding inverted latent codes.')
src_images_name = []
src_images_orin = []
src_images_invi = []
with open(f'{src_dir}/image_list.txt', 'r') as f:
for line in f:
name = os.path.splitext(os.path.basename(line.strip()))[0]
assert os.path.exists(f'{src_dir}/{name}_ori.png')
assert os.path.exists(f'{src_dir}/{name}_inv.png')
src_images_name.append(name)
image = load_image(f'{src_dir}/{name}_ori.png')
src_images_orin.append(np.transpose(image, [2, 0, 1]))
image = load_image(f'{src_dir}/{name}_inv.png')
src_images_invi.append(image)
src_images_orin = np.asarray(src_images_orin)
src_images_invi = np.asarray(src_images_invi)
src_latent_codes = np.load(f'{src_dir}/inverted_codes.npy')
assert src_latent_codes.shape[0] == src_images_orin.shape[0] == src_images_invi.shape[0]
src_images_orin = src_images_orin.astype(np.float32) / 255 * 2.0 - 1.0
src_images_orin = src_images_orin[args.src_start: args.src_end]
src_images_invi = src_images_invi[args.src_start: args.src_end]
src_latent_codes = src_latent_codes[args.src_start: args.src_end]
num_src = args.src_end - args.src_start
dst_images_name = []
dst_images_orin = []
dst_images_invi = []
with open(f'{dst_dir}/image_list.txt', 'r') as f:
for line in f:
name = os.path.splitext(os.path.basename(line.strip()))[0]
assert os.path.exists(f'{dst_dir}/{name}_ori.png')
assert os.path.exists(f'{dst_dir}/{name}_inv.png')
dst_images_name.append(name)
image = load_image(f'{dst_dir}/{name}_ori.png')
dst_images_orin.append(np.transpose(image, [2, 0, 1]))
image = load_image(f'{dst_dir}/{name}_inv.png')
dst_images_invi.append(image)
dst_images_orin = np.asarray(dst_images_orin)
dst_images_invi = np.asarray(dst_images_invi)
dst_latent_codes = np.load(f'{dst_dir}/inverted_codes.npy')
assert dst_latent_codes.shape[0] == dst_images_orin.shape[0] == dst_images_invi.shape[0]
dst_images_orin = dst_images_orin.astype(np.float32) / 255 * 2.0 - 1.0
dst_images_orin = dst_images_orin[args.dst_start: args.dst_end]
dst_images_invi = dst_images_invi[args.dst_start: args.dst_end]
dst_latent_codes = dst_latent_codes[args.dst_start: args.dst_end]
num_dst = args.dst_end - args.dst_start
save_interval = args.num_iterations // args.num_images
headers = ['Name', 'Original Image', 'Inversion Image']
for step in range(1, args.num_iterations + 1):
if step == args.num_iterations or step % save_interval == 0:
headers.append(f'Step {step:04d}')
headers.append('Target Image')
viz_size = None if args.viz_size == 0 else args.viz_size
visualizer = HtmlPageVisualizer(
num_rows=src_images_orin.shape[0] * dst_images_orin.shape[0],
num_cols=len(headers), viz_size=viz_size)
visualizer.set_headers(headers)
for src_ind in range(num_src):
img_src = src_images_orin[src_ind:src_ind+1]
img_src = adjust_pixel_range(img_src)
latent_code_src = src_latent_codes[src_ind:src_ind + 1]
for dst_ind in range(num_dst):
latent_code_dst = dst_latent_codes[dst_ind:dst_ind + 1]
sess.run(setter, {latent_src: latent_code_src})
dst_imgs = dst_images_orin[dst_ind:dst_ind + 1]
visualizer.set_cell(src_ind*num_dst+dst_ind, 0, text=src_images_name[src_ind])
visualizer.set_cell(src_ind*num_dst+dst_ind, 1, image=img_src[0])
visualizer.set_cell(src_ind*num_dst+dst_ind, 2, image=src_images_invi[src_ind])
col_idx = 3
for it in range(1, args.num_iterations+1):
output_node = [train_op, loss, loss_feat, loss_pixel, loss_adv, w_loss]
feed_dict = {x: dst_imgs, latent_dst: latent_code_dst}
_, loss_, feat_loss_, loss_pixel_, loss_adv_, w_loss_ = sess.run(output_node, feed_dict)
if it % save_interval == 0:
x_rec_ = sess.run(x_rec)
imgs_ = adjust_pixel_range(x_rec_)
visualizer.set_cell(src_ind*num_dst+dst_ind, col_idx, image=imgs_[0])
col_idx += 1
print(f'Iter: {it:04d} loss: {np.mean(loss_):6.4f} feat_loss: {np.mean(feat_loss_):6.4f}'
f' pixel_loss: {np.mean(loss_pixel_):6.4f} adv: {np.mean(loss_adv_):6.4f} '
f'w_loss: {np.mean(w_loss_):6.4}')
visualizer.set_cell(src_ind * num_dst + dst_ind, col_idx, image=dst_images_invi[dst_ind])
visualizer.save(f'{output_dir}/{job_name}.html')
if __name__ == '__main__':
main()
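# Example invocation (script name, model file and directories are hypothetical; both
# directories must contain image_list.txt, inverted_codes.npy and *_ori.png/*_inv.png files):
#
#   python interpolate.py styleganinv_ffhq256.pkl \
#       results/inversion/src_faces results/inversion/dst_faces \
#       --src_start 0 --src_end 2 --dst_start 0 --dst_end 2 --num_iterations 100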
|
the-stack_106_15560
|
import numpy as np
def train_test_split(
X: np.ndarray,
y: np.ndarray,
train_size: float = 0.8
):
"""Split dataset into training and test.
:param X: training data.
:param y: labels.
:param train_size: Float, should be between 0.0 and 1.0 and
represent the proportion of the dataset to include in the
train split. The rest proportion will be considered as test.
"""
indices = np.random.permutation(X.shape[0])
num_train_samples = int(train_size * len(indices))
indices_train = indices[: num_train_samples]
indices_test = indices[num_train_samples:]
return (X[indices_train, :], y[indices_train, :]), (
X[indices_test, :], y[indices_test, :])
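# Usage sketch (synthetic data):
#
#   X = np.random.rand(100, 5)
#   y = np.random.rand(100, 1)   # 2-D labels, matching the slicing above
#   (X_train, y_train), (X_test, y_test) = train_test_split(X, y, train_size=0.8)
#   assert X_train.shape[0] == 80 and X_test.shape[0] == 20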
|
the-stack_106_15563
|
"""
Copyright (c) 2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
from collections import OrderedDict
import torch.utils.data as data
import examples.torch.semantic_segmentation.utils.data as data_utils
class Cityscapes(data.Dataset):
"""Cityscapes dataset https://www.cityscapes-dataset.com/.
Keyword arguments:
- root_dir (``string``): Root directory path.
- mode (``string``): The type of dataset: 'train' for training set, 'val'
for validation set, and 'test' for test set.
- transform (``callable``, optional): A function/transform that takes in
an PIL image and returns a transformed version. Default: None.
- label_transform (``callable``, optional): A function/transform that takes
in the target and transforms it. Default: None.
- loader (``callable``, optional): A function to load an image given its
path. By default ``default_loader`` is used.
"""
# Training dataset root folders
train_folder = "leftImg8bit/train"
train_lbl_folder = "gtFine/train"
# Validation dataset root folders
val_folder = "leftImg8bit/val"
val_lbl_folder = "gtFine/val"
# Test dataset root folders
test_folder = "leftImg8bit/val"
test_lbl_folder = "gtFine/val"
# Filters to find the images
img_extension = '.png'
lbl_name_filter = 'labelIds'
# The values associated with the 35 classes
full_classes = (0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
32, 33, -1)
# The values above are remapped to the following
new_classes = (0, 0, 0, 0, 0, 0, 0, 1, 2, 0, 0, 3, 4, 5, 0, 0, 0, 6, 0, 7,
8, 9, 10, 11, 12, 13, 14, 15, 16, 0, 0, 17, 18, 19, 0)
# Default encoding for pixel value, class name, and class color
color_encoding = OrderedDict([
('unlabeled', (0, 0, 0)),
('road', (128, 64, 128)),
('sidewalk', (244, 35, 232)),
('building', (70, 70, 70)),
('wall', (102, 102, 156)),
('fence', (190, 153, 153)),
('pole', (153, 153, 153)),
('traffic_light', (250, 170, 30)),
('traffic_sign', (220, 220, 0)),
('vegetation', (107, 142, 35)),
('terrain', (152, 251, 152)),
('sky', (70, 130, 180)),
('person', (220, 20, 60)),
('rider', (255, 0, 0)),
('car', (0, 0, 142)),
('truck', (0, 0, 70)),
('bus', (0, 60, 100)),
('train', (0, 80, 100)),
('motorcycle', (0, 0, 230)),
('bicycle', (119, 11, 32))
])
def __init__(self,
root,
image_set='train',
transforms=None,
loader=data_utils.pil_loader):
super().__init__()
self.root_dir = root
self.mode = image_set
self.transforms = transforms
self.loader = loader
if self.mode.lower() == 'train':
# Get the training data and labels filepaths
self.train_data = data_utils.get_files(
os.path.join(self.root_dir, self.train_folder),
extension_filter=self.img_extension)
self.train_labels = data_utils.get_files(
os.path.join(self.root_dir, self.train_lbl_folder),
name_filter=self.lbl_name_filter,
extension_filter=self.img_extension)
elif self.mode.lower() == 'val':
# Get the validation data and labels filepaths
self.val_data = data_utils.get_files(
os.path.join(self.root_dir, self.val_folder),
extension_filter=self.img_extension)
self.val_labels = data_utils.get_files(
os.path.join(self.root_dir, self.val_lbl_folder),
name_filter=self.lbl_name_filter,
extension_filter=self.img_extension)
elif self.mode.lower() == 'test':
# Get the test data and labels filepaths
self.test_data = data_utils.get_files(
os.path.join(self.root_dir, self.test_folder),
extension_filter=self.img_extension)
self.test_labels = data_utils.get_files(
os.path.join(self.root_dir, self.test_lbl_folder),
name_filter=self.lbl_name_filter,
extension_filter=self.img_extension)
else:
raise RuntimeError("Unexpected dataset mode. "
"Supported modes are: train, val and test")
def __getitem__(self, index):
"""
Args:
- index (``int``): index of the item in the dataset
Returns:
A tuple of ``PIL.Image`` (image, label) where label is the ground-truth
of the image.
"""
if self.mode.lower() == 'train':
data_path, label_path = self.train_data[index], self.train_labels[
index]
elif self.mode.lower() == 'val':
data_path, label_path = self.val_data[index], self.val_labels[
index]
elif self.mode.lower() == 'test':
data_path, label_path = self.test_data[index], self.test_labels[
index]
else:
raise RuntimeError("Unexpected dataset mode. "
"Supported modes are: train, val and test")
img, label = self.loader(data_path, label_path)
# Remap class labels
label = data_utils.remap(label, self.full_classes, self.new_classes)
if self.transforms is not None:
img, label = self.transforms(img, label)
return img, label
def __len__(self):
"""Returns the length of the dataset."""
if self.mode.lower() == 'train':
return len(self.train_data)
if self.mode.lower() == 'val':
return len(self.val_data)
if self.mode.lower() == 'test':
return len(self.test_data)
raise RuntimeError("Unexpected dataset mode. "
"Supported modes are: train, val and test")
|
the-stack_106_15564
|
from cosivina.base import *
from cosivina.auxiliary import *
from cosivina.Element import Element, elementSpec
lateralInteractions1DSpec = [
('size', sizeTupleType),
('sigmaExc', floatType),
('amplitudeExc', floatType),
('sigmaInh', floatType),
('amplitudeInh', floatType),
('amplitudeGlobal', floatType),
('circular', boolType),
('normalized', boolType),
('cutoffFactor', floatType),
('kernelRange', intArrayType),
('kernel', arrayType1D),
('output', arrayType2D),
('fullSum', arrayType2D)
]
@jitclass(elementSpec + lateralInteractions1DSpec)
class LateralInteractions1D(Element):
''' Connective element performing 1D convolution with a
difference-of-Gaussians kernel with a global component. '''
initElement = Element.__init__
def __init__(self, label, size = (1, 1), sigmaExc = 1., amplitudeExc = 0.,
sigmaInh = 1., amplitudeInh = 0., amplitudeGlobal = 0.,
circular = True, normalized = True, cutoffFactor = 5.):
'''
Args:
label (str): Element label.
size (tuple of int): Size of the input and output.
sigmaExc (float): Width parameter of the excitatory Gaussian
component of the kernel.
amplitudeExc (float): Amplitude of the excitatory component.
sigmaInh (float): Width parameter of the inhibitory Gaussian
component of the kernel.
amplitudeInh (float): Amplitude of the inhibitory component.
amplitudeGlobal (float): Amplitude of the global component.
circular (bool): Flag indicating whether convolution is
circular.
normalized (bool): Flag indicating whether Gaussian
components are normalized before scaling with amplitude.
cutoffFactor (float): Multiple of the greater sigma value
at which the kernel is truncated.
'''
self.initElement(label)
self.parameters = makeParamDict({
'size': PS_FIXED,
'sigmaExc': PS_INIT_STEP_REQUIRED,
'amplitudeExc': PS_INIT_STEP_REQUIRED,
'sigmaInh': PS_INIT_STEP_REQUIRED,
'amplitudeInh': PS_INIT_STEP_REQUIRED,
'amplitudeGlobal': PS_CHANGEABLE,
'circular': PS_INIT_STEP_REQUIRED,
'normalized': PS_INIT_STEP_REQUIRED,
'cutoffFactor': PS_INIT_STEP_REQUIRED
})
self.components = makeComponentList(['output', 'fullSum'])
self.defaultOutputComponent = 'output'
self.size = size
self.sigmaExc = sigmaExc
self.amplitudeExc = amplitudeExc
self.sigmaInh = sigmaInh
self.amplitudeInh = amplitudeInh
self.amplitudeGlobal = amplitudeGlobal
self.circular = circular
self.normalized = normalized
self.cutoffFactor = cutoffFactor
def init(self):
self.kernelRange = computeKernelRange(
max((self.amplitudeExc != 0) * self.sigmaExc,
(self.amplitudeInh != 0) * self.sigmaInh),
            self.cutoffFactor, self.size[1], self.circular)
# note flipped kernel ranges (because kernel is flipped again in convolution)
self.kernel = self.amplitudeExc * \
gauss(np.arange(-self.kernelRange[1], self.kernelRange[0] + 1),
0, self.sigmaExc, self.normalized) - self.amplitudeInh * \
gauss(np.arange(-self.kernelRange[1], self.kernelRange[0] + 1),
0, self.sigmaInh, self.normalized)
self.output = np.zeros(self.size)
self.fullSum = np.zeros((1, 1))
def step(self, time, deltaT):
if self.circular:
for i in range(self.size[0]):
self.output[i][:] = circConv(self.inputs[0][i], self.kernel, self.kernelRange)
else:
for i in range(self.size[0]):
self.output[i][:] = linearConv(self.inputs[0][i], self.kernel, self.kernelRange)
self.fullSum[0][0] = np.sum(self.inputs[0])
self.output += self.amplitudeGlobal * self.fullSum
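# Standalone illustration (not part of cosivina): approximates the
# difference-of-Gaussians kernel that init() builds, using plain numpy and
# assuming gauss() normalizes each component by its sum. Parameter values here
# are arbitrary examples.
if __name__ == '__main__':
    import numpy as np
    r = np.arange(-25, 26)
    gauss_exc = np.exp(-0.5 * (r / 5.0) ** 2)
    gauss_inh = np.exp(-0.5 * (r / 12.5) ** 2)
    dog_kernel = 15.0 * gauss_exc / gauss_exc.sum() - 15.0 * gauss_inh / gauss_inh.sum()
    print('kernel peak: %.4f, kernel minimum: %.4f' % (dog_kernel.max(), dog_kernel.min()))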
|
the-stack_106_15566
|
import pandas as pd
QUERY_SAMPLE = 'nmdc:mga04781'
QUERY_AWS_PATH = 's3://psss-metagenomics-codeathon-data/marine/nmdc:mga04781/assembly/nmdc_mga04781_contigs.fna'
metadata = pd.read_csv('data.tsv', sep='\t')
metadata = metadata.query("MGA_ID != @QUERY_SAMPLE")
metadata = metadata.set_index('MGA_ID')
with open('query_paths.txt', 'w') as query_file:
    query_file.write(QUERY_AWS_PATH + '\n')
full_paths = ['%s/assembly/%s_contigs.fna' % (aws_path, sample.replace(':', '_'))
              for sample, aws_path in metadata['AWS_PATH'].items()]
with open('reference_paths.txt', 'w') as reference_file:
    reference_file.write('%s\n' % '\n'.join(full_paths))
|
the-stack_106_15567
|
"""
Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy of this
software and associated documentation files (the "Software"), to deal in the Software
without restriction, including without limitation the rights to use, copy, modify,
merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import six
from cfnlint import CloudFormationLintRule
from cfnlint import RuleMatch
class CodepipelineStages(CloudFormationLintRule):
"""Check if CodePipeline Stages are set up properly."""
id = 'E2540'
shortdesc = 'CodePipeline Stages'
description = 'See if CodePipeline stages are set correctly'
source_url = 'https://docs.aws.amazon.com/codepipeline/latest/userguide/reference-pipeline-structure.html#pipeline-requirements'
tags = ['properties', 'codepipeline']
def check_stage_count(self, stages, path):
"""Check that there is minimum 2 stages."""
matches = []
if len(stages) < 2:
message = 'A pipeline must contain at least two stages.'
matches.append(RuleMatch(path, message))
return matches
def check_first_stage(self, stages, path):
"""Validate the first stage of a pipeline has source actions."""
matches = []
if len(stages) < 1: # pylint: disable=C1801
self.logger.debug('Stages was empty. Should have been caught by generic linting.')
return matches
# pylint: disable=R1718
first_stage = set([a.get('ActionTypeId').get('Category') for a in stages[0]['Actions']])
if first_stage and 'Source' not in first_stage:
message = 'The first stage of a pipeline must contain at least one source action.'
matches.append(RuleMatch(path + [0], message))
if len(first_stage) != 1:
message = 'The first stage of a pipeline must contain only source actions.'
matches.append(RuleMatch(path + [0], message))
return matches
def check_source_actions(self, stages, path):
"""Validate the all of the stages."""
matches = []
categories = set()
if len(stages) < 1: # pylint: disable=C1801
self.logger.debug('Stages was empty. Should have been caught by generic linting.')
return matches
for sidx, stage in enumerate(stages):
for aidx, action in enumerate(stage.get('Actions', [])):
action_type_id = action.get('ActionTypeId')
categories.add(action_type_id.get('Category'))
if sidx > 0 and action_type_id.get('Category') == 'Source':
message = 'Only the first stage of a pipeline may contain source actions.'
matches.append(RuleMatch(path + [sidx, 'Actions', aidx], message))
if not (categories - set(['Source'])):
message = 'At least one stage in pipeline must contain an action that is not a source action.'
matches.append(RuleMatch(path, message))
return matches
def check_names_unique(self, value, path):
"""Check that stage names are unique."""
matches = []
stage_names = set()
for sidx, stage in enumerate(value):
stage_name = stage.get('Name')
if isinstance(stage_name, six.string_types):
if stage_name in stage_names:
message = 'All stage names within a pipeline must be unique. ({name})'.format(
name=stage_name,
)
matches.append(RuleMatch(path + [sidx, 'Name'], message))
stage_names.add(stage_name)
else:
self.logger.debug('Found non string for stage name: %s', stage_name)
return matches
def match(self, cfn):
"""Check CodePipeline stages"""
matches = []
resources = cfn.get_resource_properties(['AWS::CodePipeline::Pipeline'])
for resource in resources:
path = resource['Path']
properties = resource['Value']
stages = properties.get('Stages')
if not isinstance(stages, list):
self.logger.debug('Stages not list. Should have been caught by generic linting.')
return matches
try:
matches.extend(
self.check_stage_count(stages, path + ['Stages'])
)
matches.extend(
self.check_first_stage(stages, path + ['Stages'])
)
matches.extend(
self.check_source_actions(stages, path + ['Stages'])
)
matches.extend(
self.check_names_unique(stages, path + ['Stages'])
)
except AttributeError as err:
self.logger.debug('Got AttributeError. Should have been caught by generic linting. '
'Ignoring the error here: %s', str(err))
return matches
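# Illustrative (hypothetical) template fragment of the shape this rule validates:
# at least two stages, a first stage containing only Source actions, unique stage
# names, and at least one non-source action in a later stage, e.g.
#
#   Pipeline:
#     Type: AWS::CodePipeline::Pipeline
#     Properties:
#       Stages:
#         - Name: Source
#           Actions:
#             - Name: FetchSource
#               ActionTypeId: {Category: Source, Owner: AWS, Provider: S3, Version: '1'}
#         - Name: Build
#           Actions:
#             - Name: BuildApp
#               ActionTypeId: {Category: Build, Owner: AWS, Provider: CodeBuild, Version: '1'}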
|
the-stack_106_15572
|
import logging
from cached_http_fetcher.meta import get_valid_meta, put_meta
from cached_http_fetcher.model import Meta
from cached_http_fetcher.storage import MemoryStorage
def test_get_valid_meta(logger: logging.Logger) -> None:
now = 1617355068
future = now + 3600
past = now - 3600
url = "http://example.com/image1.jpg"
cached_url = "http://cdn.example.com/example.com/image1.jpg"
meta_storage = MemoryStorage()
meta_storage_dict = meta_storage.dict_for_debug()
# get empty
meta = get_valid_meta(url, now, meta_storage, logger=logger)
assert meta is None
# get non-expired meta
meta = Meta(
cached_url=cached_url,
etag=None,
last_modified=None,
content_sha1=None,
fetched_at=now,
expired_at=future,
)
put_meta(url, meta, meta_storage)
assert get_valid_meta(url, now, meta_storage, logger=logger) == meta
assert len(meta_storage_dict) == 1 # this entry will be deleted on the next call
# get expired meta
meta = Meta(
cached_url=cached_url,
etag=None,
last_modified=None,
content_sha1=None,
fetched_at=now,
expired_at=past,
)
put_meta(url, meta, meta_storage)
assert get_valid_meta(url, now, meta_storage, logger=logger) is None
assert len(meta_storage_dict) == 1 # not deleted
|
the-stack_106_15573
|
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider, Button, RadioButtons
fig = plt.figure()
ax = fig.add_subplot(111)
plt.subplots_adjust(left=0.25, bottom=0.25)
t = np.arange(0.0, 1.0, 0.001)
a0 = 5
f0 = 3
s = a0*np.sin(2*np.pi*f0*t)
l, = plt.plot(t,s, lw=2, color='red')
plt.axis([0, 1, -10, 10])
axcolor = 'lightgoldenrodyellow'
axfreq = plt.axes([0.25, 0.1, 0.65, 0.03], facecolor=axcolor)
axamp = plt.axes([0.25, 0.15, 0.65, 0.03], facecolor=axcolor)
sfreq = Slider(axfreq, 'Freq', 0.1, 30.0, valinit=f0)
samp = Slider(axamp, 'Amp', 0.1, 10.0, valinit=a0)
def update(val):
amp = samp.val
freq = sfreq.val
l.set_ydata(amp*np.sin(2*np.pi*freq*t))
fig.canvas.draw_idle()
sfreq.on_changed(update)
samp.on_changed(update)
resetax = plt.axes([0.8, 0.025, 0.1, 0.04])
button = Button(resetax, 'Reset', color=axcolor, hovercolor='0.975')
def reset(event):
sfreq.reset()
samp.reset()
button.on_clicked(reset)
rax = plt.axes([0.025, 0.5, 0.15, 0.15], facecolor=axcolor)
radio = RadioButtons(rax, ('red', 'blue', 'green'), active=0)
def colorfunc(label):
l.set_color(label)
fig.canvas.draw_idle()
radio.on_clicked(colorfunc)
plt.show()
|
the-stack_106_15579
|
# coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Exception Class
# this is a auto generated file generated by Cheetah
# Namespace: com.sun.star.sheet
# Libre Office Version: 7.3
from typing import TYPE_CHECKING
from ooo.oenv.env_const import UNO_ENVIRONMENT, UNO_RUNTIME
if (not TYPE_CHECKING) and UNO_RUNTIME and UNO_ENVIRONMENT:
import uno
def _get_class():
orig_init = None
ordered_keys = ('Message', 'Context')
def init(self, *args, **kwargs):
if len(kwargs) == 0 and len(args) == 1 and getattr(args[0], "__class__", None) == self.__class__:
orig_init(self, args[0])
return
kargs = kwargs.copy()
for i, arg in enumerate(args):
kargs[ordered_keys[i]] = arg
orig_init(self, **kargs)
type_name = 'com.sun.star.sheet.NoConvergenceException'
ex = uno.getClass(type_name)
ex.__ooo_ns__ = 'com.sun.star.sheet'
ex.__ooo_full_ns__= type_name
ex.__ooo_type_name__ = 'exception'
orig_init = ex.__init__
ex.__init__ = init
return ex
NoConvergenceException = _get_class()
else:
from ...lo.sheet.no_convergence_exception import NoConvergenceException as NoConvergenceException
__all__ = ['NoConvergenceException']
|
the-stack_106_15584
|
import numpy as np
import os
import sys
import ntpath
import time
from . import util, html
from subprocess import Popen, PIPE
if sys.version_info[0] == 2:
VisdomExceptionBase = Exception
else:
VisdomExceptionBase = ConnectionError
def save_images(webpage, visuals, image_path, aspect_ratio=1.0, width=256):
"""Save images to the disk.
Parameters:
        webpage (the HTML class) -- the HTML webpage class that stores these images (see html.py for more details)
visuals (OrderedDict) -- an ordered dictionary that stores (name, images (either tensor or numpy) ) pairs
image_path (str) -- the string is used to create image paths
aspect_ratio (float) -- the aspect ratio of saved images
width (int) -- the images will be resized to width x width
This function will save images stored in 'visuals' to the HTML file specified by 'webpage'.
"""
image_dir = webpage.get_image_dir()
short_path = ntpath.basename(image_path[0])
name = os.path.splitext(short_path)[0]
webpage.add_header(name)
ims, txts, links = [], [], []
for label, im_data in visuals.items():
im = util.tensor2im(im_data)
image_name = '%s_%s.png' % (name, label)
save_path = os.path.join(image_dir, image_name)
util.save_image(im, save_path, aspect_ratio=aspect_ratio)
ims.append(image_name)
txts.append(label)
links.append(image_name)
webpage.add_images(ims, txts, links, width=width)
class Visualizer():
"""This class includes several functions that can display/save images and print/save logging information.
It uses a Python library 'visdom' for display, and a Python library 'dominate' (wrapped in 'HTML') for creating HTML files with images.
"""
def __init__(self, opt):
"""Initialize the Visualizer class
Parameters:
opt -- stores all the experiment flags; needs to be a subclass of BaseOptions
Step 1: Cache the training/test options
Step 2: connect to a visdom server
        Step 3: create an HTML object for saving HTML files
Step 4: create a logging file to store training losses
"""
self.opt = opt # cache the option
self.display_id = opt.display_id
self.use_html = opt.isTrain and not opt.no_html
self.win_size = opt.display_winsize
self.name = opt.name
self.port = opt.display_port
self.saved = False
if self.display_id > 0: # connect to a visdom server given <display_port> and <display_server>
import visdom
self.ncols = opt.display_ncols
self.vis = visdom.Visdom(server=opt.display_server, port=opt.display_port, env=opt.display_env)
if not self.vis.check_connection():
self.create_visdom_connections()
if self.use_html: # create an HTML object at <checkpoints_dir>/web/; images will be saved under <checkpoints_dir>/web/images/
self.web_dir = os.path.join(opt.checkpoints_dir, opt.name, 'web')
self.img_dir = os.path.join(self.web_dir, 'images')
print('create web directory %s...' % self.web_dir)
util.mkdirs([self.web_dir, self.img_dir])
# create a logging file to store training losses
self.log_name = os.path.join(opt.checkpoints_dir, opt.name, 'loss_log.txt')
with open(self.log_name, "a") as log_file:
now = time.strftime("%c")
log_file.write('================ Training Loss (%s) ================\n' % now)
def reset(self):
"""Reset the self.saved status"""
self.saved = False
def create_visdom_connections(self):
"""If the program could not connect to Visdom server, this function will start a new server at port < self.port > """
cmd = sys.executable + ' -m visdom.server -p %d &>/dev/null &' % self.port
print('\n\nCould not connect to Visdom server. \n Trying to start a server....')
print('Command: %s' % cmd)
Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)
def display_current_results(self, visuals, epoch, save_result, pic_num):
"""Display current results on visdom; save current results to an HTML file.
Parameters:
visuals (OrderedDict) - - dictionary of images to display or save
epoch (int) - - the current epoch
save_result (bool) - - if save the current results to an HTML file
"""
if self.display_id > 0: # show images in the browser using visdom
ncols = self.ncols
if ncols > 0: # show all the images in one visdom panel
ncols = min(ncols, len(visuals))
h, w = next(iter(visuals.values())).shape[:2]
table_css = """<style>
table {border-collapse: separate; border-spacing: 4px; white-space: nowrap; text-align: center}
table td {width: % dpx; height: % dpx; padding: 4px; outline: 4px solid black}
</style>""" % (w, h) # create a table css
# create a table of images.
title = self.name
label_html = ''
label_html_row = ''
images = []
idx = 0
for label, image in visuals.items():
image_numpy = util.tensor2im(image)
label_html_row += '<td>%s</td>' % label
images.append(image_numpy.transpose([2, 0, 1]))
idx += 1
if idx % ncols == 0:
label_html += '<tr>%s</tr>' % label_html_row
label_html_row = ''
white_image = np.ones_like(image_numpy.transpose([2, 0, 1])) * 255
while idx % ncols != 0:
images.append(white_image)
label_html_row += '<td></td>'
idx += 1
if label_html_row != '':
label_html += '<tr>%s</tr>' % label_html_row
try:
self.vis.images(images, nrow=ncols, win=self.display_id + 1,
padding=2, opts=dict(title=title + ' images'))
label_html = '<table>%s</table>' % label_html
self.vis.text(table_css + label_html, win=self.display_id + 2,
opts=dict(title=title + ' labels'))
except VisdomExceptionBase:
self.create_visdom_connections()
else: # show each image in a separate visdom panel;
idx = 1
try:
for label, image in visuals.items():
image_numpy = util.tensor2im(image)
self.vis.image(image_numpy.transpose([2, 0, 1]), opts=dict(title=label),
win=self.display_id + idx)
idx += 1
except VisdomExceptionBase:
self.create_visdom_connections()
if self.use_html and (save_result or not self.saved): # save images to an HTML file if they haven't been saved.
self.saved = True
# save images to the disk
for label, image in visuals.items():
image_numpy = util.tensor2im(image)
img_path = os.path.join(self.img_dir, 'epoch%.3d_%s_%i.png' % (epoch, label, pic_num))
util.save_image(image_numpy, img_path)
# update website
webpage = html.HTML(self.web_dir, 'Experiment name = %s' % self.name, refresh=1)
for n in range(epoch, 0, -1):
webpage.add_header('epoch [%d]' % n)
ims, txts, links = [], [], []
for label, image_numpy in visuals.items():
                    image_numpy = util.tensor2im(image_numpy)
img_path = 'epoch%.3d_%s.png' % (n, label)
ims.append(img_path)
txts.append(label)
links.append(img_path)
webpage.add_images(ims, txts, links, width=self.win_size)
webpage.save()
def plot_current_losses(self, epoch, counter_ratio, losses):
"""display the current losses on visdom display: dictionary of error labels and values
Parameters:
epoch (int) -- current epoch
counter_ratio (float) -- progress (percentage) in the current epoch, between 0 to 1
losses (OrderedDict) -- training losses stored in the format of (name, float) pairs
"""
if not hasattr(self, 'plot_data'):
self.plot_data = {'X': [], 'Y': [], 'legend': list(losses.keys())}
self.plot_data['X'].append(epoch + counter_ratio)
self.plot_data['Y'].append([losses[k] for k in self.plot_data['legend']])
try:
self.vis.line(
X=np.stack([np.array(self.plot_data['X'])] * len(self.plot_data['legend']), 1),
Y=np.array(self.plot_data['Y']),
opts={
'title': self.name + ' loss over time',
'legend': self.plot_data['legend'],
'xlabel': 'epoch',
'ylabel': 'loss'},
win=self.display_id)
except VisdomExceptionBase:
self.create_visdom_connections()
# losses: same format as |losses| of plot_current_losses
def print_current_losses(self, epoch, iters, losses, t_comp, t_data):
"""print current losses on console; also save the losses to the disk
Parameters:
epoch (int) -- current epoch
iters (int) -- current training iteration during this epoch (reset to 0 at the end of every epoch)
losses (OrderedDict) -- training losses stored in the format of (name, float) pairs
t_comp (float) -- computational time per data point (normalized by batch_size)
t_data (float) -- data loading time per data point (normalized by batch_size)
"""
message = '(epoch: %d, iters: %d, time: %.3f, data: %.3f) ' % (epoch, iters, t_comp, t_data)
for k, v in losses.items():
message += '%s: %.3f ' % (k, v)
print(message) # print the message
with open(self.log_name, "a") as log_file:
log_file.write('%s\n' % message) # save the message
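# Illustrative usage sketch (hypothetical, mirroring a pix2pix/CycleGAN-style
# training loop; `opt`, `model`, `dataset_size`, `i`, and the timing values are
# assumed to exist in the caller):
#   visualizer = Visualizer(opt)
#   visualizer.reset()
#   visualizer.display_current_results(model.get_current_visuals(), epoch, save_result, i)
#   visualizer.plot_current_losses(epoch, float(i) / dataset_size, model.get_current_losses())
#   visualizer.print_current_losses(epoch, i, model.get_current_losses(), t_comp, t_data)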
|
the-stack_106_15586
|
import argparse
import math
import numpy as np
import csv
import logging
# Setup logging
logger = logging.getLogger()
handler = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
def _run(datafile, iterations, alpha, scaling):
# Read CSV file into matrix and split into features and values
headers, rows = _readcsv(datafile)
headers.insert(0, 'intercept') # add the y-intercept as a feature header itself
matrix = np.matrix(rows)
features = matrix[:, :-1]
values = matrix[:, -1]
features = np.insert(features, 0, 1, axis=1) # left-pad the features with 1's
# Scale the features for better performance
if scaling:
logging.info('Scaling features for better performance')
scales = scalefeatures(features)
output = ', '.join(['%s = %s' % (key, value) for (key, value) in _mergeheaders(headers, scales).items()])
logging.info('Scaled features with the following scales: \n' + output)
# Run gradient descent
history = gradientdescent(features, values, iterations, alpha)
# Get the best parameters from the history
params = history[-1:, :-1]
# Print the parameters for the features
output = ', '.join(['%s = %s' % (key, value) for (key, value) in _mergeheaders(headers, params).items()])
logging.info('Found the following parameters that best separates the data:\n' + output)
# Test parameters and print accuracy
accuracy = testparameters(params, features, values)
logging.info('Parameters accuracy: %s%%' % round(accuracy, 2))
def _readcsv(file):
"""Read a CSV file into a multidimensional array of rows and columns."""
rows = []
with open(file, newline='') as csvfile:
        reader = csv.reader(csvfile, delimiter=',', quotechar='"')
headers = next(reader, None) # headers
for row in reader:
rows.append([float(x) for x in row])
return headers, rows
def gradientdescent(features, values, iterations, alpha):
"""Performs gradient descent and returns the parameters associated with their cost for each iteration."""
m = features.shape[0] # number of training examples
n = features.shape[1] # number of features
history = np.zeros((iterations, n+1))
params = np.zeros((n, 1))
for itr in range(iterations):
# Perform vectorized gradient descent
gradient = (1 / m) * features.T * (sigmoid(features * params) - values)
params = params - alpha * gradient
# Store the parameters and their associated cost in the history matrix
history[itr, :-1] = params.T
history[itr, -1] = cost(features, values, params)
return history
def cost(features, values, parameters):
"""Computes the cost of applying the parameters to the features."""
m = features.shape[0] # number of training examples
h = sigmoid(features * parameters)
return (1 / m) * (-values.T * np.log10(h) - (1 - values).T * np.log10(1 - h))
def sigmoid(z):
"""Computes the sigmoid of z, which can be a matrix, vector or scalar."""
return np.divide(1, np.add(1, np.power(math.e, -z)))
def _mergeheaders(headers, params):
"""Merges the headers from the CSV file with the found parameters into a dictionary."""
result = {}
for i, header in enumerate(headers[:-1]):
result[header] = params.item(i)
return result
def testparameters(parameters, features, values):
"""Computes the accuracy of the given parameters when applied to the data itself."""
m = features.shape[0] # number of training examples
hits = 0
for row in range(m):
        test = int(sigmoid(features[row] * parameters.T).item() >= 0.5)
hits += 1 if test == values[row] else 0
return (hits / m) * 100
def scalefeatures(features):
"""Scales the features of the matrix such that they are in the range [-1;1]."""
colindex = -1
n = features.shape[1] # number of features
scales = np.ones((n, 1))
for column in features.T:
colindex += 1
stddev = np.max(column) - np.min(column)
if stddev == 0: # ignore features that don't change in value
continue
avg = np.full((features.shape[0], 1), np.average(column))
stddev = np.full((features.shape[0], 1), stddev)
features[:, colindex] = (column.T - avg) / stddev
scales[colindex] = 1 / stddev.item(colindex)
return scales
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('data', type=str, help='the CSV file containing the data')
parser.add_argument('-a', '--alpha', type=float, default=0.01, help='the learning rate for gradient descent')
parser.add_argument('-i', '--iterations', type=int, default=1500,
help='the number of iterations for gradient descent')
parser.add_argument('-ns', '--noscaling', action='store_true', default=False, help='turn off feature scaling')
args = parser.parse_args()
_run(args.data, args.iterations, args.alpha, not args.noscaling)
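# Example of the (hypothetical) CSV layout this script expects: a header row,
# numeric feature columns, and a 0/1 label in the last column, e.g.
#
#   hours_studied,hours_slept,passed
#   2.0,9.0,0
#   8.5,6.5,1
#
# Invocation sketch (the script name is hypothetical):
#   python logistic_regression.py data.csv --alpha 0.05 --iterations 3000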
|
the-stack_106_15587
|
# Copyright 2012 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import iso8601
from nova.api.openstack.compute import availability_zone as az_v21
from nova.api.openstack.compute import extension_info
from nova.api.openstack.compute.legacy_v2.contrib import availability_zone \
as az_v2
from nova.api.openstack.compute.legacy_v2 import servers as servers_v2
from nova.api.openstack.compute import servers as servers_v21
from nova.api.openstack import extensions
from nova import availability_zones
from nova.compute import api as compute_api
from nova.compute import flavors
from nova import context
from nova import db
from nova import exception
from nova import servicegroup
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_instance
from nova.tests.unit.image import fake
from nova.tests.unit import matchers
from nova.tests.unit.objects import test_service
from oslo_config import cfg
FAKE_UUID = fakes.FAKE_UUID
def fake_service_get_all(context, disabled=None):
def __fake_service(binary, availability_zone,
created_at, updated_at, host, disabled):
return dict(test_service.fake_service,
binary=binary,
availability_zone=availability_zone,
available_zones=availability_zone,
created_at=created_at,
updated_at=updated_at,
host=host,
disabled=disabled)
if disabled:
return [__fake_service("nova-compute", "zone-2",
datetime.datetime(2012, 11, 14, 9, 53, 25, 0),
datetime.datetime(2012, 12, 26, 14, 45, 25, 0),
"fake_host-1", True),
__fake_service("nova-scheduler", "internal",
datetime.datetime(2012, 11, 14, 9, 57, 3, 0),
datetime.datetime(2012, 12, 26, 14, 45, 25, 0),
"fake_host-1", True),
__fake_service("nova-network", "internal",
datetime.datetime(2012, 11, 16, 7, 25, 46, 0),
datetime.datetime(2012, 12, 26, 14, 45, 24, 0),
"fake_host-2", True)]
else:
return [__fake_service("nova-compute", "zone-1",
datetime.datetime(2012, 11, 14, 9, 53, 25, 0),
datetime.datetime(2012, 12, 26, 14, 45, 25, 0),
"fake_host-1", False),
__fake_service("nova-sched", "internal",
datetime.datetime(2012, 11, 14, 9, 57, 3, 0),
datetime.datetime(2012, 12, 26, 14, 45, 25, 0),
"fake_host-1", False),
__fake_service("nova-network", "internal",
datetime.datetime(2012, 11, 16, 7, 25, 46, 0),
datetime.datetime(2012, 12, 26, 14, 45, 24, 0),
"fake_host-2", False)]
def fake_service_is_up(self, service):
return service['binary'] != u"nova-network"
def fake_set_availability_zones(context, services):
return services
def fake_get_availability_zones(context):
return ['nova'], []
CONF = cfg.CONF
class AvailabilityZoneApiTestV21(test.NoDBTestCase):
availability_zone = az_v21
def setUp(self):
super(AvailabilityZoneApiTestV21, self).setUp()
availability_zones.reset_cache()
self.stubs.Set(db, 'service_get_all', fake_service_get_all)
self.stubs.Set(availability_zones, 'set_availability_zones',
fake_set_availability_zones)
self.stubs.Set(servicegroup.API, 'service_is_up', fake_service_is_up)
self.controller = self.availability_zone.AvailabilityZoneController()
self.req = fakes.HTTPRequest.blank('')
def test_filtered_availability_zones(self):
zones = ['zone1', 'internal']
expected = [{'zoneName': 'zone1',
'zoneState': {'available': True},
"hosts": None}]
result = self.controller._get_filtered_availability_zones(zones, True)
self.assertEqual(result, expected)
expected = [{'zoneName': 'zone1',
'zoneState': {'available': False},
"hosts": None}]
result = self.controller._get_filtered_availability_zones(zones,
False)
self.assertEqual(result, expected)
def test_availability_zone_index(self):
resp_dict = self.controller.index(self.req)
self.assertIn('availabilityZoneInfo', resp_dict)
zones = resp_dict['availabilityZoneInfo']
self.assertEqual(len(zones), 2)
self.assertEqual(zones[0]['zoneName'], u'zone-1')
self.assertTrue(zones[0]['zoneState']['available'])
self.assertIsNone(zones[0]['hosts'])
self.assertEqual(zones[1]['zoneName'], u'zone-2')
self.assertFalse(zones[1]['zoneState']['available'])
self.assertIsNone(zones[1]['hosts'])
def test_availability_zone_detail(self):
resp_dict = self.controller.detail(self.req)
self.assertIn('availabilityZoneInfo', resp_dict)
zones = resp_dict['availabilityZoneInfo']
self.assertEqual(len(zones), 3)
timestamp = iso8601.parse_date("2012-12-26T14:45:25Z")
nova_network_timestamp = iso8601.parse_date("2012-12-26T14:45:24Z")
expected = [{'zoneName': 'zone-1',
'zoneState': {'available': True},
'hosts': {'fake_host-1': {
'nova-compute': {'active': True, 'available': True,
'updated_at': timestamp}}}},
{'zoneName': 'internal',
'zoneState': {'available': True},
'hosts': {'fake_host-1': {
'nova-sched': {'active': True, 'available': True,
'updated_at': timestamp}},
'fake_host-2': {
'nova-network': {
'active': True,
'available': False,
'updated_at': nova_network_timestamp}}}},
{'zoneName': 'zone-2',
'zoneState': {'available': False},
'hosts': None}]
self.assertEqual(expected, zones)
def test_availability_zone_detail_no_services(self):
expected_response = {'availabilityZoneInfo':
[{'zoneState': {'available': True},
'hosts': {},
'zoneName': 'nova'}]}
self.stubs.Set(availability_zones, 'get_availability_zones',
fake_get_availability_zones)
resp_dict = self.controller.detail(self.req)
self.assertThat(resp_dict,
matchers.DictMatches(expected_response))
class AvailabilityZoneApiTestV2(AvailabilityZoneApiTestV21):
availability_zone = az_v2
def setUp(self):
super(AvailabilityZoneApiTestV2, self).setUp()
self.req = fakes.HTTPRequest.blank('', use_admin_context=True)
self.non_admin_req = fakes.HTTPRequest.blank('')
def test_availability_zone_detail_with_non_admin(self):
self.assertRaises(exception.AdminRequired,
self.controller.detail, self.non_admin_req)
class ServersControllerCreateTestV21(test.TestCase):
base_url = '/v2/fake/'
def setUp(self):
"""Shared implementation for tests below that create instance."""
super(ServersControllerCreateTestV21, self).setUp()
self.instance_cache_num = 0
self._set_up_controller()
def instance_create(context, inst):
inst_type = flavors.get_flavor_by_flavor_id(3)
image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
def_image_ref = 'http://localhost/images/%s' % image_uuid
self.instance_cache_num += 1
instance = fake_instance.fake_db_instance(**{
'id': self.instance_cache_num,
'display_name': inst['display_name'] or 'test',
'uuid': FAKE_UUID,
'instance_type': inst_type,
'access_ip_v4': '1.2.3.4',
'access_ip_v6': 'fead::1234',
'image_ref': inst.get('image_ref', def_image_ref),
'user_id': 'fake',
'project_id': 'fake',
'availability_zone': 'nova',
'reservation_id': inst['reservation_id'],
"created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
"updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
"progress": 0,
"fixed_ips": [],
"task_state": "",
"vm_state": "",
"root_device_name": inst.get('root_device_name', 'vda'),
})
return instance
fake.stub_out_image_service(self.stubs)
self.stubs.Set(db, 'instance_create', instance_create)
self.req = fakes.HTTPRequest.blank('')
def _set_up_controller(self):
ext_info = extension_info.LoadedExtensionInfo()
self.controller = servers_v21.ServersController(
extension_info=ext_info)
CONF.set_override('extensions_blacklist',
'os-availability-zone',
'osapi_v21')
self.no_availability_zone_controller = servers_v21.ServersController(
extension_info=ext_info)
def _verify_no_availability_zone(self, **kwargs):
self.assertNotIn('availability_zone', kwargs)
def _test_create_extra(self, params, controller):
image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
server = dict(name='server_test', imageRef=image_uuid, flavorRef=2)
server.update(params)
body = dict(server=server)
server = controller.create(self.req, body=body).obj['server']
def test_create_instance_with_availability_zone_disabled(self):
params = {'availability_zone': 'foo'}
old_create = compute_api.API.create
def create(*args, **kwargs):
self._verify_no_availability_zone(**kwargs)
return old_create(*args, **kwargs)
self.stubs.Set(compute_api.API, 'create', create)
self._test_create_extra(params, self.no_availability_zone_controller)
def _create_instance_with_availability_zone(self, zone_name):
def create(*args, **kwargs):
self.assertIn('availability_zone', kwargs)
self.assertEqual('nova', kwargs['availability_zone'])
return old_create(*args, **kwargs)
old_create = compute_api.API.create
self.stubs.Set(compute_api.API, 'create', create)
image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = ('http://localhost' + self.base_url + 'flavors/3')
body = {
'server': {
'name': 'server_test',
'imageRef': image_href,
'flavorRef': flavor_ref,
'metadata': {
'hello': 'world',
'open': 'stack',
},
'availability_zone': zone_name,
},
}
admin_context = context.get_admin_context()
db.service_create(admin_context, {'host': 'host1_zones',
'binary': "nova-compute",
'topic': 'compute',
'report_count': 0})
agg = db.aggregate_create(admin_context,
{'name': 'agg1'}, {'availability_zone': 'nova'})
db.aggregate_host_add(admin_context, agg['id'], 'host1_zones')
return self.req, body
def test_create_instance_with_availability_zone(self):
zone_name = 'nova'
req, body = self._create_instance_with_availability_zone(zone_name)
res = self.controller.create(req, body=body).obj
server = res['server']
self.assertEqual(fakes.FAKE_UUID, server['id'])
def test_create_instance_with_invalid_availability_zone_too_long(self):
zone_name = 'a' * 256
req, body = self._create_instance_with_availability_zone(zone_name)
self.assertRaises(exception.ValidationError,
self.controller.create, req, body=body)
def test_create_instance_with_invalid_availability_zone_too_short(self):
zone_name = ''
req, body = self._create_instance_with_availability_zone(zone_name)
self.assertRaises(exception.ValidationError,
self.controller.create, req, body=body)
def test_create_instance_with_invalid_availability_zone_not_str(self):
zone_name = 111
req, body = self._create_instance_with_availability_zone(zone_name)
self.assertRaises(exception.ValidationError,
self.controller.create, req, body=body)
def test_create_instance_without_availability_zone(self):
image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = ('http://localhost' + self.base_url + 'flavors/3')
body = {
'server': {
'name': 'server_test',
'imageRef': image_href,
'flavorRef': flavor_ref,
'metadata': {
'hello': 'world',
'open': 'stack',
},
},
}
res = self.controller.create(self.req, body=body).obj
server = res['server']
self.assertEqual(fakes.FAKE_UUID, server['id'])
class ServersControllerCreateTestV2(ServersControllerCreateTestV21):
def _set_up_controller(self):
ext_mgr = extensions.ExtensionManager()
ext_mgr.extensions = {'os-availability-zone': 'fake'}
self.controller = servers_v2.Controller(ext_mgr)
ext_mgr_no_az = extensions.ExtensionManager()
ext_mgr_no_az.extensions = {}
self.no_availability_zone_controller = servers_v2.Controller(
ext_mgr_no_az)
def _verify_no_availability_zone(self, **kwargs):
self.assertIsNone(kwargs['availability_zone'])
def test_create_instance_with_invalid_availability_zone_too_long(self):
# NOTE: v2.0 API does not check this bad request case.
# So we skip this test for v2.0 API.
pass
def test_create_instance_with_invalid_availability_zone_too_short(self):
# NOTE: v2.0 API does not check this bad request case.
# So we skip this test for v2.0 API.
pass
def test_create_instance_with_invalid_availability_zone_not_str(self):
# NOTE: v2.0 API does not check this bad request case.
# So we skip this test for v2.0 API.
pass
|
the-stack_106_15589
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the Policy class for mixed precision training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import six
from tensorflow.python.framework import dtypes
from tensorflow.python.keras import backend
from tensorflow.python.keras.engine import base_layer_utils
from tensorflow.python.platform import tf_logging
from tensorflow.python.training.experimental import loss_scale as loss_scale_module
from tensorflow.python.training.experimental import mixed_precision_global_state
from tensorflow.python.util.tf_export import keras_export
# Default value of certain arguments, indicating the default behavior for
# that argument should be used.
USE_DEFAULT = 'USE_DEFAULT'
@keras_export('keras.mixed_precision.experimental.Policy')
class Policy(object):
"""A dtype policy for a Keras layer.
A dtype policy determines dtype-related aspects of a layer, such as its
computation and variable dtypes. Each layer has a policy. Policies can be
passed to the `dtype` argument of layer constructors, or a global policy can
be set with `tf.keras.mixed_precision.experimental.set_policy`. A layer will
  default to the global policy if no policy is passed to its constructor.
For many models, each layer's policy will have the same compute dtype and
variable dtype, which will typically be float32. In this case, we refer to the
singular dtype as the layer's dtype, which can be queried by the property
`tf.keras.layers.Layer.dtype`.
When mixed precision training is used, most layers will instead have a float16
or bfloat16 compute dtype and a float32 variable dtype, and so the layer does
not have a single dtype. See [this
link](https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html)
for more information on mixed precision training. When the variable dtype does
not match the compute dtype, variables will be automatically casted to the
compute dtype to avoid type errors. In this case,
`tf.keras.layers.Layer.dtype` refers to the variable dtype, not the compute
dtype.
Certain policies also have a `tf.mixed_precision.experimental.LossScale`
  instance, which is used by `tf.keras.Model`s to perform loss scaling. Loss
scaling is a technique used with mixed precision to avoid numerical underflow
in float16 gradients. Loss scaling is only done by Models in `Model.fit`,
`Model.train_on_batch`, and similar methods. Layers which are not Models
ignore the loss scale.
Policies are constructed by passing a string to the constructor, e.g.
`tf.keras.mixed_precision.experimental.Policy('float32')`. The string
determines the compute and variable dtypes. It can be one of the following:
* Any dtype name, such as 'float32' or 'float64'. Both the variable and
compute dtypes will be that dtype. No loss scaling is done by default.
* 'mixed_float16' or 'mixed_bfloat16': The compute dtype is float16 or
bfloat16, while the variable dtype is float32. These policies are used for
mixed precision training. With 'mixed_float16', a dynamic loss scale is
used by default. 'mixed_bfloat16' does no loss scaling by default, as loss
scaling is unnecessary with bfloat16.
### How to use mixed precision in a Keras model
To use mixed precision in a Keras model, the `'mixed_float16'` or
`'mixed_bfloat16'` policy can be used.
`tf.keras.mixed_precision.experimental.set_policy` can be used to set the
default policy for layers if no policy is passed to them. For example:
```python
tf.keras.mixed_precision.experimental.set_policy('mixed_float16')
model = tf.keras.models.Sequential([
tf.keras.layers.Input((100,)),
# Dense layers use global policy of 'mixed_float16', which does
# computations in float16 while keeping variables in float32.
tf.keras.layers.Dense(10),
tf.keras.layers.Dense(10),
# Softmax should be done in float32 for numeric stability. We pass
# dtype='float32' to use float32 instead of the global policy.
tf.keras.layers.Activation('softmax', dtype='float32')
])
model.compile(...)
model.fit(...) # Train `model`
```
Alternatively, the policy can be passed to individual layers instead of
setting the global policy with `set_policy`:
```python
policy = tf.keras.mixed_precision.experimental.Policy('mixed_float16')
model = tf.keras.models.Sequential([
tf.keras.layers.Input((100,)),
tf.keras.layers.Dense(10, dtype=policy),
tf.keras.layers.Dense(10, dtype=policy),
# Softmax should be done in float32 for numeric stability.
tf.keras.layers.Activation('softmax', dtype='float32')
])
model.compile(...)
model.fit(...) # Train `model`
```
Note the `'mixed_float16'` policy will apply loss scaling by default in
`Model.fit`, `Model.train_on_batch`, and other training methods. If no such
method is used (e.g., a custom training loop is used) and `'mixed_float16'` is
used, the loss scale must be manually applied. See
`tf.keras.mixed_precision.experimental.LossScaleOptimizer` for details. For
`'mixed_bfloat16'`, no loss scaling is done and loss scaling never needs to be
manually applied.
### How to use float64 in a Keras model
Using float64 is similar to mixed precision. Either the global policy can be
set to float64, or `dtype='float64'` can be passed to individual layers. For
example, to set the global policy:
```python
tf.keras.mixed_precision.experimental.set_policy('float64')
model = tf.keras.models.Sequential([
tf.keras.layers.Input((100,)),
# All layers use global policy of 'float64', which does computations and
# creates variables in float64.
tf.keras.layers.Dense(10),
tf.keras.layers.Dense(10),
tf.keras.layers.Activation('softmax')
])
model.compile(...)
model.fit(...) # Train `model`
```
### How a layer uses its policy's compute dtype
A layer will cast its inputs to its compute dtype in TensorFlow 2. For
example:
```python
x = tf.ones((4, 4, 4, 4), dtype='float64')
# `layer`'s policy defaults to float32.
layer = tf.keras.layers.Conv2D(filters=4, kernel_size=2)
  # `layer` casts its inputs to its compute dtype, which is float32, and does
# computations in float32.
y = layer(x)
print(y.dtype) # float32
```
Currently, only tensors in the first argument to the layer's `call` method are
casted. For example:
```python
class MyLayer(tf.keras.layers.Layer):
# Bug! `b` will not be casted.
def call(self, a, b):
return a + 1., b + 1.
a = tf.constant(1., dtype="float32")
b = tf.constant(1., dtype="float32")
layer = MyLayer(dtype="float64")
x, y = layer(a, b)
print(x.dtype) # float64
print(y.dtype) # float32. Not casted since `b` was not passed to first input
```
It is recommended to accept tensors only in the first argument. This way, all
tensors are casted to the layer's compute dtype. `MyLayer` should therefore be
written as:
```python
class MyLayer(tf.keras.layers.Layer):
# Now, all tensor inputs will be casted.
def call(self, inputs):
a, b = inputs
return a + 1., b + 1.
a = tf.constant(1., dtype="float32")
b = tf.constant(1., dtype="float32")
layer = MyLayer(dtype="float64")
x, y = layer((a, b))
print(x.dtype) # float64
print(y.dtype) # float64.
```
Other arguments are not automatically casted for technical reasons, but this
may change in a future minor release.
A layer subclass can prevent its inputs from being autocasted by passing
`autocast=False` to the layer constructor. For example:
```python
class NonAutoCastingLayer(tf.keras.layers.Layer):
def __init__(self, **kwargs):
kwargs['autocast'] = False
super(NonAutoCastingLayer, self).__init__(**kwargs)
def call(self, inp):
return inp
x = tf.ones((4, 4, 4, 4), dtype='float32')
layer = NonAutoCastingLayer(dtype='float64')
  y = layer(x) # NonAutoCastingLayer will not cast inputs to its compute dtype of float64
print(y.dtype) # float32
```
### The deprecated "infer" policy
In addition to a dtype or "<dtype>_with_float32_vars", a policy can also be
"infer". This Policy is deprecated, and it is not recommended. When a layer
has an infer policy, it will infer the computation and variable dtype from
the first input the first time the layer is called.
Once the layer is called for the first time, the layer's policy will change to
the dtype of the first input.
Similarly to "infer", there is a deprecated "infer_with_float32_vars" policy
that infers the compute dtype, but not the variable dtype. Once a layer with
an "infer_with_float32_vars" policy is called for the first time, the layer's
policy will change to "<dtype>_with_float32_vars", where <dtype> is the dtype
of the first input. These policies force variables in float32.
Warning: Policies ending in "_with_float32_vars" will be removed in TensorFlow
2.1. Please use "mixed_float16" or "mixed_bfloat16" instead.
In TensorFlow 1, only the "infer" and "infer_with_float32_vars" policies are
available.
"""
# TODO(reedwm): Replace link in above docstring with a version that is more
# TensorFlow-specific, and that also mentions bfloat16.
# If True, warn when a policy is created whose name ends in
# "_with_float32_vars". We always want to warn when a user creates such a
# policy, but when the TensorFlow creates a policy, it suppresses the warning
# by setting this to False when creating the policy.
_warn_about_float32_vars = True
def __init__(self, name, loss_scale=USE_DEFAULT):
"""Constructs the policy.
The `name` argument determines the compute and variable dtype, and has no
additional effect on the Policy. The compute and variable dtypes can only be
specified through `name`, and cannot be specified directly.
Args:
name: A string. Can be one of the following values:
* Any dtype name, such as 'float32' or 'float64'. Both the variable and
compute dtypes will be that dtype.
* 'mixed_float16' or 'mixed_bfloat16': The compute dtype is float16 or
bfloat16, while the variable dtype is float32. With 'mixed_float16',
a dynamic loss scale is used. These policies are used for mixed
precision training.
* 'infer' (deprecated): Infer the compute and variable dtype from the
input dtype.
loss_scale: A `tf.mixed_precision.experimental.LossScale`, or a value
convertible to one such as "dynamic". Defaults to using no loss scaling
unless `name` is "mixed_float16", in which case this defaults to
"dynamic". Only `tf.keras.Model`s, not layers, use the loss scale, and
it is only used during `Model.fit`, `Model.train_on_batch`, and other
similar methods.
"""
if isinstance(name, dtypes.DType):
raise TypeError("'name' must be a string, not a DType. "
"Instead, pass DType.name. Got: %s" % (name.name,))
elif not isinstance(name, six.string_types):
raise TypeError("'name' must be a string, but got: %s" % (name,))
if name == 'infer_float32_vars':
# For backwards compatibility. TODO(reedwm): Remove this.
name = 'infer_with_float32_vars'
if name == 'float32_with_float32_vars':
# Doesn't affect correctness, but causes "float32" instead of
# "float32_with_float32_vars" to be printed in __repr__.
name = 'float32'
self._name = name
self._compute_dtype, self._variable_dtype = self._parse_name(name)
if name.endswith('_with_float32_vars') and self._warn_about_float32_vars:
warning = ("WARNING: The '%s' policy is deprecated and will be removed "
"in TensorFlow 2.1." % name)
if name == 'infer_with_float32_vars':
warning += (" Please use the 'mixed_float16' or 'mixed_bfloat16' "
"policy instead.")
elif name == 'float16_with_float32_vars':
warning += " Please use the 'mixed_float16' policy instead."
elif name == 'bfloat16_with_float32_vars':
warning += " Please use the 'mixed_bfloat16' policy instead."
tf_logging.warn(warning)
if loss_scale == USE_DEFAULT:
loss_scale = 'dynamic' if name == 'mixed_float16' else None
if loss_scale and self._compute_dtype not in (None, 'float16'):
tf_logging.warn('Creating a Policy with a loss scale is only useful for '
'float16 policies. You passed loss_scale=%r for policy '
'%s. Consider not passing any loss_scale instead.' %
(loss_scale, name))
self._loss_scale = loss_scale_module.get(loss_scale)
def _parse_name(self, name):
"""Parses a Policy name into a compute and variable dtype.
Args:
      name: The name of the policy.
Returns:
The (compute_dtype, variable_dtype) pair.
"""
if name == 'mixed_float16':
return 'float16', 'float32'
elif name == 'mixed_bfloat16':
return 'bfloat16', 'float32'
if name.endswith('_with_float32_vars'):
base_name = name[:-len('_with_float32_vars')]
float32_vars = True
else:
base_name = name
float32_vars = False
if base_name == 'infer':
base_dtype = None
else:
try:
base_dtype = dtypes.as_dtype(base_name).name
except TypeError:
error = ("Cannot convert value %s to a mixed precision Policy. "
"Valid policies include include 'mixed_float16', "
"'mixed_bfloat16', and the name of any dtype such as "
"'float32'." % (name,))
if float32_vars:
error += (' The value %s ends with _with_float32_vars, but %s cannot '
'be converted to a DType' % (name, base_name))
        # six.raise_from suppresses the original TypeError from being raised
six.raise_from(ValueError(error), None)
if float32_vars:
return base_dtype, 'float32'
else:
return base_dtype, base_dtype
@property
def variable_dtype(self):
"""The variable dtype of this policy.
This is the dtype layers will create their variables in, unless a layer
    explicitly chooses a different dtype. If this is different from
`Policy.compute_dtype` and both are non-None, Layers will cast variables to
the compute dtype to avoid type errors.
Returns:
The variable dtype of this policy, or None if the variable dtype should be
inferred from the inputs.
"""
return self._variable_dtype
@property
def compute_dtype(self):
"""The compute dtype of this policy.
This is the dtype layers will do their computations in.
Note that even if the compute dtype is float16 or bfloat16, hardware devices
may not do individual adds, multiplies, and other fundamental operations in
[b]float16, but instead may do some of them in float32 for numeric
stability. The compute dtype is the dtype of the inputs and outputs of the
TensorFlow ops that the layer executes. Internally, many TensorFlow ops will
do certain internal calculations in float32, or some other device-internal
intermediate format with higher precision than [b]float16, to increase
numeric stability.
For example, a `tf.keras.layers.Dense` layer, when run on a GPU with a
    float16 compute dtype, will pass float16 inputs to tf.matmul, but tf.matmul
    will use float32 intermediate math. The performance benefit of float16 is
still apparent, due to increased memory bandwidth and the fact modern GPUs
have specialized hardware for computing matmuls on float16 while still
keeping intermediate computations in float32.
Returns:
      The compute dtype of this policy, or None if the compute dtype should be
      inferred from the inputs.
"""
return self._compute_dtype
@property
def should_cast_variables(self):
"""Returns True if variables should be casted.
This is true if the variable dtype is not the same as the compute dtype.
Returns:
      True if variables should be cast.
"""
return self.variable_dtype != self.compute_dtype
@property
def loss_scale(self):
"""Returns the loss scale of this Policy.
Returns:
A `tf.mixed_precision.experimental.LossScale`, or None.
"""
return self._loss_scale
@property
def name(self):
"""Returns the name of this policy."""
return self._name
def __repr__(self):
return '<Policy "%s", loss_scale=%s>' % (self._name, self.loss_scale)
def with_input_dtype(policy, dtype):
"""Copies "infer" `policy`, adding `dtype` to it.
Policy must be "infer" or "infer_float32_vars" (i.e., has no compute dtype).
Returns a new policy with compute dtype `dtype`. The returned policy's
variable dtype is also `dtype` if `policy` is "infer", and is `float32` if
`policy` is "infer_with_float32_vars".
Args:
policy: An "infer" or "infer_float32_vars" policy
dtype: The dtype of an input to a layer.
Returns:
A new policy copied from `policy`, but with compute dtype and maybe
variable_dtype set to `dtype`.
"""
assert not policy.compute_dtype
dtype = dtypes.as_dtype(dtype).name
if policy.variable_dtype is None:
return Policy(dtype)
else:
# Policies without a compute dtype are either "infer" or
# "infer_with_float32_vars", so the variable_dtype must be float32 here.
assert policy.variable_dtype == 'float32'
try:
Policy._warn_about_float32_vars = False # pylint: disable=protected-access
return Policy(dtype + '_with_float32_vars')
finally:
Policy._warn_about_float32_vars = True # pylint: disable=protected-access
# The current global policy in effect. If None, it means the current value of
# floatx should be used as the policy if the V2 dtype behavior is enabled,
# or "infer" otherwise.
# TODO(reedwm): Make this thread local?
_global_policy = None
@keras_export('keras.mixed_precision.experimental.global_policy')
def global_policy():
"""Returns the global Policy.
The global policy is the default policy used for layers, if no policy is
passed to the layer constructor. If no policy has been set with
`keras.mixed_precision.experimental.set_policy`, this will return a policy
constructed from `tf.keras.backend.floatx()` in TensorFlow 2 (floatx defaults
to float32), or an "infer" policy in TensorFlow 1.
See `keras.mixed_precision.experimental.Policy` for more information.
Returns:
The global Policy.
"""
if _global_policy is None:
if base_layer_utils.v2_dtype_behavior_enabled():
return Policy(backend.floatx())
else:
return Policy('infer')
return _global_policy
def policy_defaults_to_floatx():
"""Returns True if `global_policy()` will use the current value of floatx."""
return _global_policy is None and base_layer_utils.v2_dtype_behavior_enabled()
def _check_if_mixed_precision_graph_rewrite_is_enabled():
# TODO(reedwm): Update this comment once the Keras API is complete.
if mixed_precision_global_state.mixed_precision_graph_rewrite_is_enabled:
raise ValueError(
'The mixed precision policy cannot be set, because the mixed '
'precision graph rewrite has already been enabled.\n'
'At most, one of the following functions can be called:\n\n'
' 1. tf.train.experimental.enable_mixed_precision_graph_rewrite() '
'(You called this first)\n'
' 2. tf.keras.mixed_precision.experimental.set_policy() (You called '
'this second)\n\n'
'You called both functions, which is an error, because both functions '
'enable you to use mixed precision. The first function enables mixed '
'precision in the graph with a graph rewrite. However it is currently '
'not very customizable, and does not support eager. The second '
'function is for Keras layers, but is not yet fully complete.')
@keras_export('keras.mixed_precision.experimental.set_policy')
def set_policy(policy):
"""Sets the global Policy.
The global policy is the default policy used for layers, if no policy is
passed to the layer constructor. If no global policy is set, layers will
instead default to a Policy constructed from `tf.keras.backend.floatx()` in
TensorFlow 2. In TensorFlow 1, layers default to an "infer" policy.
See `keras.mixed_precision.experimental.Policy` for more information.
Args:
    policy: A Policy, or a string that will be converted to a Policy.
"""
global _global_policy
_check_if_mixed_precision_graph_rewrite_is_enabled()
if policy is not None and not isinstance(policy, Policy):
policy = Policy(policy)
if (policy and not base_layer_utils.v2_dtype_behavior_enabled() and
policy.compute_dtype):
raise ValueError(
'The global policy can only be set to a non-infer policy in TensorFlow '
'2')
_global_policy = policy
mixed_precision_global_state.using_default_mixed_precision_policy = (
_global_policy is None)
# TODO(reedwm): Make this thread local
@contextlib.contextmanager
def policy_scope(policy):
"""A context manager that sets the global Policy under it.
Args:
    policy: A Policy, or a string that will be converted to a Policy.
Yields:
Nothing.
"""
old_policy = _global_policy
try:
set_policy(policy)
yield
finally:
set_policy(old_policy)
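# --- Illustrative sketch (not part of the original module) ---
# A standalone re-implementation of the name-parsing rule documented in
# Policy._parse_name above, so the mapping from a policy name to its
# (compute_dtype, variable_dtype) pair can be sanity-checked without
# importing TensorFlow. This is a simplified sketch (dtype validation is
# omitted), not the library API itself.
def _parse_policy_name_sketch(name):
    if name == 'mixed_float16':
        return 'float16', 'float32'
    if name == 'mixed_bfloat16':
        return 'bfloat16', 'float32'
    if name.endswith('_with_float32_vars'):
        base = name[:-len('_with_float32_vars')]
        return (None if base == 'infer' else base), 'float32'
    if name == 'infer':
        return None, None
    return name, name


assert _parse_policy_name_sketch('mixed_float16') == ('float16', 'float32')
assert _parse_policy_name_sketch('float32') == ('float32', 'float32')
assert _parse_policy_name_sketch('float16_with_float32_vars') == ('float16', 'float32')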
|
the-stack_106_15592
|
import random
import torch
#import torch.nn.functional as F
from torch.utils.tensorboard import SummaryWriter
#from tensorboardX import SummaryWriter
from plotting_utils import plot_alignment_to_numpy, plot_spectrogram_to_numpy
from plotting_utils import plot_gate_outputs_to_numpy, plot_scatter, plot_tsne
class Tacotron2Logger(SummaryWriter):
def __init__(self, logdir, use_vae):
        super(Tacotron2Logger, self).__init__(logdir)  # SummaryWriter only needs the log directory
self.use_vae = use_vae
#self.dataformat = 'CHW' # default argument for SummaryWriter.add_image
self.dataformat = 'HWC' # NVIDIA
def log_training(self, reduced_loss, grad_norm, learning_rate, duration,
padding_rate_txt, max_len_txt, padding_rate_mel, max_len_mel,
iteration, recon_loss='', kl_div='', kl_weight=''):
self.add_scalar("training.loss", reduced_loss, iteration)
self.add_scalar("grad.norm", grad_norm, iteration)
self.add_scalar("learning.rate", learning_rate, iteration)
self.add_scalar("duration", duration, iteration)
self.add_scalar("padding.rate.txt", padding_rate_txt, iteration)
self.add_scalar("max.len.txt", max_len_txt, iteration)
self.add_scalar("padding.rate.mel", padding_rate_mel, iteration)
self.add_scalar("max.len.mel", max_len_mel, iteration)
if self.use_vae:
self.add_scalar("kl_div", kl_div, iteration)
self.add_scalar("kl_weight", kl_weight, iteration)
self.add_scalar("weighted_kl_loss", kl_weight*kl_div, iteration)
self.add_scalar("recon_loss", recon_loss, iteration)
def log_validation(self, reduced_loss, model, y, y_pred, iteration):
self.add_scalar("validation.loss", reduced_loss, iteration)
if self.use_vae:
_, mel_outputs, gate_outputs, alignments, mus, _, _, emotions = y_pred
else:
_, mel_outputs, gate_outputs, alignments = y_pred
mel_targets, gate_targets = y
#print('emotion:\n{}'.format(emotions))
# plot distribution of parameters
for tag, value in model.named_parameters():
tag = tag.replace('.', '/')
self.add_histogram(tag, value.data.cpu().numpy(), iteration)
# plot alignment, mel target and predicted, gate target and predicted
idx = random.randint(0, alignments.size(0) - 1)
self.add_image(
"alignment",
plot_alignment_to_numpy(alignments[idx].data.cpu().numpy().T),
iteration, dataformats=self.dataformat)
self.add_image(
"mel_target",
plot_spectrogram_to_numpy(mel_targets[idx].data.cpu().numpy()),
iteration, dataformats=self.dataformat)
self.add_image(
"mel_predicted",
plot_spectrogram_to_numpy(mel_outputs[idx].data.cpu().numpy()),
iteration, dataformats=self.dataformat)
self.add_image(
"gate",
plot_gate_outputs_to_numpy(
gate_targets[idx].data.cpu().numpy(),
torch.sigmoid(gate_outputs[idx]).data.cpu().numpy()),
iteration, dataformats=self.dataformat)
if self.use_vae:
self.add_image(
"latent_dim (regular)",
plot_scatter(mus, emotions),
iteration, dataformats=self.dataformat)
self.add_image(
"latent_dim (t-sne)",
plot_tsne(mus, emotions),
iteration, dataformats=self.dataformat)
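# --- Illustrative usage sketch (not part of the original module) ---
# Minimal example of driving the logger above; the log directory and the
# scalar values below are placeholders chosen purely for illustration.
if __name__ == "__main__":
    demo_logger = Tacotron2Logger(logdir="runs/demo", use_vae=False)
    demo_logger.log_training(
        reduced_loss=0.5, grad_norm=1.0, learning_rate=1e-3, duration=0.8,
        padding_rate_txt=0.1, max_len_txt=120, padding_rate_mel=0.2,
        max_len_mel=800, iteration=1)
    demo_logger.close()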
|
the-stack_106_15594
|
# -*- coding: utf-8 -*-
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Generated by synthtool. DO NOT EDIT!
from __future__ import absolute_import
import os
import pathlib
import shutil
import nox
BLACK_VERSION = "black==19.10b0"
BLACK_PATHS = ["docs", "google", "tests", "noxfile.py", "setup.py"]
DEFAULT_PYTHON_VERSION = "3.8"
SYSTEM_TEST_PYTHON_VERSIONS = ["3.8"]
UNIT_TEST_PYTHON_VERSIONS = ["3.6", "3.7", "3.8", "3.9"]
CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute()
# 'docfx' is excluded since it only needs to run in 'docs-presubmit'
nox.options.sessions = [
"unit",
"system",
"cover",
"lint",
"lint_setup_py",
"blacken",
"docs",
]
# Error if a python version is missing
nox.options.error_on_missing_interpreters = True
@nox.session(python=DEFAULT_PYTHON_VERSION)
def lint(session):
"""Run linters.
Returns a failure if the linters find linting errors or sufficiently
serious code quality issues.
"""
session.install("flake8", BLACK_VERSION)
session.run(
"black", "--check", *BLACK_PATHS,
)
session.run("flake8", "google", "tests")
@nox.session(python=DEFAULT_PYTHON_VERSION)
def blacken(session):
"""Run black. Format code to uniform standard."""
session.install(BLACK_VERSION)
session.run(
"black", *BLACK_PATHS,
)
@nox.session(python=DEFAULT_PYTHON_VERSION)
def lint_setup_py(session):
"""Verify that setup.py is valid (including RST check)."""
session.install("docutils", "pygments")
session.run("python", "setup.py", "check", "--restructuredtext", "--strict")
def default(session):
# Install all test dependencies, then install this package in-place.
constraints_path = str(
CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt"
)
session.install("asyncmock", "pytest-asyncio", "-c", constraints_path)
session.install("mock", "pytest", "pytest-cov", "-c", constraints_path)
session.install("-e", ".", "-c", constraints_path)
# Run py.test against the unit tests.
session.run(
"py.test",
"--quiet",
f"--junitxml=unit_{session.python}_sponge_log.xml",
"--cov=google/cloud",
"--cov=tests/unit",
"--cov-append",
"--cov-config=.coveragerc",
"--cov-report=",
"--cov-fail-under=0",
os.path.join("tests", "unit"),
*session.posargs,
)
@nox.session(python=UNIT_TEST_PYTHON_VERSIONS)
def unit(session):
"""Run the unit test suite."""
default(session)
@nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS)
def system(session):
"""Run the system test suite."""
constraints_path = str(
CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt"
)
system_test_path = os.path.join("tests", "system.py")
system_test_folder_path = os.path.join("tests", "system")
# Check the value of `RUN_SYSTEM_TESTS` env var. It defaults to true.
if os.environ.get("RUN_SYSTEM_TESTS", "true") == "false":
session.skip("RUN_SYSTEM_TESTS is set to false, skipping")
# Install pyopenssl for mTLS testing.
if os.environ.get("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true":
session.install("pyopenssl")
system_test_exists = os.path.exists(system_test_path)
system_test_folder_exists = os.path.exists(system_test_folder_path)
# Sanity check: only run tests if found.
if not system_test_exists and not system_test_folder_exists:
session.skip("System tests were not found")
# Use pre-release gRPC for system tests.
session.install("--pre", "grpcio")
# Install all test dependencies, then install this package into the
# virtualenv's dist-packages.
session.install("mock", "pytest", "google-cloud-testutils", "-c", constraints_path)
session.install("-e", ".", "-c", constraints_path)
# Run py.test against the system tests.
if system_test_exists:
session.run(
"py.test",
"--quiet",
f"--junitxml=system_{session.python}_sponge_log.xml",
system_test_path,
*session.posargs,
)
if system_test_folder_exists:
session.run(
"py.test",
"--quiet",
f"--junitxml=system_{session.python}_sponge_log.xml",
system_test_folder_path,
*session.posargs,
)
@nox.session(python=DEFAULT_PYTHON_VERSION)
def cover(session):
"""Run the final coverage report.
This outputs the coverage report aggregating coverage from the unit
test runs (not system test runs), and then erases coverage data.
"""
session.install("coverage", "pytest-cov")
session.run("coverage", "report", "--show-missing", "--fail-under=100")
session.run("coverage", "erase")
@nox.session(python=DEFAULT_PYTHON_VERSION)
def docs(session):
"""Build the docs for this library."""
session.install("-e", ".")
session.install("sphinx", "alabaster", "recommonmark")
shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True)
session.run(
"sphinx-build",
"-W", # warnings as errors
"-T", # show full traceback on exception
"-N", # no colors
"-b",
"html",
"-d",
os.path.join("docs", "_build", "doctrees", ""),
os.path.join("docs", ""),
os.path.join("docs", "_build", "html", ""),
)
@nox.session(python=DEFAULT_PYTHON_VERSION)
def docfx(session):
"""Build the docfx yaml files for this library."""
session.install("-e", ".")
session.install("sphinx", "alabaster", "recommonmark", "gcp-sphinx-docfx-yaml")
shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True)
session.run(
"sphinx-build",
"-T", # show full traceback on exception
"-N", # no colors
"-D",
(
"extensions=sphinx.ext.autodoc,"
"sphinx.ext.autosummary,"
"docfx_yaml.extension,"
"sphinx.ext.intersphinx,"
"sphinx.ext.coverage,"
"sphinx.ext.napoleon,"
"sphinx.ext.todo,"
"sphinx.ext.viewcode,"
"recommonmark"
),
"-b",
"html",
"-d",
os.path.join("docs", "_build", "doctrees", ""),
os.path.join("docs", ""),
os.path.join("docs", "_build", "html", ""),
)
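# --- Illustrative invocations (not part of the generated noxfile) ---
# Typical ways the sessions defined above are run locally, assuming `nox`
# is installed on the PATH:
#
#   nox -s lint            # flake8 + black --check
#   nox -s unit-3.8        # unit tests on a single Python version
#   nox -s docs            # Sphinx HTML build
#
# `nox -l` lists every session registered in this file.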
|
the-stack_106_15595
|
# SPDX-License-Identifier: Apache-2.0
"""Unit Tests for tf.cond and tf.case."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from backend_test_base import Tf2OnnxBackendTestBase
from common import unittest_main, check_opset_min_version, check_tf_min_version
# pylint: disable=missing-docstring,invalid-name,unused-argument,using-constant-test
# pylint: disable=abstract-method,arguments-differ
class CondTests(Tf2OnnxBackendTestBase):
def test_simple_cond(self):
x_val = np.array([1, 2, 3], dtype=np.float32)
y_val = np.array([4, 5, 6], dtype=np.float32)
def func(x, y):
x = x + 1
y = y + 1
res = tf.cond(x[0] < y[0], lambda: x + y, lambda: x - y, name="test_cond")
return tf.identity(res, name="output")
feed_dict = {"input_1:0": x_val, "input_2:0": y_val}
input_names_with_port = ["input_1:0", "input_2:0"]
output_names_with_port = ["output:0"]
self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port)
def test_cond_with_const_branch(self):
x_val = np.array([1, 2, 3], dtype=np.float32)
y_val = np.array([4, 5, 6], dtype=np.float32)
def func(x, y):
true_const = tf.constant(True, name="true_const", dtype=tf.bool)
def cond_graph():
return tf.constant(np.array([2, 1, 3], dtype=np.float32), name="b", dtype=tf.float32)
res = tf.cond(true_const, lambda: x + y, cond_graph, name="test_cond")
return tf.identity(res, name="output")
feed_dict = {"input_1:0": x_val, "input_2:0": y_val}
input_names_with_port = ["input_1:0", "input_2:0"]
output_names_with_port = ["output:0"]
self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port)
def test_cond_with_multi_merge(self):
x_val = np.array([1, 2, 3], dtype=np.float32)
y_val = np.array([4, 5, 6], dtype=np.float32)
def func(x, y):
x = x + 1
y = y + 1
res = tf.cond(x[0] < y[0], lambda: [x, x + y], lambda: [x, x - y], name="test")
return tf.identity(res, name="output")
feed_dict = {"input_1:0": x_val, "input_2:0": y_val}
input_names_with_port = ["input_1:0", "input_2:0"]
output_names_with_port = ["output:0"]
self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port)
def test_cond_with_replicate_output(self):
x_val = np.array([1, 2, 3], dtype=np.float32)
y_val = np.array([4, 5, 6], dtype=np.float32)
def func(x, y):
x = x + 1
y = y + 1
res = tf.cond(x[0] < y[0], lambda: [x, y], lambda: [y, x], name="test_cond")
return tf.identity(res, name="output")
feed_dict = {"input_1:0": x_val, "input_2:0": y_val}
input_names_with_port = ["input_1:0", "input_2:0"]
output_names_with_port = ["output:0"]
self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port)
def test_nest_cond(self):
x_val = np.array([1, 2, 3], dtype=np.float32)
y_val = np.array([4, 5, 6], dtype=np.float32)
def func(x, y):
x = x + 1
y = y + 1
def cond_graph():
def cond_graph1():
def cond_graph2():
return tf.cond(x[0] < y[0], lambda: x + y, lambda: tf.square(y))
return tf.cond(tf.reduce_any(x < y), cond_graph2, cond_graph2)
return tf.cond(x[0] > y[0], cond_graph1, cond_graph1)
res = tf.cond(x[0] < y[0], cond_graph, cond_graph, name="test_cond")
return tf.identity(res, name="output")
feed_dict = {"input_1:0": x_val, "input_2:0": y_val}
input_names_with_port = ["input_1:0", "input_2:0"]
output_names_with_port = ["output:0"]
self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port)
def test_while_loop_in_cond(self):
x_val = np.array([1, 2, 3], dtype=np.float32)
y_val = np.array([4, 5, 6], dtype=np.float32)
def func(x, y):
def true_fn():
return [x]
def false_fn():
# b = tf.constant(np.array([0], dtype=np.int32), dtype=tf.int32)
# while_loop
c = lambda y: tf.reduce_any(tf.less(y, 10))
b = lambda i: tf.add(y, 1)
return tf.while_loop(c, b, [y])
res = tf.cond(x[0] < y[0], true_fn, false_fn, name="test_cond")
return tf.identity(res, name="output")
feed_dict = {"input_1:0": x_val, "input_2:0": y_val}
input_names_with_port = ["input_1:0", "input_2:0"]
output_names_with_port = ["output:0"]
self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port)
def test_cond_in_while_loop(self):
def func(i, inputs):
inputs_2 = tf.identity(inputs)
input_ta = tf.TensorArray(dtype=tf.float32, size=0, dynamic_size=True).unstack(inputs_2)
output_ta = tf.TensorArray(dtype=tf.float32, size=0, dynamic_size=True)
c = lambda i, *_: tf.logical_and(tf.less(i, 10), i >= 0)
def b(i, out_ta):
new_i = tf.add(i, 1)
x = input_ta.read(i)
x = tf.cond(x > 0, lambda: x - 1, lambda: x + 3)
out_ta_new = out_ta.write(i, x)
return new_i, out_ta_new
i_final, out_final = tf.while_loop(c, b, [i, output_ta])
return tf.identity(i_final, name="i"), tf.identity(out_final.stack(), name="output_ta")
input_names_with_port = ["input_1:0", "input_2:0"]
feed_dict = {"input_1:0": np.array(0, dtype=np.int32),
"input_2:0": np.array([2.0, 16.0, 5.0, 1.6, 5.0, 6.0, 7.0, 8.0, 9.0, 10.], dtype=np.float32)}
output_names_with_port = ["i:0", "output_ta:0"]
self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, rtol=1e-06)
def test_simple_case(self):
x_val = np.array([1, 2, 3], dtype=np.float32)
y_val = np.array([4, 5, 6], dtype=np.float32)
def func(x, y):
x = tf.add(x, 1, name="add_x")
y = tf.add(y, 1, name="add_y")
res = tf.case([(tf.reduce_all(x < 1, name="red1"), lambda: x + y),
(tf.reduce_all(y > 0, name="red2"), lambda: tf.square(y))],
default=lambda: x, name="test_case")
return tf.identity(res, name="output")
feed_dict = {"input_1:0": x_val, "input_2:0": y_val}
input_names_with_port = ["input_1:0", "input_2:0"]
output_names_with_port = ["output:0"]
self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port)
def test_case_with_exclusive(self):
x_val = np.array([1, 2, 3], dtype=np.float32)
y_val = np.array([4, 5, 6], dtype=np.float32)
def func(x, y):
x = x + 1
y = y + 1
res = tf.case([(tf.reduce_all(x < 1), lambda: x + y), (tf.reduce_all(y > 0), lambda: tf.square(y))],
default=lambda: x, name="test_case", exclusive=True)
return tf.identity(res, name="output")
feed_dict = {"input_1:0": x_val, "input_2:0": y_val}
input_names_with_port = ["input_1:0", "input_2:0"]
output_names_with_port = ["output:0"]
self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port)
def test_case_without_default_branch(self):
def func(x, y):
x = tf.add(x, 1, name="add_x")
y = tf.add(y, 1, name="add_y")
res = tf.case([(tf.reduce_all(x < 1), lambda: x + y),
(tf.reduce_all(y > 0), lambda: tf.square(y))])
return tf.identity(res, name="output")
x_val = np.array([1, 2, 3], dtype=np.float32)
y_val = np.array([4, 5, 6], dtype=np.float32)
feed_dict = {"input_1:0": x_val, "input_2:0": y_val}
input_names_with_port = ["input_1:0", "input_2:0"]
output_names_with_port = ["output:0"]
self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port)
def test_case_with_multi_merge(self):
x_val = np.array([1, 2, 3], dtype=np.float32)
y_val = np.array([4, 5, 6], dtype=np.float32)
def func(x, y):
x = x + 1
y = y + 1
res = tf.case(
[(tf.reduce_all(x < 1), lambda: [x + y, x - y]),
(tf.reduce_all(y > 0), lambda: [tf.abs(x), tf.square(y)])],
default=lambda: [x, y], name="test_case"
)
return tf.identity(res, name="output")
feed_dict = {"input_1:0": x_val, "input_2:0": y_val}
input_names_with_port = ["input_1:0", "input_2:0"]
output_names_with_port = ["output:0"]
self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port)
def test_nest_case(self):
x_val = np.array([1, 2, 3], dtype=np.float32)
y_val = np.array([4, 5, 6], dtype=np.float32)
def func(x, y):
x = x + 1
y = y + 1
def case_graph():
return tf.case(
[(tf.reduce_all(x < 1), lambda: x + y), (tf.reduce_all(y > 0), lambda: tf.square(y))],
default=lambda: x - y,
name="test_case")
res = tf.case([(x[0] > 0, case_graph), (x[0] < 0, case_graph)], default=lambda: x - y)
return tf.identity(res, name="output")
feed_dict = {"input_1:0": x_val, "input_2:0": y_val}
input_names_with_port = ["input_1:0", "input_2:0"]
output_names_with_port = ["output:0"]
self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port)
@check_tf_min_version("1.8", "shape inference for Reshape op screws up")
@check_opset_min_version(9, "ConstantOfShape")
def test_cond_with_different_output_shape(self):
input_shape = (10, 5, 20)
def func(inputs, shape):
# cheat onnx shape inference
inputs = tf.reshape(inputs, shape)
def pad_tensor(t, length):
"""Pads the input tensor with 0s along the first dimension up to the length.
Args:
t: the input tensor, assuming the rank is at least 1.
length: a tensor of shape [1] or an integer, indicating the first dimension
of the input tensor t after padding, assuming length <= t.shape[0].
Returns:
padded_t: the padded tensor, whose first dimension is length. If the length
is an integer, the first dimension of padded_t is set to length
statically.
"""
t_rank = tf.rank(t)
t_shape = tf.shape(t)
t_d0 = t_shape[0]
pad_d0 = tf.expand_dims(length - t_d0, 0)
pad_shape = tf.cond(
# shape is [3], depending on input shape
tf.greater(t_rank, 1), lambda: tf.concat([pad_d0, t_shape[1:]], 0),
# shape is always [1]
lambda: tf.expand_dims(length - t_d0, 0))
padded_t = tf.concat([t, tf.zeros(pad_shape, dtype=t.dtype)], 0)
return padded_t
output = pad_tensor(inputs, 20)
return tf.identity(output, name="output")
input_names_with_port = ["input:0", "shape:0"]
feed_dict = {
"input:0": np.ones(input_shape, dtype=np.float32),
"shape:0": np.array(input_shape, dtype=np.int32)
}
output_names_with_port = ["output:0"]
self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, rtol=1e-06)
if __name__ == '__main__':
unittest_main()
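# --- Illustrative invocation notes (not part of the test suite) ---
# These cases are normally run through the tf2onnx test harness, either by
# executing this file directly or by selecting a single case with the
# standard unittest dotted syntax (the file path below is an assumption
# about where this module lives in the repository):
#
#   python tests/test_cond.py CondTests.test_simple_cond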
|
the-stack_106_15597
|
"""TF: Tensorflow parser"""
from __future__ import absolute_import as _abs
from __future__ import print_function
import os
from tensorflow.core.framework import graph_pb2
from tvm.contrib import util
class TFParser(object):
"""
A Wrapper to handle tensorflow models parsing, TensorFlow is needed
Parameters
----------
model_dir : tensorflow frozen pb file or a directory that contains saved
model or checkpoints.
Examples
--------
.. code-block:: python
        parser = TFParser(model_dir)
graph = parser.parse()
        # graph is the GraphDef of the parsed model
"""
def __init__(self, model_dir):
self._tmp_dir = util.tempdir()
self._model_dir = model_dir
self._graph = graph_pb2.GraphDef()
def _set_graph(self, graph):
"""Set Graph"""
self._graph = graph
def _get_graph(self):
"""Get Graph"""
return self._graph
def _load_pb_file(self):
"""Load single pb file"""
graph = self._get_graph()
with open(self._model_dir, "rb") as f:
graph.ParseFromString(f.read())
return graph
def _get_tag_set(self):
"""Return the tag set of saved model, multiple metagraphs are not supported"""
try:
from tensorflow.contrib.saved_model.python.saved_model import reader
except ImportError:
raise ImportError(
"InputConfiguration: Unable to import saved_model.reader which is "
"required to get tag set from saved model.")
tag_sets = reader.get_saved_model_tag_sets(self._model_dir)
return tag_sets[0]
def _get_output_names(self):
"""Return the concatenated output names"""
try:
import tensorflow as tf
except ImportError:
raise ImportError(
"InputConfiguration: Unable to import tensorflow which is "
"required to restore from saved model.")
tags = self._get_tag_set()
with tf.Session() as sess:
meta_graph_def = tf.saved_model.loader.load(sess,
tags,
self._model_dir)
output_names = set()
for k in meta_graph_def.signature_def.keys():
outputs_tensor_info = meta_graph_def.signature_def[k].outputs
for output_tensor in outputs_tensor_info.values():
output_names.add(output_tensor.name)
output_names = [i.replace(":0", "") for i in output_names]
return ",".join(output_names)
def _load_saved_model(self):
"""Load the tensorflow saved model."""
try:
from tensorflow.python.tools import freeze_graph
from tensorflow.python.framework import ops
from tensorflow.python.framework import graph_util
except ImportError:
raise ImportError(
"InputConfiguration: Unable to import tensorflow which is "
"required to restore from saved model.")
saved_model_dir = self._model_dir
output_graph_filename = self._tmp_dir.relpath("tf_frozen_model.pb")
input_saved_model_dir = saved_model_dir
output_node_names = self._get_output_names()
input_binary = False
input_saver_def_path = False
restore_op_name = None
filename_tensor_name = None
clear_devices = True
input_meta_graph = False
checkpoint_path = None
input_graph_filename = None
saved_model_tags = ",".join(self._get_tag_set())
freeze_graph.freeze_graph(input_graph_filename, input_saver_def_path,
input_binary, checkpoint_path, output_node_names,
restore_op_name, filename_tensor_name,
output_graph_filename, clear_devices, "", "", "",
input_meta_graph, input_saved_model_dir,
saved_model_tags)
with ops.Graph().as_default():
output_graph_def = graph_pb2.GraphDef()
with open(output_graph_filename, "rb") as f:
output_graph_def.ParseFromString(f.read())
output_graph_def = graph_util.remove_training_nodes(output_graph_def)
return output_graph_def
def _load_ckpt(self):
"""TODO: Load checkpoint model."""
raise RuntimeError("InputConfiguration: Loading tf checkpoint model is "
"not supported yet.")
def parse(self):
"""
Parse tensorflow models: checkpoints, saved models, and single frozen pb file.
Returns
-------
GraphDef of the passed model
"""
graph = None
if os.path.isdir(self._model_dir):
ckpt = os.path.join(self._model_dir, "checkpoint")
if not os.path.isfile(ckpt):
if not os.path.isdir(os.path.join(self._model_dir, "variables")):
raise RuntimeError("InputConfiguration: Invalid model path.")
graph = self._load_saved_model()
else:
graph = self._load_ckpt()
elif os.path.isfile(self._model_dir):
# Only .pb or .pbtxt is a valid suffix name.
if self._model_dir.endswith(".pb") or \
self._model_dir.endswith(".pbtxt"):
cur_dir = os.path.dirname(self._model_dir)
else:
raise RuntimeError("InputConfiguration: Invalid model format.")
# It is a saved model if `variables` directory is present at the
# same directory with the pb or pbtxt file.
if os.path.isdir(os.path.join(cur_dir, "variables")):
self._model_dir = cur_dir
graph = self._load_saved_model()
else:
graph = self._load_pb_file()
else:
raise RuntimeError("InputConfiguration: Unrecognized model "
"file or path.")
self._set_graph(graph)
return graph
|
the-stack_106_15598
|
import tensorflow as tf
from PIL import Image
import numpy as np
import os
train_path = './fashion_image_label/fashion_train_jpg_60000/'
train_txt = './fashion_image_label/fashion_train_jpg_60000.txt'
x_train_savepath = './fashion_image_label/fashion_x_train.npy'
y_train_savepath = './fashion_image_label/fahion_y_train.npy'
test_path = './fashion_image_label/fashion_test_jpg_10000/'
test_txt = './fashion_image_label/fashion_test_jpg_10000.txt'
x_test_savepath = './fashion_image_label/fashion_x_test.npy'
y_test_savepath = './fashion_image_label/fashion_y_test.npy'
def generateds(path, txt):
f = open(txt, 'r')
    contents = f.readlines()  # read the file line by line
f.close()
x, y_ = [], []
for content in contents:
        value = content.split()  # split on whitespace: [image filename, label]
img_path = path + value[0]
if not os.path.exists(img_path):
continue
img = Image.open(img_path)
img = np.array(img.convert('L'))
img = img / 255.
x.append(img)
y_.append(value[1])
print('loading : ' + content)
x = np.array(x)
y_ = np.array(y_)
y_ = y_.astype(np.int64)
return x, y_
if os.path.exists(x_train_savepath) and os.path.exists(y_train_savepath) and os.path.exists(
x_test_savepath) and os.path.exists(y_test_savepath):
print('-------------Load Datasets-----------------')
x_train_save = np.load(x_train_savepath)
y_train = np.load(y_train_savepath)
x_test_save = np.load(x_test_savepath)
y_test = np.load(y_test_savepath)
x_train = np.reshape(x_train_save, (len(x_train_save), 28, 28))
x_test = np.reshape(x_test_save, (len(x_test_save), 28, 28))
else:
print('-------------Generate Datasets-----------------')
x_train, y_train = generateds(train_path, train_txt)
x_test, y_test = generateds(test_path, test_txt)
print('-------------Save Datasets-----------------')
x_train_save = np.reshape(x_train, (len(x_train), -1))
x_test_save = np.reshape(x_test, (len(x_test), -1))
np.save(x_train_savepath, x_train_save)
np.save(y_train_savepath, y_train)
np.save(x_test_savepath, x_test_save)
np.save(y_test_savepath, y_test)
model = tf.keras.models.Sequential([
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(128, activation='relu'),
tf.keras.layers.Dense(10, activation='softmax')
])
model.compile(optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
metrics=['sparse_categorical_accuracy'])
model.fit(x_train, y_train, batch_size=32, epochs=5, validation_data=(x_test, y_test), validation_freq=1)
model.summary()
|
the-stack_106_15599
|
import random
import pickle
import numpy as np
import torch
M = 2 ** 32 - 1
def init_fn(worker):
seed = torch.LongTensor(1).random_().item()
seed = (seed + worker) % M
np.random.seed(seed)
random.seed(seed)
def add_mask(x, mask, dim=1):
mask = mask.unsqueeze(dim)
    shape = list(x.shape)
    shape[dim] += 21
new_x = x.new(*shape).zero_()
new_x = new_x.scatter_(dim, mask, 1.0)
s = [slice(None)] * len(shape)
s[dim] = slice(21, None)
    new_x[tuple(s)] = x  # index with a tuple of slices (list-of-slices indexing is deprecated)
return new_x
def sample(x, size):
# https://gist.github.com/yoavram/4134617
i = random.sample(range(x.shape[0]), size)
return torch.tensor(x[i], dtype=torch.int16)
# x = np.random.permutation(x)
# return torch.tensor(x[:size])
def pkload(fname):
with open(fname, 'rb') as f:
return pickle.load(f)
_shape = (240, 240, 155)
def get_all_coords(stride):
return torch.tensor(
np.stack([v.reshape(-1) for v in
np.meshgrid(
*[stride // 2 + np.arange(0, s, stride) for s in _shape],
indexing='ij')],
-1), dtype=torch.int16)
_zero = torch.tensor([0])
def gen_feats():
x, y, z = 240, 240, 155
feats = np.stack(
np.meshgrid(
np.arange(x), np.arange(y), np.arange(z),
indexing='ij'), -1).astype('float32')
shape = np.array([x, y, z])
feats -= shape / 2.0
feats /= shape
return feats
|
the-stack_106_15600
|
# coding: utf-8
"""
job.py
~~~~~~
This module implements the main Job api for Viki
:license: Apache2, see LICENSE for more details
"""
import ast
import os
import subprocess
import json
import uuid
from typing import Any, Dict, Tuple, List, IO, Optional, Union
from vikid import fs as filesystem
class Job:
""" Job library for viki """
debug = False
def __init__(self):
""" Initialize jobs handler
Vars for use:
home: Viki's home directory. Usually /usr/local/viki
jobs_path: Path to Viki's jobs directory. Usually /usr/local/viki/jobs
job_config_filename: Name of the config for each individual job. Usually 'config.json'
"""
# TODO Move this to a central place so all classes can use it
# Change to just /home/viki eventually
self.home: str = os.path.expanduser("~") + "/.viki"
# Path to the jobs directory relative to self.home
self.jobs_path: str = self.home + "/" + "jobs"
# Path to the jobs STDOUT file
self.job_output_file: str = "output.txt"
# Name of job configuration file
self.job_config_filename: str = "config.json"
# --- Job internals
@staticmethod
def _quote_string(string: str, single_quote: bool = True) -> str:
""" Takes a string and returns it
back surrounded by quotes
"""
quote: str
if single_quote:
quote = "'"
else:
quote = '"'
return quote + string + quote
def _run_shell_command(self, command: str, output_filename: str,
job_arguments: Optional[List[str]] = None) -> Tuple[bool, int]:
""" _run_shell_command
string:command Shell command to run
string:file path Where the command results (stdout) are stored
array:arguments to be given to the command
Runs the given command and stores results in a file
Returns Tuple (True|False, Return code)
"""
output_file_obj: IO[Any]
sh_script_name: str
child_process: List[str]
# Generate output file for run results
output_file_obj = open(output_filename, 'a')
# Generate a tmp sh file to run command from
sh_script_name = 'viki-' + str(uuid.uuid4())
with open(sh_script_name, 'w') as sh_script_obj:
sh_script_obj.write(command)
sh_script_obj.close()
# Create the bash command
child_process = [u'/bin/bash', u'-xe', sh_script_name]
# If the job was passed any args, send them into the child process as well
if job_arguments is not None and len(job_arguments) > 0:
for argument in job_arguments:
child_process.append(str(argument))
# *!* DEBUG - show the list that is about to get piped into Popen
if self.debug:
print('Func: _run_shell_command; Var: child_process: ' + str(child_process))
process = subprocess.Popen(
child_process,
stdout=output_file_obj,
stderr=subprocess.STDOUT
)
        # Block until the child process finishes
        return_code = process.wait()
output_file_obj.close()
filesystem.dirty_rm_rf(sh_script_name)
return (True, return_code) if return_code == 0 else (False, return_code)
# --- Job functions
def get_jobs(self) -> Dict[str, Any]:
"""
List jobs in /usr/local/viki/jobs
Takes no parameters
"""
message: str = "Ok"
        success: int = 1
        jobs_list: List[str] = []
        try:
            # Get all job dirs
            jobs_dir_ls = next(os.walk(self.jobs_path))
            jobs_list = jobs_dir_ls[1]
except OSError as error:
message = str(error)
success = 0
ret: Dict[str, Any] = {"success": success, "message": message, "jobs": jobs_list}
return ret
def get_job_by_name(self, job_name: str) -> Dict[str, Any]:
"""
Get details of a single job by name
string:name Name of specific job
"""
message: str = "Ok"
success: int = 1
contents: str = ""
try:
if job_name is None:
raise ValueError('Missing required field: job_name')
job_dir: str = self.jobs_path + "/" + job_name
if os.path.isdir(job_dir) and os.path.exists(job_dir + "/" + self.job_config_filename):
contents = filesystem.read_job_file(job_dir + "/" + self.job_config_filename)
else:
raise OSError('Job directory not found')
except (OSError, ValueError) as error:
message = str(error)
success = 0
ret: Dict[str, Any] = {"success": success, "message": message, "name": job_name, "config_json": contents}
return ret
def output_job(self, name: str) -> Dict[str, Any]:
"""
Get the output file of a specific job and return the contents of the file
"""
message: str = "Ok"
success: int = 1
contents: str = ""
try:
if name is None:
raise ValueError('Missing required field: job_name')
job_directory: str = self.jobs_path + "/" + name
output_file: str = job_directory + "/" + self.job_output_file
if os.path.isdir(job_directory) and os.path.exists(output_file):
contents = filesystem.read_last_run_output(output_file)
else:
raise OSError('Job directory not found')
except (OSError, ValueError) as error:
message = str(error)
success = 0
return {"success": success, "message": message, "name": name, "output": contents}
def create_job(self, new_name: str, data: Dict[str, Union[str, int]]) -> Dict[str, Any]:
""" Adds a job """
message: str = "Job created successfully"
success: int = 1
try:
# Generate path and file name
job_dir: str = self.jobs_path + "/" + new_name
job_filename: str = job_dir + "/" + self.job_config_filename
            # Bail out if the job directory already exists
if os.path.exists(job_dir):
raise SystemError('Job directory already exists')
else:
os.mkdir(job_dir)
if 'description' not in data.keys():
raise ValueError('Missing description')
if 'steps' not in data.keys():
raise ValueError('Missing steps')
data['runNumber'] = 0
data['lastSuccessfulRun'] = 0
data['lastFailedRun'] = 0
data['name'] = new_name
# Create job file
filesystem.write_job_file(job_filename, data)
except (ValueError, SystemError) as error:
message = str(error)
success = 0
ret = {"success": success, "message": message}
return ret
def update_job(self, name: str, data: Dict[str, Any]) -> Dict[str, Any]:
""" Update an existing job """
message: str = "Job successfully updated"
success: int = 1
try:
            # TODO: validate the required fields (description, steps) in `data`
            # Find the job
if not filesystem.job_exists(name):
raise ValueError('Job {} not found'.format(name))
            # TODO: merge `data` into the existing config file and write it back
except ValueError as error:
message = str(error)
success = 0
return {"success": success, "message": message}
def run_job(self, name: str, job_args: Optional[List[str]] = None):
""" Run a specific job """
message: str = "Run successful"
success: int = 1
return_code: int = 0
# Construct job directory and file path names
job_dir: str = self.jobs_path + "/" + name
job_config_json_file: str = job_dir + "/" + "config.json"
# Generate a tmp directory to work in
        # Use uuid4() because it is random, takes no arguments and, unlike
        # uuid1(), does not embed the host's network address.
tmp_cwd: str = "/tmp/viki-" + str(uuid.uuid4())
os.mkdir(tmp_cwd)
try:
# Check job directory exists
# Otherwise raise OSError
if not os.path.isdir(job_dir):
raise OSError('Job not found')
# Check config json file exists
# Otherwise raise OSError
if not os.path.isfile(job_config_json_file):
raise OSError('Job file not found')
# Read the file and load the json inside it
# Otherwise raise OSError
job_json = json.loads(filesystem.read_job_file(job_config_json_file))
if job_json is False or job_json is None:
raise OSError('Job file could not be read')
# Create filename path for output file
# todo: Move this to store the output in each individual build dir
filename: str = job_dir + "/" + "output.txt"
# Grab the json array "steps" from jobs/<jobName>/config.json
job_steps: str = job_json['steps']
# Execute them individually
# If any of these steps fail then we stop execution
for step in job_steps:
success_bool, return_code = self._run_shell_command(step, filename, job_args)
# If unsuccessful stop execution
if not success_bool:
raise SystemError('Build step failed')
except (OSError, subprocess.CalledProcessError, SystemError) as error:
message = str(error)
success = 0
except KeyError:
message = 'Job has no steps'
success = 0
# Clean up tmp workdir
filesystem.dirty_rm_rf(tmp_cwd)
return {"success": success, "message": message, "return_code": return_code}
def delete_job(self, name: str) -> Dict[str, Any]:
""" Removes a job by name
Takes a job's name and removes the directory that the job lives in
"""
message: str = "Job deleted"
success: int = 1
try:
job_dir: str = self.jobs_path + '/' + name
# Check job directory exists
# Otherwise raise OSError
if not os.path.isdir(job_dir):
raise OSError('Job not found')
# Remove the job directory
filesystem.dirty_rm_rf(job_dir)
except (OSError, ValueError) as error:
message = str(error)
success = 0
return {"success": success, "message": message}
|
the-stack_106_15603
|
"""
AMRVAC-specific fields
"""
import functools
import numpy as np
from yt import mylog
from yt.fields.field_info_container import FieldInfoContainer
from yt.fields.magnetic_field import setup_magnetic_field_aliases
from yt.units import dimensions
# We need to specify which fields we might have in our dataset. The field info
# container subclass here will define which fields it knows about. There are
# optionally methods on it that get called which can be subclassed.
direction_aliases = {
"cartesian": ("x", "y", "z"),
"polar": ("r", "theta", "z"),
"cylindrical": ("r", "z", "theta"),
"spherical": ("r", "theta", "phi"),
}
def _velocity(field, data, idir, prefix=None):
"""Velocity = linear momentum / density"""
# This is meant to be used with functools.partial to produce
# functions with only 2 arguments (field, data)
# idir : int
# the direction index (1, 2 or 3)
# prefix : str
# used to generalize to dust fields
if prefix is None:
prefix = ""
moment = data["gas", "%smoment_%d" % (prefix, idir)]
rho = data["gas", "%sdensity" % prefix]
mask1 = rho == 0
if mask1.any():
mylog.info(
"zeros found in %sdensity, patching them to compute corresponding velocity field.",
prefix,
)
mask2 = moment == 0
if not ((mask1 & mask2) == mask1).all():
raise RuntimeError
rho[mask1] = 1
return moment / rho
code_density = "code_mass / code_length**3"
code_moment = "code_mass / code_length**2 / code_time"
code_pressure = "code_mass / code_length / code_time**2"
# for now, define a finite family of dust fields (up to 100 species, should be enough)
MAXN_DUST_SPECIES = 100
known_dust_fields = [
("rhod%d" % idust, (code_density, ["dust%d_density" % idust], None))
for idust in range(1, MAXN_DUST_SPECIES + 1)
]
for idir in (1, 2, 3):
known_dust_fields += [
(
"m%dd%d" % (idir, idust),
(code_moment, ["dust%d_moment_%d" % (idust, idir)], None),
)
for idust in range(1, MAXN_DUST_SPECIES + 1)
]
class AMRVACFieldInfo(FieldInfoContainer):
# format: (native(?) field, (units, [aliases], display_name))
# note: aliases will correspond to "gas" typed fields, whereas the native ones are "amrvac" typed
known_other_fields = tuple(
list(
(
("rho", (code_density, ["density"], None)),
("m1", (code_moment, ["moment_1"], None)),
("m2", (code_moment, ["moment_2"], None)),
("m3", (code_moment, ["moment_3"], None)),
("e", (code_pressure, ["energy_density"], None)),
("b1", ("code_magnetic", ["magnetic_1"], None)),
("b2", ("code_magnetic", ["magnetic_2"], None)),
("b3", ("code_magnetic", ["magnetic_3"], None)),
)
)
+ known_dust_fields
# in python3, there is no need for this tuple+list conversion, it suffices to write
# known_other_fields = (..., *known_dust_fields)
)
known_particle_fields = ()
def _setup_velocity_fields(self, idust=None):
if idust is None:
dust_flag = dust_label = ""
else:
dust_flag = "d%d" % idust
dust_label = "dust%d_" % idust
us = self.ds.unit_system
for idir, alias in enumerate(direction_aliases[self.ds.geometry], start=1):
if not ("amrvac", "m%d%s" % (idir, dust_flag)) in self.field_list:
break
velocity_fn = functools.partial(_velocity, idir=idir, prefix=dust_label)
functools.update_wrapper(velocity_fn, _velocity)
self.add_field(
("gas", "%svelocity_%s" % (dust_label, alias)),
function=velocity_fn,
units=us["velocity"],
dimensions=dimensions.velocity,
sampling_type="cell",
)
self.alias(
("gas", "%svelocity_%d" % (dust_label, idir)),
("gas", "%svelocity_%s" % (dust_label, alias)),
units=us["velocity"],
)
self.alias(
("gas", "%smoment_%s" % (dust_label, alias)),
("gas", "%smoment_%d" % (dust_label, idir)),
units=us["density"] * us["velocity"],
)
def _setup_dust_fields(self):
idust = 1
while ("amrvac", "rhod%d" % idust) in self.field_list:
if idust > MAXN_DUST_SPECIES:
mylog.error(
"Only the first %d dust species are currently read by yt. "
"If you read this, please consider issuing a ticket. ",
MAXN_DUST_SPECIES,
)
break
self._setup_velocity_fields(idust)
idust += 1
n_dust_found = idust - 1
us = self.ds.unit_system
if n_dust_found > 0:
def _total_dust_density(field, data):
tot = np.zeros_like(data["density"])
for idust in range(1, n_dust_found + 1):
tot += data["dust%d_density" % idust]
return tot
self.add_field(
("gas", "total_dust_density"),
function=_total_dust_density,
dimensions=dimensions.density,
units=us["density"],
sampling_type="cell",
)
def dust_to_gas_ratio(field, data):
return data["total_dust_density"] / data["density"]
self.add_field(
("gas", "dust_to_gas_ratio"),
function=dust_to_gas_ratio,
dimensions=dimensions.dimensionless,
sampling_type="cell",
)
def setup_fluid_fields(self):
setup_magnetic_field_aliases(self, "amrvac", ["mag%s" % ax for ax in "xyz"])
self._setup_velocity_fields() # gas velocities
self._setup_dust_fields() # dust derived fields (including velocities)
# fields with nested dependencies are defined thereafter by increasing level of complexity
us = self.ds.unit_system
def _kinetic_energy_density(field, data):
# devnote : have a look at issue 1301
return 0.5 * data["gas", "density"] * data["gas", "velocity_magnitude"] ** 2
self.add_field(
("gas", "kinetic_energy_density"),
function=_kinetic_energy_density,
units=us["density"] * us["velocity"] ** 2,
dimensions=dimensions.density * dimensions.velocity ** 2,
sampling_type="cell",
)
# magnetic energy density
if ("amrvac", "b1") in self.field_list:
def _magnetic_energy_density(field, data):
emag = 0.5 * data["gas", "magnetic_1"] ** 2
for idim in "23":
if not ("amrvac", "b%s" % idim) in self.field_list:
break
emag += 0.5 * data["gas", "magnetic_%s" % idim] ** 2
# important note: in AMRVAC the magnetic field is defined in units where mu0 = 1,
# such that
# Emag = 0.5*B**2 instead of Emag = 0.5*B**2 / mu0
# To correctly transform the dimensionality from gauss**2 -> rho*v**2, we have to
# take mu0 into account. If we divide here, units when adding the field should be
# us["density"]*us["velocity"]**2. If not, they should be us["magnetic_field"]**2
# and division should happen elsewhere.
emag /= 4 * np.pi
# divided by mu0 = 4pi in cgs, yt handles 'mks' and 'code' unit systems internally.
return emag
self.add_field(
("gas", "magnetic_energy_density"),
function=_magnetic_energy_density,
units=us["density"] * us["velocity"] ** 2,
dimensions=dimensions.density * dimensions.velocity ** 2,
sampling_type="cell",
)
# Adding the thermal pressure field.
# In AMRVAC we have multiple physics possibilities:
# - if HD/MHD + energy equation, pressure is (gamma-1)*(e - ekin (- emag)) for (M)HD
# - if HD/MHD but solve_internal_e is true in parfile, pressure is (gamma-1)*e for both
# - if (m)hd_energy is false in parfile (isothermal), pressure is c_adiab * rho**gamma
def _full_thermal_pressure_HD(field, data):
# important note : energy density and pressure are actually expressed in the same unit
pthermal = (data.ds.gamma - 1) * (
data["gas", "energy_density"] - data["gas", "kinetic_energy_density"]
)
return pthermal
def _full_thermal_pressure_MHD(field, data):
pthermal = (
_full_thermal_pressure_HD(field, data)
- (data.ds.gamma - 1) * data["gas", "magnetic_energy_density"]
)
return pthermal
def _polytropic_thermal_pressure(field, data):
return (data.ds.gamma - 1) * data["gas", "energy_density"]
def _adiabatic_thermal_pressure(field, data):
return data.ds._c_adiab * data["gas", "density"] ** data.ds.gamma
pressure_recipe = None
if ("amrvac", "e") in self.field_list:
if self.ds._e_is_internal:
pressure_recipe = _polytropic_thermal_pressure
mylog.info("Using polytropic EoS for thermal pressure.")
elif ("amrvac", "b1") in self.field_list:
pressure_recipe = _full_thermal_pressure_MHD
mylog.info("Using full MHD energy for thermal pressure.")
else:
pressure_recipe = _full_thermal_pressure_HD
mylog.info("Using full HD energy for thermal pressure.")
elif self.ds._c_adiab is not None:
pressure_recipe = _adiabatic_thermal_pressure
mylog.info("Using adiabatic EoS for thermal pressure (isothermal).")
mylog.warning(
"If you used usr_set_pthermal you should redefine the thermal_pressure field."
)
if pressure_recipe is not None:
self.add_field(
("gas", "thermal_pressure"),
function=pressure_recipe,
units=us["density"] * us["velocity"] ** 2,
dimensions=dimensions.density * dimensions.velocity ** 2,
sampling_type="cell",
)
# sound speed and temperature depend on thermal pressure
def _sound_speed(field, data):
return np.sqrt(
data.ds.gamma
* data["gas", "thermal_pressure"]
/ data["gas", "density"]
)
self.add_field(
("gas", "sound_speed"),
function=_sound_speed,
units=us["velocity"],
dimensions=dimensions.velocity,
sampling_type="cell",
)
else:
mylog.warning(
"e not found and no parfile passed, can not set thermal_pressure."
)
|
the-stack_106_15604
|
# -*- coding: utf-8 -*-
"""
sphinx.builders.gettext
~~~~~~~~~~~~~~~~~~~~~~~
The MessageCatalogBuilder class.
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import unicode_literals
from codecs import open
from collections import defaultdict, OrderedDict
from datetime import datetime, tzinfo, timedelta
from os import path, walk, getenv
from time import time
from uuid import uuid4
from six import StringIO
from sphinx.builders import Builder
from sphinx.domains.python import pairindextypes
from sphinx.errors import ThemeError
from sphinx.locale import __
from sphinx.util import split_index_msg, logging, status_iterator
from sphinx.util.console import bold # type: ignore
from sphinx.util.i18n import find_catalog
from sphinx.util.nodes import extract_messages, traverse_translatable_index
from sphinx.util.osutil import relpath, ensuredir, canon_path
from sphinx.util.tags import Tags
if False:
# For type annotation
from typing import Any, DefaultDict, Dict, Iterable, List, Set, Tuple, Union # NOQA
from docutils import nodes # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.util.i18n import CatalogInfo # NOQA
from sphinx.util.typing import unicode # NOQA
logger = logging.getLogger(__name__)
POHEADER = r"""
# SOME DESCRIPTIVE TITLE.
# Copyright (C) %(copyright)s
# This file is distributed under the same license as the %(project)s package.
# FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.
#
#, fuzzy
msgid ""
msgstr ""
"Project-Id-Version: %(project)s %(version)s\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: %(ctime)s\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <[email protected]>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"""[1:]
class Catalog:
"""Catalog of translatable messages."""
def __init__(self):
# type: () -> None
self.messages = [] # type: List[unicode]
# retain insertion order, a la OrderedDict
self.metadata = OrderedDict() # type: Dict[unicode, List[Tuple[unicode, int, unicode]]] # NOQA
# msgid -> file, line, uid
def add(self, msg, origin):
# type: (unicode, Union[nodes.Element, MsgOrigin]) -> None
if not hasattr(origin, 'uid'):
# Nodes that are replicated like todo don't have a uid,
# however i18n is also unnecessary.
return
if msg not in self.metadata: # faster lookup in hash
self.messages.append(msg)
self.metadata[msg] = []
self.metadata[msg].append((origin.source, origin.line, origin.uid))
class MsgOrigin:
"""
Origin holder for Catalog message origin.
"""
def __init__(self, source, line):
# type: (unicode, int) -> None
self.source = source
self.line = line
self.uid = uuid4().hex
class I18nTags(Tags):
"""Dummy tags module for I18nBuilder.
    To translate all text inside of ``only`` nodes, this class
    always returns True, even if no tags are defined.
"""
def eval_condition(self, condition):
# type: (Any) -> bool
return True
class I18nBuilder(Builder):
"""
General i18n builder.
"""
name = 'i18n'
versioning_method = 'text'
versioning_compare = None # type: bool
# be set by `gettext_uuid`
use_message_catalog = False
def init(self):
# type: () -> None
super(I18nBuilder, self).init()
self.env.set_versioning_method(self.versioning_method,
self.env.config.gettext_uuid)
self.tags = I18nTags()
self.catalogs = defaultdict(Catalog) # type: DefaultDict[unicode, Catalog]
def get_target_uri(self, docname, typ=None):
# type: (unicode, unicode) -> unicode
return ''
def get_outdated_docs(self):
# type: () -> Set[unicode]
return self.env.found_docs
def prepare_writing(self, docnames):
# type: (Set[unicode]) -> None
return
def compile_catalogs(self, catalogs, message):
# type: (Set[CatalogInfo], unicode) -> None
return
def write_doc(self, docname, doctree):
# type: (unicode, nodes.document) -> None
catalog = self.catalogs[find_catalog(docname, self.config.gettext_compact)]
for node, msg in extract_messages(doctree):
catalog.add(msg, node)
if 'index' in self.env.config.gettext_additional_targets:
# Extract translatable messages from index entries.
for node, entries in traverse_translatable_index(doctree):
for typ, msg, tid, main, key_ in entries:
for m in split_index_msg(typ, msg):
if typ == 'pair' and m in pairindextypes.values():
                            # avoid incorporating the built-in translated messages
                            # from 'sphinx.util.nodes.process_index_entry'
continue
catalog.add(m, node)
# determine tzoffset once to remain unaffected by DST change during build
timestamp = time()
tzdelta = datetime.fromtimestamp(timestamp) - \
datetime.utcfromtimestamp(timestamp)
# set timestamp from SOURCE_DATE_EPOCH if set
# see https://reproducible-builds.org/specs/source-date-epoch/
source_date_epoch = getenv('SOURCE_DATE_EPOCH')
if source_date_epoch is not None:
timestamp = float(source_date_epoch)
tzdelta = timedelta(0)
class LocalTimeZone(tzinfo):
def __init__(self, *args, **kw):
# type: (Any, Any) -> None
super(LocalTimeZone, self).__init__(*args, **kw) # type: ignore
self.tzdelta = tzdelta
def utcoffset(self, dt):
# type: (datetime) -> timedelta
return self.tzdelta
def dst(self, dt):
# type: (datetime) -> timedelta
return timedelta(0)
ltz = LocalTimeZone()
def should_write(filepath, new_content):
# type: (unicode, unicode) -> bool
if not path.exists(filepath):
return True
try:
with open(filepath, 'r', encoding='utf-8') as oldpot: # type: ignore
old_content = oldpot.read()
old_header_index = old_content.index('"POT-Creation-Date:')
new_header_index = new_content.index('"POT-Creation-Date:')
old_body_index = old_content.index('"PO-Revision-Date:')
new_body_index = new_content.index('"PO-Revision-Date:')
return ((old_content[:old_header_index] != new_content[:new_header_index]) or
(new_content[new_body_index:] != old_content[old_body_index:]))
except ValueError:
pass
return True
class MessageCatalogBuilder(I18nBuilder):
"""
Builds gettext-style message catalogs (.pot files).
"""
name = 'gettext'
epilog = __('The message catalogs are in %(outdir)s.')
def init(self):
# type: () -> None
super(MessageCatalogBuilder, self).init()
self.create_template_bridge()
self.templates.init(self)
def _collect_templates(self):
# type: () -> Set[unicode]
template_files = set()
for template_path in self.config.templates_path:
tmpl_abs_path = path.join(self.app.srcdir, template_path)
for dirpath, dirs, files in walk(tmpl_abs_path):
for fn in files:
if fn.endswith('.html'):
filename = canon_path(path.join(dirpath, fn))
template_files.add(filename)
return template_files
def _extract_from_template(self):
# type: () -> None
files = list(self._collect_templates())
files.sort()
logger.info(bold(__('building [%s]: ') % self.name), nonl=1)
logger.info(__('targets for %d template files'), len(files))
extract_translations = self.templates.environment.extract_translations
for template in status_iterator(files, __('reading templates... '), "purple", # type: ignore # NOQA
len(files), self.app.verbosity):
try:
with open(template, 'r', encoding='utf-8') as f: # type: ignore
context = f.read()
for line, meth, msg in extract_translations(context):
origin = MsgOrigin(template, line)
self.catalogs['sphinx'].add(msg, origin)
except Exception as exc:
raise ThemeError('%s: %r' % (template, exc))
def build(self, docnames, summary=None, method='update'):
# type: (Iterable[unicode], unicode, unicode) -> None
self._extract_from_template()
super(MessageCatalogBuilder, self).build(docnames, summary, method)
def finish(self):
# type: () -> None
super(MessageCatalogBuilder, self).finish()
data = {
'version': self.config.version,
'copyright': self.config.copyright,
'project': self.config.project,
'ctime': datetime.fromtimestamp(
timestamp, ltz).strftime('%Y-%m-%d %H:%M%z'),
}
for textdomain, catalog in status_iterator(self.catalogs.items(), # type: ignore
__("writing message catalogs... "),
"darkgreen", len(self.catalogs),
self.app.verbosity,
lambda textdomain__: textdomain__[0]):
# noop if config.gettext_compact is set
ensuredir(path.join(self.outdir, path.dirname(textdomain)))
pofn = path.join(self.outdir, textdomain + '.pot')
output = StringIO()
output.write(POHEADER % data) # type: ignore
for message in catalog.messages:
positions = catalog.metadata[message]
if self.config.gettext_location:
# generate "#: file1:line1\n#: file2:line2 ..."
output.write("#: %s\n" % "\n#: ".join( # type: ignore
"%s:%s" % (canon_path(relpath(source, self.outdir)), line)
for source, line, _ in positions))
if self.config.gettext_uuid:
# generate "# uuid1\n# uuid2\n ..."
output.write("# %s\n" % "\n# ".join( # type: ignore
uid for _, _, uid in positions))
# message contains *one* line of text ready for translation
message = message.replace('\\', r'\\'). \
replace('"', r'\"'). \
replace('\n', '\\n"\n"')
output.write('msgid "%s"\nmsgstr ""\n\n' % message) # type: ignore
content = output.getvalue()
if should_write(pofn, content):
with open(pofn, 'w', encoding='utf-8') as pofile: # type: ignore
pofile.write(content)
def setup(app):
# type: (Sphinx) -> Dict[unicode, Any]
app.add_builder(MessageCatalogBuilder)
app.add_config_value('gettext_compact', True, 'gettext')
app.add_config_value('gettext_location', True, 'gettext')
app.add_config_value('gettext_uuid', False, 'gettext')
app.add_config_value('gettext_auto_build', True, 'env')
app.add_config_value('gettext_additional_targets', [], 'env')
return {
'version': 'builtin',
'parallel_read_safe': True,
'parallel_write_safe': True,
}
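# Usage sketch (an assumption about typical invocation, not part of this
# module): once the builder is registered via setup(), message catalogs can be
# generated from a project's source directory with something like
#
#   sphinx-build -b gettext docs docs/_build/gettext
#
# The resulting .pot files land in the builder's outdir, one per text domain.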
|
the-stack_106_15605
|
import http.server
import socketserver
from urllib.parse import urlparse
import json
from searchPath import searchPath
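# Binding to port 80 usually requires elevated privileges; pick a high port
# (e.g. 8080) when running unprivileged.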
PORT = 80
def solve(id1: int, id2: int) -> str:
    """Search for paths between the two node ids and return them as a JSON string."""
    paths = searchPath(id1, id2)
    return json.dumps(paths)
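# searchPath is a project-local module; solve() assumes whatever it returns
# (presumably a list of node-id paths) is JSON-serializable.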
class Handler(http.server.BaseHTTPRequestHandler):
def do_GET(self):
parsed_url = urlparse(self.path)
if parsed_url.path == '/semifinal':
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
            query = dict(kv_pair.split('=', 1) for kv_pair in parsed_url.query.split('&'))
            id1 = int(query['id1'])
            id2 = int(query['id2'])
            result = solve(id1, id2)
            self.wfile.write(result.encode('utf-8'))
        else:
            # Unknown paths get an explicit 404 instead of leaving the client hanging.
            self.send_error(404)
httpd = socketserver.TCPServer(('', PORT), Handler)
print('serving at port', PORT)
httpd.serve_forever()
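# A minimal sketch of how the endpoint might be exercised once the server is
# running (the ids are placeholders):
#
#   curl 'http://localhost/semifinal?id1=1&id2=2'
#
# The response body is the JSON string produced by solve().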
|
the-stack_106_15606
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import inspect
import os
import shutil
import textwrap
import unittest
from datetime import datetime
from tempfile import NamedTemporaryFile, mkdtemp
from mock import patch, ANY
from airflow import models
from airflow.configuration import conf
from airflow.utils.dag_processing import SimpleTaskInstance
from airflow.models import DagModel, DagBag, TaskInstance as TI
from airflow.utils.db import create_session
from airflow.utils.state import State
from airflow.utils.timezone import utc
from tests.models import TEST_DAGS_FOLDER, DEFAULT_DATE
from tests.test_utils.config import conf_vars
import airflow.example_dags
class DagBagTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.empty_dir = mkdtemp()
@classmethod
def tearDownClass(cls):
shutil.rmtree(cls.empty_dir)
def test_get_existing_dag(self):
"""
Test that we're able to parse some example DAGs and retrieve them
"""
dagbag = models.DagBag(dag_folder=self.empty_dir, include_examples=True)
some_expected_dag_ids = ["example_bash_operator",
"example_branch_operator"]
for dag_id in some_expected_dag_ids:
dag = dagbag.get_dag(dag_id)
self.assertIsNotNone(dag)
self.assertEqual(dag_id, dag.dag_id)
self.assertGreaterEqual(dagbag.size(), 7)
def test_get_non_existing_dag(self):
"""
        test that retrieving a non-existing dag id returns None without crashing
"""
dagbag = models.DagBag(dag_folder=self.empty_dir, include_examples=False)
non_existing_dag_id = "non_existing_dag_id"
self.assertIsNone(dagbag.get_dag(non_existing_dag_id))
def test_dont_load_example(self):
"""
        test that the examples are not loaded
"""
dagbag = models.DagBag(dag_folder=self.empty_dir, include_examples=False)
self.assertEqual(dagbag.size(), 0)
def test_safe_mode_heuristic_match(self):
"""With safe mode enabled, a file matching the discovery heuristics
should be discovered.
"""
with NamedTemporaryFile(dir=self.empty_dir, suffix=".py") as fp:
fp.write("# airflow".encode())
fp.write("# DAG".encode())
fp.flush()
with conf_vars({('core', 'dags_folder'): self.empty_dir}):
dagbag = models.DagBag(include_examples=False, safe_mode=True)
self.assertEqual(len(dagbag.dagbag_stats), 1)
self.assertEqual(
dagbag.dagbag_stats[0].file,
"/{}".format(os.path.basename(fp.name)))
def test_safe_mode_heuristic_mismatch(self):
"""With safe mode enabled, a file not matching the discovery heuristics
should not be discovered.
"""
with NamedTemporaryFile(dir=self.empty_dir, suffix=".py"):
with conf_vars({('core', 'dags_folder'): self.empty_dir}):
dagbag = models.DagBag(include_examples=False, safe_mode=True)
self.assertEqual(len(dagbag.dagbag_stats), 0)
def test_safe_mode_disabled(self):
"""With safe mode disabled, an empty python file should be discovered.
"""
with NamedTemporaryFile(dir=self.empty_dir, suffix=".py") as fp:
with conf_vars({('core', 'dags_folder'): self.empty_dir}):
dagbag = models.DagBag(include_examples=False, safe_mode=False)
self.assertEqual(len(dagbag.dagbag_stats), 1)
self.assertEqual(
dagbag.dagbag_stats[0].file,
"/{}".format(os.path.basename(fp.name)))
def test_process_file_that_contains_multi_bytes_char(self):
"""
        test that we're able to parse a file that contains a multi-byte char
"""
f = NamedTemporaryFile()
f.write('\u3042'.encode('utf8')) # write multi-byte char (hiragana)
f.flush()
dagbag = models.DagBag(dag_folder=self.empty_dir, include_examples=False)
self.assertEqual([], dagbag.process_file(f.name))
def test_zip_skip_log(self):
"""
        test that loading a DAG from within a zip file logs and skips another file
        inside the archive because it doesn't contain "airflow" and "DAG"
"""
from mock import Mock
with patch('airflow.models.DagBag.log') as log_mock:
log_mock.info = Mock()
test_zip_path = os.path.join(TEST_DAGS_FOLDER, "test_zip.zip")
dagbag = models.DagBag(dag_folder=test_zip_path, include_examples=False)
self.assertTrue(dagbag.has_logged)
log_mock.info.assert_any_call("File %s assumed to contain no DAGs. Skipping.",
test_zip_path)
def test_zip(self):
"""
test the loading of a DAG within a zip file that includes dependencies
"""
dagbag = models.DagBag(dag_folder=self.empty_dir, include_examples=False)
dagbag.process_file(os.path.join(TEST_DAGS_FOLDER, "test_zip.zip"))
self.assertTrue(dagbag.get_dag("test_zip_dag"))
def test_process_file_cron_validity_check(self):
"""
test if an invalid cron expression
as schedule interval can be identified
"""
invalid_dag_files = ["test_invalid_cron.py", "test_zip_invalid_cron.zip"]
dagbag = models.DagBag(dag_folder=self.empty_dir, include_examples=False)
self.assertEqual(len(dagbag.import_errors), 0)
for d in invalid_dag_files:
dagbag.process_file(os.path.join(TEST_DAGS_FOLDER, d))
self.assertEqual(len(dagbag.import_errors), len(invalid_dag_files))
@patch.object(DagModel, 'get_current')
def test_get_dag_without_refresh(self, mock_dagmodel):
"""
Test that, once a DAG is loaded, it doesn't get refreshed again if it
        hasn't expired.
"""
dag_id = 'example_bash_operator'
mock_dagmodel.return_value = DagModel()
mock_dagmodel.return_value.last_expired = None
mock_dagmodel.return_value.fileloc = 'foo'
class TestDagBag(models.DagBag):
process_file_calls = 0
def process_file(self, filepath, only_if_updated=True, safe_mode=True):
if 'example_bash_operator.py' == os.path.basename(filepath):
TestDagBag.process_file_calls += 1
super(TestDagBag, self).process_file(filepath, only_if_updated, safe_mode)
dagbag = TestDagBag(include_examples=True)
# Should not call process_file again, since it's already loaded during init.
self.assertEqual(1, dagbag.process_file_calls)
self.assertIsNotNone(dagbag.get_dag(dag_id))
self.assertEqual(1, dagbag.process_file_calls)
def test_get_dag_fileloc(self):
"""
Test that fileloc is correctly set when we load example DAGs,
specifically SubDAGs and packaged DAGs.
"""
dagbag = models.DagBag(dag_folder=self.empty_dir, include_examples=True)
dagbag.process_file(os.path.join(TEST_DAGS_FOLDER, "test_zip.zip"))
expected = {
'example_bash_operator': 'airflow/example_dags/example_bash_operator.py',
'example_subdag_operator': 'airflow/example_dags/example_subdag_operator.py',
'example_subdag_operator.section-1': 'airflow/example_dags/subdags/subdag.py',
'test_zip_dag': 'dags/test_zip.zip/test_zip.py'
}
for dag_id, path in expected.items():
dag = dagbag.get_dag(dag_id)
self.assertTrue(dag.fileloc.endswith(path))
@patch.object(DagModel, "get_current")
def test_refresh_py_dag(self, mock_dagmodel):
"""
Test that we can refresh an ordinary .py DAG
"""
EXAMPLE_DAGS_FOLDER = airflow.example_dags.__path__[0]
dag_id = "example_bash_operator"
fileloc = os.path.realpath(
os.path.join(EXAMPLE_DAGS_FOLDER, "example_bash_operator.py")
)
mock_dagmodel.return_value = DagModel()
mock_dagmodel.return_value.last_expired = datetime.max.replace(
tzinfo=utc
)
mock_dagmodel.return_value.fileloc = fileloc
class TestDagBag(DagBag):
process_file_calls = 0
def process_file(self, filepath, only_if_updated=True, safe_mode=True):
if filepath == fileloc:
TestDagBag.process_file_calls += 1
return super(TestDagBag, self).process_file(filepath, only_if_updated, safe_mode)
dagbag = TestDagBag(dag_folder=self.empty_dir, include_examples=True)
self.assertEqual(1, dagbag.process_file_calls)
dag = dagbag.get_dag(dag_id)
self.assertIsNotNone(dag)
self.assertEqual(dag_id, dag.dag_id)
self.assertEqual(2, dagbag.process_file_calls)
@patch.object(DagModel, "get_current")
def test_refresh_packaged_dag(self, mock_dagmodel):
"""
Test that we can refresh a packaged DAG
"""
dag_id = "test_zip_dag"
fileloc = os.path.realpath(
os.path.join(TEST_DAGS_FOLDER, "test_zip.zip/test_zip.py")
)
mock_dagmodel.return_value = DagModel()
mock_dagmodel.return_value.last_expired = datetime.max.replace(
tzinfo=utc
)
mock_dagmodel.return_value.fileloc = fileloc
class TestDagBag(DagBag):
process_file_calls = 0
def process_file(self, filepath, only_if_updated=True, safe_mode=True):
if filepath in fileloc:
TestDagBag.process_file_calls += 1
return super(TestDagBag, self).process_file(filepath, only_if_updated, safe_mode)
dagbag = TestDagBag(dag_folder=os.path.realpath(TEST_DAGS_FOLDER), include_examples=False)
self.assertEqual(1, dagbag.process_file_calls)
dag = dagbag.get_dag(dag_id)
self.assertIsNotNone(dag)
self.assertEqual(dag_id, dag.dag_id)
self.assertEqual(2, dagbag.process_file_calls)
def process_dag(self, create_dag):
"""
Helper method to process a file generated from the input create_dag function.
"""
# write source to file
source = textwrap.dedent(''.join(
inspect.getsource(create_dag).splitlines(True)[1:-1]))
f = NamedTemporaryFile()
f.write(source.encode('utf8'))
f.flush()
dagbag = models.DagBag(dag_folder=self.empty_dir, include_examples=False)
found_dags = dagbag.process_file(f.name)
return dagbag, found_dags, f.name
def validate_dags(self, expected_parent_dag, actual_found_dags, actual_dagbag,
should_be_found=True):
expected_dag_ids = list(map(lambda dag: dag.dag_id, expected_parent_dag.subdags))
expected_dag_ids.append(expected_parent_dag.dag_id)
actual_found_dag_ids = list(map(lambda dag: dag.dag_id, actual_found_dags))
for dag_id in expected_dag_ids:
actual_dagbag.log.info('validating %s' % dag_id)
self.assertEqual(
dag_id in actual_found_dag_ids, should_be_found,
'dag "%s" should %shave been found after processing dag "%s"' %
(dag_id, '' if should_be_found else 'not ', expected_parent_dag.dag_id)
)
self.assertEqual(
dag_id in actual_dagbag.dags, should_be_found,
'dag "%s" should %sbe in dagbag.dags after processing dag "%s"' %
(dag_id, '' if should_be_found else 'not ', expected_parent_dag.dag_id)
)
def test_load_subdags(self):
# Define Dag to load
def standard_subdag():
from airflow.models import DAG
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.subdag_operator import SubDagOperator
import datetime
DAG_NAME = 'master'
DEFAULT_ARGS = {
'owner': 'owner1',
'start_date': datetime.datetime(2016, 1, 1)
}
dag = DAG(
DAG_NAME,
default_args=DEFAULT_ARGS)
# master:
# A -> opSubDag_0
# master.opsubdag_0:
# -> subdag_0.task
# A -> opSubDag_1
# master.opsubdag_1:
# -> subdag_1.task
with dag:
def subdag_0():
subdag_0 = DAG('master.opSubdag_0', default_args=DEFAULT_ARGS)
DummyOperator(task_id='subdag_0.task', dag=subdag_0)
return subdag_0
def subdag_1():
subdag_1 = DAG('master.opSubdag_1', default_args=DEFAULT_ARGS)
DummyOperator(task_id='subdag_1.task', dag=subdag_1)
return subdag_1
opSubdag_0 = SubDagOperator(
task_id='opSubdag_0', dag=dag, subdag=subdag_0())
opSubdag_1 = SubDagOperator(
task_id='opSubdag_1', dag=dag, subdag=subdag_1())
opA = DummyOperator(task_id='A')
opA.set_downstream(opSubdag_0)
opA.set_downstream(opSubdag_1)
return dag
testDag = standard_subdag()
# sanity check to make sure DAG.subdag is still functioning properly
self.assertEqual(len(testDag.subdags), 2)
# Perform processing dag
dagbag, found_dags, _ = self.process_dag(standard_subdag)
# Validate correctness
# all dags from testDag should be listed
self.validate_dags(testDag, found_dags, dagbag)
# Define Dag to load
def nested_subdags():
from airflow.models import DAG
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.subdag_operator import SubDagOperator
import datetime
DAG_NAME = 'master'
DEFAULT_ARGS = {
'owner': 'owner1',
'start_date': datetime.datetime(2016, 1, 1)
}
dag = DAG(
DAG_NAME,
default_args=DEFAULT_ARGS)
# master:
# A -> opSubdag_0
# master.opSubdag_0:
# -> opSubDag_A
# master.opSubdag_0.opSubdag_A:
# -> subdag_A.task
# -> opSubdag_B
# master.opSubdag_0.opSubdag_B:
# -> subdag_B.task
# A -> opSubdag_1
# master.opSubdag_1:
# -> opSubdag_C
# master.opSubdag_1.opSubdag_C:
# -> subdag_C.task
# -> opSubDag_D
# master.opSubdag_1.opSubdag_D:
# -> subdag_D.task
with dag:
def subdag_A():
subdag_A = DAG(
'master.opSubdag_0.opSubdag_A', default_args=DEFAULT_ARGS)
DummyOperator(task_id='subdag_A.task', dag=subdag_A)
return subdag_A
def subdag_B():
subdag_B = DAG(
'master.opSubdag_0.opSubdag_B', default_args=DEFAULT_ARGS)
DummyOperator(task_id='subdag_B.task', dag=subdag_B)
return subdag_B
def subdag_C():
subdag_C = DAG(
'master.opSubdag_1.opSubdag_C', default_args=DEFAULT_ARGS)
DummyOperator(task_id='subdag_C.task', dag=subdag_C)
return subdag_C
def subdag_D():
subdag_D = DAG(
'master.opSubdag_1.opSubdag_D', default_args=DEFAULT_ARGS)
DummyOperator(task_id='subdag_D.task', dag=subdag_D)
return subdag_D
def subdag_0():
subdag_0 = DAG('master.opSubdag_0', default_args=DEFAULT_ARGS)
SubDagOperator(task_id='opSubdag_A', dag=subdag_0, subdag=subdag_A())
SubDagOperator(task_id='opSubdag_B', dag=subdag_0, subdag=subdag_B())
return subdag_0
def subdag_1():
subdag_1 = DAG('master.opSubdag_1', default_args=DEFAULT_ARGS)
SubDagOperator(task_id='opSubdag_C', dag=subdag_1, subdag=subdag_C())
SubDagOperator(task_id='opSubdag_D', dag=subdag_1, subdag=subdag_D())
return subdag_1
opSubdag_0 = SubDagOperator(
task_id='opSubdag_0', dag=dag, subdag=subdag_0())
opSubdag_1 = SubDagOperator(
task_id='opSubdag_1', dag=dag, subdag=subdag_1())
opA = DummyOperator(task_id='A')
opA.set_downstream(opSubdag_0)
opA.set_downstream(opSubdag_1)
return dag
testDag = nested_subdags()
# sanity check to make sure DAG.subdag is still functioning properly
self.assertEqual(len(testDag.subdags), 6)
# Perform processing dag
dagbag, found_dags, _ = self.process_dag(nested_subdags)
# Validate correctness
# all dags from testDag should be listed
self.validate_dags(testDag, found_dags, dagbag)
def test_skip_cycle_dags(self):
"""
Don't crash when loading an invalid (contains a cycle) DAG file.
Don't load the dag into the DagBag either
"""
# Define Dag to load
def basic_cycle():
from airflow.models import DAG
from airflow.operators.dummy_operator import DummyOperator
import datetime
DAG_NAME = 'cycle_dag'
DEFAULT_ARGS = {
'owner': 'owner1',
'start_date': datetime.datetime(2016, 1, 1)
}
dag = DAG(
DAG_NAME,
default_args=DEFAULT_ARGS)
# A -> A
with dag:
opA = DummyOperator(task_id='A')
opA.set_downstream(opA)
return dag
testDag = basic_cycle()
# sanity check to make sure DAG.subdag is still functioning properly
self.assertEqual(len(testDag.subdags), 0)
# Perform processing dag
dagbag, found_dags, file_path = self.process_dag(basic_cycle)
        # Validate correctness
# None of the dags should be found
self.validate_dags(testDag, found_dags, dagbag, should_be_found=False)
self.assertIn(file_path, dagbag.import_errors)
# Define Dag to load
def nested_subdag_cycle():
from airflow.models import DAG
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.subdag_operator import SubDagOperator
import datetime
DAG_NAME = 'nested_cycle'
DEFAULT_ARGS = {
'owner': 'owner1',
'start_date': datetime.datetime(2016, 1, 1)
}
dag = DAG(
DAG_NAME,
default_args=DEFAULT_ARGS)
# cycle:
# A -> opSubdag_0
# cycle.opSubdag_0:
# -> opSubDag_A
# cycle.opSubdag_0.opSubdag_A:
# -> subdag_A.task
# -> opSubdag_B
# cycle.opSubdag_0.opSubdag_B:
# -> subdag_B.task
# A -> opSubdag_1
# cycle.opSubdag_1:
# -> opSubdag_C
# cycle.opSubdag_1.opSubdag_C:
# -> subdag_C.task -> subdag_C.task >Invalid Loop<
# -> opSubDag_D
# cycle.opSubdag_1.opSubdag_D:
# -> subdag_D.task
with dag:
def subdag_A():
subdag_A = DAG(
'nested_cycle.opSubdag_0.opSubdag_A', default_args=DEFAULT_ARGS)
DummyOperator(task_id='subdag_A.task', dag=subdag_A)
return subdag_A
def subdag_B():
subdag_B = DAG(
'nested_cycle.opSubdag_0.opSubdag_B', default_args=DEFAULT_ARGS)
DummyOperator(task_id='subdag_B.task', dag=subdag_B)
return subdag_B
def subdag_C():
subdag_C = DAG(
'nested_cycle.opSubdag_1.opSubdag_C', default_args=DEFAULT_ARGS)
opSubdag_C_task = DummyOperator(
task_id='subdag_C.task', dag=subdag_C)
# introduce a loop in opSubdag_C
opSubdag_C_task.set_downstream(opSubdag_C_task)
return subdag_C
def subdag_D():
subdag_D = DAG(
'nested_cycle.opSubdag_1.opSubdag_D', default_args=DEFAULT_ARGS)
DummyOperator(task_id='subdag_D.task', dag=subdag_D)
return subdag_D
def subdag_0():
subdag_0 = DAG('nested_cycle.opSubdag_0', default_args=DEFAULT_ARGS)
SubDagOperator(task_id='opSubdag_A', dag=subdag_0, subdag=subdag_A())
SubDagOperator(task_id='opSubdag_B', dag=subdag_0, subdag=subdag_B())
return subdag_0
def subdag_1():
subdag_1 = DAG('nested_cycle.opSubdag_1', default_args=DEFAULT_ARGS)
SubDagOperator(task_id='opSubdag_C', dag=subdag_1, subdag=subdag_C())
SubDagOperator(task_id='opSubdag_D', dag=subdag_1, subdag=subdag_D())
return subdag_1
opSubdag_0 = SubDagOperator(
task_id='opSubdag_0', dag=dag, subdag=subdag_0())
opSubdag_1 = SubDagOperator(
task_id='opSubdag_1', dag=dag, subdag=subdag_1())
opA = DummyOperator(task_id='A')
opA.set_downstream(opSubdag_0)
opA.set_downstream(opSubdag_1)
return dag
testDag = nested_subdag_cycle()
# sanity check to make sure DAG.subdag is still functioning properly
self.assertEqual(len(testDag.subdags), 6)
# Perform processing dag
dagbag, found_dags, file_path = self.process_dag(nested_subdag_cycle)
# Validate correctness
# None of the dags should be found
self.validate_dags(testDag, found_dags, dagbag, should_be_found=False)
self.assertIn(file_path, dagbag.import_errors)
def test_process_file_with_none(self):
"""
test that process_file can handle Nones
"""
dagbag = models.DagBag(dag_folder=self.empty_dir, include_examples=False)
self.assertEqual([], dagbag.process_file(None))
@patch.object(TI, 'handle_failure')
def test_kill_zombies(self, mock_ti_handle_failure):
"""
        Test that kill_zombies calls the TI failure handler with the proper context
"""
dagbag = models.DagBag(dag_folder=self.empty_dir, include_examples=True)
with create_session() as session:
session.query(TI).delete()
dag = dagbag.get_dag('example_branch_operator')
task = dag.get_task(task_id='run_this_first')
ti = TI(task, DEFAULT_DATE, State.RUNNING)
session.add(ti)
session.commit()
zombies = [SimpleTaskInstance(ti)]
dagbag.kill_zombies(zombies)
mock_ti_handle_failure \
.assert_called_with(ANY,
conf.getboolean('core', 'unit_test_mode'),
ANY)
def test_deactivate_unknown_dags(self):
"""
        Test that dag_ids not passed into deactivate_unknown_dags
        are deactivated when the function is invoked
"""
dagbag = DagBag(include_examples=True)
dag_id = "test_deactivate_unknown_dags"
expected_active_dags = dagbag.dags.keys()
model_before = DagModel(dag_id=dag_id, is_active=True)
with create_session() as session:
session.merge(model_before)
models.DAG.deactivate_unknown_dags(expected_active_dags)
after_model = DagModel.get_dagmodel(dag_id)
self.assertTrue(model_before.is_active)
self.assertFalse(after_model.is_active)
# clean up
with create_session() as session:
session.query(DagModel).filter(DagModel.dag_id == 'test_deactivate_unknown_dags').delete()
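# A sketch of how this module might be run in isolation; the module path is an
# assumption and depends on where the file sits in the Airflow test tree:
#
#   python -m pytest path/to/this_file.py::DagBagTest -x
#
# Several tests touch the metadata database (e.g. test_kill_zombies), so an
# initialized Airflow environment ('airflow initdb' or equivalent) is assumed.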
|
the-stack_106_15607
|
from typing import Any, List, Union
import torch
def normalize_string(s: str) -> str:
return s.lower().replace('-', '').replace('_', '').replace(' ', '')
def resolver(classes: List[Any], query: Union[Any, str], *args, **kwargs):
if query is None or not isinstance(query, str):
return query
query = normalize_string(query)
for cls in classes:
if query == normalize_string(cls.__name__):
return cls(*args, **kwargs)
    raise ValueError(
f"Could not resolve '{query}' among the choices "
f"{set(normalize_string(cls.__name__) for cls in classes)}")
def activation_resolver(query: Union[Any, str] = 'relu', *args, **kwargs):
acts = [
act for act in vars(torch.nn.modules.activation).values()
if isinstance(act, type) and issubclass(act, torch.nn.Module)
]
return resolver(acts, query, *args, **kwargs)
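# A minimal usage sketch (not part of the original module). The query is
# matched against lower-cased class names with '-', '_' and spaces stripped,
# so 'leaky_relu' resolves to torch.nn.LeakyReLU; the keyword argument is an
# assumption and only applies to activations that accept it.
if __name__ == '__main__':
    act = activation_resolver('leaky_relu', negative_slope=0.1)
    print(act)                        # LeakyReLU(negative_slope=0.1)
    print(act(torch.randn(4)).shape)  # torch.Size([4])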
|