#!/usr/bin/env python import collections import warnings from affine import Affine import numpy import pandas from geoh5.kea import common as kc from geoh5.kea.common import LayerType from geoh5.kea.common import BandColourInterp from geoh5.kea.common import RatFieldTypes from geoh5.kea.common import RatDataTypes from geoh5.kea.common import ConvertRatDataType from geoh5.kea.common import NumpyRatTypes class KeaImageRead(object): """ The base class for the KEA image format. Sets up the `Read` interface. """ def __init__(self, fid): self._fid = fid self._header = None self._closed = False # image dimensions self._width = None self._height = None self._count = None # spatial info self._crs = None self._transform = None self._band_groups = None self._band_datasets = None self._mask_datasets = None # band level info self._dtype = None self._dtypes = None self._no_data = None self._chunks = None self._metadata = None self._description = None self._layer_useage = None self._layer_type = None self._rat_column_names = None self._rat_rows = None self._rat_lookup = None # do we kick it off??? # self._read_kea() def _read_kea(self): self._header = self._read_header() self._width = self._header['SIZE'][0] self._height = self._header['SIZE'][1] self._count = self._header['NUMBANDS'][0] self._crs = self._header['WKT'][0] self._transform = self._read_transform() self._band_groups = self._read_band_groups() self._band_datasets = self._read_band_datasets() self._mask_datasets = self._read_mask_datasets() self._dtype, self._dtypes = self._read_dtypes() self._no_data = self._read_no_data() self._chunks = self._read_chunks() self._metadata = self._read_metadata() self._description = self._read_description() self._layer_useage = self._read_layer_useage() self._layer_type = self._read_layer_type() self._prep_rat() def __enter__(self): return self # http://docs.quantifiedcode.com/python-anti-patterns/correctness/exit_must_accept_three_arguments.html def __exit__(self, exception_type, exception_value, traceback): self.close() def close(self): """ Closes the HDF5 file. """ self._closed = True self._fid.close() def _read_header(self): _hdr = self._fid['HEADER'] hdr = {} for key in _hdr: hdr[key] = _hdr[key][:] return hdr @property def closed(self): return self._closed @property def count(self): return self._count @property def width(self): return self._width @property def height(self): return self._height @property def crs(self): return self._crs @property def transform(self): return self._transform def _read_transform(self): transform = [self._header['TL'][0], self._header['RES'][0], self._header['ROT'][0], self._header['TL'][1], self._header['ROT'][1], self._header['RES'][1]] return Affine.from_gdal(*transform) def _read_band_groups(self): gname_fmt = 'BAND{}' band_groups = {} for band in range(1, self.count + 1): group = gname_fmt.format(band) band_groups[band] = self._fid[group] return band_groups def _read_band_datasets(self): bname_fmt = 'BAND{}/DATA' band_dsets = {} for band in range(1, self.count + 1): dset = bname_fmt.format(band) band_dsets[band] = self._fid[dset] return band_dsets def _read_mask_datasets(self): bname_fmt = 'BAND{}/MASK' mask_dsets = {} for band in range(1, self.count + 1): dset = bname_fmt.format(band) mask_dsets[band] = self._fid[dset] if dset in self._fid else None return mask_dsets @property def dtypes(self): return self._dtypes @property def dtype(self): """ The highest level datatype of each raster band. 
""" return self._dtype def _read_dtypes(self): dtypes = {} for band in self._band_groups: bnd_grp = self._band_groups[band] val = bnd_grp['DATATYPE'][0] dtypes[band] = kc.KeaDataType(val).name dtype = dtypes[1] # get the highest level datatype # this is used as the base datatype for reading all bands as well as # the base datatype for appending a new band. for band in dtypes: dtype = numpy.promote_types(dtype, dtypes[band]) return dtype.name, dtypes @property def no_data(self): return self._no_data def _read_no_data(self): item = 'NO_DATA_VAL' no_data = {} for band in self._band_groups: bnd_grp = self._band_groups[band] if item in bnd_grp: val = bnd_grp['NO_DATA_VAL'][0] else: val = None no_data[band] = val return no_data @property def chunks(self): return self._chunks def _read_chunks(self): chunks = {} for band in self._band_datasets: chunks[band] = self._band_datasets[band].chunks return chunks @property def metadata(self): return self._metadata def _read_metadata(self): metadata = {} md = self._fid['METADATA'] for key in md: metadata[key] = md[key][:] return metadata @property def description(self): return self._description def _read_description(self): desc = {} for band in self._band_groups: bnd_grp = self._band_groups[band] val = bnd_grp['DESCRIPTION'][0] desc[band] = val return desc @property def layer_useage(self): return self._layer_useage def _read_layer_useage(self): layer_useage = {} for band in self._band_groups: bnd_grp = self._band_groups[band] val = bnd_grp['LAYER_USAGE'][0] layer_useage[band] = BandColourInterp(val) return layer_useage @property def layer_type(self): return self._layer_type def _read_layer_type(self): layer_type = {} for band in self._band_groups: bnd_grp = self._band_groups[band] val = bnd_grp['LAYER_TYPE'][0] layer_type[band] = LayerType(val) return layer_type @property def rat_column_names(self): return self._rat_column_names @property def rat_rows(self): return self._rat_rows def _prep_rat(self): self._rat_lookup = {} self._rat_column_names = {} self._rat_rows = {} for band in self._band_groups: bnd_grp = self._band_groups[band] hdr = bnd_grp['ATT/HEADER'] data = bnd_grp['ATT/DATA'] # bool, int, float, string fields rat_info = hdr['SIZE'][:] nrows = rat_info[0] rat_fields = rat_info[1:] names = list(range(rat_fields.sum())) # read the field types rat_data = {} for i, val in enumerate(rat_fields): if val > 0: fname = RatFieldTypes(i).name dname = RatDataTypes(i).name fields = hdr[fname][:] # set column name to link to the dataset, column index, # and the final table index for key in fields: col_name = key[0] col_idx = key[1] tbl_idx = key[-1] rat_data[col_name] = (data[dname], col_idx, tbl_idx) names[tbl_idx] = col_name self._rat_lookup[band] = rat_data self._rat_column_names[band] = names self._rat_rows[band] = nrows def read_rat(self, band=1, columns=None, row_start=0, row_end=None): """ Read the raster attribute table for a given band. :param bands: An integer representing the raster band that the raster attribute table should be read from. Default is the first band, i.e. `1`. :param columns: A list of the column names to read. Default is `None` in which case all columns are read. :param row_start: An integer indicating the 1st row to start reading from. Default is 0, the first row (zero based index). :param row_end: An integer indicating the last row to read up to. Default is None, in which case all rows are read. The row_end shouldn't exceed the number of rows in the table. :return: A `pandas.DataFrame` containing the raster attribute table. 
""" if band not in range(1, self.count + 1): msg = "Invalid band number: {}" raise IndexError(msg.format(band)) # If retrieve for multiple bands, return a pandas panel??? rat = self._rat_lookup[band] valid_cols = self._rat_column_names[band] data = {} if columns is None: # return all columns for col in rat: dset, idx, tbl_idx = rat[col] data[col] = dset[row_start:row_end, idx] col_names = self._rat_column_names[band] else: # check for valid columns if not set(columns).issubset(valid_cols): msg = ("Invalid column name.\n" "Valid column names are: {}") raise IndexError(msg.format(valid_cols)) col_names = [] for col in columns: dset, idx, tbl_idx = rat[col] data[col] = dset[row_start:row_end, idx] col_names.append(self._rat_column_names[band][tbl_idx]) return pandas.DataFrame(data, columns=col_names) def read(self, bands=None, window=None): """ Reads the image data into a `NumPy` array. :param bands: An integer or list of integers representing the raster bands that will be read from. The length of bands must match the `count` dimension of `data`, i.e. (count, height, width). If `bands` is None, the default behaviour is to read all bands. :param window: A `tuple` containing ((ystart, ystop), (xstart, xstop)) indices for reading from a specific location within the (height, width) 2D image. :return: A 2D or 3D `NumPy` array depending on whether `bands` is a `list` or single integer. """ # default behaviour is to read all bands if bands is None: bands = range(1, self.count + 1) # do we have several bands to read if isinstance(bands, collections.Sequence): nb = len(bands) if window is None: data = numpy.zeros((nb, self.height, self.width), dtype=self.dtype) for i, band in enumerate(bands): self._band_datasets[band].read_direct(data[i]) else: ys, ye = window[0] xs, xe = window[1] ysize = ye - ys xsize = xe - xs idx = numpy.s_[ys:ye, xs:xe] data = numpy.zeros((nb, ysize, xsize), dtype=self.dtype) for i, band in enumerate(bands): self._band_datasets[band].read_direct(data[i], idx) else: if window is None: data = self._band_datasets[bands][:] else: ys, ye = window[0] xs, xe = window[1] idx = numpy.s_[ys:ye, xs:xe] data = self._band_datasets[bands][idx] return data def read_mask(self, bands=None, window=None): """ Reads the mask data into a `NumPy` array. :param bands: An integer or list of integers representing the raster bands that will be read from. The length of bands must match the `count` dimension of `data`, i.e. (count, height, width). If `bands` is None, the default behaviour is to read all bands. :param window: A `tuple` containing ((ystart, ystop), (xstart, xstop)) indices for reading from a specific location within the (height, width) 2D image. :return: A 2D or 3D `NumPy` array depending on whether `bands` is a `list` or single integer. 
""" # default behaviour is to read all bands if bands is None: bands = range(1, self.count + 1) # do we have several bands to read if isinstance(bands, collections.Sequence): nb = len(bands) if window is None: mask = numpy.zeros((nb, self.height, self.width), dtype='uint8') for i, band in enumerate(bands): if self._mask_datasets[band] is not None: self._mask_datasets[band].read_direct(mask[i]) else: no_data = self.no_data[band] if no_data is None: mask.fill(255) else: mask[i][:] = (self.read(band) != no_data) * 255 else: ys, ye = window[0] xs, xe = window[1] ysize = ye - ys xsize = xe - xs idx = numpy.s_[ys:ye, xs:xe] mask = numpy.zeros((nb, ysize, xsize), dtype='uint8') for i, band in enumerate(bands): if self._mask_datasets[band] is None: no_data = self.no_data[band] if no_data is None: mask.fill(255) else: bdata = self.read(band, window=window) mask[i][:] = (bdata != no_data) * 255 else: self._mask_datasets[band].read_direct(mask[i], idx) else: if window is None: if self._mask_datasets[band] is None: dims = (self.height, self.width) mask = numpy.zeros(dims, dtype='uint8') no_data = self.no_data[band] if no_data is None: mask.fill(255) else: mask[:] = (self.read(bands) != no_data) * 255 else: mask = self._mask_datasets[bands][:] else: ys, ye = window[0] xs, xe = window[1] idx = numpy.s_[ys:ye, xs:xe] if self._mask_datasets[band] is None: ysize = ye - ys xsize = xe - xs mask = numpy.zeros((ysize, xsize), dtype='uint8') if no_data is None: mask.fill(255) else: bdata = self.read(bands, window=window) mask[:] = (bdata != no_data) * 255 else: mask = self._mask_datasets[bands][idx] return mask class KeaImageReadWrite(KeaImageRead): """ A subclass of `KeaImageRead`. Sets up the `Write` interface. """ def flush(self): """ Flushes the HDF5 caches. """ self._fid.flush() def close(self): """ Closes the HDF5 file. """ self.flush() self._closed = True self._fid.close() def write_description(self, band, description, delete=True): """ Writes the description for a given raster band. :param band: An integer representing the band number for which to write the description to. :param description: A string containing the description to be written to disk. :param delete: If set to `True` (default), then the original description will be deleted before being re-created. """ # TODO write either fixed length or variable length strings if delete: del self._band_groups[band]['DESCRIPTION'] grp = self._band_groups[band] grp.create_dataset('DESCRIPTION', shape=(1,), data=description) else: dset = self._band_groups[band]['DESCRIPTION'] dset[0] = description self._description[band] = description def write_band_metadata(self, band, metadata): """ Does nothing yet. """ raise NotImplementedError def write_layer_type(self, band, layer_type=LayerType.continuous): """ Writes the layer type for a given raster band. :param band: An integer representing the band number for which to write the description to. :param layer_type: See class `LayerType`. Default is `LayerType.continuous`. """ dset = self._band_groups[band]['LAYER_TYPE'] dset[0] = layer_type.value self._layer_type[band] = layer_type def write_layer_useage(self, band, layer_useage=BandColourInterp.greyindex): """ Writes the layer useage for a given raster band. Refers to the colour index mapping to be used for displaying the raster band. :param band: An integer representing the band number for which to write the description to. :param layer_useage: See class `BandColourInterp`. Default is `BandColourInterp.greyindex`. 
""" dset = self._band_groups[band]['LAYER_USEAGE'] dset[0] = layer_useage.value self._layer_useage[band] = layer_useage def write(self, data, bands, window=None): """ Writes the image data to disk. :param data: A 2D or 3D `NumPy` array containing the data to be written to disk. :param bands: An integer or list of integers representing the raster bands that will be written to. The length of bands must match the `count` dimension of `data`, i.e. (count, height, width). :param window: A `tuple` containing ((ystart, ystop), (xstart, xstop)) indices for writing to a specific location within the (height, width) 2D image. """ # do we have several bands to write if isinstance(bands, collections.Sequence): if not set(bands).issubset(self._band_datasets.keys()): msg = "1 or more bands does not exist in the output file." raise TypeError(msg) if data.ndim != 3: msg = "Data has {} dimensions and should be 3." raise TypeError(msg.format(data.ndim)) nb = data.shape[0] if nb != len(bands): msg = "Number of bands, {}, doesn't match data shape, {}." raise TypeError(msg.format(len(bands), nb)) if window is None: for i, band in enumerate(bands): dset = self._band_datasets[band] dset[:] = data[i] else: ys, ye = window[0] xs, xe = window[1] idx = numpy.s_[ys:ye, xs:xe] for i, band in enumerate(bands): dset = self._band_datasets[band] dset[idx] = data[i] else: if not set([bands]).issubset(self._band_datasets.keys()): msg = "Band {} does not exist in the output file." raise TypeError(msg.format(bands)) if window is None: dset = self._band_datasets[bands] dset[:] = data else: ys, ye = window[0] xs, xe = window[1] idx = numpy.s_[ys:ye, xs:xe] dset = self._band_datasets[bands] dset[idx] = data def write_mask(self, data, bands, window=None): """ Writes the image data to disk. :param data: A 2D or 3D `NumPy` array of type `bool` to be written to disk. The data will be written to disk with the values 0 & 255 in-place of False & True. :param bands: An integer or list of integers representing the raster bands that will be written to. The length of bands must match the `count` dimension of `data`, i.e. (count, height, width). :param window: A `tuple` containing ((ystart, ystop), (xstart, xstop)) indices for writing to a specific location within the (height, width) 2D image. """ # check for correct datatype if data.dtype is not numpy.dtype('bool'): msg = "Required datatype is bool, received {}" raise TypeError(msg.format(data.dtype.name)) # available mask datasets mdsets = self._mask_datasets # do we have several bands to write if isinstance(bands, collections.Sequence): if not set(bands).issubset(self._band_datasets.keys()): msg = "1 or more bands does not exist in the output file." raise TypeError(msg) if data.ndim != 3: msg = "Data has {} dimensions and should be 3." raise TypeError(msg.format(data.ndim)) nb = data.shape[0] if nb != len(bands): msg = "Number of bands, {}, doesn't match data shape, {}." raise TypeError(msg.format(len(bands), nb)) if window is None: for i, band in enumerate(bands): mdsets[band][data[i]] = 255 else: ys, ye = window[0] xs, xe = window[1] idx = numpy.s_[ys:ye, xs:xe] for i, band in enumerate(bands): mdsets[band][idx][data[i]] = 255 else: band = bands if not set([band]).issubset(self._band_datasets.keys()): msg = "Band {} does not exist in the output file." 
raise TypeError(msg.format(band)) if window is None: mdsets[band][data] = 255 else: ys, ye = window[0] xs, xe = window[1] idx = numpy.s_[ys:ye, xs:xe] mdsets[band][idx][data] = 255 def create_mask_dataset(self, band, compression=1, shuffle=False): """ Create a mask dataset for a given band. The mask dataset will inherit the same chunksize as the raster band that the mask refers to. The datatype will be `uint8`. :param band: An integer representing the raster band number that the mask refers to. :param compression: An integer in the range (0, 9), with 0 being low compression and 9 being high compression using the `gzip` filter. Default is 1. Will be set to `None` when `parallel` is set to True. The fast compression `lzf` can be used by setting `compression='lzf'`. Only used when `mode=w'. :param shuffle: If set to True, then the shuffle filter will be applied prior to compression. Higher compression ratio's can be achieved by applying the shuffle filter. Default is False. """ if not set([band]).issubset(self._band_datasets.keys()): msg = "Band {} does not exist in the output file." raise TypeError(msg.format(band)) # available mask datasets mdsets = self._mask_datasets if mdsets[band] is not None: msg = "Mask dataset for band: {} already exists!" warnings.warn(msg.format(band)) return # create bgroup = self._band_groups[band] chunks = self._chunks[band] dims = (self.height, self.width) kwargs = {'shape': dims, 'dtype': 'uint8', 'chunks': chunks, 'compression': compression, 'shuffle': shuffle} bgroup.create_dataset('MASK', **kwargs) # flush the cache and re-initialise self.flush() self._read_kea() def add_image_band(self, band_name=None, description=None, dtype='uint8', chunks=(256, 256), blocksize=256, compression=1, shuffle=False, no_data=None, link=None): """ Adds a new image band to the KEA file. :param band_name: If `None` (default), then band name will be `Band {count+1}` where `count` is the current nuber of image bands. :param description: A string containing the image band description. If `None` (default) then the description will be an empty string. :param dtype: A valid `NumPy` style datatype string. Defaults to 'uint8'. :param chunks: A `tuple` containing the desired chunksize for each 2D chunk within a given raster band. Defaults to (256, 256). :param blocksize: An integer representing the desired blocksize. Defaults to 256. :param compression: An integer in the range (0, 9), with 0 being low compression and 9 being high compression using the `gzip` filter. Default is 1. Will be set to `None` when `parallel` is set to True. The fast compression `lzf` can be used by setting `compression='lzf'`. Only used when `mode=w'. :param shuffle: If set to True, then the shuffle filter will be applied prior to compression. Higher compression ratio's can be achieved by applying the shuffle filter. Default is False. :param no_data: An integer or floating point value representing the no data or fillvalue of the image datasets. :param link: If set to a integer representing an existing band number, then a HDF5 hard link will be created pointing to an existing band number, rather than physically create a new band dataset. Useful if you have multiple raster attribute tables derived from the same segmented image, but the stats are from different points in time. So rather store the same image multiple times, you can store it once and simply point the other 'bands' to the real raster band, which will save lots of disk space. 
""" band_num = self.count + 1 if description is None: description = '' if band_name is None: band_name = 'Band {}'.format(band_num) dims = (self.height, self.width) kea_dtype = kc.KeaDataType[dtype].value gname = 'BAND{}'.format(band_num) if link is not None: if not set([link]).issubset(self._band_datasets.keys()): msg = ("Band {} does not exist in the output file. " "Can't create a link to a band that doensn't exist.") raise TypeError(msg.format(link)) grp = self._fid.create_group(gname) grp.create_group('METADATA') grp.create_group('OVERVIEWS') # do we create a hard link to an existing band if link is None: dset = grp.create_dataset('DATA', shape=dims, dtype=dtype, compression=compression, shuffle=shuffle, chunks=chunks, fillvalue=no_data) # CLASS 'IMAGE', is a HDF recognised attribute dset.attrs['CLASS'] = 'IMAGE' dset.attrs['IMAGE_VERSION'] = kc.IMAGE_VERSION # image blocksize dset.attrs['BLOCK_SIZE'] = blocksize else: # no need to write attributes as they already exist in the # band that we'll link to grp['DATA'] = self._band_datasets[link] dset = grp['DATA'] dtype = self.dtypes[link] kea_dtype = kc.KeaDataType[dtype].value # KEA has defined their own numerical datatype mapping self._fid[gname].create_dataset('DATATYPE', shape=(1,), data=kea_dtype, dtype='uint16') grp.create_dataset('DESCRIPTION', shape=(1,), data=description) # we'll use a default, but allow the user to overide later grp.create_dataset('LAYER_TYPE', shape=(1,), data=0) grp.create_dataset('LAYER_USAGE', shape=(1,), data=0) # create the attribute table groups grp.create_group('ATT/DATA') # TODO need an example in order to flesh the neighbours section grp.create_group('ATT/NEIGHBOURS') grp.create_dataset('ATT/HEADER/CHUNKSIZE', data=[0], dtype='uint64') # size is rows then bool, int, float, string columns grp.create_dataset('ATT/HEADER/SIZE', data=[0, 0, 0, 0, 0], dtype='uint64') # do we have no a data value if no_data is not None: grp.create_dataset('NO_DATA_VAL', shape=(1,), data=no_data) dname_fmt = 'Band_{}'.format(band_num) md = self._fid['METADATA'] md.create_dataset(dname_fmt, shape=(1,), data=band_name) hdr = self._fid['HEADER'] hdr['NUMBANDS'][0] = band_num # flush the cache and re-initialise self.flush() self._read_kea() def write_rat(self, dataframe, band, usage=None, chunksize=1000, compression=1, shuffle=False): """ Write a `pandas.DataFrame` as a raster attribute table for a given band. :param dataframe: A `pandas.DataFrame` containing the data to write to disk. The index column is currently not written to disk. :param band: An integer representing the raster band that the attribute table refers to. :param usage: A `dict` with the `DataFrame` column names as the keys, and a useage description for each column name as the values. If not all column names in usage are located in the `DataFrame` columns list then the missing columns will be inserted with a 'Generic' usage tag. If `usage` is set to `None`, then all columns contained with the `DataFrame` will be assigned a `Generic` usage tag. :param chunksize: An integer representing the chunks (number of rows) that will be used when compressing the data. Default is 1000, or total rows if the number of rows is < 1000. :param compression: An integer in the range (0, 9), with 0 being low compression and 9 being high compression using the `gzip` filter. Default is 1. Will be set to `None` when `parallel` is set to True. The fast compression `lzf` can be used by setting `compression='lzf'`. Only used when `mode=w'. 
:param shuffle: If set to True, then the shuffle filter will be applied prior to compression. Higher compression ratio's can be achieved by applying the shuffle filter. Default is False. """ if not set([band]).issubset(self._band_datasets.keys()): msg = "Band {} does not exist in the output file." raise TypeError(msg.format(band)) # gather descriptive info of the dataframe dtypes = dataframe.dtypes columns = dataframe.columns nrows, ncols = dataframe.shape # create default usage names if usage is None: usage = {col: 'Generic' for col in columns} else: if not all([i in columns for i in usage]): msg = "Column name(s) in usage not found in dataframe.\n{}" raise IndexError(msg.format(usage.keys())) else: usage = usage.copy() missing_cols = [col for col in columns if col not in usage] for col in missing_cols: usage[col] = 'Generic' # what datatypes are we working with datatypes = {key.value: [] for key in RatDataTypes} for col in columns: dtype = dtypes[col].name.upper() dvalue = NumpyRatTypes[dtype].value datatypes[dvalue].append(col) # retrieve the relevant groups for the given band bnd_grp = self._band_groups[band] hdr = bnd_grp['ATT/HEADER'] data = bnd_grp['ATT/DATA'] # write the chunksize if nrows < chunksize: chunksize = nrows hdr['CHUNKSIZE'][0] = chunksize # write the rat dimensions rat_size = hdr['SIZE'] rat_size[0] = nrows for dtype in datatypes: # account for the nrows value at idx:0 rat_size[dtype + 1] = len(datatypes[dtype]) # header fields (name, local index, usage, global index) vlen = ConvertRatDataType[3] hdr_dtype = numpy.dtype([("NAME", vlen), ("INDEX", numpy.uint32), ("USAGE", vlen), ("COLNUM", numpy.uint32)]) # create the datasets for dtype in datatypes: cols = datatypes[dtype] ncols_dtype = len(cols) # do we have any data for this datatype? if ncols_dtype == 0: continue # setup the dataset for a given datatype out_dtype = ConvertRatDataType[dtype] dims = (nrows, ncols_dtype) dataset_name = RatDataTypes(dtype).name dset = data.create_dataset(dataset_name, shape=dims, dtype=out_dtype, chunks=(chunksize, 1), compression=compression, shuffle=shuffle) # header fields hdr_data = numpy.zeros((ncols_dtype), dtype=hdr_dtype) hdr_data["NAME"] = cols # write the column data and the hdr fields for idx, col in enumerate(cols): dset[:, idx] = dataframe[col].values.astype(out_dtype) hdr_data["INDEX"][idx] = idx hdr_data["USAGE"][idx] = usage[col] hdr_data["COLNUM"][idx] = columns.get_loc(col) hdr.create_dataset(RatFieldTypes(dtype).name, data=hdr_data) # flush the cache and re-initialise self.flush() self._read_kea()
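# --- Usage sketch (illustrative only, not part of the original module) ---
# A minimal example of driving the classes defined above, assuming the KEA
# file is a plain HDF5 file opened with h5py; the file name is hypothetical
# and initialisation is triggered explicitly because __init__ leaves
# _read_kea() commented out.
import h5py

fid = h5py.File('example.kea', 'r')
with KeaImageRead(fid) as img:      # __exit__ closes the underlying HDF5 file
    img._read_kea()                 # populate dimensions, CRS, band datasets, RAT info
    print(img.count, img.width, img.height, img.dtype)
    # read a 100x100 window from band 1, and its raster attribute table
    data = img.read(bands=[1], window=((0, 100), (0, 100)))
    rat = img.read_rat(band=1)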
import rdtest import struct import renderdoc as rd def real_action_children(action): return [c for c in action.children if not c.flags & rd.ActionFlags.PopMarker] class VK_Indirect(rdtest.TestCase): demos_test_name = 'VK_Indirect' def check_overlay(self, pass_samples, *, no_overlay = False): pipe: rd.PipeState = self.controller.GetPipelineState() tex = rd.TextureDisplay() tex.overlay = rd.DebugOverlay.Drawcall tex.resourceId = pipe.GetOutputTargets()[0].resourceId self.out.SetTextureDisplay(tex) self.out.Display() overlay_id = self.out.GetDebugOverlayTexID() samples = [ (50, 40), (60, 40), (70, 40), (90, 40), (100, 40), (110, 40), (130, 40), (140, 40), (160, 40), (190, 40), (200, 40), (220, 40), (50, 190), (60, 190), (70, 190), (90, 190), (100, 190), (110, 190), (130, 190), (140, 190), (160, 190), (190, 190), (200, 190), (220, 190), (330, 40), (340, 40), (350, 40), (330, 115), (340, 115), (350, 115), (330, 190), (340, 190), (350, 190), ] # Every sample that isn't passing should be off off_alpha = 0.5 # If the overlay isn't even for a action, it will be cleared to black if no_overlay: off_alpha = 0.0 self.check(len(pass_samples) == 0) for s in [s for s in samples if s not in pass_samples]: self.check_pixel_value(overlay_id, s[0], s[1], [0.0, 0.0, 0.0, off_alpha], eps=1.0/256.0) # And the passing samples should be on for s in pass_samples: self.check_pixel_value(overlay_id, s[0], s[1], [0.8, 0.1, 0.8, 1.0], eps=1.0/256.0) def check_capture(self): fill = self.find_action("vkCmdFillBuffer") self.check(fill is not None) buffer_usage = {} for usage in self.controller.GetUsage(fill.copyDestination): usage: rd.EventUsage if usage.eventId not in buffer_usage: buffer_usage[usage.eventId] = [] buffer_usage[usage.eventId].append(usage.usage) # The texture is the backbuffer tex = self.get_last_action().copyDestination for level in ["Primary", "Secondary"]: rdtest.log.print("Checking {} indirect calls".format(level)) final = self.find_action("{}: Final".format(level)) indirect_count_root = self.find_action("{}: KHR_action_indirect_count".format(level)) self.controller.SetFrameEvent(final.eventId, False) # Check the top row, non indirect count and always present self.check_pixel_value(tex, 60, 60, [1.0, 0.0, 0.0, 1.0]) self.check_pixel_value(tex, 100, 60, [0.0, 0.0, 1.0, 1.0]) self.check_pixel_value(tex, 145, 35, [1.0, 1.0, 0.0, 1.0]) self.check_pixel_value(tex, 205, 35, [0.0, 1.0, 1.0, 1.0]) # if present, check bottom row of indirect count as well as post-count calls if indirect_count_root is not None: self.check_pixel_value(tex, 60, 220, [0.0, 1.0, 0.0, 1.0]) self.check_pixel_value(tex, 100, 220, [1.0, 0.0, 1.0, 1.0]) self.check_pixel_value(tex, 145, 185, [0.5, 1.0, 0.0, 1.0]) self.check_pixel_value(tex, 205, 185, [0.5, 0.0, 1.0, 1.0]) self.check_pixel_value(tex, 340, 40, [1.0, 0.5, 0.0, 1.0]) self.check_pixel_value(tex, 340, 115, [1.0, 0.5, 0.5, 1.0]) self.check_pixel_value(tex, 340, 190, [1.0, 0.0, 0.5, 1.0]) dispatches = self.find_action("{}: Dispatches".format(level)) # Set up a ReplayOutput and TextureSave for quickly testing the action highlight overlay self.out: rd.ReplayOutput = self.controller.CreateOutput(rd.CreateHeadlessWindowingData(100, 100), rd.ReplayOutputType.Texture) self.check(self.out is not None) # Rewind to the start of the capture action: rd.ActionDescription = dispatches.children[0] while action.previous is not None: action = action.previous # Ensure we can select all actions while action is not None: self.controller.SetFrameEvent(action.eventId, False) action = action.next 
rdtest.log.success("Selected all {} actions".format(level)) self.check(dispatches and len(real_action_children(dispatches)) == 3) self.check(dispatches.children[0].dispatchDimension == (0, 0, 0)) self.check(dispatches.children[1].dispatchDimension == (1, 1, 1)) self.check(dispatches.children[2].dispatchDimension == (3, 4, 5)) rdtest.log.success("{} Indirect dispatches are the correct dimensions".format(level)) self.controller.SetFrameEvent(dispatches.children[2].eventId, False) pipe: rd.PipeState = self.controller.GetPipelineState() ssbo: rd.BoundResource = pipe.GetReadWriteResources(rd.ShaderStage.Compute)[0].resources[0] data: bytes = self.controller.GetBufferData(ssbo.resourceId, 0, 0) rdtest.log.print("Got {} bytes of uints".format(len(data))) uints = [struct.unpack_from('=4L', data, offs) for offs in range(0, len(data), 16)] for x in range(0, 6): # 3 groups of 2 threads each for y in range(0, 8): # 3 groups of 2 threads each for z in range(0, 5): # 5 groups of 1 thread each idx = 100 + z*8*6 + y*6 + x if not rdtest.value_compare(uints[idx], [x, y, z, 12345]): raise rdtest.TestFailureException( 'expected thread index data @ {},{},{}: {} is not as expected: {}' .format(x, y, z, uints[idx], [x, y, z, 12345])) rdtest.log.success("Dispatched buffer contents are as expected for {}".format(level)) empties = self.find_action("{}: Empty draws".format(level)) self.check(empties and len(real_action_children(empties)) == 2) action: rd.ActionDescription for action in real_action_children(empties): self.check(action.numIndices == 0) self.check(action.numInstances == 0) self.controller.SetFrameEvent(action.eventId, False) # Check that we have empty PostVS postvs_data = self.get_postvs(action, rd.MeshDataStage.VSOut, 0, 1) self.check(len(postvs_data) == 0) # No samples should be passing in the empties self.check_overlay([]) rdtest.log.success("{} empty actions are empty".format(level)) indirects = self.find_action("{}: Indirect draws".format(level)) self.check('vkCmdDrawIndirect' in indirects.children[0].customName) self.check('vkCmdDrawIndexedIndirect' in indirects.children[1].customName) self.check(len(real_action_children(indirects.children[1])) == 2) rdtest.log.success("Correct number of {} indirect draws".format(level)) # vkCmdDrawIndirect(...) action = indirects.children[0] self.check(action.numIndices == 3) self.check(action.numInstances == 2) self.controller.SetFrameEvent(action.eventId, False) self.check(rd.ResourceUsage.Indirect in buffer_usage[action.eventId]) # Check that we have PostVS as expected postvs_data = self.get_postvs(action, rd.MeshDataStage.VSOut) postvs_ref = { 0: {'vtx': 0, 'idx': 0, 'gl_PerVertex_var.gl_Position': [-0.8, -0.5, 0.0, 1.0]}, 1: {'vtx': 1, 'idx': 1, 'gl_PerVertex_var.gl_Position': [-0.7, -0.8, 0.0, 1.0]}, 2: {'vtx': 2, 'idx': 2, 'gl_PerVertex_var.gl_Position': [-0.6, -0.5, 0.0, 1.0]}, } self.check_mesh_data(postvs_ref, postvs_data) self.check(len(postvs_data) == len(postvs_ref)) # We shouldn't have any extra vertices self.check_overlay([(60, 40)]) rdtest.log.success("{} {} is as expected".format(level, action.customName)) self.check(rd.ResourceUsage.Indirect in buffer_usage[indirects.children[1].eventId]) # vkCmdDrawIndexedIndirect[0](...) 
action = indirects.children[1].children[0] self.check(action.numIndices == 3) self.check(action.numInstances == 3) self.controller.SetFrameEvent(action.eventId, False) # Check that we have PostVS as expected postvs_data = self.get_postvs(action, rd.MeshDataStage.VSOut) # These indices are the *output* indices, which have been rebased/remapped, so are not the same as the input # indices postvs_ref = { 0: {'vtx': 0, 'idx': 6, 'gl_PerVertex_var.gl_Position': [-0.6, -0.5, 0.0, 1.0]}, 1: {'vtx': 1, 'idx': 7, 'gl_PerVertex_var.gl_Position': [-0.5, -0.8, 0.0, 1.0]}, 2: {'vtx': 2, 'idx': 8, 'gl_PerVertex_var.gl_Position': [-0.4, -0.5, 0.0, 1.0]}, } self.check_mesh_data(postvs_ref, postvs_data) self.check(len(postvs_data) == len(postvs_ref)) # We shouldn't have any extra vertices self.check_overlay([(100, 40)]) rdtest.log.success("{} {} is as expected".format(level, action.customName)) # vkCmdDrawIndexedIndirect[1](...) action = indirects.children[1].children[1] self.check(action.numIndices == 6) self.check(action.numInstances == 2) self.controller.SetFrameEvent(action.eventId, False) # Check that we have PostVS as expected postvs_data = self.get_postvs(action, rd.MeshDataStage.VSOut) postvs_ref = { 0: {'vtx': 0, 'idx': 9, 'gl_PerVertex_var.gl_Position': [-0.4, -0.5, 0.0, 1.0]}, 1: {'vtx': 1, 'idx': 10, 'gl_PerVertex_var.gl_Position': [-0.3, -0.8, 0.0, 1.0]}, 2: {'vtx': 2, 'idx': 11, 'gl_PerVertex_var.gl_Position': [-0.2, -0.8, 0.0, 1.0]}, 3: {'vtx': 3, 'idx': 12, 'gl_PerVertex_var.gl_Position': [-0.1, -0.5, 0.0, 1.0]}, 4: {'vtx': 4, 'idx': 13, 'gl_PerVertex_var.gl_Position': [ 0.0, -0.8, 0.0, 1.0]}, 5: {'vtx': 5, 'idx': 14, 'gl_PerVertex_var.gl_Position': [ 0.1, -0.8, 0.0, 1.0]}, } self.check_mesh_data(postvs_ref, postvs_data) self.check(len(postvs_data) == len(postvs_ref)) # We shouldn't have any extra vertices self.check_overlay([(140, 40), (200, 40)]) rdtest.log.success("{} {} is as expected".format(level, action.customName)) if indirect_count_root is not None: self.check(indirect_count_root.children[0].customName == '{}: Empty count draws'.format(level)) self.check(indirect_count_root.children[1].customName == '{}: Indirect count draws'.format(level)) empties = indirect_count_root.children[0] self.check(empties and len(real_action_children(empties)) == 3) action: rd.ActionDescription for action in real_action_children(empties.children): self.check(action.numIndices == 0) self.check(action.numInstances == 0) self.controller.SetFrameEvent(action.eventId, False) # Check that we have empty PostVS postvs_data = self.get_postvs(action, rd.MeshDataStage.VSOut, 0, 1) self.check(len(postvs_data) == 0) self.check_overlay([], no_overlay=True) # vkCmdDrawIndirectCountKHR action_indirect = indirect_count_root.children[1].children[0] self.check(rd.ResourceUsage.Indirect in buffer_usage[action_indirect.eventId]) self.check(action_indirect and len(real_action_children(action_indirect)) == 1) # vkCmdDrawIndirectCountKHR[0] action = action_indirect.children[0] self.check(action.numIndices == 3) self.check(action.numInstances == 4) self.controller.SetFrameEvent(action.eventId, False) # Check that we have PostVS as expected postvs_data = self.get_postvs(action, rd.MeshDataStage.VSOut) # These indices are the *output* indices, which have been rebased/remapped, so are not the same as the input # indices postvs_ref = { 0: {'vtx': 0, 'idx': 0, 'gl_PerVertex_var.gl_Position': [-0.8, 0.5, 0.0, 1.0]}, 1: {'vtx': 1, 'idx': 1, 'gl_PerVertex_var.gl_Position': [-0.7, 0.2, 0.0, 1.0]}, 2: {'vtx': 2, 'idx': 2, 
'gl_PerVertex_var.gl_Position': [-0.6, 0.5, 0.0, 1.0]}, } self.check_mesh_data(postvs_ref, postvs_data) self.check(len(postvs_data) == len(postvs_ref)) # We shouldn't have any extra vertices self.check_overlay([(60, 190)]) rdtest.log.success("{} {} is as expected".format(level, action.customName)) # vkCmdDrawIndexedIndirectCountKHR action_indirect = indirect_count_root.children[1].children[1] self.check(action_indirect and len(real_action_children(action_indirect)) == 3) # vkCmdDrawIndirectCountKHR[0] action = action_indirect.children[0] self.check(action.numIndices == 3) self.check(action.numInstances == 1) self.controller.SetFrameEvent(action.eventId, False) # Check that we have PostVS as expected postvs_data = self.get_postvs(action, rd.MeshDataStage.VSOut) # These indices are the *output* indices, which have been rebased/remapped, so are not the same as the input # indices postvs_ref = { 0: {'vtx': 0, 'idx': 15, 'gl_PerVertex_var.gl_Position': [-0.6, 0.5, 0.0, 1.0]}, 1: {'vtx': 1, 'idx': 16, 'gl_PerVertex_var.gl_Position': [-0.5, 0.2, 0.0, 1.0]}, 2: {'vtx': 2, 'idx': 17, 'gl_PerVertex_var.gl_Position': [-0.4, 0.5, 0.0, 1.0]}, } self.check_mesh_data(postvs_ref, postvs_data) self.check(len(postvs_data) == len(postvs_ref)) # We shouldn't have any extra vertices self.check_overlay([(100, 190)]) rdtest.log.success("{} {} is as expected".format(level, action.customName)) # vkCmdDrawIndirectCountKHR[1] action = action_indirect.children[1] self.check(action.numIndices == 0) self.check(action.numInstances == 0) self.controller.SetFrameEvent(action.eventId, False) postvs_data = self.get_postvs(action, rd.MeshDataStage.VSOut) self.check(len(postvs_data) == 0) self.check_overlay([]) rdtest.log.success("{} {} is as expected".format(level, action.customName)) # vkCmdDrawIndirectCountKHR[2] action = action_indirect.children[2] self.check(action.numIndices == 6) self.check(action.numInstances == 2) self.controller.SetFrameEvent(action.eventId, False) # Check that we have PostVS as expected postvs_data = self.get_postvs(action, rd.MeshDataStage.VSOut) # These indices are the *output* indices, which have been rebased/remapped, so are not the same as the input # indices postvs_ref = { 0: {'vtx': 0, 'idx': 18, 'gl_PerVertex_var.gl_Position': [-0.4, 0.5, 0.0, 1.0]}, 1: {'vtx': 1, 'idx': 19, 'gl_PerVertex_var.gl_Position': [-0.3, 0.2, 0.0, 1.0]}, 2: {'vtx': 2, 'idx': 20, 'gl_PerVertex_var.gl_Position': [-0.2, 0.2, 0.0, 1.0]}, 3: {'vtx': 3, 'idx': 21, 'gl_PerVertex_var.gl_Position': [-0.1, 0.5, 0.0, 1.0]}, 4: {'vtx': 4, 'idx': 22, 'gl_PerVertex_var.gl_Position': [ 0.0, 0.2, 0.0, 1.0]}, 5: {'vtx': 5, 'idx': 23, 'gl_PerVertex_var.gl_Position': [ 0.1, 0.2, 0.0, 1.0]}, } self.check_mesh_data(postvs_ref, postvs_data) self.check(len(postvs_data) == len(postvs_ref)) # We shouldn't have any extra vertices self.check_overlay([(140, 190), (200, 190)]) rdtest.log.success("{} {} is as expected".format(level, action.customName)) # Now check that the draws post-count are correctly highlighted self.controller.SetFrameEvent(self.find_action("{}: Post-count 1".format(level)).children[0].eventId, False) self.check_overlay([(340, 40)]) self.controller.SetFrameEvent(self.find_action("{}: Post-count 2".format(level)).children[0].eventId, False) self.check_overlay([(340, 190)]) self.controller.SetFrameEvent(self.find_action("{}: Post-count 3".format(level)).children[0].eventId, False) self.check_overlay([(340, 115)]) else: rdtest.log.print("KHR_action_indirect_count not tested")
# Copyright (c) 2010-2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import print_function import os from subprocess import Popen, PIPE import sys from tempfile import mkdtemp from textwrap import dedent from time import sleep, time from collections import defaultdict import unittest from hashlib import md5 from uuid import uuid4 from nose import SkipTest from six.moves.http_client import HTTPConnection import shutil from swiftclient import get_auth, head_account from swift.common import internal_client from swift.obj.diskfile import get_data_dir from swift.common.ring import Ring from swift.common.utils import readconf, renamer, rsync_module_interpolation from swift.common.manager import Manager from swift.common.storage_policy import POLICIES, EC_POLICY, REPL_POLICY from test.probe import CHECK_SERVER_TIMEOUT, VALIDATE_RSYNC ENABLED_POLICIES = [p for p in POLICIES if not p.is_deprecated] POLICIES_BY_TYPE = defaultdict(list) for p in POLICIES: POLICIES_BY_TYPE[p.policy_type].append(p) def get_server_number(ipport, ipport2server): server_number = ipport2server[ipport] server, number = server_number[:-1], server_number[-1:] try: number = int(number) except ValueError: # probably the proxy return server_number, None return server, number def start_server(ipport, ipport2server): server, number = get_server_number(ipport, ipport2server) err = Manager([server]).start(number=number, wait=True) if err: raise Exception('unable to start %s' % ( server if not number else '%s%s' % (server, number))) return check_server(ipport, ipport2server) def _check_storage(ipport, path): conn = HTTPConnection(*ipport) conn.request('GET', path) resp = conn.getresponse() # 404 because it's a nonsense path (and mount_check is false) # 507 in case the test target is a VM using mount_check if resp.status not in (404, 507): raise Exception( 'Unexpected status %s' % resp.status) return resp def _check_proxy(ipport, user, key): url, token = get_auth('http://%s:%d/auth/v1.0' % ipport, user, key) account = url.split('/')[-1] head_account(url, token) return url, token, account def _retry_timeout(f, args=None, kwargs=None, timeout=CHECK_SERVER_TIMEOUT): args = args or () kwargs = kwargs or {} try_until = time() + timeout while True: try: return f(*args, **kwargs) except Exception as err: if time() > try_until: print(err) fsignature = '%s(*%r, **%r)' % (f.__name__, args, kwargs) print('Giving up on %s after %s seconds.' 
% ( fsignature, timeout)) raise err sleep(0.1) def check_server(ipport, ipport2server): server = ipport2server[ipport] if server[:-1] in ('account', 'container', 'object'): if int(server[-1]) > 4: return None path = '/connect/1/2' if server[:-1] == 'container': path += '/3' elif server[:-1] == 'object': path += '/3/4' rv = _retry_timeout(_check_storage, args=(ipport, path)) else: rv = _retry_timeout(_check_proxy, args=( ipport, 'test:tester', 'testing')) return rv def kill_server(ipport, ipport2server): server, number = get_server_number(ipport, ipport2server) err = Manager([server]).kill(number=number) if err: raise Exception('unable to kill %s' % (server if not number else '%s%s' % (server, number))) try_until = time() + 30 while True: try: conn = HTTPConnection(*ipport) conn.request('GET', '/') conn.getresponse() except Exception as err: break if time() > try_until: raise Exception( 'Still answering on %s:%s after 30 seconds' % ipport) sleep(0.1) def kill_nonprimary_server(primary_nodes, ipport2server): primary_ipports = [(n['ip'], n['port']) for n in primary_nodes] for ipport, server in ipport2server.items(): if ipport in primary_ipports: server_type = server[:-1] break else: raise Exception('Cannot figure out server type for %r' % primary_nodes) for ipport, server in list(ipport2server.items()): if server[:-1] == server_type and ipport not in primary_ipports: kill_server(ipport, ipport2server) return ipport def add_ring_devs_to_ipport2server(ring, server_type, ipport2server, servers_per_port=0): # We'll number the servers by order of unique occurrence of: # IP, if servers_per_port > 0 OR there > 1 IP in ring # ipport, otherwise unique_ip_count = len(set(dev['ip'] for dev in ring.devs if dev)) things_to_number = {} number = 0 for dev in filter(None, ring.devs): ip = dev['ip'] ipport = (ip, dev['port']) unique_by = ip if servers_per_port or unique_ip_count > 1 else ipport if unique_by not in things_to_number: number += 1 things_to_number[unique_by] = number ipport2server[ipport] = '%s%d' % (server_type, things_to_number[unique_by]) def store_config_paths(name, configs): for server_name in (name, '%s-replicator' % name): for server in Manager([server_name]): for i, conf in enumerate(server.conf_files(), 1): configs[server.server][i] = conf def get_ring(ring_name, required_replicas, required_devices, server=None, force_validate=None, ipport2server=None, config_paths=None): if not server: server = ring_name ring = Ring('/etc/swift', ring_name=ring_name) if ipport2server is None: ipport2server = {} # used internally, even if not passed in if config_paths is None: config_paths = defaultdict(dict) store_config_paths(server, config_paths) repl_name = '%s-replicator' % server repl_configs = {i: readconf(c, section_name=repl_name) for i, c in config_paths[repl_name].items()} servers_per_port = any(int(c.get('servers_per_port', '0')) for c in repl_configs.values()) add_ring_devs_to_ipport2server(ring, server, ipport2server, servers_per_port=servers_per_port) if not VALIDATE_RSYNC and not force_validate: return ring # easy sanity checks if ring.replica_count != required_replicas: raise SkipTest('%s has %s replicas instead of %s' % ( ring.serialized_path, ring.replica_count, required_replicas)) devs = [dev for dev in ring.devs if dev is not None] if len(devs) != required_devices: raise SkipTest('%s has %s devices instead of %s' % ( ring.serialized_path, len(devs), required_devices)) for dev in devs: # verify server is exposing mounted device ipport = (dev['ip'], dev['port']) _, server_number = 
get_server_number(ipport, ipport2server) conf = repl_configs[server_number] for device in os.listdir(conf['devices']): if device == dev['device']: dev_path = os.path.join(conf['devices'], device) full_path = os.path.realpath(dev_path) if not os.path.exists(full_path): raise SkipTest( 'device %s in %s was not found (%s)' % (device, conf['devices'], full_path)) break else: raise SkipTest( "unable to find ring device %s under %s's devices (%s)" % ( dev['device'], server, conf['devices'])) # verify server is exposing rsync device rsync_export = conf.get('rsync_module', '').rstrip('/') if not rsync_export: rsync_export = '{replication_ip}::%s' % server cmd = "rsync %s" % rsync_module_interpolation(rsync_export, dev) p = Popen(cmd, shell=True, stdout=PIPE) stdout, _stderr = p.communicate() if p.returncode: raise SkipTest('unable to connect to rsync ' 'export %s (%s)' % (rsync_export, cmd)) for line in stdout.splitlines(): if line.rsplit(None, 1)[-1] == dev['device']: break else: raise SkipTest("unable to find ring device %s under rsync's " "exported devices for %s (%s)" % (dev['device'], rsync_export, cmd)) return ring def get_policy(**kwargs): kwargs.setdefault('is_deprecated', False) # go through the policies and make sure they match the # requirements of kwargs for policy in POLICIES: # TODO: for EC, pop policy type here and check it first matches = True for key, value in kwargs.items(): try: if getattr(policy, key) != value: matches = False except AttributeError: matches = False if matches: return policy raise SkipTest('No policy matching %s' % kwargs) def resetswift(): p = Popen("resetswift 2>&1", shell=True, stdout=PIPE) stdout, _stderr = p.communicate() if p.returncode: raise AssertionError( 'Cleanup with "resetswift" failed: stdout: %s, stderr: %s' % (stdout, _stderr)) print(stdout) Manager(['all']).stop() class Body(object): def __init__(self, total=3.5 * 2 ** 20): self.length = total self.hasher = md5() self.read_amount = 0 self.chunk = uuid4().hex * 2 ** 10 self.buff = '' @property def etag(self): return self.hasher.hexdigest() def __len__(self): return self.length def read(self, amount): if len(self.buff) < amount: try: self.buff += next(self) except StopIteration: pass rv, self.buff = self.buff[:amount], self.buff[amount:] return rv def __iter__(self): return self def next(self): if self.buff: rv, self.buff = self.buff, '' return rv if self.read_amount >= self.length: raise StopIteration() rv = self.chunk[:int(self.length - self.read_amount)] self.read_amount += len(rv) self.hasher.update(rv) return rv def __next__(self): return next(self) class ProbeTest(unittest.TestCase): """ Don't instantiate this directly, use a child class instead. 
""" def setUp(self): resetswift() try: self.ipport2server = {} self.configs = defaultdict(dict) self.account_ring = get_ring( 'account', self.acct_cont_required_replicas, self.acct_cont_required_devices, ipport2server=self.ipport2server, config_paths=self.configs) self.container_ring = get_ring( 'container', self.acct_cont_required_replicas, self.acct_cont_required_devices, ipport2server=self.ipport2server, config_paths=self.configs) self.policy = get_policy(**self.policy_requirements) self.object_ring = get_ring( self.policy.ring_name, self.obj_required_replicas, self.obj_required_devices, server='object', ipport2server=self.ipport2server, config_paths=self.configs) self.servers_per_port = any( int(readconf(c, section_name='object-replicator').get( 'servers_per_port', '0')) for c in self.configs['object-replicator'].values()) Manager(['main']).start(wait=True) for ipport in self.ipport2server: check_server(ipport, self.ipport2server) proxy_ipport = ('127.0.0.1', 8080) self.ipport2server[proxy_ipport] = 'proxy' self.url, self.token, self.account = check_server( proxy_ipport, self.ipport2server) self.account_1 = { 'url': self.url, 'token': self.token, 'account': self.account} rv = _retry_timeout(_check_proxy, args=( proxy_ipport, 'test2:tester2', 'testing2')) self.account_2 = { k: v for (k, v) in zip(('url', 'token', 'account'), rv)} self.replicators = Manager( ['account-replicator', 'container-replicator', 'object-replicator']) self.updaters = Manager(['container-updater', 'object-updater']) except BaseException: try: raise finally: try: Manager(['all']).kill() except Exception: pass def tearDown(self): Manager(['all']).kill() def device_dir(self, server, node): server_type, config_number = get_server_number( (node['ip'], node['port']), self.ipport2server) repl_server = '%s-replicator' % server_type conf = readconf(self.configs[repl_server][config_number], section_name=repl_server) return os.path.join(conf['devices'], node['device']) def storage_dir(self, server, node, part=None, policy=None): policy = policy or self.policy device_path = self.device_dir(server, node) path_parts = [device_path, get_data_dir(policy)] if part is not None: path_parts.append(str(part)) return os.path.join(*path_parts) def config_number(self, node): _server_type, config_number = get_server_number( (node['ip'], node['port']), self.ipport2server) return config_number def is_local_to(self, node1, node2): """ Return True if both ring devices are "local" to each other (on the same "server". """ if self.servers_per_port: return node1['ip'] == node2['ip'] # Without a disambiguating IP, for SAIOs, we have to assume ports # uniquely identify "servers". SAIOs should be configured to *either* # have unique IPs per node (e.g. 127.0.0.1, 127.0.0.2, etc.) OR unique # ports per server (i.e. sdb1 & sdb5 would have same port numbers in # the 8-disk EC ring). 
return node1['port'] == node2['port'] def get_to_final_state(self): # these .stop()s are probably not strictly necessary, # but may prevent race conditions self.replicators.stop() self.updaters.stop() self.replicators.once() self.updaters.once() self.replicators.once() def kill_drive(self, device): if os.path.ismount(device): os.system('sudo umount %s' % device) else: renamer(device, device + "X") def revive_drive(self, device): disabled_name = device + "X" if os.path.isdir(disabled_name): renamer(device + "X", device) else: os.system('sudo mount %s' % device) def make_internal_client(self, object_post_as_copy=True): tempdir = mkdtemp() try: conf_path = os.path.join(tempdir, 'internal_client.conf') conf_body = """ [DEFAULT] swift_dir = /etc/swift [pipeline:main] pipeline = catch_errors cache copy proxy-server [app:proxy-server] use = egg:swift#proxy [filter:copy] use = egg:swift#copy object_post_as_copy = %s [filter:cache] use = egg:swift#memcache [filter:catch_errors] use = egg:swift#catch_errors """ % object_post_as_copy with open(conf_path, 'w') as f: f.write(dedent(conf_body)) return internal_client.InternalClient(conf_path, 'test', 1) finally: shutil.rmtree(tempdir) class ReplProbeTest(ProbeTest): acct_cont_required_replicas = 3 acct_cont_required_devices = 4 obj_required_replicas = 3 obj_required_devices = 4 policy_requirements = {'policy_type': REPL_POLICY} class ECProbeTest(ProbeTest): acct_cont_required_replicas = 3 acct_cont_required_devices = 4 obj_required_replicas = 6 obj_required_devices = 8 policy_requirements = {'policy_type': EC_POLICY} if __name__ == "__main__": for server in ('account', 'container'): try: get_ring(server, 3, 4, force_validate=True) except SkipTest as err: sys.exit('%s ERROR: %s' % (server, err)) print('%s OK' % server) for policy in POLICIES: try: get_ring(policy.ring_name, 3, 4, server='object', force_validate=True) except SkipTest as err: sys.exit('object ERROR (%s): %s' % (policy.name, err)) print('object OK (%s)' % policy.name)
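# --- Example probe test (illustrative sketch, not part of the original file) ---
# Shows how the helpers above are typically consumed by a concrete probe test;
# the class name, container name and object payload are hypothetical.
from swiftclient import client


class TestExampleProbe(ReplProbeTest):

    def test_put_then_get(self):
        container = 'probe-test-%s' % uuid4()
        client.put_container(self.url, self.token, container)
        client.put_object(self.url, self.token, container, 'obj', 'some data')
        # run replicators/updaters so all nodes converge before reading back
        self.get_to_final_state()
        _headers, body = client.get_object(self.url, self.token,
                                           container, 'obj')
        self.assertEqual(body, 'some data')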
# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import base64 import sys import testtools from tempest.api.compute import base from tempest import clients from tempest.common.utils import data_utils from tempest import config from tempest import exceptions from tempest import test CONF = config.CONF class ServersNegativeTestJSON(base.BaseV2ComputeTest): def setUp(self): super(ServersNegativeTestJSON, self).setUp() try: self.client.wait_for_server_status(self.server_id, 'ACTIVE') except Exception: self.__class__.server_id = self.rebuild_server(self.server_id) def tearDown(self): self.server_check_teardown() super(ServersNegativeTestJSON, self).tearDown() @classmethod def resource_setup(cls): super(ServersNegativeTestJSON, cls).resource_setup() cls.client = cls.servers_client if CONF.compute.allow_tenant_isolation: cls.alt_os = clients.Manager(cls.isolated_creds.get_alt_creds()) else: cls.alt_os = clients.AltManager() cls.alt_client = cls.alt_os.servers_client resp, server = cls.create_test_server(wait_until='ACTIVE') cls.server_id = server['id'] @test.attr(type=['negative', 'gate']) def test_server_name_blank(self): # Create a server with name parameter empty self.assertRaises(exceptions.BadRequest, self.create_test_server, name='') @test.attr(type=['negative', 'gate']) def test_personality_file_contents_not_encoded(self): # Use an unencoded file when creating a server with personality file_contents = 'This is a test file.' 
person = [{'path': '/etc/testfile.txt', 'contents': file_contents}] self.assertRaises(exceptions.BadRequest, self.create_test_server, personality=person) @test.attr(type=['negative', 'gate']) def test_create_with_invalid_image(self): # Create a server with an unknown image self.assertRaises(exceptions.BadRequest, self.create_test_server, image_id=-1) @test.attr(type=['negative', 'gate']) def test_create_with_invalid_flavor(self): # Create a server with an unknown flavor self.assertRaises(exceptions.BadRequest, self.create_test_server, flavor=-1,) @test.attr(type=['negative', 'gate']) def test_invalid_access_ip_v4_address(self): # An access IPv4 address must match a valid address pattern IPv4 = '1.1.1.1.1.1' self.assertRaises(exceptions.BadRequest, self.create_test_server, accessIPv4=IPv4) @test.attr(type=['negative', 'gate']) def test_invalid_ip_v6_address(self): # An access IPv6 address must match a valid address pattern IPv6 = 'notvalid' self.assertRaises(exceptions.BadRequest, self.create_test_server, accessIPv6=IPv6) @testtools.skipUnless(CONF.compute_feature_enabled.resize, 'Resize not available.') @test.attr(type=['negative', 'gate']) def test_resize_nonexistent_server(self): # Resize a non-existent server nonexistent_server = data_utils.rand_uuid() self.assertRaises(exceptions.NotFound, self.client.resize, nonexistent_server, self.flavor_ref) @testtools.skipUnless(CONF.compute_feature_enabled.resize, 'Resize not available.') @test.attr(type=['negative', 'gate']) def test_resize_server_with_non_existent_flavor(self): # Resize a server with non-existent flavor nonexistent_flavor = data_utils.rand_uuid() self.assertRaises(exceptions.BadRequest, self.client.resize, self.server_id, flavor_ref=nonexistent_flavor) @testtools.skipUnless(CONF.compute_feature_enabled.resize, 'Resize not available.') @test.attr(type=['negative', 'gate']) def test_resize_server_with_null_flavor(self): # Resize a server with null flavor self.assertRaises(exceptions.BadRequest, self.client.resize, self.server_id, flavor_ref="") @test.attr(type=['negative', 'gate']) def test_reboot_non_existent_server(self): # Reboot a non existent server nonexistent_server = data_utils.rand_uuid() self.assertRaises(exceptions.NotFound, self.client.reboot, nonexistent_server, 'SOFT') @testtools.skipUnless(CONF.compute_feature_enabled.pause, 'Pause is not available.') @test.attr(type=['negative', 'gate']) def test_pause_paused_server(self): # Pause a paused server. self.client.pause_server(self.server_id) self.client.wait_for_server_status(self.server_id, 'PAUSED') self.assertRaises(exceptions.Conflict, self.client.pause_server, self.server_id) self.client.unpause_server(self.server_id) @test.attr(type=['negative', 'gate']) def test_rebuild_reboot_deleted_server(self): # Rebuild and Reboot a deleted server _, server = self.create_test_server() self.client.delete_server(server['id']) self.client.wait_for_server_termination(server['id']) self.assertRaises(exceptions.NotFound, self.client.rebuild, server['id'], self.image_ref_alt) self.assertRaises(exceptions.NotFound, self.client.reboot, server['id'], 'SOFT') @test.attr(type=['negative', 'gate']) def test_rebuild_non_existent_server(self): # Rebuild a non existent server nonexistent_server = data_utils.rand_uuid() meta = {'rebuild': 'server'} new_name = data_utils.rand_name('server') file_contents = 'Test server rebuild.' 
personality = [{'path': '/etc/rebuild.txt', 'contents': base64.b64encode(file_contents)}] self.assertRaises(exceptions.NotFound, self.client.rebuild, nonexistent_server, self.image_ref_alt, name=new_name, meta=meta, personality=personality, adminPass='rebuild') @test.attr(type=['negative', 'gate']) def test_create_numeric_server_name(self): # Create a server with a numeric name if self.__class__._interface == "xml": raise self.skipException("Not testable in XML") server_name = 12345 self.assertRaises(exceptions.BadRequest, self.create_test_server, name=server_name) @test.attr(type=['negative', 'gate']) def test_create_server_name_length_exceeds_256(self): # Create a server with name length exceeding 256 characters server_name = 'a' * 256 self.assertRaises(exceptions.BadRequest, self.create_test_server, name=server_name) @test.attr(type=['negative', 'gate']) def test_create_with_invalid_network_uuid(self): # Pass invalid network uuid while creating a server networks = [{'fixed_ip': '10.0.1.1', 'uuid': 'a-b-c-d-e-f-g-h-i-j'}] self.assertRaises(exceptions.BadRequest, self.create_test_server, networks=networks) @test.attr(type=['negative', 'gate']) def test_create_with_non_existent_keypair(self): # Pass a non-existent keypair while creating a server key_name = data_utils.rand_name('key') self.assertRaises(exceptions.BadRequest, self.create_test_server, key_name=key_name) @test.attr(type=['negative', 'gate']) def test_create_server_metadata_exceeds_length_limit(self): # Pass really long metadata while creating a server metadata = {'a': 'b' * 260} self.assertRaises(exceptions.OverLimit, self.create_test_server, meta=metadata) @test.attr(type=['negative', 'gate']) def test_update_name_of_non_existent_server(self): # Update name of a non-existent server server_name = data_utils.rand_name('server') new_name = data_utils.rand_name('server') + '_updated' self.assertRaises(exceptions.NotFound, self.client.update_server, server_name, name=new_name) @test.attr(type=['negative', 'gate']) def test_update_server_set_empty_name(self): # Update name of the server to an empty string server_name = data_utils.rand_name('server') new_name = '' self.assertRaises(exceptions.BadRequest, self.client.update_server, server_name, name=new_name) @test.attr(type=['negative', 'gate']) def test_update_server_of_another_tenant(self): # Update name of a server that belongs to another tenant new_name = self.server_id + '_new' self.assertRaises(exceptions.NotFound, self.alt_client.update_server, self.server_id, name=new_name) @test.attr(type=['negative', 'gate']) def test_update_server_name_length_exceeds_256(self): # Update name of server exceed the name length limit new_name = 'a' * 256 self.assertRaises(exceptions.BadRequest, self.client.update_server, self.server_id, name=new_name) @test.attr(type=['negative', 'gate']) def test_delete_non_existent_server(self): # Delete a non existent server nonexistent_server = data_utils.rand_uuid() self.assertRaises(exceptions.NotFound, self.client.delete_server, nonexistent_server) @test.attr(type=['negative', 'gate']) def test_delete_a_server_of_another_tenant(self): # Delete a server that belongs to another tenant self.assertRaises(exceptions.NotFound, self.alt_client.delete_server, self.server_id) @test.attr(type=['negative', 'gate']) def test_delete_server_pass_negative_id(self): # Pass an invalid string parameter to delete server self.assertRaises(exceptions.NotFound, self.client.delete_server, -1) @test.attr(type=['negative', 'gate']) def 
test_delete_server_pass_id_exceeding_length_limit(self): # Pass a server ID that exceeds length limit to delete server self.assertRaises(exceptions.NotFound, self.client.delete_server, sys.maxint + 1) @test.attr(type=['negative', 'gate']) def test_create_with_nonexistent_security_group(self): # Create a server with a nonexistent security group security_groups = [{'name': 'does_not_exist'}] self.assertRaises(exceptions.BadRequest, self.create_test_server, security_groups=security_groups) @test.attr(type=['negative', 'gate']) def test_get_non_existent_server(self): # Get a non existent server details nonexistent_server = data_utils.rand_uuid() self.assertRaises(exceptions.NotFound, self.client.get_server, nonexistent_server) @test.attr(type=['negative', 'gate']) def test_stop_non_existent_server(self): # Stop a non existent server nonexistent_server = data_utils.rand_uuid() self.assertRaises(exceptions.NotFound, self.servers_client.stop, nonexistent_server) @testtools.skipUnless(CONF.compute_feature_enabled.pause, 'Pause is not available.') @test.attr(type=['negative', 'gate']) def test_pause_non_existent_server(self): # pause a non existent server nonexistent_server = data_utils.rand_uuid() self.assertRaises(exceptions.NotFound, self.client.pause_server, nonexistent_server) @testtools.skipUnless(CONF.compute_feature_enabled.pause, 'Pause is not available.') @test.attr(type=['negative', 'gate']) def test_unpause_non_existent_server(self): # unpause a non existent server nonexistent_server = data_utils.rand_uuid() self.assertRaises(exceptions.NotFound, self.client.unpause_server, nonexistent_server) @testtools.skipUnless(CONF.compute_feature_enabled.pause, 'Pause is not available.') @test.attr(type=['negative', 'gate']) def test_unpause_server_invalid_state(self): # unpause an active server. self.assertRaises(exceptions.Conflict, self.client.unpause_server, self.server_id) @testtools.skipUnless(CONF.compute_feature_enabled.suspend, 'Suspend is not available.') @test.attr(type=['negative', 'gate']) def test_suspend_non_existent_server(self): # suspend a non existent server nonexistent_server = data_utils.rand_uuid() self.assertRaises(exceptions.NotFound, self.client.suspend_server, nonexistent_server) @testtools.skipUnless(CONF.compute_feature_enabled.suspend, 'Suspend is not available.') @test.attr(type=['negative', 'gate']) def test_suspend_server_invalid_state(self): # suspend a suspended server. resp, _ = self.client.suspend_server(self.server_id) self.assertEqual(202, resp.status) self.client.wait_for_server_status(self.server_id, 'SUSPENDED') self.assertRaises(exceptions.Conflict, self.client.suspend_server, self.server_id) self.client.resume_server(self.server_id) @testtools.skipUnless(CONF.compute_feature_enabled.suspend, 'Suspend is not available.') @test.attr(type=['negative', 'gate']) def test_resume_non_existent_server(self): # resume a non existent server nonexistent_server = data_utils.rand_uuid() self.assertRaises(exceptions.NotFound, self.client.resume_server, nonexistent_server) @testtools.skipUnless(CONF.compute_feature_enabled.suspend, 'Suspend is not available.') @test.attr(type=['negative', 'gate']) def test_resume_server_invalid_state(self): # resume an active server. 
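        # The server is ACTIVE rather than SUSPENDED, so the resume request
        # should be rejected with a Conflict error.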
self.assertRaises(exceptions.Conflict, self.client.resume_server, self.server_id) @test.attr(type=['negative', 'gate']) def test_get_console_output_of_non_existent_server(self): # get the console output for a non existent server nonexistent_server = data_utils.rand_uuid() self.assertRaises(exceptions.NotFound, self.client.get_console_output, nonexistent_server, 10) @test.attr(type=['negative', 'gate']) def test_force_delete_nonexistent_server_id(self): # force-delete a non existent server nonexistent_server = data_utils.rand_uuid() self.assertRaises(exceptions.NotFound, self.client.force_delete_server, nonexistent_server) @test.attr(type=['negative', 'gate']) def test_restore_nonexistent_server_id(self): # restore-delete a non existent server nonexistent_server = data_utils.rand_uuid() self.assertRaises(exceptions.NotFound, self.client.restore_soft_deleted_server, nonexistent_server) @test.attr(type=['negative', 'gate']) def test_restore_server_invalid_state(self): # we can only restore-delete a server in 'soft-delete' state self.assertRaises(exceptions.Conflict, self.client.restore_soft_deleted_server, self.server_id) @testtools.skipUnless(CONF.compute_feature_enabled.shelve, 'Shelve is not available.') @test.attr(type=['negative', 'gate']) def test_shelve_non_existent_server(self): # shelve a non existent server nonexistent_server = data_utils.rand_uuid() self.assertRaises(exceptions.NotFound, self.client.shelve_server, nonexistent_server) @testtools.skipUnless(CONF.compute_feature_enabled.shelve, 'Shelve is not available.') @test.attr(type=['negative', 'gate']) def test_shelve_shelved_server(self): # shelve a shelved server. resp, server = self.client.shelve_server(self.server_id) self.assertEqual(202, resp.status) offload_time = CONF.compute.shelved_offload_time if offload_time >= 0: self.client.wait_for_server_status(self.server_id, 'SHELVED_OFFLOADED', extra_timeout=offload_time) else: self.client.wait_for_server_status(self.server_id, 'SHELVED') resp, server = self.client.get_server(self.server_id) image_name = server['name'] + '-shelved' params = {'name': image_name} resp, images = self.images_client.list_images(params) self.assertEqual(1, len(images)) self.assertEqual(image_name, images[0]['name']) self.assertRaises(exceptions.Conflict, self.client.shelve_server, self.server_id) self.client.unshelve_server(self.server_id) @testtools.skipUnless(CONF.compute_feature_enabled.shelve, 'Shelve is not available.') @test.attr(type=['negative', 'gate']) def test_unshelve_non_existent_server(self): # unshelve a non existent server nonexistent_server = data_utils.rand_uuid() self.assertRaises(exceptions.NotFound, self.client.unshelve_server, nonexistent_server) @testtools.skipUnless(CONF.compute_feature_enabled.shelve, 'Shelve is not available.') @test.attr(type=['negative', 'gate']) def test_unshelve_server_invalid_state(self): # unshelve an active server. self.assertRaises(exceptions.Conflict, self.client.unshelve_server, self.server_id) class ServersNegativeTestXML(ServersNegativeTestJSON): _interface = 'xml'
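
# A self-contained illustration (standard library only, with a toy client
# standing in for tempest's servers client) of the negative-test pattern used
# throughout the classes above: build an identifier that cannot exist, invoke
# the operation, and assert the expected exception type.
import unittest
import uuid


class _FakeServersClient(object):
    """Toy stand-in; the real tests call self.client from tempest.clients."""

    def __init__(self):
        self._servers = {}

    def delete_server(self, server_id):
        # tempest's client would raise exceptions.NotFound here
        if server_id not in self._servers:
            raise KeyError(server_id)
        del self._servers[server_id]


class NegativePatternExample(unittest.TestCase):
    def test_delete_non_existent_server(self):
        client = _FakeServersClient()
        nonexistent_server = str(uuid.uuid4())
        self.assertRaises(KeyError, client.delete_server, nonexistent_server)
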
"""Tests for `imitation.data.rollout`.""" import functools from typing import Mapping, Sequence import gym import numpy as np import pytest from stable_baselines3.common import monitor, vec_env from imitation.data import rollout, types, wrappers from imitation.policies import serialize from imitation.policies.base import RandomPolicy class TerminalSentinelEnv(gym.Env): """Environment with observation 0 when alive and 1 at terminal state.""" def __init__(self, max_acts: int): """Builds `TerminalSentinelLength` with episode length `max_acts`.""" self.max_acts = max_acts self.current_step = 0 self.action_space = gym.spaces.Discrete(1) self.observation_space = gym.spaces.Box(np.array([0]), np.array([1])) def reset(self): self.current_step = 0 return np.array([0]) def step(self, action): self.current_step += 1 done = self.current_step >= self.max_acts observation = np.array([1 if done else 0]) rew = 0.0 return observation, rew, done, {} def _sample_fixed_length_trajectories( episode_lengths: Sequence[int], min_episodes: int, policy_type: str = "policy", **kwargs, ) -> Sequence[types.Trajectory]: venv = vec_env.DummyVecEnv( [functools.partial(TerminalSentinelEnv, length) for length in episode_lengths], ) if policy_type == "policy": policy = RandomPolicy(venv.observation_space, venv.action_space) elif policy_type == "callable": random_policy = RandomPolicy(venv.observation_space, venv.action_space) # Simple way to get a valid callable: just use a policies .predict() method # (still tests another code path inside generate_trajectories) def policy(x): return random_policy.predict(x)[0] elif policy_type == "random": policy = None else: # pragma: no cover raise ValueError(f"Unknown policy_type '{policy_type}'") sample_until = rollout.make_min_episodes(min_episodes) trajectories = rollout.generate_trajectories( policy, venv, sample_until=sample_until, **kwargs, ) return trajectories @pytest.mark.parametrize( "policy_type", ["policy", "callable", "random"], ) def test_complete_trajectories(policy_type) -> None: """Checks trajectories include the terminal observation. This is hidden by default by VecEnv's auto-reset; we add it back in using `rollout.RolloutInfoWrapper`. Args: policy_type: Kind of policy to use when generating trajectories. """ min_episodes = 13 max_acts = 5 num_envs = 4 trajectories = _sample_fixed_length_trajectories( [max_acts] * num_envs, min_episodes, policy_type=policy_type, ) assert len(trajectories) >= min_episodes expected_obs = np.array([[0]] * max_acts + [[1]]) for trajectory in trajectories: obs = trajectory.obs acts = trajectory.acts assert len(obs) == len(acts) + 1 assert np.all(obs == expected_obs) @pytest.mark.parametrize( "episode_lengths,min_episodes,expected_counts", [ # Do we keep on sampling from the 1st (len 3) environment that remains 'alive'? ([3, 5], 2, {3: 2, 5: 1}), # Do we keep on sampling from the 2nd (len 7) environment that remains 'alive'? ([3, 7], 2, {3: 2, 7: 1}), # Similar, but more extreme case with num environments > num episodes ([3, 3, 3, 7], 2, {3: 3, 7: 1}), # Do we stop sampling at 2 episodes if we get two equal-length episodes? ([3, 3], 2, {3: 2}), ([5, 5], 2, {5: 2}), ([7, 7], 2, {7: 2}), ], ) def test_unbiased_trajectories( episode_lengths: Sequence[int], min_episodes: int, expected_counts: Mapping[int, int], ) -> None: """Checks trajectories are sampled without bias towards shorter episodes. Specifically, we create a VecEnv consisting of environments with fixed-length `episode_lengths`. This is unrealistic and breaks the i.i.d. 
assumption, but lets us test things deterministically. If we hit `min_episodes` exactly and all environments are done at the same time, we should stop and not sample any more trajectories. Otherwise, we should keep sampling from any in-flight environments, but not add trajectories from any other environments. The different test cases check each of these cases. Args: episode_lengths: The length of the episodes in each environment. min_episodes: The minimum number of episodes to sample. expected_counts: Mapping from episode length to expected number of episodes of that length (omit if 0 episodes of that length expected). """ trajectories = _sample_fixed_length_trajectories(episode_lengths, min_episodes) assert len(trajectories) == sum(expected_counts.values()) traj_lens = np.array([len(traj) for traj in trajectories]) for length, count in expected_counts.items(): assert np.sum(traj_lens == length) == count def test_seed_trajectories(): """Check trajectory order deterministic given seed and that seed is not no-op. Note in general environments and policies are stochastic, so the trajectory order *will* differ unless environment/policy seeds are also set. However, `TerminalSentinelEnv` is fixed-length deterministic, so there are no such confounders in this test. """ rng_a1 = np.random.RandomState(0) rng_a2 = np.random.RandomState(0) rng_b = np.random.RandomState(1) traj_a1 = _sample_fixed_length_trajectories([3, 5], 2, rng=rng_a1) traj_a2 = _sample_fixed_length_trajectories([3, 5], 2, rng=rng_a2) traj_b = _sample_fixed_length_trajectories([3, 5], 2, rng=rng_b) assert [len(traj) for traj in traj_a1] == [len(traj) for traj in traj_a2] assert [len(traj) for traj in traj_a1] != [len(traj) for traj in traj_b] class ObsRewHalveWrapper(gym.Wrapper): """Simple wrapper that scales every reward and observation feature by 0.5.""" def reset(self, **kwargs): obs = self.env.reset(**kwargs) / 2 return obs def step(self, action): obs, rew, done, info = self.env.step(action) return obs / 2, rew / 2, done, info def test_rollout_stats(): """Applying `ObsRewIncrementWrapper` halves the reward mean. `rollout_stats` should reflect this. """ env = gym.make("CartPole-v1") env = monitor.Monitor(env, None) env = ObsRewHalveWrapper(env) venv = vec_env.DummyVecEnv([lambda: env]) policy = serialize.load_policy("zero", "UNUSED", venv) trajs = rollout.generate_trajectories(policy, venv, rollout.make_min_episodes(10)) s = rollout.rollout_stats(trajs) np.testing.assert_allclose(s["return_mean"], s["monitor_return_mean"] / 2) np.testing.assert_allclose(s["return_std"], s["monitor_return_std"] / 2) np.testing.assert_allclose(s["return_min"], s["monitor_return_min"] / 2) np.testing.assert_allclose(s["return_max"], s["monitor_return_max"] / 2) def test_unwrap_traj(): """Check that unwrap_traj reverses `ObsRewIncrementWrapper`. Also check that unwrapping twice is a no-op. 
""" env = gym.make("CartPole-v1") env = wrappers.RolloutInfoWrapper(env) env = ObsRewHalveWrapper(env) venv = vec_env.DummyVecEnv([lambda: env]) policy = serialize.load_policy("zero", "UNUSED", venv) trajs = rollout.generate_trajectories(policy, venv, rollout.make_min_episodes(10)) trajs_unwrapped = [rollout.unwrap_traj(t) for t in trajs] trajs_unwrapped_twice = [rollout.unwrap_traj(t) for t in trajs_unwrapped] for t, t_unwrapped in zip(trajs, trajs_unwrapped): np.testing.assert_allclose(t.acts, t_unwrapped.acts) np.testing.assert_allclose(t.obs, t_unwrapped.obs / 2) np.testing.assert_allclose(t.rews, t_unwrapped.rews / 2) for t1, t2 in zip(trajs_unwrapped, trajs_unwrapped_twice): np.testing.assert_equal(t1.acts, t2.acts) np.testing.assert_equal(t1.obs, t2.obs) np.testing.assert_equal(t1.rews, t2.rews) def test_make_sample_until_errors(): with pytest.raises(ValueError, match="At least one.*"): rollout.make_sample_until(min_timesteps=None, min_episodes=None) episodes_positive = pytest.raises(ValueError, match="min_episodes.*positive") with episodes_positive: rollout.make_sample_until(min_timesteps=None, min_episodes=0) with episodes_positive: rollout.make_sample_until(min_timesteps=10, min_episodes=-34) timesteps_positive = pytest.raises(ValueError, match="min_timesteps.*positive") with timesteps_positive: rollout.make_sample_until(min_timesteps=-3, min_episodes=None) with timesteps_positive: rollout.make_sample_until(min_timesteps=0, min_episodes=None) @pytest.mark.parametrize("gamma", [0, 0.9, 1]) def test_compute_returns(gamma): rng = np.random.default_rng(seed=0) N = 100 rewards = rng.random(N) discounts = np.power(gamma, np.arange(N)) returns = np.sum(discounts * rewards) # small numerical errors will occur because compute_returns # uses a somewhat different method based on evaluating # polynomials assert abs(rollout.discounted_sum(rewards, gamma) - returns) < 1e-8 def test_generate_trajectories_type_error(): venv = vec_env.DummyVecEnv([functools.partial(TerminalSentinelEnv, 1)]) sample_until = rollout.make_min_episodes(1) with pytest.raises(TypeError, match="Policy must be.*got <class 'str'> instead"): rollout.generate_trajectories( "strings_are_not_valid_policies", venv, sample_until=sample_until, )
# coding: utf-8 # In[1]: get_ipython().magic('matplotlib inline') import matplotlib.pyplot as plt import numpy as np import time import src.gen_source as gen_source import src.two_tone_lib as tt import src.tcp_async as tcp_async import src.tcp_sync as tcp_sync import src.dab_util as du import src.dab_tuning_lib as dt from live_analyse_py import live_analyse_py # In[15]: try: __IPYTHON__ reload(tcp_async) reload(tcp_sync) reload(gen_source) reload(tt) reload(du) reload(dt) except: pass # In[3]: sync = tcp_sync.UhdSyncMsg(packet_size=4*8192, packet_type="".join(["f"]*8192)) async = tcp_async.UhdAsyncMsg() # In[4]: top = live_analyse_py() # In[5]: top.start() # In[6]: top.set_txgain(86) top.set_rxgain(10) # In[7]: top.blocks_file_source_0.open("./../dab_normalized_c64.dat", True) # In[8]: sync.has_msg() async.has_msg() # In[9]: tt.gen_two_tone(debug = True) # In[10]: msgs = sync.get_msgs(1) msgs = [np.fft.fftshift(msg) for msg in msgs] # In[18]: def measure(param): n_avg = 20 x2, x3, x4, x5, x6, x7, x8 = param repeat = True while repeat: #tt.gen_two_tone(debug = True, predist=tt.predist_poly, par=(x2, x3, x4)) top.dpd_memless_poly_0.set_a1(1) top.dpd_memless_poly_0.set_a2(x2) top.dpd_memless_poly_0.set_a3(x3) top.dpd_memless_poly_0.set_a4(x4) top.dpd_memless_poly_0.set_a5(x5) top.dpd_memless_poly_0.set_a6(x6) top.dpd_memless_poly_0.set_a7(x7) top.dpd_memless_poly_0.set_a8(x8) sync.has_msg() np.array(sync.get_msgs(0.8)) msgs = np.array(sync.get_msgs(n_avg)) scores = np.zeros(n_avg) msgs = [np.fft.fftshift(msg) for msg in msgs] if async.has_msg(): print ("repeat due to async message") continue a = np.array(msgs) mean_msg = a.mean(axis = 0) suffix = "x_2_%.3f_x_3_%.3f_x_4_%.3fx_5_%.3fx_6_%.3fx_7_%.3fx_8_%.3f" % (x2, x3, x4, x5, x6, x7, x8) #sig_to_noise = tt.analyse_power_spec(mean_msg, debug=True, debug_path="/tmp/out", suffix=suffix) for i in range(n_avg): if i == 0: scores[i] = dt.calc_signal_sholder_ratio(msgs[0], sampling_rate=8000000, debug=True, debug_path="/tmp/out", suffix=suffix) else: scores[i] = dt.calc_signal_sholder_ratio(msgs[0], sampling_rate=8000000) score = np.mean(scores) print(score, x2, x3, x4, x5, x6, x7, x8) repeat = False return score # In[16]: def simple_opt(pars, i, d, func): par = pars[i] test_pars = [] for x in [-1, 0, 1]: new_par = list(pars) new_par[i] = par + x * d test_pars.append(new_par) res = [func(par_new) for par_new in test_pars] sel = np.argmax(res) best_par = test_pars[sel] return best_par #pars = [1,1,1] #i_rand = np.random.randint(0, len(pars)) #pars = simple_opt(pars, i_rand, 0.01, lambda x:np.sum(x)) #pars # In[ ]: top.set_txgain(86) top.set_rxgain(5) pars = np.zeros(7) for i in range(10000): i_rand = np.random.randint(0, len(pars)) pars = simple_opt(pars, i_rand, 0.005, measure) # In[ ]: # In[ ]: # In[ ]: # In[15]: top.set_txgain(85) params = [] for x2 in np.linspace(-0.1, 0.1, num = 11): for x3 in np.linspace(-0.1, 0.1, num = 11): for x4 in np.linspace(-0.1, 0.1, num = 11): params.append((x2, x3, x4)) t_start = time.time() for idx, param in enumerate(params): measure(param) time_per_element = (time.time() - t_start) / (idx + 1) print ("Time per Element " + str(time_per_element) + ", total: " + str(time_per_element * len(params)), ", left: " + str(time_per_element * (len(params) - 1 - idx)) ) # In[ ]: # In[31]: sync.stop() async.stop() top.stop() top.wait() # In[ ]: # In[ ]: # In[ ]: # In[ ]:
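
# In[ ]:

# A hardware-free check of the random-coordinate search above: replace
# measure() (which needs the USRP/GNU Radio chain) with a toy objective whose
# maximum is known, and verify that repeatedly nudging one random coordinate
# by +/- d with simple_opt converges towards it.
def toy_objective(pars):
    # peak at (0.02, -0.05, 0.01); stands in for the shoulder-ratio score
    target = np.array([0.02, -0.05, 0.01])
    return -np.sum((np.asarray(pars) - target) ** 2)

pars = np.zeros(3)
for _ in range(500):
    i_rand = np.random.randint(0, len(pars))
    pars = simple_opt(pars, i_rand, 0.005, toy_objective)
print(pars)
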
# coding=utf-8 # Copyright 2018 The Tensor2Tensor Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Utils for metrics used in eval.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import inspect # Dependency imports import numpy as np from tensor2tensor.layers import common_layers from tensor2tensor.utils import bleu_hook from tensor2tensor.utils import registry from tensor2tensor.utils import rouge import tensorflow as tf from tensorflow.contrib.eager.python import tfe class Metrics(object): """Available evaluation metrics.""" # Entries here should match the keys in METRICS_FN below ACC = "accuracy" ACC_TOP5 = "accuracy_top5" ACC_PER_SEQ = "accuracy_per_sequence" NEG_LOG_PERPLEXITY = "neg_log_perplexity" APPROX_BLEU = "approx_bleu_score" RMSE = "rmse" LOG_POISSON = "log_poisson" R2 = "r_squared" ROUGE_2_F = "rouge_2_fscore" ROUGE_L_F = "rouge_L_fscore" EDIT_DISTANCE = "edit_distance" SET_PRECISION = "set_precision" SET_RECALL = "set_recall" IMAGE_SUMMARY = "image_summary" def padded_rmse(predictions, labels, weights_fn=common_layers.weights_all): predictions, labels = common_layers.pad_with_zeros(predictions, labels) targets = labels weights = weights_fn(targets) error = tf.sqrt(tf.pow(predictions - labels, 2)) return tf.reduce_sum(error * weights), tf.reduce_sum(weights) def padded_log_poisson(predictions, labels, weights_fn=common_layers.weights_all): # Expects predictions to already be transformed into log space predictions, labels = common_layers.pad_with_zeros(predictions, labels) targets = labels weights = weights_fn(targets) lp_loss = tf.nn.log_poisson_loss(targets, predictions, compute_full_loss=True) return tf.reduce_sum(lp_loss * weights), tf.reduce_sum(weights) def padded_variance_explained(predictions, labels, weights_fn=common_layers.weights_all): # aka R^2 predictions, labels = common_layers.pad_with_zeros(predictions, labels) targets = labels weights = weights_fn(targets) y_bar = tf.reduce_mean(weights * targets) tot_ss = tf.reduce_sum(weights * tf.pow(targets - y_bar, 2)) res_ss = tf.reduce_sum(weights * tf.pow(targets - predictions, 2)) r2 = 1. - res_ss / tot_ss return r2, tf.reduce_sum(weights) def padded_accuracy_topk(predictions, labels, k, weights_fn=common_layers.weights_nonzero): """Percentage of times that top-k predictions matches labels on non-0s.""" with tf.variable_scope("padded_accuracy_topk", values=[predictions, labels]): padded_predictions, padded_labels = common_layers.pad_with_zeros( predictions, labels) weights = weights_fn(padded_labels) effective_k = tf.minimum(k, common_layers.shape_list(padded_predictions)[-1]) _, outputs = tf.nn.top_k(padded_predictions, k=effective_k) outputs = tf.to_int32(outputs) padded_labels = tf.to_int32(padded_labels) padded_labels = tf.expand_dims(padded_labels, axis=-1) padded_labels += tf.zeros_like(outputs) # Pad to same shape. 
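    # Count, per position, how many of the k best logits match the broadcast
    # label; a non-zero count below means a top-k hit for that token.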
same = tf.to_float(tf.equal(outputs, padded_labels)) same_topk = tf.reduce_sum(same, axis=-1) return same_topk, weights def padded_accuracy_top5(predictions, labels, weights_fn=common_layers.weights_nonzero): return padded_accuracy_topk(predictions, labels, 5, weights_fn) def padded_sequence_accuracy(predictions, labels, weights_fn=common_layers.weights_nonzero): """Percentage of times that predictions matches labels everywhere (non-0).""" with tf.variable_scope( "padded_sequence_accuracy", values=[predictions, labels]): padded_predictions, padded_labels = common_layers.pad_with_zeros( predictions, labels) weights = weights_fn(padded_labels) outputs = tf.to_int32(tf.argmax(padded_predictions, axis=-1)) padded_labels = tf.to_int32(padded_labels) not_correct = tf.to_float(tf.not_equal(outputs, padded_labels)) * weights axis = list(range(1, len(outputs.get_shape()))) correct_seq = 1.0 - tf.minimum(1.0, tf.reduce_sum(not_correct, axis=axis)) return correct_seq, tf.constant(1.0) def sequence_edit_distance(predictions, labels, weights_fn=common_layers.weights_nonzero): """Average edit distance, ignoring padding 0s. The score returned is the edit distance divided by the total length of reference truth and the weight returned is the total length of the truth. Args: predictions: Tensor of shape [`batch_size`, `length`, 1, `num_classes`] and type tf.float32 representing the logits, 0-padded. labels: Tensor of shape [`batch_size`, `length`, 1, 1] and type tf.int32 representing the labels of same length as logits and 0-padded. weights_fn: ignored. The weights returned are the total length of the ground truth labels, excluding 0-paddings. Returns: (edit distance / reference length, reference length) Raises: ValueError: if weights_fn is not common_layers.weights_nonzero. """ if weights_fn is not common_layers.weights_nonzero: raise ValueError("Only weights_nonzero can be used for this metric.") with tf.variable_scope("edit_distance", values=[predictions, labels]): # Transform logits into sequence classes by taking max at every step. predictions = tf.to_int32( tf.squeeze(tf.argmax(predictions, axis=-1), axis=(2, 3))) nonzero_idx = tf.where(tf.not_equal(predictions, 0)) sparse_outputs = tf.SparseTensor(nonzero_idx, tf.gather_nd(predictions, nonzero_idx), tf.shape(predictions, out_type=tf.int64)) labels = tf.squeeze(labels, axis=(2, 3)) nonzero_idx = tf.where(tf.not_equal(labels, 0)) label_sparse_outputs = tf.SparseTensor(nonzero_idx, tf.gather_nd(labels, nonzero_idx), tf.shape(labels, out_type=tf.int64)) distance = tf.reduce_sum( tf.edit_distance(sparse_outputs, label_sparse_outputs, normalize=False)) reference_length = tf.to_float(common_layers.shape_list(nonzero_idx)[0]) return distance / reference_length, reference_length def padded_neg_log_perplexity(predictions, labels, weights_fn=common_layers.weights_nonzero): """Average log-perplexity exluding padding 0s. 
No smoothing.""" num, den = common_layers.padded_cross_entropy( predictions, labels, 0.0, weights_fn=weights_fn, reduce_sum=False) return (-num, den) def padded_accuracy(predictions, labels, weights_fn=common_layers.weights_nonzero): """Percentage of times that predictions matches labels on non-0s.""" with tf.variable_scope("padded_accuracy", values=[predictions, labels]): padded_predictions, padded_labels = common_layers.pad_with_zeros( predictions, labels) weights = weights_fn(padded_labels) outputs = tf.to_int32(tf.argmax(padded_predictions, axis=-1)) padded_labels = tf.to_int32(padded_labels) return tf.to_float(tf.equal(outputs, padded_labels)), weights def set_precision(predictions, labels, weights_fn=common_layers.weights_nonzero): """Precision of set predictions. Args: predictions : A Tensor of scores of shape [batch, nlabels]. labels: A Tensor of int32s giving true set elements, of shape [batch, seq_length]. weights_fn: A function to weight the elements. Returns: hits: A Tensor of shape [batch, nlabels]. weights: A Tensor of shape [batch, nlabels]. """ with tf.variable_scope("set_precision", values=[predictions, labels]): labels = tf.squeeze(labels, [2, 3]) weights = weights_fn(labels) labels = tf.one_hot(labels, predictions.shape[-1]) labels = tf.reduce_max(labels, axis=1) labels = tf.cast(labels, tf.bool) return tf.to_float(tf.equal(labels, predictions)), weights def set_recall(predictions, labels, weights_fn=common_layers.weights_nonzero): """Recall of set predictions. Args: predictions : A Tensor of scores of shape [batch, nlabels]. labels: A Tensor of int32s giving true set elements, of shape [batch, seq_length]. weights_fn: A function to weight the elements. Returns: hits: A Tensor of shape [batch, nlabels]. weights: A Tensor of shape [batch, nlabels]. """ with tf.variable_scope("set_recall", values=[predictions, labels]): labels = tf.squeeze(labels, [2, 3]) weights = weights_fn(labels) labels = tf.one_hot(labels, predictions.shape[-1]) labels = tf.reduce_max(labels, axis=1) labels = tf.cast(labels, tf.bool) return tf.to_float(tf.equal(labels, predictions)), weights def image_summary(predictions, hparams): """Reshapes predictions and passes it to tensorboard. Args: predictions : A Tensor of scores of shape [batch, nlabels]. hparams: model_hparams Returns: summary_proto: containing the summary image for predictions weights: A Tensor of zeros of shape [batch, nlabels]. """ predictions_reshaped = tf.reshape( predictions, [-1, hparams.height, hparams.width, hparams.colors]) return tf.summary.image( "image_summary", predictions_reshaped, max_outputs=1), tf.zeros_like(predictions) def create_evaluation_metrics(problems, model_hparams): """Creates the evaluation metrics for the model. Args: problems: List of Problem instances. model_hparams: a set of hparams. Returns: dict<metric name, metric function>. The metric functions have signature (Tensor predictions, features) -> (metric Tensor, update op), where features is a dict with keys {targets, problem_choice}. Raises: ValueError: if the metrics specified by a problem are not recognized (i.e. are not defined in the Metrics enum. """ def make_problem_specific_metric_fn(metric_fn, problem_idx, weights_fn): """Create a metric fn conditioned on problem_idx.""" def problem_metric_fn(predictions, features): """Metric fn.""" labels = features.get("targets", None) problem_choice = features.get("problem_choice", 0) # Send along the entire features dict if the metric fn has the kwarg # "features". 
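      # Inspecting the signature lets individual metrics opt in to receiving
      # the full features dict without changing the common
      # (predictions, labels, weights_fn) interface.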
kwargs = {} args, _, keywords, _ = inspect.getargspec(metric_fn) if ("features" in args) or keywords: kwargs["features"] = features def wrapped_metric_fn(): return metric_fn(predictions, labels, weights_fn=weights_fn, **kwargs) (scores, weights) = tf.cond( tf.equal(problem_idx, problem_choice), wrapped_metric_fn, lambda: (tf.constant(0.0), tf.constant(0.0))) # The tf.metrics.mean function assures correct aggregation. return tf.metrics.mean(scores, weights) return problem_metric_fn eval_metrics = dict() for problem_idx, problem_instance in enumerate(problems): problem_name = problem_instance.name metrics = problem_instance.eval_metrics() if not all([m in METRICS_FNS for m in metrics]): error_str = ("Unrecognized metric. Problem %s specified metrics " "%s. Recognized metrics are %s.") raise ValueError(error_str % (problem_name, metrics, list(METRICS_FNS.keys()))) def image_wrapped_metric_fn(predictions, labels, weights_fn=common_layers.weights_nonzero): _, _ = labels, weights_fn return metric_fn(predictions, model_hparams) tm = problem_instance.get_hparams().target_modality if isinstance(tm, tuple): tm = registry.create_modality(tm, model_hparams) weights_fn = tm.targets_weights_fn for metric in metrics: metric_fn = METRICS_FNS[metric] metric_name = "metrics-%s/%s" % (problem_name, metric) if metric == Metrics.IMAGE_SUMMARY: eval_metrics[metric_name] = image_wrapped_metric_fn else: problem_metric_fn = make_problem_specific_metric_fn( metric_fn, problem_idx, weights_fn) eval_metrics[metric_name] = problem_metric_fn return eval_metrics def create_eager_metrics_for_problem(problem, model_hparams=None): """See create_eager_metrics.""" metric_names = problem.eval_metrics() tm = problem.get_hparams().target_modality if isinstance(tm, tuple): assert model_hparams is not None tm = registry.create_modality(tm, model_hparams) return create_eager_metrics(metric_names, weights_fn=tm.targets_weights_fn) def create_eager_metrics(metric_names, weights_fn=common_layers.weights_all): """Create metrics accumulators and averager for Eager mode. Args: metric_names: list<str> from Metrics enum weights_fn: function that takes labels and returns a weights mask. Defaults to weights of all 1, i.e. common_layers.weights_all. Use common_layers.weights_nonzero if labels have 0-padding. Returns: (accum_fn(predictions, targets) => None, result_fn() => dict<str metric_name, float avg_val> """ metric_fns = dict( [(name, METRICS_FNS[name]) for name in metric_names]) tfe_metrics = dict() for name in metric_names: tfe_metrics[name] = tfe.metrics.Mean(name=name) def metric_accum(predictions, targets): for name, metric_fn in metric_fns.items(): val, weight = metric_fn(predictions, targets, weights_fn=weights_fn) tfe_metrics[name](np.squeeze(val), np.squeeze(weight)) def metric_means(): avgs = {} for name in metric_names: avgs[name] = tfe_metrics[name].result().numpy() return avgs return metric_accum, metric_means # Metrics are functions that take predictions and labels and return # a tensor of metrics and a tensor of weights. # If the function has "features" as an argument, it will receive the whole # features dict as well. # The results are passed to tf.metrics.mean to accumulate properly. 
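# For example, padded_accuracy returns per-token correctness plus a 0/1 mask
# of non-padding positions; tf.metrics.mean(scores, weights) then reduces that
# pair to a streaming weighted average.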
METRICS_FNS = { Metrics.ACC: padded_accuracy, Metrics.ACC_TOP5: padded_accuracy_top5, Metrics.ACC_PER_SEQ: padded_sequence_accuracy, Metrics.NEG_LOG_PERPLEXITY: padded_neg_log_perplexity, Metrics.APPROX_BLEU: bleu_hook.bleu_score, Metrics.RMSE: padded_rmse, Metrics.LOG_POISSON: padded_log_poisson, Metrics.R2: padded_variance_explained, Metrics.ROUGE_2_F: rouge.rouge_2_fscore, Metrics.ROUGE_L_F: rouge.rouge_l_fscore, Metrics.EDIT_DISTANCE: sequence_edit_distance, Metrics.SET_PRECISION: set_precision, Metrics.SET_RECALL: set_recall, Metrics.IMAGE_SUMMARY: image_summary, }
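
# A NumPy-only sketch (illustrative, not the TF implementation above) of the
# (scores, weights) contract the padded metrics follow, using integer label
# ids with 0 treated as padding, as weights_nonzero assumes:
import numpy as np


def padded_accuracy_reference(pred_ids, label_ids):
    weights = (label_ids != 0).astype(np.float32)  # weights_nonzero
    scores = (pred_ids == label_ids).astype(np.float32)
    return scores, weights


_preds = np.array([3, 7, 2, 0, 0])
_labels = np.array([3, 5, 2, 0, 0])
_scores, _weights = padded_accuracy_reference(_preds, _labels)
# tf.metrics.mean(scores, weights) computes this same weighted average: 2/3.
_accuracy = np.sum(_scores * _weights) / np.sum(_weights)
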
from __future__ import absolute_import, division, print_function import gc import contextlib import datetime import functools import platform import sys import textwrap import time import weakref import warnings from tornado.concurrent import return_future, Future from tornado.escape import url_escape from tornado.httpclient import AsyncHTTPClient from tornado.ioloop import IOLoop from tornado.log import app_log from tornado import stack_context from tornado.testing import AsyncHTTPTestCase, AsyncTestCase, ExpectLog, gen_test from tornado.test.util import unittest, skipOnTravis, skipBefore33, skipBefore35, skipNotCPython, exec_test, ignore_deprecation # noqa: E501 from tornado.web import Application, RequestHandler, asynchronous, HTTPError from tornado import gen try: from concurrent import futures except ImportError: futures = None try: import asyncio except ImportError: asyncio = None class GenEngineTest(AsyncTestCase): def setUp(self): self.warning_catcher = warnings.catch_warnings() self.warning_catcher.__enter__() warnings.simplefilter('ignore', DeprecationWarning) super(GenEngineTest, self).setUp() self.named_contexts = [] def tearDown(self): super(GenEngineTest, self).tearDown() self.warning_catcher.__exit__(None, None, None) def named_context(self, name): @contextlib.contextmanager def context(): self.named_contexts.append(name) try: yield finally: self.assertEqual(self.named_contexts.pop(), name) return context def run_gen(self, f): f() return self.wait() def delay_callback(self, iterations, callback, arg): """Runs callback(arg) after a number of IOLoop iterations.""" if iterations == 0: callback(arg) else: self.io_loop.add_callback(functools.partial( self.delay_callback, iterations - 1, callback, arg)) with ignore_deprecation(): @return_future def async_future(self, result, callback): self.io_loop.add_callback(callback, result) @gen.coroutine def async_exception(self, e): yield gen.moment raise e def test_no_yield(self): @gen.engine def f(): self.stop() self.run_gen(f) def test_inline_cb(self): @gen.engine def f(): (yield gen.Callback("k1"))() res = yield gen.Wait("k1") self.assertTrue(res is None) self.stop() self.run_gen(f) def test_ioloop_cb(self): @gen.engine def f(): self.io_loop.add_callback((yield gen.Callback("k1"))) yield gen.Wait("k1") self.stop() self.run_gen(f) def test_exception_phase1(self): @gen.engine def f(): 1 / 0 self.assertRaises(ZeroDivisionError, self.run_gen, f) def test_exception_phase2(self): @gen.engine def f(): self.io_loop.add_callback((yield gen.Callback("k1"))) yield gen.Wait("k1") 1 / 0 self.assertRaises(ZeroDivisionError, self.run_gen, f) def test_exception_in_task_phase1(self): def fail_task(callback): 1 / 0 @gen.engine def f(): try: yield gen.Task(fail_task) raise Exception("did not get expected exception") except ZeroDivisionError: self.stop() self.run_gen(f) def test_exception_in_task_phase2(self): # This is the case that requires the use of stack_context in gen.engine def fail_task(callback): self.io_loop.add_callback(lambda: 1 / 0) @gen.engine def f(): try: yield gen.Task(fail_task) raise Exception("did not get expected exception") except ZeroDivisionError: self.stop() self.run_gen(f) def test_with_arg(self): @gen.engine def f(): (yield gen.Callback("k1"))(42) res = yield gen.Wait("k1") self.assertEqual(42, res) self.stop() self.run_gen(f) def test_with_arg_tuple(self): @gen.engine def f(): (yield gen.Callback((1, 2)))((3, 4)) res = yield gen.Wait((1, 2)) self.assertEqual((3, 4), res) self.stop() self.run_gen(f) def test_key_reuse(self): 
@gen.engine def f(): yield gen.Callback("k1") yield gen.Callback("k1") self.stop() self.assertRaises(gen.KeyReuseError, self.run_gen, f) def test_key_reuse_tuple(self): @gen.engine def f(): yield gen.Callback((1, 2)) yield gen.Callback((1, 2)) self.stop() self.assertRaises(gen.KeyReuseError, self.run_gen, f) def test_key_mismatch(self): @gen.engine def f(): yield gen.Callback("k1") yield gen.Wait("k2") self.stop() self.assertRaises(gen.UnknownKeyError, self.run_gen, f) def test_key_mismatch_tuple(self): @gen.engine def f(): yield gen.Callback((1, 2)) yield gen.Wait((2, 3)) self.stop() self.assertRaises(gen.UnknownKeyError, self.run_gen, f) def test_leaked_callback(self): @gen.engine def f(): yield gen.Callback("k1") self.stop() self.assertRaises(gen.LeakedCallbackError, self.run_gen, f) def test_leaked_callback_tuple(self): @gen.engine def f(): yield gen.Callback((1, 2)) self.stop() self.assertRaises(gen.LeakedCallbackError, self.run_gen, f) def test_parallel_callback(self): @gen.engine def f(): for k in range(3): self.io_loop.add_callback((yield gen.Callback(k))) yield gen.Wait(1) self.io_loop.add_callback((yield gen.Callback(3))) yield gen.Wait(0) yield gen.Wait(3) yield gen.Wait(2) self.stop() self.run_gen(f) def test_bogus_yield(self): @gen.engine def f(): yield 42 self.assertRaises(gen.BadYieldError, self.run_gen, f) def test_bogus_yield_tuple(self): @gen.engine def f(): yield (1, 2) self.assertRaises(gen.BadYieldError, self.run_gen, f) def test_reuse(self): @gen.engine def f(): self.io_loop.add_callback((yield gen.Callback(0))) yield gen.Wait(0) self.stop() self.run_gen(f) self.run_gen(f) def test_task(self): @gen.engine def f(): yield gen.Task(self.io_loop.add_callback) self.stop() self.run_gen(f) def test_wait_all(self): @gen.engine def f(): (yield gen.Callback("k1"))("v1") (yield gen.Callback("k2"))("v2") results = yield gen.WaitAll(["k1", "k2"]) self.assertEqual(results, ["v1", "v2"]) self.stop() self.run_gen(f) def test_exception_in_yield(self): @gen.engine def f(): try: yield gen.Wait("k1") raise Exception("did not get expected exception") except gen.UnknownKeyError: pass self.stop() self.run_gen(f) def test_resume_after_exception_in_yield(self): @gen.engine def f(): try: yield gen.Wait("k1") raise Exception("did not get expected exception") except gen.UnknownKeyError: pass (yield gen.Callback("k2"))("v2") self.assertEqual((yield gen.Wait("k2")), "v2") self.stop() self.run_gen(f) def test_orphaned_callback(self): @gen.engine def f(): self.orphaned_callback = yield gen.Callback(1) try: self.run_gen(f) raise Exception("did not get expected exception") except gen.LeakedCallbackError: pass self.orphaned_callback() def test_none(self): @gen.engine def f(): yield None self.stop() self.run_gen(f) def test_multi(self): @gen.engine def f(): (yield gen.Callback("k1"))("v1") (yield gen.Callback("k2"))("v2") results = yield [gen.Wait("k1"), gen.Wait("k2")] self.assertEqual(results, ["v1", "v2"]) self.stop() self.run_gen(f) def test_multi_dict(self): @gen.engine def f(): (yield gen.Callback("k1"))("v1") (yield gen.Callback("k2"))("v2") results = yield dict(foo=gen.Wait("k1"), bar=gen.Wait("k2")) self.assertEqual(results, dict(foo="v1", bar="v2")) self.stop() self.run_gen(f) # The following tests explicitly run with both gen.Multi # and gen.multi_future (Task returns a Future, so it can be used # with either). 
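    # gen.Multi is the YieldPoint-based aggregator and gen.multi_future the
    # Future-based one; yielding a bare list, as in test_multi above, lets the
    # runner pick whichever is appropriate for the yielded objects.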
def test_multi_yieldpoint_delayed(self): @gen.engine def f(): # callbacks run at different times responses = yield gen.Multi([ gen.Task(self.delay_callback, 3, arg="v1"), gen.Task(self.delay_callback, 1, arg="v2"), ]) self.assertEqual(responses, ["v1", "v2"]) self.stop() self.run_gen(f) def test_multi_yieldpoint_dict_delayed(self): @gen.engine def f(): # callbacks run at different times responses = yield gen.Multi(dict( foo=gen.Task(self.delay_callback, 3, arg="v1"), bar=gen.Task(self.delay_callback, 1, arg="v2"), )) self.assertEqual(responses, dict(foo="v1", bar="v2")) self.stop() self.run_gen(f) def test_multi_future_delayed(self): @gen.engine def f(): # callbacks run at different times responses = yield gen.multi_future([ gen.Task(self.delay_callback, 3, arg="v1"), gen.Task(self.delay_callback, 1, arg="v2"), ]) self.assertEqual(responses, ["v1", "v2"]) self.stop() self.run_gen(f) def test_multi_future_dict_delayed(self): @gen.engine def f(): # callbacks run at different times responses = yield gen.multi_future(dict( foo=gen.Task(self.delay_callback, 3, arg="v1"), bar=gen.Task(self.delay_callback, 1, arg="v2"), )) self.assertEqual(responses, dict(foo="v1", bar="v2")) self.stop() self.run_gen(f) @skipOnTravis @gen_test def test_multi_performance(self): # Yielding a list used to have quadratic performance; make # sure a large list stays reasonable. On my laptop a list of # 2000 used to take 1.8s, now it takes 0.12. start = time.time() yield [gen.Task(self.io_loop.add_callback) for i in range(2000)] end = time.time() self.assertLess(end - start, 1.0) @gen_test def test_multi_empty(self): # Empty lists or dicts should return the same type. x = yield [] self.assertTrue(isinstance(x, list)) y = yield {} self.assertTrue(isinstance(y, dict)) @gen_test def test_multi_mixed_types(self): # A YieldPoint (Wait) and Future (Task) can be combined # (and use the YieldPoint codepath) (yield gen.Callback("k1"))("v1") responses = yield [gen.Wait("k1"), gen.Task(self.delay_callback, 3, arg="v2")] self.assertEqual(responses, ["v1", "v2"]) @gen_test def test_future(self): result = yield self.async_future(1) self.assertEqual(result, 1) @gen_test def test_multi_future(self): results = yield [self.async_future(1), self.async_future(2)] self.assertEqual(results, [1, 2]) @gen_test def test_multi_future_duplicate(self): f = self.async_future(2) results = yield [self.async_future(1), f, self.async_future(3), f] self.assertEqual(results, [1, 2, 3, 2]) @gen_test def test_multi_dict_future(self): results = yield dict(foo=self.async_future(1), bar=self.async_future(2)) self.assertEqual(results, dict(foo=1, bar=2)) @gen_test def test_multi_exceptions(self): with ExpectLog(app_log, "Multiple exceptions in yield list"): with self.assertRaises(RuntimeError) as cm: yield gen.Multi([self.async_exception(RuntimeError("error 1")), self.async_exception(RuntimeError("error 2"))]) self.assertEqual(str(cm.exception), "error 1") # With only one exception, no error is logged. with self.assertRaises(RuntimeError): yield gen.Multi([self.async_exception(RuntimeError("error 1")), self.async_future(2)]) # Exception logging may be explicitly quieted. 
with self.assertRaises(RuntimeError): yield gen.Multi([self.async_exception(RuntimeError("error 1")), self.async_exception(RuntimeError("error 2"))], quiet_exceptions=RuntimeError) @gen_test def test_multi_future_exceptions(self): with ExpectLog(app_log, "Multiple exceptions in yield list"): with self.assertRaises(RuntimeError) as cm: yield [self.async_exception(RuntimeError("error 1")), self.async_exception(RuntimeError("error 2"))] self.assertEqual(str(cm.exception), "error 1") # With only one exception, no error is logged. with self.assertRaises(RuntimeError): yield [self.async_exception(RuntimeError("error 1")), self.async_future(2)] # Exception logging may be explicitly quieted. with self.assertRaises(RuntimeError): yield gen.multi_future( [self.async_exception(RuntimeError("error 1")), self.async_exception(RuntimeError("error 2"))], quiet_exceptions=RuntimeError) def test_arguments(self): @gen.engine def f(): (yield gen.Callback("noargs"))() self.assertEqual((yield gen.Wait("noargs")), None) (yield gen.Callback("1arg"))(42) self.assertEqual((yield gen.Wait("1arg")), 42) (yield gen.Callback("kwargs"))(value=42) result = yield gen.Wait("kwargs") self.assertTrue(isinstance(result, gen.Arguments)) self.assertEqual(((), dict(value=42)), result) self.assertEqual(dict(value=42), result.kwargs) (yield gen.Callback("2args"))(42, 43) result = yield gen.Wait("2args") self.assertTrue(isinstance(result, gen.Arguments)) self.assertEqual(((42, 43), {}), result) self.assertEqual((42, 43), result.args) def task_func(callback): callback(None, error="foo") result = yield gen.Task(task_func) self.assertTrue(isinstance(result, gen.Arguments)) self.assertEqual(((None,), dict(error="foo")), result) self.stop() self.run_gen(f) def test_stack_context_leak(self): # regression test: repeated invocations of a gen-based # function should not result in accumulated stack_contexts def _stack_depth(): head = stack_context._state.contexts[1] length = 0 while head is not None: length += 1 head = head.old_contexts[1] return length @gen.engine def inner(callback): yield gen.Task(self.io_loop.add_callback) callback() @gen.engine def outer(): for i in range(10): yield gen.Task(inner) stack_increase = _stack_depth() - initial_stack_depth self.assertTrue(stack_increase <= 2) self.stop() initial_stack_depth = _stack_depth() self.run_gen(outer) def test_stack_context_leak_exception(self): # same as previous, but with a function that exits with an exception @gen.engine def inner(callback): yield gen.Task(self.io_loop.add_callback) 1 / 0 @gen.engine def outer(): for i in range(10): try: yield gen.Task(inner) except ZeroDivisionError: pass stack_increase = len(stack_context._state.contexts) - initial_stack_depth self.assertTrue(stack_increase <= 2) self.stop() initial_stack_depth = len(stack_context._state.contexts) self.run_gen(outer) def function_with_stack_context(self, callback): # Technically this function should stack_context.wrap its callback # upon entry. However, it is very common for this step to be # omitted. def step2(): self.assertEqual(self.named_contexts, ['a']) self.io_loop.add_callback(callback) with stack_context.StackContext(self.named_context('a')): self.io_loop.add_callback(step2) @gen_test def test_wait_transfer_stack_context(self): # Wait should not pick up contexts from where callback was invoked, # even if that function improperly fails to wrap its callback. 
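        # function_with_stack_context deliberately omits stack_context.wrap,
        # so named_contexts staying empty below shows that Wait restored the
        # caller's context rather than inheriting 'a'.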
cb = yield gen.Callback('k1') self.function_with_stack_context(cb) self.assertEqual(self.named_contexts, []) yield gen.Wait('k1') self.assertEqual(self.named_contexts, []) @gen_test def test_task_transfer_stack_context(self): yield gen.Task(self.function_with_stack_context) self.assertEqual(self.named_contexts, []) def test_raise_after_stop(self): # This pattern will be used in the following tests so make sure # the exception propagates as expected. @gen.engine def f(): self.stop() 1 / 0 with self.assertRaises(ZeroDivisionError): self.run_gen(f) def test_sync_raise_return(self): # gen.Return is allowed in @gen.engine, but it may not be used # to return a value. @gen.engine def f(): self.stop(42) raise gen.Return() result = self.run_gen(f) self.assertEqual(result, 42) def test_async_raise_return(self): @gen.engine def f(): yield gen.Task(self.io_loop.add_callback) self.stop(42) raise gen.Return() result = self.run_gen(f) self.assertEqual(result, 42) def test_sync_raise_return_value(self): @gen.engine def f(): raise gen.Return(42) with self.assertRaises(gen.ReturnValueIgnoredError): self.run_gen(f) def test_sync_raise_return_value_tuple(self): @gen.engine def f(): raise gen.Return((1, 2)) with self.assertRaises(gen.ReturnValueIgnoredError): self.run_gen(f) def test_async_raise_return_value(self): @gen.engine def f(): yield gen.Task(self.io_loop.add_callback) raise gen.Return(42) with self.assertRaises(gen.ReturnValueIgnoredError): self.run_gen(f) def test_async_raise_return_value_tuple(self): @gen.engine def f(): yield gen.Task(self.io_loop.add_callback) raise gen.Return((1, 2)) with self.assertRaises(gen.ReturnValueIgnoredError): self.run_gen(f) def test_return_value(self): # It is an error to apply @gen.engine to a function that returns # a value. @gen.engine def f(): return 42 with self.assertRaises(gen.ReturnValueIgnoredError): self.run_gen(f) def test_return_value_tuple(self): # It is an error to apply @gen.engine to a function that returns # a value. @gen.engine def f(): return (1, 2) with self.assertRaises(gen.ReturnValueIgnoredError): self.run_gen(f) @skipNotCPython def test_task_refcounting(self): # On CPython, tasks and their arguments should be released immediately # without waiting for garbage collection. @gen.engine def f(): class Foo(object): pass arg = Foo() self.arg_ref = weakref.ref(arg) task = gen.Task(self.io_loop.add_callback, arg=arg) self.task_ref = weakref.ref(task) yield task self.stop() self.run_gen(f) self.assertIs(self.arg_ref(), None) self.assertIs(self.task_ref(), None) # GenBasicTest duplicates the non-deprecated portions of GenEngineTest # with gen.coroutine to ensure we don't lose coverage when gen.engine # goes away. 
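
# A minimal side-by-side sketch of the migration that comment describes:
# gen.engine delivers results through a callback argument, while gen.coroutine
# delivers them via the returned Future (here with raise gen.Return, since
# this file still supports Pythons without `return value` in generators).


@gen.engine
def _fetch_engine_example(callback):
    yield gen.moment
    callback(42)          # result handed to the caller-supplied callback


@gen.coroutine
def _fetch_coroutine_example():
    yield gen.moment
    raise gen.Return(42)  # result becomes the value of the returned Future
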
class GenBasicTest(AsyncTestCase): @gen.coroutine def delay(self, iterations, arg): """Returns arg after a number of IOLoop iterations.""" for i in range(iterations): yield gen.moment raise gen.Return(arg) with ignore_deprecation(): @return_future def async_future(self, result, callback): self.io_loop.add_callback(callback, result) @gen.coroutine def async_exception(self, e): yield gen.moment raise e @gen.coroutine def add_one_async(self, x): yield gen.moment raise gen.Return(x + 1) def test_no_yield(self): @gen.coroutine def f(): pass self.io_loop.run_sync(f) def test_exception_phase1(self): @gen.coroutine def f(): 1 / 0 self.assertRaises(ZeroDivisionError, self.io_loop.run_sync, f) def test_exception_phase2(self): @gen.coroutine def f(): yield gen.moment 1 / 0 self.assertRaises(ZeroDivisionError, self.io_loop.run_sync, f) def test_bogus_yield(self): @gen.coroutine def f(): yield 42 self.assertRaises(gen.BadYieldError, self.io_loop.run_sync, f) def test_bogus_yield_tuple(self): @gen.coroutine def f(): yield (1, 2) self.assertRaises(gen.BadYieldError, self.io_loop.run_sync, f) def test_reuse(self): @gen.coroutine def f(): yield gen.moment self.io_loop.run_sync(f) self.io_loop.run_sync(f) def test_none(self): @gen.coroutine def f(): yield None self.io_loop.run_sync(f) def test_multi(self): @gen.coroutine def f(): results = yield [self.add_one_async(1), self.add_one_async(2)] self.assertEqual(results, [2, 3]) self.io_loop.run_sync(f) def test_multi_dict(self): @gen.coroutine def f(): results = yield dict(foo=self.add_one_async(1), bar=self.add_one_async(2)) self.assertEqual(results, dict(foo=2, bar=3)) self.io_loop.run_sync(f) def test_multi_delayed(self): @gen.coroutine def f(): # callbacks run at different times responses = yield gen.multi_future([ self.delay(3, "v1"), self.delay(1, "v2"), ]) self.assertEqual(responses, ["v1", "v2"]) self.io_loop.run_sync(f) def test_multi_dict_delayed(self): @gen.coroutine def f(): # callbacks run at different times responses = yield gen.multi_future(dict( foo=self.delay(3, "v1"), bar=self.delay(1, "v2"), )) self.assertEqual(responses, dict(foo="v1", bar="v2")) self.io_loop.run_sync(f) @skipOnTravis @gen_test def test_multi_performance(self): # Yielding a list used to have quadratic performance; make # sure a large list stays reasonable. On my laptop a list of # 2000 used to take 1.8s, now it takes 0.12. start = time.time() yield [gen.moment for i in range(2000)] end = time.time() self.assertLess(end - start, 1.0) @gen_test def test_multi_empty(self): # Empty lists or dicts should return the same type. 
x = yield [] self.assertTrue(isinstance(x, list)) y = yield {} self.assertTrue(isinstance(y, dict)) @gen_test def test_future(self): result = yield self.async_future(1) self.assertEqual(result, 1) @gen_test def test_multi_future(self): results = yield [self.async_future(1), self.async_future(2)] self.assertEqual(results, [1, 2]) @gen_test def test_multi_future_duplicate(self): f = self.async_future(2) results = yield [self.async_future(1), f, self.async_future(3), f] self.assertEqual(results, [1, 2, 3, 2]) @gen_test def test_multi_dict_future(self): results = yield dict(foo=self.async_future(1), bar=self.async_future(2)) self.assertEqual(results, dict(foo=1, bar=2)) @gen_test def test_multi_exceptions(self): with ExpectLog(app_log, "Multiple exceptions in yield list"): with self.assertRaises(RuntimeError) as cm: yield gen.Multi([self.async_exception(RuntimeError("error 1")), self.async_exception(RuntimeError("error 2"))]) self.assertEqual(str(cm.exception), "error 1") # With only one exception, no error is logged. with self.assertRaises(RuntimeError): yield gen.Multi([self.async_exception(RuntimeError("error 1")), self.async_future(2)]) # Exception logging may be explicitly quieted. with self.assertRaises(RuntimeError): yield gen.Multi([self.async_exception(RuntimeError("error 1")), self.async_exception(RuntimeError("error 2"))], quiet_exceptions=RuntimeError) @gen_test def test_multi_future_exceptions(self): with ExpectLog(app_log, "Multiple exceptions in yield list"): with self.assertRaises(RuntimeError) as cm: yield [self.async_exception(RuntimeError("error 1")), self.async_exception(RuntimeError("error 2"))] self.assertEqual(str(cm.exception), "error 1") # With only one exception, no error is logged. with self.assertRaises(RuntimeError): yield [self.async_exception(RuntimeError("error 1")), self.async_future(2)] # Exception logging may be explicitly quieted. with self.assertRaises(RuntimeError): yield gen.multi_future( [self.async_exception(RuntimeError("error 1")), self.async_exception(RuntimeError("error 2"))], quiet_exceptions=RuntimeError) def test_sync_raise_return(self): @gen.coroutine def f(): raise gen.Return() self.io_loop.run_sync(f) def test_async_raise_return(self): @gen.coroutine def f(): yield gen.moment raise gen.Return() self.io_loop.run_sync(f) def test_sync_raise_return_value(self): @gen.coroutine def f(): raise gen.Return(42) self.assertEqual(42, self.io_loop.run_sync(f)) def test_sync_raise_return_value_tuple(self): @gen.coroutine def f(): raise gen.Return((1, 2)) self.assertEqual((1, 2), self.io_loop.run_sync(f)) def test_async_raise_return_value(self): @gen.coroutine def f(): yield gen.moment raise gen.Return(42) self.assertEqual(42, self.io_loop.run_sync(f)) def test_async_raise_return_value_tuple(self): @gen.coroutine def f(): yield gen.moment raise gen.Return((1, 2)) self.assertEqual((1, 2), self.io_loop.run_sync(f)) class GenCoroutineTest(AsyncTestCase): def setUp(self): # Stray StopIteration exceptions can lead to tests exiting prematurely, # so we need explicit checks here to make sure the tests run all # the way through. 
self.finished = False super(GenCoroutineTest, self).setUp() def tearDown(self): super(GenCoroutineTest, self).tearDown() assert self.finished def test_attributes(self): self.finished = True def f(): yield gen.moment coro = gen.coroutine(f) self.assertEqual(coro.__name__, f.__name__) self.assertEqual(coro.__module__, f.__module__) self.assertIs(coro.__wrapped__, f) def test_is_coroutine_function(self): self.finished = True def f(): yield gen.moment coro = gen.coroutine(f) self.assertFalse(gen.is_coroutine_function(f)) self.assertTrue(gen.is_coroutine_function(coro)) self.assertFalse(gen.is_coroutine_function(coro())) @gen_test def test_sync_gen_return(self): @gen.coroutine def f(): raise gen.Return(42) result = yield f() self.assertEqual(result, 42) self.finished = True @gen_test def test_async_gen_return(self): @gen.coroutine def f(): yield gen.moment raise gen.Return(42) result = yield f() self.assertEqual(result, 42) self.finished = True @gen_test def test_sync_return(self): @gen.coroutine def f(): return 42 result = yield f() self.assertEqual(result, 42) self.finished = True @skipBefore33 @gen_test def test_async_return(self): namespace = exec_test(globals(), locals(), """ @gen.coroutine def f(): yield gen.moment return 42 """) result = yield namespace['f']() self.assertEqual(result, 42) self.finished = True @skipBefore33 @gen_test def test_async_early_return(self): # A yield statement exists but is not executed, which means # this function "returns" via an exception. This exception # doesn't happen before the exception handling is set up. namespace = exec_test(globals(), locals(), """ @gen.coroutine def f(): if True: return 42 yield gen.Task(self.io_loop.add_callback) """) result = yield namespace['f']() self.assertEqual(result, 42) self.finished = True @skipBefore35 @gen_test def test_async_await(self): @gen.coroutine def f1(): yield gen.moment raise gen.Return(42) # This test verifies that an async function can await a # yield-based gen.coroutine, and that a gen.coroutine # (the test method itself) can yield an async function. 
namespace = exec_test(globals(), locals(), """ async def f2(): result = await f1() return result """) result = yield namespace['f2']() self.assertEqual(result, 42) self.finished = True @skipBefore35 @gen_test def test_asyncio_sleep_zero(self): # asyncio.sleep(0) turns into a special case (equivalent to # `yield None`) namespace = exec_test(globals(), locals(), """ async def f(): import asyncio await asyncio.sleep(0) return 42 """) result = yield namespace['f']() self.assertEqual(result, 42) self.finished = True @skipBefore35 @gen_test def test_async_await_mixed_multi_native_future(self): @gen.coroutine def f1(): yield gen.moment namespace = exec_test(globals(), locals(), """ async def f2(): await f1() return 42 """) @gen.coroutine def f3(): yield gen.moment raise gen.Return(43) results = yield [namespace['f2'](), f3()] self.assertEqual(results, [42, 43]) self.finished = True @skipBefore35 @gen_test def test_async_await_mixed_multi_native_yieldpoint(self): namespace = exec_test(globals(), locals(), """ async def f1(): await gen.Task(self.io_loop.add_callback) return 42 """) @gen.coroutine def f2(): yield gen.Task(self.io_loop.add_callback) raise gen.Return(43) with ignore_deprecation(): f2(callback=(yield gen.Callback('cb'))) results = yield [namespace['f1'](), gen.Wait('cb')] self.assertEqual(results, [42, 43]) self.finished = True @skipBefore35 @gen_test def test_async_with_timeout(self): namespace = exec_test(globals(), locals(), """ async def f1(): return 42 """) result = yield gen.with_timeout(datetime.timedelta(hours=1), namespace['f1']()) self.assertEqual(result, 42) self.finished = True @gen_test def test_sync_return_no_value(self): @gen.coroutine def f(): return result = yield f() self.assertEqual(result, None) self.finished = True @gen_test def test_async_return_no_value(self): # Without a return value we don't need python 3.3. @gen.coroutine def f(): yield gen.moment return result = yield f() self.assertEqual(result, None) self.finished = True @gen_test def test_sync_raise(self): @gen.coroutine def f(): 1 / 0 # The exception is raised when the future is yielded # (or equivalently when its result method is called), # not when the function itself is called). future = f() with self.assertRaises(ZeroDivisionError): yield future self.finished = True @gen_test def test_async_raise(self): @gen.coroutine def f(): yield gen.moment 1 / 0 future = f() with self.assertRaises(ZeroDivisionError): yield future self.finished = True @gen_test def test_pass_callback(self): with ignore_deprecation(): @gen.coroutine def f(): raise gen.Return(42) result = yield gen.Task(f) self.assertEqual(result, 42) self.finished = True @gen_test def test_replace_yieldpoint_exception(self): # Test exception handling: a coroutine can catch one exception # raised by a yield point and raise a different one. @gen.coroutine def f1(): 1 / 0 @gen.coroutine def f2(): try: yield f1() except ZeroDivisionError: raise KeyError() future = f2() with self.assertRaises(KeyError): yield future self.finished = True @gen_test def test_swallow_yieldpoint_exception(self): # Test exception handling: a coroutine can catch an exception # raised by a yield point and not raise a different one. 
@gen.coroutine def f1(): 1 / 0 @gen.coroutine def f2(): try: yield f1() except ZeroDivisionError: raise gen.Return(42) result = yield f2() self.assertEqual(result, 42) self.finished = True @gen_test def test_replace_context_exception(self): with ignore_deprecation(): # Test exception handling: exceptions thrown into the stack context # can be caught and replaced. # Note that this test and the following are for behavior that is # not really supported any more: coroutines no longer create a # stack context automatically; but one is created after the first # YieldPoint (i.e. not a Future). @gen.coroutine def f2(): (yield gen.Callback(1))() yield gen.Wait(1) self.io_loop.add_callback(lambda: 1 / 0) try: yield gen.Task(self.io_loop.add_timeout, self.io_loop.time() + 10) except ZeroDivisionError: raise KeyError() future = f2() with self.assertRaises(KeyError): yield future self.finished = True @gen_test def test_swallow_context_exception(self): with ignore_deprecation(): # Test exception handling: exceptions thrown into the stack context # can be caught and ignored. @gen.coroutine def f2(): (yield gen.Callback(1))() yield gen.Wait(1) self.io_loop.add_callback(lambda: 1 / 0) try: yield gen.Task(self.io_loop.add_timeout, self.io_loop.time() + 10) except ZeroDivisionError: raise gen.Return(42) result = yield f2() self.assertEqual(result, 42) self.finished = True @gen_test def test_moment(self): calls = [] @gen.coroutine def f(name, yieldable): for i in range(5): calls.append(name) yield yieldable # First, confirm the behavior without moment: each coroutine # monopolizes the event loop until it finishes. immediate = Future() immediate.set_result(None) yield [f('a', immediate), f('b', immediate)] self.assertEqual(''.join(calls), 'aaaaabbbbb') # With moment, they take turns. calls = [] yield [f('a', gen.moment), f('b', gen.moment)] self.assertEqual(''.join(calls), 'ababababab') self.finished = True calls = [] yield [f('a', gen.moment), f('b', immediate)] self.assertEqual(''.join(calls), 'abbbbbaaaa') @gen_test def test_sleep(self): yield gen.sleep(0.01) self.finished = True @skipBefore33 @gen_test def test_py3_leak_exception_context(self): class LeakedException(Exception): pass @gen.coroutine def inner(iteration): raise LeakedException(iteration) try: yield inner(1) except LeakedException as e: self.assertEqual(str(e), "1") self.assertIsNone(e.__context__) try: yield inner(2) except LeakedException as e: self.assertEqual(str(e), "2") self.assertIsNone(e.__context__) self.finished = True @skipNotCPython @unittest.skipIf((3,) < sys.version_info < (3, 6), "asyncio.Future has reference cycles") def test_coroutine_refcounting(self): # On CPython, tasks and their arguments should be released immediately # without waiting for garbage collection. 
@gen.coroutine def inner(): class Foo(object): pass local_var = Foo() self.local_ref = weakref.ref(local_var) yield gen.coroutine(lambda: None)() raise ValueError('Some error') @gen.coroutine def inner2(): try: yield inner() except ValueError: pass self.io_loop.run_sync(inner2, timeout=3) self.assertIs(self.local_ref(), None) self.finished = True @unittest.skipIf(sys.version_info < (3,), "test only relevant with asyncio Futures") def test_asyncio_future_debug_info(self): self.finished = True # Enable debug mode asyncio_loop = asyncio.get_event_loop() self.addCleanup(asyncio_loop.set_debug, asyncio_loop.get_debug()) asyncio_loop.set_debug(True) def f(): yield gen.moment coro = gen.coroutine(f)() self.assertIsInstance(coro, asyncio.Future) # We expect the coroutine repr() to show the place where # it was instantiated expected = ("created at %s:%d" % (__file__, f.__code__.co_firstlineno + 3)) actual = repr(coro) self.assertIn(expected, actual) @unittest.skipIf(asyncio is None, "asyncio module not present") @gen_test def test_asyncio_gather(self): # This demonstrates that tornado coroutines can be understood # by asyncio (This failed prior to Tornado 5.0). @gen.coroutine def f(): yield gen.moment raise gen.Return(1) ret = yield asyncio.gather(f(), f()) self.assertEqual(ret, [1, 1]) self.finished = True class GenSequenceHandler(RequestHandler): with ignore_deprecation(): @asynchronous @gen.engine def get(self): # The outer ignore_deprecation applies at definition time. # We need another for serving time. with ignore_deprecation(): self.io_loop = self.request.connection.stream.io_loop self.io_loop.add_callback((yield gen.Callback("k1"))) yield gen.Wait("k1") self.write("1") self.io_loop.add_callback((yield gen.Callback("k2"))) yield gen.Wait("k2") self.write("2") # reuse an old key self.io_loop.add_callback((yield gen.Callback("k1"))) yield gen.Wait("k1") self.finish("3") class GenCoroutineSequenceHandler(RequestHandler): @gen.coroutine def get(self): yield gen.moment self.write("1") yield gen.moment self.write("2") yield gen.moment self.finish("3") class GenCoroutineUnfinishedSequenceHandler(RequestHandler): @gen.coroutine def get(self): yield gen.moment self.write("1") yield gen.moment self.write("2") yield gen.moment # just write, don't finish self.write("3") class GenTaskHandler(RequestHandler): @gen.coroutine def get(self): client = AsyncHTTPClient() with ignore_deprecation(): response = yield gen.Task(client.fetch, self.get_argument('url')) response.rethrow() self.finish(b"got response: " + response.body) class GenExceptionHandler(RequestHandler): with ignore_deprecation(): @asynchronous @gen.engine def get(self): # This test depends on the order of the two decorators. io_loop = self.request.connection.stream.io_loop yield gen.Task(io_loop.add_callback) raise Exception("oops") class GenCoroutineExceptionHandler(RequestHandler): @gen.coroutine def get(self): # This test depends on the order of the two decorators. io_loop = self.request.connection.stream.io_loop yield gen.Task(io_loop.add_callback) raise Exception("oops") class GenYieldExceptionHandler(RequestHandler): @gen.coroutine def get(self): io_loop = self.request.connection.stream.io_loop # Test the interaction of the two stack_contexts. with ignore_deprecation(): def fail_task(callback): io_loop.add_callback(lambda: 1 / 0) try: yield gen.Task(fail_task) raise Exception("did not get expected exception") except ZeroDivisionError: self.finish('ok') # "Undecorated" here refers to the absence of @asynchronous. 
class UndecoratedCoroutinesHandler(RequestHandler): @gen.coroutine def prepare(self): self.chunks = [] yield gen.moment self.chunks.append('1') @gen.coroutine def get(self): self.chunks.append('2') yield gen.moment self.chunks.append('3') yield gen.moment self.write(''.join(self.chunks)) class AsyncPrepareErrorHandler(RequestHandler): @gen.coroutine def prepare(self): yield gen.moment raise HTTPError(403) def get(self): self.finish('ok') class NativeCoroutineHandler(RequestHandler): if sys.version_info > (3, 5): exec(textwrap.dedent(""" async def get(self): import asyncio await asyncio.sleep(0) self.write("ok") """)) class GenWebTest(AsyncHTTPTestCase): def get_app(self): return Application([ ('/sequence', GenSequenceHandler), ('/coroutine_sequence', GenCoroutineSequenceHandler), ('/coroutine_unfinished_sequence', GenCoroutineUnfinishedSequenceHandler), ('/task', GenTaskHandler), ('/exception', GenExceptionHandler), ('/coroutine_exception', GenCoroutineExceptionHandler), ('/yield_exception', GenYieldExceptionHandler), ('/undecorated_coroutine', UndecoratedCoroutinesHandler), ('/async_prepare_error', AsyncPrepareErrorHandler), ('/native_coroutine', NativeCoroutineHandler), ]) def test_sequence_handler(self): response = self.fetch('/sequence') self.assertEqual(response.body, b"123") def test_coroutine_sequence_handler(self): response = self.fetch('/coroutine_sequence') self.assertEqual(response.body, b"123") def test_coroutine_unfinished_sequence_handler(self): response = self.fetch('/coroutine_unfinished_sequence') self.assertEqual(response.body, b"123") def test_task_handler(self): response = self.fetch('/task?url=%s' % url_escape(self.get_url('/sequence'))) self.assertEqual(response.body, b"got response: 123") def test_exception_handler(self): # Make sure we get an error and not a timeout with ExpectLog(app_log, "Uncaught exception GET /exception"): response = self.fetch('/exception') self.assertEqual(500, response.code) def test_coroutine_exception_handler(self): # Make sure we get an error and not a timeout with ExpectLog(app_log, "Uncaught exception GET /coroutine_exception"): response = self.fetch('/coroutine_exception') self.assertEqual(500, response.code) def test_yield_exception_handler(self): response = self.fetch('/yield_exception') self.assertEqual(response.body, b'ok') def test_undecorated_coroutines(self): response = self.fetch('/undecorated_coroutine') self.assertEqual(response.body, b'123') def test_async_prepare_error_handler(self): response = self.fetch('/async_prepare_error') self.assertEqual(response.code, 403) @skipBefore35 def test_native_coroutine_handler(self): response = self.fetch('/native_coroutine') self.assertEqual(response.code, 200) self.assertEqual(response.body, b'ok') class WithTimeoutTest(AsyncTestCase): @gen_test def test_timeout(self): with self.assertRaises(gen.TimeoutError): yield gen.with_timeout(datetime.timedelta(seconds=0.1), Future()) @gen_test def test_completes_before_timeout(self): future = Future() self.io_loop.add_timeout(datetime.timedelta(seconds=0.1), lambda: future.set_result('asdf')) result = yield gen.with_timeout(datetime.timedelta(seconds=3600), future) self.assertEqual(result, 'asdf') @gen_test def test_fails_before_timeout(self): future = Future() self.io_loop.add_timeout( datetime.timedelta(seconds=0.1), lambda: future.set_exception(ZeroDivisionError())) with self.assertRaises(ZeroDivisionError): yield gen.with_timeout(datetime.timedelta(seconds=3600), future) @gen_test def test_already_resolved(self): future = Future() 
future.set_result('asdf') result = yield gen.with_timeout(datetime.timedelta(seconds=3600), future) self.assertEqual(result, 'asdf') @unittest.skipIf(futures is None, 'futures module not present') @gen_test def test_timeout_concurrent_future(self): # A concurrent future that does not resolve before the timeout. with futures.ThreadPoolExecutor(1) as executor: with self.assertRaises(gen.TimeoutError): yield gen.with_timeout(self.io_loop.time(), executor.submit(time.sleep, 0.1)) @unittest.skipIf(futures is None, 'futures module not present') @gen_test def test_completed_concurrent_future(self): # A concurrent future that is resolved before we even submit it # to with_timeout. with futures.ThreadPoolExecutor(1) as executor: f = executor.submit(lambda: None) f.result() # wait for completion yield gen.with_timeout(datetime.timedelta(seconds=3600), f) @unittest.skipIf(futures is None, 'futures module not present') @gen_test def test_normal_concurrent_future(self): # A conccurrent future that resolves while waiting for the timeout. with futures.ThreadPoolExecutor(1) as executor: yield gen.with_timeout(datetime.timedelta(seconds=3600), executor.submit(lambda: time.sleep(0.01))) class WaitIteratorTest(AsyncTestCase): @gen_test def test_empty_iterator(self): g = gen.WaitIterator() self.assertTrue(g.done(), 'empty generator iterated') with self.assertRaises(ValueError): g = gen.WaitIterator(False, bar=False) self.assertEqual(g.current_index, None, "bad nil current index") self.assertEqual(g.current_future, None, "bad nil current future") @gen_test def test_already_done(self): f1 = Future() f2 = Future() f3 = Future() f1.set_result(24) f2.set_result(42) f3.set_result(84) g = gen.WaitIterator(f1, f2, f3) i = 0 while not g.done(): r = yield g.next() # Order is not guaranteed, but the current implementation # preserves ordering of already-done Futures. 
if i == 0: self.assertEqual(g.current_index, 0) self.assertIs(g.current_future, f1) self.assertEqual(r, 24) elif i == 1: self.assertEqual(g.current_index, 1) self.assertIs(g.current_future, f2) self.assertEqual(r, 42) elif i == 2: self.assertEqual(g.current_index, 2) self.assertIs(g.current_future, f3) self.assertEqual(r, 84) i += 1 self.assertEqual(g.current_index, None, "bad nil current index") self.assertEqual(g.current_future, None, "bad nil current future") dg = gen.WaitIterator(f1=f1, f2=f2) while not dg.done(): dr = yield dg.next() if dg.current_index == "f1": self.assertTrue(dg.current_future == f1 and dr == 24, "WaitIterator dict status incorrect") elif dg.current_index == "f2": self.assertTrue(dg.current_future == f2 and dr == 42, "WaitIterator dict status incorrect") else: self.fail("got bad WaitIterator index {}".format( dg.current_index)) i += 1 self.assertEqual(dg.current_index, None, "bad nil current index") self.assertEqual(dg.current_future, None, "bad nil current future") def finish_coroutines(self, iteration, futures): if iteration == 3: futures[2].set_result(24) elif iteration == 5: futures[0].set_exception(ZeroDivisionError()) elif iteration == 8: futures[1].set_result(42) futures[3].set_result(84) if iteration < 8: self.io_loop.add_callback(self.finish_coroutines, iteration + 1, futures) @gen_test def test_iterator(self): futures = [Future(), Future(), Future(), Future()] self.finish_coroutines(0, futures) g = gen.WaitIterator(*futures) i = 0 while not g.done(): try: r = yield g.next() except ZeroDivisionError: self.assertIs(g.current_future, futures[0], 'exception future invalid') else: if i == 0: self.assertEqual(r, 24, 'iterator value incorrect') self.assertEqual(g.current_index, 2, 'wrong index') elif i == 2: self.assertEqual(r, 42, 'iterator value incorrect') self.assertEqual(g.current_index, 1, 'wrong index') elif i == 3: self.assertEqual(r, 84, 'iterator value incorrect') self.assertEqual(g.current_index, 3, 'wrong index') i += 1 @skipBefore35 @gen_test def test_iterator_async_await(self): # Recreate the previous test with py35 syntax. It's a little clunky # because of the way the previous test handles an exception on # a single iteration. futures = [Future(), Future(), Future(), Future()] self.finish_coroutines(0, futures) self.finished = False namespace = exec_test(globals(), locals(), """ async def f(): i = 0 g = gen.WaitIterator(*futures) try: async for r in g: if i == 0: self.assertEqual(r, 24, 'iterator value incorrect') self.assertEqual(g.current_index, 2, 'wrong index') else: raise Exception("expected exception on iteration 1") i += 1 except ZeroDivisionError: i += 1 async for r in g: if i == 2: self.assertEqual(r, 42, 'iterator value incorrect') self.assertEqual(g.current_index, 1, 'wrong index') elif i == 3: self.assertEqual(r, 84, 'iterator value incorrect') self.assertEqual(g.current_index, 3, 'wrong index') else: raise Exception("didn't expect iteration %d" % i) i += 1 self.finished = True """) yield namespace['f']() self.assertTrue(self.finished) @gen_test def test_no_ref(self): # In this usage, there is no direct hard reference to the # WaitIterator itself, only the Future it returns. Since # WaitIterator uses weak references internally to improve GC # performance, this used to cause problems. 
yield gen.with_timeout(datetime.timedelta(seconds=0.1), gen.WaitIterator(gen.sleep(0)).next()) class RunnerGCTest(AsyncTestCase): def is_pypy3(self): return (platform.python_implementation() == 'PyPy' and sys.version_info > (3,)) @gen_test def test_gc(self): # Github issue 1769: Runner objects can get GCed unexpectedly # while their future is alive. weakref_scope = [None] def callback(): gc.collect(2) weakref_scope[0]().set_result(123) @gen.coroutine def tester(): fut = Future() weakref_scope[0] = weakref.ref(fut) self.io_loop.add_callback(callback) yield fut yield gen.with_timeout( datetime.timedelta(seconds=0.2), tester() ) def test_gc_infinite_coro(self): # Github issue 2229: suspended coroutines should be GCed when # their loop is closed, even if they're involved in a reference # cycle. if IOLoop.configured_class().__name__.endswith('TwistedIOLoop'): raise unittest.SkipTest("Test may fail on TwistedIOLoop") loop = self.get_new_ioloop() result = [] wfut = [] @gen.coroutine def infinite_coro(): try: while True: yield gen.sleep(1e-3) result.append(True) finally: # coroutine finalizer result.append(None) @gen.coroutine def do_something(): fut = infinite_coro() fut._refcycle = fut wfut.append(weakref.ref(fut)) yield gen.sleep(0.2) loop.run_sync(do_something) loop.close() gc.collect() # Future was collected self.assertIs(wfut[0](), None) # At least one wakeup self.assertGreaterEqual(len(result), 2) if not self.is_pypy3(): # coroutine finalizer was called (not on PyPy3 apparently) self.assertIs(result[-1], None) @skipBefore35 def test_gc_infinite_async_await(self): # Same as test_gc_infinite_coro, but with a `async def` function import asyncio namespace = exec_test(globals(), locals(), """ async def infinite_coro(result): try: while True: await gen.sleep(1e-3) result.append(True) finally: # coroutine finalizer result.append(None) """) infinite_coro = namespace['infinite_coro'] loop = self.get_new_ioloop() result = [] wfut = [] @gen.coroutine def do_something(): fut = asyncio.get_event_loop().create_task(infinite_coro(result)) fut._refcycle = fut wfut.append(weakref.ref(fut)) yield gen.sleep(0.2) loop.run_sync(do_something) with ExpectLog('asyncio', "Task was destroyed but it is pending"): loop.close() gc.collect() # Future was collected self.assertIs(wfut[0](), None) # At least one wakeup and one finally self.assertGreaterEqual(len(result), 2) if not self.is_pypy3(): # coroutine finalizer was called (not on PyPy3 apparently) self.assertIs(result[-1], None) def test_multi_moment(self): # Test gen.multi with moment # now that it's not a real Future @gen.coroutine def wait_a_moment(): result = yield gen.multi([gen.moment, gen.moment]) raise gen.Return(result) loop = self.get_new_ioloop() result = loop.run_sync(wait_a_moment) self.assertEqual(result, [None, None]) if __name__ == '__main__': unittest.main()
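
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the test suite above): a minimal
# standalone use of the decorated-coroutine APIs these tests exercise.
# The names `double_async` and `demo` are hypothetical; only public
# tornado.gen / tornado.ioloop calls are used, and nothing below runs at
# import time.
from tornado import gen
from tornado.ioloop import IOLoop


@gen.coroutine
def double_async(x):
    # gen.moment hands control back to the IOLoop for one iteration.
    yield gen.moment
    raise gen.Return(x * 2)


@gen.coroutine
def demo():
    # Yielding a list waits for every future and preserves order,
    # mirroring test_multi / test_multi_future above.
    results = yield [double_async(1), double_async(2)]
    raise gen.Return(results)

# Usage: IOLoop.current().run_sync(demo) returns [2, 4].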
# Copyright 2017 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. # pylint: disable=relative-import import argparse import os import posixpath from code_generator import initialize_jinja_env from idl_reader import IdlReader from utilities import create_component_info_provider, write_file import utilities import v8_attributes import v8_interface import v8_types import v8_utilities INCLUDES = frozenset([ 'third_party/blink/renderer/bindings/core/v8/generated_code_helper.h', 'third_party/blink/renderer/bindings/core/v8/v8_html_document.h', 'third_party/blink/renderer/bindings/core/v8/v8_initializer.h', 'third_party/blink/renderer/bindings/core/v8/v8_window.h', 'third_party/blink/renderer/platform/bindings/v8_object_constructor.h', 'v8/include/v8.h' ]) TEMPLATE_FILE = 'external_reference_table.cc.tmpl' SNAPSHOTTED_INTERFACES = frozenset([ 'Window', 'EventTarget', 'HTMLDocument', 'Document', 'Node', ]) def parse_args(): parser = argparse.ArgumentParser() parser.add_argument( '--idl-files-list', type=str, required=True, help='file listing IDL files') parser.add_argument( '--output', type=str, required=True, help='output file path') parser.add_argument( '--info-dir', type=str, required=True, help='directory contains component info') parser.add_argument( '--cache-dir', type=str, required=True, help='cache directory') parser.add_argument( '--target-component', type=str, required=True, help='target component') return parser.parse_known_args() # This class creates a Jinja template context about an interface. class InterfaceTemplateContextBuilder(object): def __init__(self, opts, info_provider): self._opts = opts self._info_provider = info_provider def create_interface_context(self, interface, component, interfaces): '''Creates a Jinja context which is based on an interface.''' assert component in ['core', 'modules'] name = '%s%s' % (v8_utilities.cpp_name(interface), 'Partial' if interface.is_partial else '') # Constructors has_constructor_callback = False if not interface.is_partial: constructors = any(constructor.name == 'Constructor' for constructor in interface.constructors) custom_constructors = interface.custom_constructors html_constructor = 'HTMLConstructor' in interface.extended_attributes has_constructor_callback = constructors or custom_constructors or html_constructor attributes = [] methods = [] has_cross_origin_indexed_getter = False has_cross_origin_named_enum = False has_cross_origin_named_getter = False has_cross_origin_named_setter = False has_security_check = False indexed_property_getter = None is_global = False named_property_getter = None component_info = self._info_provider.component_info if interface.name in SNAPSHOTTED_INTERFACES: attributes = [ v8_attributes.attribute_context(interface, attribute, interfaces, component_info) for attribute in interface.attributes ] methods = v8_interface.methods_context(interface, component_info)['methods'] is_global = 'Global' in interface.extended_attributes named_property_getter = v8_interface.property_getter( interface.named_property_getter, ['name']) indexed_property_getter = v8_interface.property_getter( interface.indexed_property_getter, ['index']) if not interface.is_partial: has_security_check = ( 'CheckSecurity' in interface.extended_attributes and interface.name != 'EventTarget') has_cross_origin_named_getter = ( any(method['is_cross_origin'] for method in methods) or any(attribute['has_cross_origin_getter'] for attribute in attributes)) 
has_cross_origin_named_setter = any( attribute['has_cross_origin_setter'] for attribute in attributes) has_cross_origin_indexed_getter = ( indexed_property_getter and indexed_property_getter['is_cross_origin']) has_cross_origin_named_enum = has_cross_origin_named_getter \ or has_cross_origin_named_setter if (named_property_getter and named_property_getter['is_cross_origin']): has_cross_origin_named_getter = True return { 'attributes': attributes, 'component': component, 'has_constructor_callback': has_constructor_callback, 'has_cross_origin_named_getter': has_cross_origin_named_getter, 'has_cross_origin_named_setter': has_cross_origin_named_setter, 'has_cross_origin_named_enumerator': has_cross_origin_named_enum, 'has_cross_origin_indexed_getter': has_cross_origin_indexed_getter, 'has_security_check': has_security_check, 'indexed_property_getter': indexed_property_getter, 'indexed_property_setter': v8_interface.property_setter(interface.indexed_property_setter, interface), 'indexed_property_deleter': v8_interface.property_deleter(interface.indexed_property_deleter), 'internal_namespace': v8_interface.internal_namespace(interface), 'is_partial': interface.is_partial, 'methods': methods, 'name': name, 'named_constructor': v8_interface.named_constructor_context(interface), 'named_property_getter': named_property_getter, 'named_property_setter': v8_interface.property_setter(interface.named_property_setter, interface), 'named_property_deleter': v8_interface.property_deleter(interface.named_property_deleter), 'v8_class': v8_utilities.v8_class_name_or_partial(interface), } # This class applies a Jinja template and creates a .cpp file for the external reference table. class ExternalReferenceTableGenerator(object): def __init__(self, opts, info_provider): self._opts = opts self._info_provider = info_provider self._reader = IdlReader(info_provider.interfaces_info, opts.cache_dir) self._interface_contexts = {} self._include_files = set(INCLUDES) v8_types.set_component_dirs( info_provider.interfaces_info['component_dirs']) # Creates a Jinja context from an IDL file. def process_idl_file(self, idl_filename): definitions = self._reader.read_idl_definitions(idl_filename) for component in definitions: target_definitions = definitions[component] interfaces = target_definitions.interfaces first_name = target_definitions.first_name if first_name in interfaces.keys(): interface = interfaces[first_name] self._process_interface(interface, component, interfaces) # Creates a Jinja context from an interface. Some interfaces are not used # in V8 context snapshot, so we can skip them. def _process_interface(self, interface, component, interfaces): def has_impl(interface): component_info = self._info_provider.component_info runtime_features = component_info['runtime_enabled_features'] # Non legacy callback interface does not provide V8 callbacks. 
if interface.is_callback: return len(interface.constants) > 0 if v8_utilities.runtime_enabled_feature_name( interface, runtime_features): return False if 'Exposed' not in interface.extended_attributes: return True return any( exposure.exposed == 'Window' and exposure.runtime_enabled is None for exposure in interface.extended_attributes['Exposed']) if not has_impl(interface): return context_builder = InterfaceTemplateContextBuilder( self._opts, self._info_provider) context = context_builder.create_interface_context( interface, component, interfaces) name = '%s%s' % (interface.name, 'Partial' if interface.is_partial else '') self._interface_contexts[name] = context # Do not include unnecessary header files. if not context['attributes'] and not context['named_property_setter']: return include_file = 'third_party/blink/renderer/bindings/%s/v8/%s.h' % ( component, utilities.to_snake_case(context['v8_class'])) self._include_files.add(include_file) # Gathers all interface-dependent information and returns as a Jinja template context. def _create_template_context(self): interfaces = [] for name in sorted(self._interface_contexts): interfaces.append(self._interface_contexts[name]) header_path = 'third_party/blink/renderer/bindings/modules/v8/v8_context_snapshot_external_references.h' include_files = list(self._include_files) return { 'class': 'V8ContextSnapshotExternalReferences', 'interfaces': interfaces, 'include_files': sorted(include_files), 'this_include_header_path': header_path, 'code_generator': os.path.basename(__file__), 'jinja_template_filename': TEMPLATE_FILE } # Applies a Jinja template on a context and generates a C++ code. def generate(self): jinja_env = initialize_jinja_env(self._opts.cache_dir) context = self._create_template_context() cpp_template = jinja_env.get_template(TEMPLATE_FILE) cpp_text = cpp_template.render(context) return cpp_text def main(): opts, _ = parse_args() # TODO(peria): get rid of |info_provider| info_provider = create_component_info_provider(opts.info_dir, opts.target_component) generator = ExternalReferenceTableGenerator(opts, info_provider) idl_files = utilities.read_idl_files_list_from_file(opts.idl_files_list) for idl_file in idl_files: generator.process_idl_file(idl_file) output_code = generator.generate() output_path = opts.output write_file(output_code, output_path) if __name__ == '__main__': main()
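
# This is an illustrative sketch, not part of the generator: it drives the
# pipeline above programmatically instead of through parse_args().  The
# _ExampleOpts class and every path in it are hypothetical placeholders; a
# real build supplies these values via the command-line flags defined in
# parse_args().
class _ExampleOpts(object):
    idl_files_list = 'idl_files_list.txt'
    output = 'v8_context_snapshot_external_references.cc'
    info_dir = 'gen/blink'
    cache_dir = 'bindings_cache'
    target_component = 'modules'


def _example_generate():
    opts = _ExampleOpts()
    info_provider = create_component_info_provider(opts.info_dir,
                                                   opts.target_component)
    generator = ExternalReferenceTableGenerator(opts, info_provider)
    idl_files = utilities.read_idl_files_list_from_file(opts.idl_files_list)
    for idl_file in idl_files:
        generator.process_idl_file(idl_file)
    write_file(generator.generate(), opts.output)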
import datetime from django.core.urlresolvers import reverse from molo.polls.admin import QuestionAdmin, download_as_csv from molo.polls.models import ( Choice, Question, FreeTextQuestion, ) from molo.polls.tests.base import BasePollsTestCase class AdminTestCase(BasePollsTestCase): def test_download_csv_question(self): # make choices choice1 = Choice(title='yes') choice2 = Choice(title='no') # make a question question = Question( title='is this a test', allow_multiple_choice=True, show_results=False) self.polls_index.add_child(instance=question) question.add_child(instance=choice1) question.add_child(instance=choice2) question.save_revision().publish() # make a vote self.client.login( username=self.superuser_name, password=self.superuser_password ) self.client.post( reverse('molo.polls:vote', kwargs={'question_id': question.id}), {'choice': [choice1.id, choice2.id]}) # should automatically create the poll vote # test poll vote response = download_as_csv(QuestionAdmin(Question, self.site), None, Question.objects.all()) date = str(datetime.datetime.now().date()) expected_output = (('title,date_submitted,user,answer' '\r\nis this a test,{0},{1},' '"yes,no"\r\n').format( date, self.superuser_name)) self.assertContains(response, expected_output) def test_choice_short_name(self): # make choices choice1 = Choice(title='yes', short_name='y') choice2 = Choice(title='no', short_name='n') # make a question question = Question( title='is this a test', allow_multiple_choice=True, show_results=False) self.polls_index.add_child(instance=question) question.add_child(instance=choice1) question.add_child(instance=choice2) question.save_revision().publish() # make a vote self.client.login( username=self.superuser_name, password=self.superuser_password ) self.client.post( reverse('molo.polls:vote', kwargs={'question_id': question.id}), {'choice': [choice1.id, choice2.id]}) # should automatically create the poll vote # test poll vote response = download_as_csv(QuestionAdmin(Question, self.site), None, Question.objects.all()) date = str(datetime.datetime.now().date()) expected_output = (('title,date_submitted,user,answer' '\r\nis this a test,{0},{1},' '"y,n"\r\n').format( date, self.superuser_name)) self.assertContains(response, expected_output) def test_choice_short_name_single_choice(self): # make choices choice1 = Choice(title='yes', short_name='y') # make a question question = Question( title='is this a test', allow_multiple_choice=True, show_results=False) self.polls_index.add_child(instance=question) question.add_child(instance=choice1) question.save_revision().publish() # make a vote self.client.login( username=self.superuser_name, password=self.superuser_password ) self.client.post( reverse('molo.polls:vote', kwargs={'question_id': question.id}), {'choice': choice1.id}) # should automatically create the poll vote # test poll vote response = download_as_csv(QuestionAdmin(Question, self.site), None, Question.objects.all()) date = str(datetime.datetime.now().date()) expected_output = (('title,date_submitted,user,answer' '\r\nis this a test,{0},{1},' 'y\r\n').format( date, self.superuser_name)) self.assertContains(response, expected_output) def test_download_csv_free_text_question(self): question = FreeTextQuestion( title='is this a test') self.polls_index.add_child(instance=question) question.save_revision().publish() self.client.login( username=self.superuser_name, password=self.superuser_password ) response = self.client.get('/') self.assertContains(response, 'is this a test') self.client.post( 
reverse('molo.polls:free_text_vote', kwargs={'question_id': question.id}), {'answer': 'this is an answer'}) response = download_as_csv(QuestionAdmin(Question, self.site), None, Question.objects.all()) date = str(datetime.datetime.now().date()) expected_output = (('title,date_submitted,user,answer' '\r\nis this a test,{0},{1},' 'this is an answer\r\n').format( date, self.superuser_name)) self.assertContains(response, expected_output) def test_download_csv_free_text_question_short_name(self): question = FreeTextQuestion( title='is this a test', short_name='short') self.polls_index.add_child(instance=question) question.save_revision().publish() self.client.login( username=self.superuser_name, password=self.superuser_password ) response = self.client.get('/') self.assertContains(response, 'is this a test') self.client.post( reverse('molo.polls:free_text_vote', kwargs={'question_id': question.id}), {'answer': 'this is an answer'}) response = download_as_csv(QuestionAdmin(Question, self.site), None, Question.objects.all()) date = str(datetime.datetime.now().date()) expected_output = (('title,date_submitted,user,answer' '\r\nshort,{0},{1},' 'this is an answer\r\n').format( date, self.superuser_name)) self.assertContains(response, expected_output) def test_multisite_download_csv_question(self): # make choices choice1 = Choice(title='yes') choice2 = Choice(title='no') # make a question question = Question( title='poll for main1', allow_multiple_choice=True, show_results=False) self.polls_index.add_child(instance=question) question.add_child(instance=choice1) question.add_child(instance=choice2) question.save_revision().publish() # make a vote self.client.login( username=self.superuser_name, password=self.superuser_password ) self.client.post( reverse('molo.polls:vote', kwargs={'question_id': question.id}), {'choice': [choice1.id, choice2.id]}) # should automatically create the poll vote # test poll vote response = self.client.get( ('/admin/polls/question/{0}/' 'results/?action=download').format(question.pk)) date = str(datetime.datetime.now().date()) expected_output = ( 'Submission Date,Answer,User\r\n{0},"yes,no",{1}\r\n').format( date, self.superuser_name) self.assertContains(response, expected_output) # test seperation on multi-site # make choices choice1_main2 = Choice(title='yes') # make a question question_main2 = Question( title='poll for main2', allow_multiple_choice=True, show_results=False) self.polls_index_main2.add_child(instance=question_main2) question_main2.add_child(instance=choice1_main2) question_main2.save_revision().publish() self.client2.login( username=self.superuser_name, password=self.superuser_password ) self.client2.post( reverse('molo.polls:vote', kwargs={'question_id': question_main2.id}), {'choice': [choice1_main2.id]}) expected_output = ( 'Submission Date,Answer,User\r\n{0},yes,{1}\r\n').format( date, self.superuser_name) response = self.client2.get( ('/admin/polls/question/{0}/' 'results/?action=download').format(question_main2.pk)) self.assertContains(response, expected_output)
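
# Illustrative helper (not used by the tests above): the question/choice
# setup that each test repeats, factored into one module-level function.
# The helper name is hypothetical; it relies only on the molo.polls /
# wagtail calls the tests already make (add_child, save_revision, publish).
def make_question_with_choices(polls_index, title, choice_titles):
    question = Question(
        title=title,
        allow_multiple_choice=True,
        show_results=False)
    polls_index.add_child(instance=question)
    for choice_title in choice_titles:
        question.add_child(instance=Choice(title=choice_title))
    question.save_revision().publish()
    return question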
import numpy as np import theano import theano.tensor as T from lib import sigmoid, softmax, dropout, floatX, random_weights, zeros class NNLayer: def get_params(self): return self.params def get_param_names(self): return [ 'UNK' if p.name is None else p.name for p in self.params ] def save_model(self): return def load_model(self): return def updates(self): return [] def reset_state(self): return class LSTMLayer(NNLayer): def __init__(self, num_input, num_cells, input_layers=None, name="", go_backwards=False): """ LSTM Layer Takes as input sequence of inputs, returns sequence of outputs """ self.name = name self.num_input = num_input self.num_cells = num_cells if len(input_layers) >= 2: self.X = T.concatenate([input_layer.output() for input_layer in input_layers], axis=1) else: self.X = input_layers[0].output() self.h0 = theano.shared(floatX(np.zeros(num_cells))) self.s0 = theano.shared(floatX(np.zeros(num_cells))) self.go_backwards = go_backwards self.W_gx = random_weights((num_input, num_cells), name=self.name+"W_gx") self.W_ix = random_weights((num_input, num_cells), name=self.name+"W_ix") self.W_fx = random_weights((num_input, num_cells), name=self.name+"W_fx") self.W_ox = random_weights((num_input, num_cells), name=self.name+"W_ox") self.W_gh = random_weights((num_cells, num_cells), name=self.name+"W_gh") self.W_ih = random_weights((num_cells, num_cells), name=self.name+"W_ih") self.W_fh = random_weights((num_cells, num_cells), name=self.name+"W_fh") self.W_oh = random_weights((num_cells, num_cells), name=self.name+"W_oh") self.b_g = zeros(num_cells, name=self.name+"b_g") self.b_i = zeros(num_cells, name=self.name+"b_i") self.b_f = zeros(num_cells, name=self.name+"b_f") self.b_o = zeros(num_cells, name=self.name+"b_o") self.params = [self.W_gx, self.W_ix, self.W_ox, self.W_fx, self.W_gh, self.W_ih, self.W_oh, self.W_fh, self.b_g, self.b_i, self.b_f, self.b_o, ] self.output() def one_step(self, x, h_tm1, s_tm1): """ """ g = T.tanh(T.dot(x, self.W_gx) + T.dot(h_tm1, self.W_gh) + self.b_g) i = T.nnet.sigmoid(T.dot(x, self.W_ix) + T.dot(h_tm1, self.W_ih) + self.b_i) f = T.nnet.sigmoid(T.dot(x, self.W_fx) + T.dot(h_tm1, self.W_fh) + self.b_f) o = T.nnet.sigmoid(T.dot(x, self.W_ox) + T.dot(h_tm1, self.W_oh) + self.b_o) s = i*g + s_tm1 * f h = T.tanh(s) * o return h, s def output(self, train=True): outputs_info = [self.h0, self.s0] ([outputs, states], updates) = theano.scan( fn=self.one_step, sequences=self.X, outputs_info = outputs_info, go_backwards = self.go_backwards ) return outputs def reset_state(self): self.h0 = theano.shared(floatX(np.zeros(self.num_cells))) self.s0 = theano.shared(floatX(np.zeros(self.num_cells))) class GRULayer(NNLayer): def __init__(self, num_input, num_cells, input_layers=None, name="", go_backwards=False): """ GRU Layer Takes as input sequence of inputs, returns sequence of outputs """ self.name = name self.num_input = num_input self.num_cells = num_cells if len(input_layers) >= 2: self.X = T.concatenate([input_layer.output() for input_layer in input_layers], axis=1) else: self.X = input_layers[0].output() self.s0 = zeros(num_cells) self.go_backwards = go_backwards self.U_z = random_weights((num_input, num_cells), name=self.name+"U_z") self.W_z = random_weights((num_cells, num_cells), name=self.name+"W_z") self.U_r = random_weights((num_input, num_cells), name=self.name+"U_r") self.W_r = random_weights((num_cells, num_cells), name=self.name+"W_r") self.U_h = random_weights((num_input, num_cells), name=self.name+"U_h") self.W_h = random_weights((num_cells, 
num_cells), name=self.name+"W_h") self.b_z = zeros(num_cells, name=self.name+"b_z") self.b_r = zeros(num_cells, name=self.name+"b_r") self.b_h = zeros(num_cells, name=self.name+"b_h") self.params = [ self.U_z, self.W_z, self.U_r, self.W_r, self.U_h, self.W_h, self.b_z, self.b_r, self.b_h ] self.output() def one_step(self, x, s_tm1): """ """ z = T.nnet.sigmoid(T.dot(x, self.U_z) + T.dot(s_tm1, self.W_z) + self.b_z) r = T.nnet.sigmoid(T.dot(x, self.U_r) + T.dot(s_tm1, self.W_r) + self.b_r) h = T.tanh(T.dot(x, self.U_h) + T.dot(s_tm1 * r, self.W_h) + self.b_h) s = (1-z) * h + z * s_tm1 return [s] def output(self, train=True): outputs_info = [self.s0] (outputs, updates) = theano.scan( fn=self.one_step, sequences=self.X, outputs_info = outputs_info, go_backwards = self.go_backwards ) return outputs def reset_state(self): self.s0 = zeros(self.num_cells) class FullyConnectedLayer(NNLayer): """ """ def __init__(self, num_input, num_output, input_layers, name=""): if len(input_layers) >= 2: self.X = T.concatenate([input_layer.output() for input_layer in input_layers], axis=1) else: self.X = input_layers[0].output() self.W_yh = random_weights((num_input, num_output),name="W_yh") self.b_y = zeros(num_output, name="b_y") self.params = [self.W_yh, self.b_y] def output(self): return T.dot(self.X, self.W_yh) + self.b_y class InputLayer(NNLayer): """ """ def __init__(self, X, name=""): self.name = name self.X = X self.params=[] def output(self, train=False): return self.X class SoftmaxLayer(NNLayer): """ """ def __init__(self, num_input, num_output, input_layer, temperature=1.0, name=""): self.name = "" self.X = input_layer self.params = [] self.temp = temperature self.W_yh = random_weights((num_input, num_output),name="W_yh") self.b_y = zeros(num_output, name="b_y") self.params = [self.W_yh, self.b_y] def output(self, train=True): if train: input_sequence = self.X.output(train=True) else: input_sequence = self.X.output(train=False) return softmax((T.dot(input_sequence, self.W_yh) + self.b_y), temperature=self.temp) class SigmoidLayer(NNLayer): def __init__(self, num_input, num_output, input_layers, name=""): if len(input_layers) >= 2: print "number of input layers: %s" % len(input_layers) print "len of list comprehension: %s" % len([input_layer.output() for input_layer in input_layers]) self.X = T.concatenate([input_layer.output() for input_layer in input_layers], axis=1) else: self.X = input_layers[0].output() self.W_yh = random_weights((num_input, num_output),name="W_yh") self.b_y = zeros(num_output, name="b_y") self.params = [self.W_yh, self.b_y] def output(self): return sigmoid(T.dot(self.X, self.W_yh) + self.b_y) class DropoutLayer(NNLayer): def __init__(self, input_layer, name="", dropout_probability=0.): self.X = input_layer.output() self.params = [] self.dropout_probability = dropout_probability def output(self): return dropout(self.X, self.dropout_probability) class MergeLayer(NNLayer): def init(self, input_layers): return def output(self): return
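
# Illustrative sketch (not part of the layer library): wiring the classes
# above into a tiny sequence model.  The function name and layer sizes are
# hypothetical; it only composes InputLayer, GRULayer and SoftmaxLayer as
# they are defined in this module.
def build_example_network(num_features=16, num_hidden=32, num_classes=4):
    # Symbolic input: one row per timestep, one column per feature.
    X = T.matrix('X')
    input_layer = InputLayer(X, name="input")
    gru = GRULayer(num_features, num_hidden,
                   input_layers=[input_layer], name="gru_")
    softmax = SoftmaxLayer(num_hidden, num_classes, gru)
    params = gru.get_params() + softmax.get_params()
    # Returns the symbolic input, the per-timestep class probabilities and
    # the trainable parameters, ready to be compiled with theano.function.
    return X, softmax.output(train=False), params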
#!/usr/bin/python # -*- coding: utf-8 -*- # libthumbor - python extension to thumbor # http://github.com/heynemann/libthumbor # Licensed under the MIT license: # http://www.opensource.org/licenses/mit-license # Copyright (c) 2011 Bernardo Heynemann [email protected] '''libthumbor URL composer tests''' import sys from unittest import TestCase from thumbor.crypto import Cryptor from libthumbor.url import url_for from libthumbor.url import unsafe_url IMAGE_URL = 'my.server.com/some/path/to/image.jpg' IMAGE_MD5 = '84996242f65a4d864aceb125e1c4c5ba' def decrypt_in_thumbor(key, encrypted): '''Uses thumbor to decrypt libthumbor's encrypted URL''' crypto = Cryptor(key) return crypto.decrypt(encrypted) def test_no_options_specified(): '''test_no_options_specified Given An image URL of "my.server.com/some/path/to/image.jpg" When I ask my library for an URL Then I get "84996242f65a4d864aceb125e1c4c5ba" as URL ''' url = url_for(image_url=IMAGE_URL) assert url == IMAGE_MD5, url def test_url_raises_if_no_url(): '''test_url_raises_if_no_url Given An image URL of "" or null When I ask my library for an URL Then I get an exception that says image URL is mandatory ''' try: url_for() except ValueError, err: assert str(err) == 'The image_url argument is mandatory.' return True assert False, 'Should not have gotten this far' def test_url_width_height_1(): '''test_url_width_height_1 Given An image URL of "my.server.com/some/path/to/image.jpg" And a width of 300 When I ask my library for an URL Then I get "300x0/84996242f65a4d864aceb125e1c4c5ba" as URL ''' url = url_for(width=300, image_url=IMAGE_URL) assert url == "300x0/84996242f65a4d864aceb125e1c4c5ba", url def test_url_width_height_2(): '''test_url_width_height_2 Given An image URL of "my.server.com/some/path/to/image.jpg" And a height of 300 When I ask my library for an URL Then I get "0x300/84996242f65a4d864aceb125e1c4c5ba" as URL ''' url = url_for(height=300, image_url=IMAGE_URL) assert url == "0x300/84996242f65a4d864aceb125e1c4c5ba", url def test_url_width_height_3(): '''test_url_width_height_3 Given An image URL of "my.server.com/some/path/to/image.jpg" And a width of 200 And a height of 300 When I ask my library for an URL Then I get "200x300/84996242f65a4d864aceb125e1c4c5ba" as URL ''' url = url_for(width=200, height=300, image_url=IMAGE_URL) assert url == "200x300/84996242f65a4d864aceb125e1c4c5ba", url def test_url_width_height_4(): '''test_url_width_height_4 Given An image URL of "my.server.com/some/path/to/image.jpg" And a width of orig When I ask my library for an URL Then I get "origx0/84996242f65a4d864aceb125e1c4c5ba" as URL ''' url = url_for(width="orig", image_url=IMAGE_URL) assert url == "origx0/84996242f65a4d864aceb125e1c4c5ba", url def test_url_width_height_5(): '''test_url_width_height_5 Given An image URL of "my.server.com/some/path/to/image.jpg" And a height of orig When I ask my library for an URL Then I get "0xorig/84996242f65a4d864aceb125e1c4c5ba" as URL ''' url = url_for(height="orig", image_url=IMAGE_URL) assert url == "0xorig/84996242f65a4d864aceb125e1c4c5ba", url def test_url_width_height_6(): '''test_url_width_height_6 Given An image URL of "my.server.com/some/path/to/image.jpg" And a width of 100 And a height of orig When I ask my library for an URL Then I get "100xorig/84996242f65a4d864aceb125e1c4c5ba" as URL ''' url = url_for(width=100, height="orig", image_url=IMAGE_URL) assert url == "100xorig/84996242f65a4d864aceb125e1c4c5ba", url def test_url_width_height_7(): '''test_url_width_height_7 Given An image URL of 
"my.server.com/some/path/to/image.jpg" And a height of 100 And a width of orig When I ask my library for an URL Then I get "origx100/84996242f65a4d864aceb125e1c4c5ba" as URL ''' url = url_for(width="orig", height=100, image_url=IMAGE_URL) assert url == "origx100/84996242f65a4d864aceb125e1c4c5ba", url def test_url_width_height_8(): '''test_url_width_height_8 Given An image URL of "my.server.com/some/path/to/image.jpg" And a height of orig And a width of orig When I ask my library for an URL Then I get "origxorig/84996242f65a4d864aceb125e1c4c5ba" as URL ''' url = url_for(width="orig", height="orig", image_url=IMAGE_URL) assert url == "origxorig/84996242f65a4d864aceb125e1c4c5ba", url def test_smart_url(): '''test_smart_url Given An image URL of "my.server.com/some/path/to/image.jpg" And a width of 200 And a height of 300 And the smart flag When I ask my library for an URL Then I get "200x300/smart/84996242f65a4d864aceb125e1c4c5ba" as URL ''' url = url_for(width=200, height=300, smart=True, image_url=IMAGE_URL) assert url == "200x300/smart/84996242f65a4d864aceb125e1c4c5ba", url def test_fit_in_url(): '''test_fit_in_url Given An image URL of "my.server.com/some/path/to/image.jpg" And a width of 200 And a height of 300 And the fit-in flag When I ask my library for an URL Then I get "fit-in/200x300/84996242f65a4d864aceb125e1c4c5ba" as URL ''' url = url_for(width=200, height=300, fit_in=True, image_url=IMAGE_URL) assert url == "fit-in/200x300/84996242f65a4d864aceb125e1c4c5ba", url def test_adaptive_fit_in_url(): '''test_adaptive_fit_in_url Given An image URL of "my.server.com/some/path/to/image.jpg" And a width of 200 And a height of 300 And the adaptive fit-in flag When I ask my library for an URL Then I get "adaptive-fit-in/200x300/84996242f65a4d864aceb125e1c4c5ba" as URL ''' url = url_for(width=200, height=300, adaptive_fit_in=True, image_url=IMAGE_URL) assert url == "adaptive-fit-in/200x300/84996242f65a4d864aceb125e1c4c5ba", url def test_fit_in_fails_if_no_width_supplied(): try: url_for(fit_in=True, image_url=IMAGE_URL) except ValueError: err = sys.exc_info()[1] assert err is not None else: assert False, "Should not have gotten this far" def test_full_fit_in_fails_if_no_width_supplied(): try: url_for(full_fit_in=True, image_url=IMAGE_URL) except ValueError: err = sys.exc_info()[1] assert err is not None else: assert False, "Should not have gotten this far" def test_adaptive_fit_in_fails_if_no_width_supplied(): try: url_for(adaptive_fit_in=True, image_url=IMAGE_URL) except ValueError: err = sys.exc_info()[1] assert err is not None else: assert False, "Should not have gotten this far" def test_adaptive_full_fit_in_fails_if_no_width_supplied(): try: url_for(adaptive_full_fit_in=True, image_url=IMAGE_URL) except ValueError: err = sys.exc_info()[1] assert err is not None else: assert False, "Should not have gotten this far" def test_full_fit_in_url(): '''test_full_fit_in_url Given An image URL of "my.server.com/some/path/to/image.jpg" And a width of 200 And a height of 300 And the full-fit-in flag When I ask my library for an URL Then I get "full-fit-in/200x300/84996242f65a4d864aceb125e1c4c5ba" as URL ''' url = url_for(width=200, height=300, full_fit_in=True, image_url=IMAGE_URL) assert url == "full-fit-in/200x300/84996242f65a4d864aceb125e1c4c5ba", url def test_adaptive_full_fit_in_url(): '''test_adaptive_full_fit_in_url Given An image URL of "my.server.com/some/path/to/image.jpg" And a width of 200 And a height of 300 And the adaptive full-fit-in flag When I ask my library for an URL Then I get 
"adaptive-full-fit-in/200x300/84996242f65a4d864aceb125e1c4c5ba" as URL ''' url = url_for(width=200, height=300, adaptive_full_fit_in=True, image_url=IMAGE_URL) assert url == "adaptive-full-fit-in/200x300/84996242f65a4d864aceb125e1c4c5ba", url def test_flip_1(): '''test_flip_1 Given An image URL of "my.server.com/some/path/to/image.jpg" And the flip flag When I ask my library for an URL Then I get "-0x0/84996242f65a4d864aceb125e1c4c5ba" as URL ''' url = url_for(flip=True, image_url=IMAGE_URL) assert url == "-0x0/84996242f65a4d864aceb125e1c4c5ba", url def test_flip_2(): '''test_flip_2 Given An image URL of "my.server.com/some/path/to/image.jpg" And a width of 200 And the flip flag When I ask my library for an URL Then I get "-200x0/84996242f65a4d864aceb125e1c4c5ba" as URL ''' url = url_for(flip=True, width=200, image_url=IMAGE_URL) assert url == "-200x0/84996242f65a4d864aceb125e1c4c5ba", url def test_flop_1(): '''test_flop_1 Given An image URL of "my.server.com/some/path/to/image.jpg" And the flop flag When I ask my library for an URL Then I get "0x-0/84996242f65a4d864aceb125e1c4c5ba" as URL ''' url = url_for(flop=True, image_url=IMAGE_URL) assert url == "0x-0/84996242f65a4d864aceb125e1c4c5ba", url def test_flop_2(): '''test_flop_2 Given An image URL of "my.server.com/some/path/to/image.jpg" And a height of 200 And the flop flag When I ask my library for an URL Then I get "0x-200/84996242f65a4d864aceb125e1c4c5ba" as URL ''' url = url_for(flop=True, height=200, image_url=IMAGE_URL) assert url == "0x-200/84996242f65a4d864aceb125e1c4c5ba", url def test_flip_flop(): '''test_flip_flop Given An image URL of "my.server.com/some/path/to/image.jpg" And the flip flag And the flop flag When I ask my library for an URL Then I get "-0x-0/84996242f65a4d864aceb125e1c4c5ba" as URL ''' url = url_for(flip=True, flop=True, image_url=IMAGE_URL) assert url == "-0x-0/84996242f65a4d864aceb125e1c4c5ba", url def test_flip_flop2(): '''test_flip_flop2 Given An image URL of "my.server.com/some/path/to/image.jpg" And a width of 200 And a height of 300 And the flip flag And the flop flag When I ask my library for an URL Then I get "-200x-300/84996242f65a4d864aceb125e1c4c5ba" as URL ''' url = url_for(flip=True, flop=True, width=200, height=300, image_url=IMAGE_URL) assert url == "-200x-300/84996242f65a4d864aceb125e1c4c5ba", url def test_horizontal_alignment(): '''test_horizontal_alignment Given An image URL of "my.server.com/some/path/to/image.jpg" And a 'left' horizontal alignment option When I ask my library for an URL Then I get "left/84996242f65a4d864aceb125e1c4c5ba" as URL ''' url = url_for(halign='left', image_url=IMAGE_URL) assert url == 'left/84996242f65a4d864aceb125e1c4c5ba', url def test_horizontal_alignment2(): '''test_horizontal_alignment2 Given An image URL of "my.server.com/some/path/to/image.jpg" And a 'center' horizontal alignment option When I ask my library for an URL Then I get "84996242f65a4d864aceb125e1c4c5ba" as URL ''' url = url_for(halign='center', image_url=IMAGE_URL) assert url == '84996242f65a4d864aceb125e1c4c5ba', url def test_vertical_alignment(): '''test_vertical_alignment Given An image URL of "my.server.com/some/path/to/image.jpg" And a 'top' vertical alignment option When I ask my library for an URL Then I get "top/84996242f65a4d864aceb125e1c4c5ba" as URL ''' url = url_for(valign='top', image_url=IMAGE_URL) assert url == 'top/84996242f65a4d864aceb125e1c4c5ba', url def test_vertical_alignment2(): '''test_vertical_alignment2 Given An image URL of "my.server.com/some/path/to/image.jpg" And a 
'middle' vertical alignment option
    When
        I ask my library for an URL
    Then
        I get "84996242f65a4d864aceb125e1c4c5ba" as URL
    '''
    url = url_for(valign='middle', image_url=IMAGE_URL)
    assert url == '84996242f65a4d864aceb125e1c4c5ba', url


def test_both_alignments():
    '''test_both_alignments
    Given
        An image URL of "my.server.com/some/path/to/image.jpg"
        And a 'left' horizontal alignment option
        And a 'top' vertical alignment option
    When
        I ask my library for an URL
    Then
        I get "left/top/84996242f65a4d864aceb125e1c4c5ba" as URL
    '''
    url = url_for(halign='left', valign='top', image_url=IMAGE_URL)
    assert url == 'left/top/84996242f65a4d864aceb125e1c4c5ba', url


def test_proper_haligns():
    '''test_proper_haligns'''
    try:
        url_for(halign='wrong', image_url=IMAGE_URL)
    except ValueError as err:
        assert str(err) == 'Only "left", "center" and "right"' + \
            ' are valid values for horizontal alignment.'
        return True
    assert False, "Should not have gotten this far."


def test_proper_valigns():
    '''test_proper_valigns'''
    try:
        url_for(valign='wrong', image_url=IMAGE_URL)
    except ValueError as err:
        assert str(err) == 'Only "top", "middle" and "bottom"' + \
            ' are valid values for vertical alignment.'
        return True
    assert False, "Should not have gotten this far."


def test_proper_meta():
    '''test_proper_meta
    Given
        An image URL of "my.server.com/some/path/to/image.jpg"
        And a 'meta' flag
    When
        I ask my library for an URL
    Then
        I get "meta/84996242f65a4d864aceb125e1c4c5ba" as URL
    '''
    url = url_for(meta=True, image_url=IMAGE_URL)
    assert url == 'meta/84996242f65a4d864aceb125e1c4c5ba', url


def test_trim_standard():
    url = url_for(trim=True, image_url=IMAGE_URL)
    assert url == 'trim/84996242f65a4d864aceb125e1c4c5ba', url


def test_trim_pixel_and_tolerance():
    url = url_for(trim=('bottom-right', 15), image_url=IMAGE_URL)
    assert url == 'trim:bottom-right:15/84996242f65a4d864aceb125e1c4c5ba', url


def test_trim_pixel_only():
    url = url_for(trim=('top-left', None), image_url=IMAGE_URL)
    assert url == 'trim:top-left/84996242f65a4d864aceb125e1c4c5ba', url


def test_trim_tolerance_only():
    url = url_for(trim=(None, 15), image_url=IMAGE_URL)
    assert url == 'trim::15/84996242f65a4d864aceb125e1c4c5ba', url


def test_manual_crop_1():
    '''test_manual_crop_1
    Given
        An image URL of "my.server.com/some/path/to/image.jpg"
        And a manual crop left-top point of (10, 20)
        And a manual crop right-bottom point of (30, 40)
    When
        I ask my library for an URL
    Then
        I get "10x20:30x40/84996242f65a4d864aceb125e1c4c5ba" as URL
    '''
    url = url_for(crop=((10, 20), (30, 40)), image_url=IMAGE_URL)
    assert url == '10x20:30x40/84996242f65a4d864aceb125e1c4c5ba', url


def test_manual_crop_2():
    '''test_manual_crop_2
    Given
        An image URL of "my.server.com/some/path/to/image.jpg"
        And a manual crop left-top point of (0, 0)
        And a manual crop right-bottom point of (0, 0)
    When
        I ask my library for an URL
    Then
        I get "84996242f65a4d864aceb125e1c4c5ba" as URL
    '''
    url = url_for(crop=((0, 0), (0, 0)), image_url=IMAGE_URL)
    assert url == '84996242f65a4d864aceb125e1c4c5ba', url


def test_smart_after_alignments():
    '''test_smart_after_alignments
    Given
        An image URL of "my.server.com/some/path/to/image.jpg"
        And a 'smart' flag
        And a 'left' horizontal alignment option
    When
        I ask my library for an URL
    Then
        I get "left/smart/84996242f65a4d864aceb125e1c4c5ba" as URL
    '''
    url = url_for(smart=True, halign='left', image_url=IMAGE_URL)
    assert url == 'left/smart/84996242f65a4d864aceb125e1c4c5ba', url


class UnsafeUrlTestCase(TestCase):

    def test_should_return_a_valid_unsafe_url_with_no_params(self):
        self.assertEqual('unsafe/%s' % IMAGE_URL,
                         unsafe_url(image_url=IMAGE_URL))

    def test_should_return_an_unsafe_url_with_width_and_height(self):
        self.assertEqual('unsafe/100x140/%s' % IMAGE_URL,
                         unsafe_url(image_url=IMAGE_URL, width=100,
                                    height=140))

    def test_should_return_an_unsafe_url_with_crop_and_smart(self):
        self.assertEqual('unsafe/100x140/smart/%s' % IMAGE_URL,
                         unsafe_url(image_url=IMAGE_URL, width=100,
                                    height=140, smart=True))
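
# --- Illustrative sketch (not part of the test suite above) -----------------
# The assertions above pin down how option segments are ordered in a
# thumbor-style URL path: "meta", then "trim", then a manual crop, then
# halign/valign, then "smart", and finally the hashed image URL. The helper
# below is a hypothetical reconstruction of that ordering for illustration
# only; it is not libthumbor's implementation, and combinations the tests do
# not exercise (e.g. meta together with trim) are assumptions.

def assemble_option_path(image_hash, meta=False, trim=None, crop=None,
                         halign=None, valign=None, smart=False):
    """Join thumbor-style URL option segments in the order the tests expect."""
    parts = []
    if meta:
        parts.append('meta')
    if trim is True:
        parts.append('trim')
    elif trim:
        pixel, tolerance = trim
        token = 'trim:' + (pixel or '')
        if tolerance:
            token += ':%d' % tolerance
        parts.append(token)
    if crop and crop != ((0, 0), (0, 0)):
        (left, top), (right, bottom) = crop
        parts.append('%dx%d:%dx%d' % (left, top, right, bottom))
    if halign and halign != 'center':
        parts.append(halign)
    if valign and valign != 'middle':
        parts.append(valign)
    if smart:
        parts.append('smart')
    parts.append(image_hash)
    return '/'.join(parts)

# Mirrors test_smart_after_alignments above:
assert assemble_option_path('84996242f65a4d864aceb125e1c4c5ba',
                            halign='left', smart=True) == \
    'left/smart/84996242f65a4d864aceb125e1c4c5ba'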
#!/usr/bin/env python # # Copyright 2011-2015 Splunk, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"): you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from __future__ import absolute_import from io import BytesIO from threading import Thread from splunklib.six.moves import BaseHTTPServer from splunklib.six.moves.urllib.request import Request, urlopen from splunklib.six.moves.urllib.error import HTTPError import splunklib.six as six from xml.etree.ElementTree import XML import json import logging from tests import testlib import unittest import socket import sys import ssl import splunklib.six.moves.http_cookies import splunklib.binding as binding from splunklib.binding import HTTPError, AuthenticationError, UrlEncoded import splunklib.data as data from splunklib import six import pytest # splunkd endpoint paths PATH_USERS = "authentication/users/" # XML Namespaces NAMESPACE_ATOM = "http://www.w3.org/2005/Atom" NAMESPACE_REST = "http://dev.splunk.com/ns/rest" NAMESPACE_OPENSEARCH = "http://a9.com/-/spec/opensearch/1.1" # XML Extended Name Fragments XNAMEF_ATOM = "{%s}%%s" % NAMESPACE_ATOM XNAMEF_REST = "{%s}%%s" % NAMESPACE_REST XNAMEF_OPENSEARCH = "{%s}%%s" % NAMESPACE_OPENSEARCH # XML Extended Names XNAME_AUTHOR = XNAMEF_ATOM % "author" XNAME_ENTRY = XNAMEF_ATOM % "entry" XNAME_FEED = XNAMEF_ATOM % "feed" XNAME_ID = XNAMEF_ATOM % "id" XNAME_TITLE = XNAMEF_ATOM % "title" def load(response): return data.load(response.body.read()) class BindingTestCase(unittest.TestCase): context = None def setUp(self): logging.info("%s", self.__class__.__name__) self.opts = testlib.parse([], {}, ".splunkrc") self.context = binding.connect(**self.opts.kwargs) logging.debug("Connected to splunkd.") class TestResponseReader(BindingTestCase): def test_empty(self): response = binding.ResponseReader(BytesIO(b"")) self.assertTrue(response.empty) self.assertEqual(response.peek(10), b"") self.assertEqual(response.read(10), b"") arr = bytearray(10) self.assertEqual(response.readinto(arr), 0) self.assertEqual(arr, bytearray(10)) self.assertTrue(response.empty) def test_read_past_end(self): txt = b"abcd" response = binding.ResponseReader(BytesIO(txt)) self.assertFalse(response.empty) self.assertEqual(response.peek(10), txt) self.assertEqual(response.read(10), txt) self.assertTrue(response.empty) self.assertEqual(response.peek(10), b"") self.assertEqual(response.read(10), b"") def test_read_partial(self): txt = b"This is a test of the emergency broadcasting system." 
response = binding.ResponseReader(BytesIO(txt)) self.assertEqual(response.peek(5), txt[:5]) self.assertFalse(response.empty) self.assertEqual(response.read(), txt) self.assertTrue(response.empty) self.assertEqual(response.read(), b'') def test_readable(self): txt = "abcd" response = binding.ResponseReader(six.StringIO(txt)) self.assertTrue(response.readable()) def test_readinto_bytearray(self): txt = b"Checking readinto works as expected" response = binding.ResponseReader(BytesIO(txt)) arr = bytearray(10) self.assertEqual(response.readinto(arr), 10) self.assertEqual(arr[:10], b"Checking r") self.assertEqual(response.readinto(arr), 10) self.assertEqual(arr[:10], b"eadinto wo") self.assertEqual(response.readinto(arr), 10) self.assertEqual(arr[:10], b"rks as exp") self.assertEqual(response.readinto(arr), 5) self.assertEqual(arr[:5], b"ected") self.assertTrue(response.empty) def test_readinto_memoryview(self): import sys if sys.version_info < (2, 7, 0): return # memoryview is new to Python 2.7 txt = b"Checking readinto works as expected" response = binding.ResponseReader(BytesIO(txt)) arr = bytearray(10) mv = memoryview(arr) self.assertEqual(response.readinto(mv), 10) self.assertEqual(arr[:10], b"Checking r") self.assertEqual(response.readinto(mv), 10) self.assertEqual(arr[:10], b"eadinto wo") self.assertEqual(response.readinto(mv), 10) self.assertEqual(arr[:10], b"rks as exp") self.assertEqual(response.readinto(mv), 5) self.assertEqual(arr[:5], b"ected") self.assertTrue(response.empty) class TestUrlEncoded(BindingTestCase): def test_idempotent(self): a = UrlEncoded('abc') self.assertEqual(a, UrlEncoded(a)) def test_append(self): self.assertEqual(UrlEncoded('a') + UrlEncoded('b'), UrlEncoded('ab')) def test_append_string(self): self.assertEqual(UrlEncoded('a') + '%', UrlEncoded('a%')) def test_append_to_string(self): self.assertEqual('%' + UrlEncoded('a'), UrlEncoded('%a')) def test_interpolation_fails(self): self.assertRaises(TypeError, lambda: UrlEncoded('%s') % 'boris') def test_chars(self): for char, code in [(' ', '%20'), ('"', '%22'), ('%', '%25')]: self.assertEqual(UrlEncoded(char), UrlEncoded(code, skip_encode=True)) def test_repr(self): self.assertEqual(repr(UrlEncoded('% %')), "UrlEncoded('% %')") class TestAuthority(unittest.TestCase): def test_authority_default(self): self.assertEqual(binding._authority(), "https://localhost:8089") def test_ipv4_host(self): self.assertEqual( binding._authority( host="splunk.utopia.net"), "https://splunk.utopia.net:8089") def test_ipv6_host(self): self.assertEqual( binding._authority( host="2001:0db8:85a3:0000:0000:8a2e:0370:7334"), "https://[2001:0db8:85a3:0000:0000:8a2e:0370:7334]:8089") def test_all_fields(self): self.assertEqual( binding._authority( scheme="http", host="splunk.utopia.net", port="471"), "http://splunk.utopia.net:471") class TestUserManipulation(BindingTestCase): def setUp(self): BindingTestCase.setUp(self) self.username = testlib.tmpname() self.password = "changeme!" 
self.roles = "power" # Delete user if it exists already try: response = self.context.delete(PATH_USERS + self.username) self.assertEqual(response.status, 200) except HTTPError as e: self.assertTrue(e.status in [400, 500]) def tearDown(self): BindingTestCase.tearDown(self) try: self.context.delete(PATH_USERS + self.username) except HTTPError as e: if e.status not in [400, 500]: raise def test_user_without_role_fails(self): self.assertRaises(binding.HTTPError, self.context.post, PATH_USERS, name=self.username, password=self.password) def test_create_user(self): response = self.context.post( PATH_USERS, name=self.username, password=self.password, roles=self.roles) self.assertEqual(response.status, 201) response = self.context.get(PATH_USERS + self.username) entry = load(response).feed.entry self.assertEqual(entry.title, self.username) def test_update_user(self): self.test_create_user() response = self.context.post( PATH_USERS + self.username, password=self.password, roles=self.roles, defaultApp="search", realname="Renzo", email="[email protected]") self.assertEqual(response.status, 200) response = self.context.get(PATH_USERS + self.username) self.assertEqual(response.status, 200) entry = load(response).feed.entry self.assertEqual(entry.title, self.username) self.assertEqual(entry.content.defaultApp, "search") self.assertEqual(entry.content.realname, "Renzo") self.assertEqual(entry.content.email, "[email protected]") def test_post_with_body_behaves(self): self.test_create_user() response = self.context.post( PATH_USERS + self.username, body="defaultApp=search", ) self.assertEqual(response.status, 200) def test_post_with_get_arguments_to_receivers_stream(self): text = 'Hello, world!' response = self.context.post( '/services/receivers/simple', headers=[('x-splunk-input-mode', 'streaming')], source='sdk', sourcetype='sdk_test', body=text ) self.assertEqual(response.status, 200) class TestSocket(BindingTestCase): def test_socket(self): socket = self.context.connect() socket.write(("POST %s HTTP/1.1\r\n" % \ self.context._abspath("some/path/to/post/to")).encode('utf-8')) socket.write(("Host: %s:%s\r\n" % \ (self.context.host, self.context.port)).encode('utf-8')) socket.write("Accept-Encoding: identity\r\n".encode('utf-8')) socket.write(("Authorization: %s\r\n" % \ self.context.token).encode('utf-8')) socket.write("X-Splunk-Input-Mode: Streaming\r\n".encode('utf-8')) socket.write("\r\n".encode('utf-8')) socket.close() # Sockets take bytes not strings # # def test_unicode_socket(self): # socket = self.context.connect() # socket.write(u"POST %s HTTP/1.1\r\n" %\ # self.context._abspath("some/path/to/post/to")) # socket.write(u"Host: %s:%s\r\n" %\ # (self.context.host, self.context.port)) # socket.write(u"Accept-Encoding: identity\r\n") # socket.write((u"Authorization: %s\r\n" %\ # self.context.token).encode('utf-8')) # socket.write(u"X-Splunk-Input-Mode: Streaming\r\n") # socket.write("\r\n") # socket.close() def test_socket_gethostbyname(self): self.assertTrue(self.context.connect()) self.context.host = socket.gethostbyname(self.context.host) self.assertTrue(self.context.connect()) class TestUnicodeConnect(BindingTestCase): def test_unicode_connect(self): opts = self.opts.kwargs.copy() opts['host'] = six.text_type(opts['host']) context = binding.connect(**opts) # Just check to make sure the service is alive response = context.get("/services") self.assertEqual(response.status, 200) @pytest.mark.smoke class TestAutologin(BindingTestCase): def test_with_autologin(self): self.context.autologin = True 
self.assertEqual(self.context.get("/services").status, 200) self.context.logout() self.assertEqual(self.context.get("/services").status, 200) def test_without_autologin(self): self.context.autologin = False self.assertEqual(self.context.get("/services").status, 200) self.context.logout() self.assertRaises(AuthenticationError, self.context.get, "/services") class TestAbspath(BindingTestCase): def setUp(self): BindingTestCase.setUp(self) self.kwargs = self.opts.kwargs.copy() if 'app' in self.kwargs: del self.kwargs['app'] if 'owner' in self.kwargs: del self.kwargs['owner'] def test_default(self): path = self.context._abspath("foo", owner=None, app=None) self.assertTrue(isinstance(path, UrlEncoded)) self.assertEqual(path, "/services/foo") def test_with_owner(self): path = self.context._abspath("foo", owner="me", app=None) self.assertTrue(isinstance(path, UrlEncoded)) self.assertEqual(path, "/servicesNS/me/system/foo") def test_with_app(self): path = self.context._abspath("foo", owner=None, app="MyApp") self.assertTrue(isinstance(path, UrlEncoded)) self.assertEqual(path, "/servicesNS/nobody/MyApp/foo") def test_with_both(self): path = self.context._abspath("foo", owner="me", app="MyApp") self.assertTrue(isinstance(path, UrlEncoded)) self.assertEqual(path, "/servicesNS/me/MyApp/foo") def test_user_sharing(self): path = self.context._abspath("foo", owner="me", app="MyApp", sharing="user") self.assertTrue(isinstance(path, UrlEncoded)) self.assertEqual(path, "/servicesNS/me/MyApp/foo") def test_sharing_app(self): path = self.context._abspath("foo", owner="me", app="MyApp", sharing="app") self.assertTrue(isinstance(path, UrlEncoded)) self.assertEqual(path, "/servicesNS/nobody/MyApp/foo") def test_sharing_global(self): path = self.context._abspath("foo", owner="me", app="MyApp",sharing="global") self.assertTrue(isinstance(path, UrlEncoded)) self.assertEqual(path, "/servicesNS/nobody/MyApp/foo") def test_sharing_system(self): path = self.context._abspath("foo bar", owner="me", app="MyApp",sharing="system") self.assertTrue(isinstance(path, UrlEncoded)) self.assertEqual(path, "/servicesNS/nobody/system/foo%20bar") def test_url_forbidden_characters(self): path = self.context._abspath('/a/b c/d') self.assertTrue(isinstance(path, UrlEncoded)) self.assertEqual(path, '/a/b%20c/d') def test_context_defaults(self): context = binding.connect(**self.kwargs) path = context._abspath("foo") self.assertTrue(isinstance(path, UrlEncoded)) self.assertEqual(path, "/services/foo") def test_context_with_owner(self): context = binding.connect(owner="me", **self.kwargs) path = context._abspath("foo") self.assertTrue(isinstance(path, UrlEncoded)) self.assertEqual(path, "/servicesNS/me/system/foo") def test_context_with_app(self): context = binding.connect(app="MyApp", **self.kwargs) path = context._abspath("foo") self.assertTrue(isinstance(path, UrlEncoded)) self.assertEqual(path, "/servicesNS/nobody/MyApp/foo") def test_context_with_both(self): context = binding.connect(owner="me", app="MyApp", **self.kwargs) path = context._abspath("foo") self.assertTrue(isinstance(path, UrlEncoded)) self.assertEqual(path, "/servicesNS/me/MyApp/foo") def test_context_with_user_sharing(self): context = binding.connect( owner="me", app="MyApp", sharing="user", **self.kwargs) path = context._abspath("foo") self.assertTrue(isinstance(path, UrlEncoded)) self.assertEqual(path, "/servicesNS/me/MyApp/foo") def test_context_with_app_sharing(self): context = binding.connect( owner="me", app="MyApp", sharing="app", **self.kwargs) path = 
context._abspath("foo") self.assertTrue(isinstance(path, UrlEncoded)) self.assertEqual(path, "/servicesNS/nobody/MyApp/foo") def test_context_with_global_sharing(self): context = binding.connect( owner="me", app="MyApp", sharing="global", **self.kwargs) path = context._abspath("foo") self.assertTrue(isinstance(path, UrlEncoded)) self.assertEqual(path, "/servicesNS/nobody/MyApp/foo") def test_context_with_system_sharing(self): context = binding.connect( owner="me", app="MyApp", sharing="system", **self.kwargs) path = context._abspath("foo") self.assertTrue(isinstance(path, UrlEncoded)) self.assertEqual(path, "/servicesNS/nobody/system/foo") def test_context_with_owner_as_email(self): context = binding.connect(owner="[email protected]", **self.kwargs) path = context._abspath("foo") self.assertTrue(isinstance(path, UrlEncoded)) self.assertEqual(path, "/servicesNS/me%40me.com/system/foo") self.assertEqual(path, UrlEncoded("/servicesNS/[email protected]/system/foo")) # An urllib2 based HTTP request handler, used to test the binding layers # support for pluggable request handlers. def urllib2_handler(url, message, **kwargs): method = message['method'].lower() data = message.get('body', b"") if method == 'post' else None headers = dict(message.get('headers', [])) req = Request(url, data, headers) try: # If running Python 2.7.9+, disable SSL certificate validation if sys.version_info >= (2, 7, 9): response = urlopen(req, context=ssl._create_unverified_context()) else: response = urlopen(req) except HTTPError as response: pass # Propagate HTTP errors via the returned response message return { 'status': response.code, 'reason': response.msg, 'headers': dict(response.info()), 'body': BytesIO(response.read()) } def isatom(body): """Answers if the given response body looks like ATOM.""" root = XML(body) return \ root.tag == XNAME_FEED and \ root.find(XNAME_AUTHOR) is not None and \ root.find(XNAME_ID) is not None and \ root.find(XNAME_TITLE) is not None class TestPluggableHTTP(testlib.SDKTestCase): # Verify pluggable HTTP reqeust handlers. 
def test_handlers(self): paths = ["/services", "authentication/users", "search/jobs"] handlers = [binding.handler(), # default handler urllib2_handler] for handler in handlers: logging.debug("Connecting with handler %s", handler) context = binding.connect( handler=handler, **self.opts.kwargs) for path in paths: body = context.get(path).body.read() self.assertTrue(isatom(body)) @pytest.mark.smoke class TestLogout(BindingTestCase): def test_logout(self): response = self.context.get("/services") self.assertEqual(response.status, 200) self.context.logout() self.assertEqual(self.context.token, binding._NoAuthenticationToken) self.assertEqual(self.context.get_cookies(), {}) self.assertRaises(AuthenticationError, self.context.get, "/services") self.assertRaises(AuthenticationError, self.context.post, "/services") self.assertRaises(AuthenticationError, self.context.delete, "/services") self.context.login() response = self.context.get("/services") self.assertEqual(response.status, 200) class TestCookieAuthentication(unittest.TestCase): def setUp(self): self.opts = testlib.parse([], {}, ".splunkrc") self.context = binding.connect(**self.opts.kwargs) # Skip these tests if running below Splunk 6.2, cookie-auth didn't exist before import splunklib.client as client service = client.Service(**self.opts.kwargs) # TODO: Workaround the fact that skipTest is not defined by unittest2.TestCase service.login() splver = service.splunk_version if splver[:2] < (6, 2): self.skipTest("Skipping cookie-auth tests, running in %d.%d.%d, this feature was added in 6.2+" % splver) if getattr(unittest.TestCase, 'assertIsNotNone', None) is None: def assertIsNotNone(self, obj, msg=None): if obj is None: raise self.failureException(msg or '%r is not None' % obj) @pytest.mark.smoke def test_cookie_in_auth_headers(self): self.assertIsNotNone(self.context._auth_headers) self.assertNotEqual(self.context._auth_headers, []) self.assertEqual(len(self.context._auth_headers), 1) self.assertEqual(len(self.context._auth_headers), 1) self.assertEqual(self.context._auth_headers[0][0], "Cookie") self.assertEqual(self.context._auth_headers[0][1][:8], "splunkd_") @pytest.mark.smoke def test_got_cookie_on_connect(self): self.assertIsNotNone(self.context.get_cookies()) self.assertNotEqual(self.context.get_cookies(), {}) self.assertEqual(len(self.context.get_cookies()), 1) self.assertEqual(list(self.context.get_cookies().keys())[0][:8], "splunkd_") @pytest.mark.smoke def test_cookie_with_autologin(self): self.context.autologin = True self.assertEqual(self.context.get("/services").status, 200) self.assertTrue(self.context.has_cookies()) self.context.logout() self.assertFalse(self.context.has_cookies()) self.assertEqual(self.context.get("/services").status, 200) self.assertTrue(self.context.has_cookies()) @pytest.mark.smoke def test_cookie_without_autologin(self): self.context.autologin = False self.assertEqual(self.context.get("/services").status, 200) self.assertTrue(self.context.has_cookies()) self.context.logout() self.assertFalse(self.context.has_cookies()) self.assertRaises(AuthenticationError, self.context.get, "/services") @pytest.mark.smoke def test_got_updated_cookie_with_get(self): old_cookies = self.context.get_cookies() resp = self.context.get("apps/local") found = False for key, value in resp.headers: if key.lower() == "set-cookie": found = True self.assertEqual(value[:8], "splunkd_") new_cookies = {} binding._parse_cookies(value, new_cookies) # We're only expecting 1 in this scenario self.assertEqual(len(old_cookies), 1) 
self.assertTrue(len(list(new_cookies.values())), 1) self.assertEqual(old_cookies, new_cookies) self.assertEqual(list(new_cookies.values())[0], list(old_cookies.values())[0]) self.assertTrue(found) def test_login_fails_with_bad_cookie(self): new_context = binding.connect(**{"cookie": "bad=cookie"}) # We should get an error if using a bad cookie try: new_context.get("apps/local") self.fail() except AuthenticationError as ae: self.assertEqual(str(ae), "Request failed: Session is not logged in.") def test_login_with_multiple_cookies(self): bad_cookie = 'bad=cookie' new_context = binding.connect(**{"cookie": bad_cookie}) # We should get an error if using a bad cookie try: new_context.get("apps/local") self.fail() except AuthenticationError as ae: self.assertEqual(str(ae), "Request failed: Session is not logged in.") # Bring in a valid cookie now for key, value in self.context.get_cookies().items(): new_context.get_cookies()[key] = value self.assertEqual(len(new_context.get_cookies()), 2) self.assertTrue('bad' in list(new_context.get_cookies().keys())) self.assertTrue('cookie' in list(new_context.get_cookies().values())) for k, v in self.context.get_cookies().items(): self.assertEqual(new_context.get_cookies()[k], v) self.assertEqual(new_context.get("apps/local").status, 200) @pytest.mark.smoke def test_login_fails_without_cookie_or_token(self): opts = { 'host': self.opts.kwargs['host'], 'port': self.opts.kwargs['port'] } try: binding.connect(**opts) self.fail() except AuthenticationError as ae: self.assertEqual(str(ae), "Login failed.") class TestNamespace(unittest.TestCase): def test_namespace(self): tests = [ ({ }, { 'sharing': None, 'owner': None, 'app': None }), ({ 'owner': "Bob" }, { 'sharing': None, 'owner': "Bob", 'app': None }), ({ 'app': "search" }, { 'sharing': None, 'owner': None, 'app': "search" }), ({ 'owner': "Bob", 'app': "search" }, { 'sharing': None, 'owner': "Bob", 'app': "search" }), ({ 'sharing': "user", 'owner': "[email protected]" }, { 'sharing': "user", 'owner': "[email protected]", 'app': None }), ({ 'sharing': "user" }, { 'sharing': "user", 'owner': None, 'app': None }), ({ 'sharing': "user", 'owner': "Bob" }, { 'sharing': "user", 'owner': "Bob", 'app': None }), ({ 'sharing': "user", 'app': "search" }, { 'sharing': "user", 'owner': None, 'app': "search" }), ({ 'sharing': "user", 'owner': "Bob", 'app': "search" }, { 'sharing': "user", 'owner': "Bob", 'app': "search" }), ({ 'sharing': "app" }, { 'sharing': "app", 'owner': "nobody", 'app': None }), ({ 'sharing': "app", 'owner': "Bob" }, { 'sharing': "app", 'owner': "nobody", 'app': None }), ({ 'sharing': "app", 'app': "search" }, { 'sharing': "app", 'owner': "nobody", 'app': "search" }), ({ 'sharing': "app", 'owner': "Bob", 'app': "search" }, { 'sharing': "app", 'owner': "nobody", 'app': "search" }), ({ 'sharing': "global" }, { 'sharing': "global", 'owner': "nobody", 'app': None }), ({ 'sharing': "global", 'owner': "Bob" }, { 'sharing': "global", 'owner': "nobody", 'app': None }), ({ 'sharing': "global", 'app': "search" }, { 'sharing': "global", 'owner': "nobody", 'app': "search" }), ({ 'sharing': "global", 'owner': "Bob", 'app': "search" }, { 'sharing': "global", 'owner': "nobody", 'app': "search" }), ({ 'sharing': "system" }, { 'sharing': "system", 'owner': "nobody", 'app': "system" }), ({ 'sharing': "system", 'owner': "Bob" }, { 'sharing': "system", 'owner': "nobody", 'app': "system" }), ({ 'sharing': "system", 'app': "search" }, { 'sharing': "system", 'owner': "nobody", 'app': "system" }), ({ 'sharing': "system", 
'owner': "Bob", 'app': "search" }, { 'sharing': "system", 'owner': "nobody", 'app': "system" }), ({ 'sharing': 'user', 'owner': '-', 'app': '-'}, { 'sharing': 'user', 'owner': '-', 'app': '-'})] for kwargs, expected in tests: namespace = binding.namespace(**kwargs) for k, v in six.iteritems(expected): self.assertEqual(namespace[k], v) def test_namespace_fails(self): self.assertRaises(ValueError, binding.namespace, sharing="gobble") @pytest.mark.smoke class TestBasicAuthentication(unittest.TestCase): def setUp(self): self.opts = testlib.parse([], {}, ".splunkrc") opts = self.opts.kwargs.copy() opts["basic"] = True opts["username"] = self.opts.kwargs["username"] opts["password"] = self.opts.kwargs["password"] self.context = binding.connect(**opts) import splunklib.client as client service = client.Service(**opts) if getattr(unittest.TestCase, 'assertIsNotNone', None) is None: def assertIsNotNone(self, obj, msg=None): if obj is None: raise self.failureException(msg or '%r is not None' % obj) def test_basic_in_auth_headers(self): self.assertIsNotNone(self.context._auth_headers) self.assertNotEqual(self.context._auth_headers, []) self.assertEqual(len(self.context._auth_headers), 1) self.assertEqual(len(self.context._auth_headers), 1) self.assertEqual(self.context._auth_headers[0][0], "Authorization") self.assertEqual(self.context._auth_headers[0][1][:6], "Basic ") self.assertEqual(self.context.get("/services").status, 200) @pytest.mark.smoke class TestTokenAuthentication(BindingTestCase): def test_preexisting_token(self): token = self.context.token opts = self.opts.kwargs.copy() opts["token"] = token opts["username"] = "boris the mad baboon" opts["password"] = "nothing real" newContext = binding.Context(**opts) response = newContext.get("/services") self.assertEqual(response.status, 200) socket = newContext.connect() socket.write(("POST %s HTTP/1.1\r\n" % \ self.context._abspath("some/path/to/post/to")).encode('utf-8')) socket.write(("Host: %s:%s\r\n" % \ (self.context.host, self.context.port)).encode('utf-8')) socket.write(("Accept-Encoding: identity\r\n").encode('utf-8')) socket.write(("Authorization: %s\r\n" % \ self.context.token).encode('utf-8')) socket.write("X-Splunk-Input-Mode: Streaming\r\n".encode('utf-8')) socket.write(("\r\n").encode('utf-8')) socket.close() def test_preexisting_token_sans_splunk(self): token = self.context.token if token.startswith('Splunk '): token = token.split(' ', 1)[1] self.assertFalse(token.startswith('Splunk ')) else: self.fail('Token did not start with "Splunk ".') opts = self.opts.kwargs.copy() opts["token"] = token opts["username"] = "boris the mad baboon" opts["password"] = "nothing real" newContext = binding.Context(**opts) response = newContext.get("/services") self.assertEqual(response.status, 200) socket = newContext.connect() socket.write(("POST %s HTTP/1.1\r\n" %\ self.context._abspath("some/path/to/post/to")).encode('utf-8')) socket.write(("Host: %s:%s\r\n" %\ (self.context.host, self.context.port)).encode('utf-8')) socket.write("Accept-Encoding: identity\r\n".encode('utf-8')) socket.write(("Authorization: %s\r\n" %\ self.context.token).encode('utf-8')) socket.write(("X-Splunk-Input-Mode: Streaming\r\n").encode('utf-8')) socket.write(("\r\n").encode('utf-8')) socket.close() def test_connect_with_preexisting_token_sans_user_and_pass(self): token = self.context.token opts = self.opts.kwargs.copy() del opts['username'] del opts['password'] opts["token"] = token newContext = binding.connect(**opts) response = newContext.get('/services') 
self.assertEqual(response.status, 200) socket = newContext.connect() socket.write(("POST %s HTTP/1.1\r\n" % \ self.context._abspath("some/path/to/post/to")).encode('utf-8')) socket.write(("Host: %s:%s\r\n" % \ (self.context.host, self.context.port)).encode('utf-8')) socket.write("Accept-Encoding: identity\r\n".encode('utf-8')) socket.write(("Authorization: %s\r\n" % \ self.context.token).encode('utf-8')) socket.write("X-Splunk-Input-Mode: Streaming\r\n".encode('utf-8')) socket.write("\r\n".encode('utf-8')) socket.close() class TestPostWithBodyParam(unittest.TestCase): def test_post(self): def handler(url, message, **kwargs): assert six.ensure_str(url) == "https://localhost:8089/servicesNS/testowner/testapp/foo/bar" assert six.ensure_str(message["body"]) == "testkey=testvalue" return splunklib.data.Record({ "status": 200, "headers": [], }) ctx = binding.Context(handler=handler) ctx.post("foo/bar", owner="testowner", app="testapp", body={"testkey": "testvalue"}) def test_post_with_params_and_body(self): def handler(url, message, **kwargs): assert url == "https://localhost:8089/servicesNS/testowner/testapp/foo/bar?extrakey=extraval" assert six.ensure_str(message["body"]) == "testkey=testvalue" return splunklib.data.Record({ "status": 200, "headers": [], }) ctx = binding.Context(handler=handler) ctx.post("foo/bar", extrakey="extraval", owner="testowner", app="testapp", body={"testkey": "testvalue"}) def test_post_with_params_and_no_body(self): def handler(url, message, **kwargs): assert url == "https://localhost:8089/servicesNS/testowner/testapp/foo/bar" assert six.ensure_str(message["body"]) == "extrakey=extraval" return splunklib.data.Record({ "status": 200, "headers": [], }) ctx = binding.Context(handler=handler) ctx.post("foo/bar", extrakey="extraval", owner="testowner", app="testapp") def _wrap_handler(func, response_code=200, body=""): def wrapped(handler_self): result = func(handler_self) if result is None: handler_self.send_response(response_code) handler_self.end_headers() handler_self.wfile.write(body) return wrapped class MockServer(object): def __init__(self, port=9093, **handlers): methods = {"do_" + k: _wrap_handler(v) for (k, v) in handlers.items()} def init(handler_self, socket, address, server): BaseHTTPServer.BaseHTTPRequestHandler.__init__(handler_self, socket, address, server) def log(*args): # To silence server access logs pass methods["__init__"] = init methods["log_message"] = log Handler = type("Handler", (BaseHTTPServer.BaseHTTPRequestHandler, object), methods) self._svr = BaseHTTPServer.HTTPServer(("localhost", port), Handler) def run(): self._svr.handle_request() self._thread = Thread(target=run) self._thread.daemon = True def __enter__(self): self._thread.start() return self._svr def __exit__(self, typ, value, traceback): self._thread.join(10) self._svr.server_close() class TestFullPost(unittest.TestCase): def test_post_with_body_urlencoded(self): def check_response(handler): length = int(handler.headers.get('content-length', 0)) body = handler.rfile.read(length) assert six.ensure_str(body) == "foo=bar" with MockServer(POST=check_response): ctx = binding.connect(port=9093, scheme='http', token="waffle") ctx.post("/", foo="bar") def test_post_with_body_string(self): def check_response(handler): length = int(handler.headers.get('content-length', 0)) body = handler.rfile.read(length) assert six.ensure_str(handler.headers['content-type']) == 'application/json' assert json.loads(body)["baz"] == "baf" with MockServer(POST=check_response): ctx = binding.connect(port=9093, 
scheme='http', token="waffle", headers=[("Content-Type", "application/json")]) ctx.post("/", foo="bar", body='{"baz": "baf"}') def test_post_with_body_dict(self): def check_response(handler): length = int(handler.headers.get('content-length', 0)) body = handler.rfile.read(length) assert six.ensure_str(handler.headers['content-type']) == 'application/x-www-form-urlencoded' assert six.ensure_str(body) == 'baz=baf&hep=cat' or six.ensure_str(body) == 'hep=cat&baz=baf' with MockServer(POST=check_response): ctx = binding.connect(port=9093, scheme='http', token="waffle") ctx.post("/", foo="bar", body={"baz": "baf", "hep": "cat"}) if __name__ == "__main__": try: import unittest2 as unittest except ImportError: import unittest unittest.main()
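
# --- Illustrative sketch (not part of the test suite above) -----------------
# urllib2_handler above shows the contract a pluggable handler passed to
# binding.connect(handler=...) has to honour: it is called with (url, message)
# and returns a dict carrying 'status', 'reason', 'headers' and 'body' (a
# file-like object). The variant below follows that same contract but is built
# on the third-party `requests` package, which is an assumption and not an SDK
# dependency; treat it as a sketch rather than part of splunklib.

def requests_handler(url, message, **kwargs):
    from io import BytesIO
    import requests  # assumed to be installed

    method = message['method'].lower()
    data = message.get('body', b"") if method == 'post' else None
    headers = dict(message.get('headers', []))
    # verify=False mirrors the unverified-SSL behaviour of urllib2_handler,
    # since splunkd typically serves a self-signed certificate.
    resp = requests.request(method, url, data=data, headers=headers,
                            verify=False)
    return {
        'status': resp.status_code,
        'reason': resp.reason,
        'headers': dict(resp.headers),
        'body': BytesIO(resp.content),
    }

# Usage would mirror TestPluggableHTTP.test_handlers above:
#   context = binding.connect(handler=requests_handler, **opts.kwargs)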
import logging import os import tkinter import tkinter.filedialog import yaml from msquaredc import persistence from msquaredc import utils from msquaredc.ui.gui.widgets import ScaleWidget class MainFrame(object): # pragma no cover def __init__(self, widgets): self.widgets = widgets self.tk = tkinter.Tk() self.tk.title("msquaredc") self.q = self.a = None self.__is_fullscreen = False self.frame = tkinter.Frame(self.tk) self.frame.grid(row=0, column=0) self.init_keybindings() self.init_menubar() self.init_content() self.open_files = {"save": None, "open": None} self.__input = None self.showResults("<No file loaded!>", "<Please open a file!>") self.act = None self.prev = [] self.user = None def init(self): # Show NameDialog # validate output # draw gui pass def init_menubar(self): menubar = tkinter.Menu(self.tk) self.tk.config(menu=menubar) fileMenu = tkinter.Menu(menubar) fileMenu.add_command(label="Open", command=persistence.open_file) fileMenu.add_command(label="Save", command=self.save_file) fileMenu.add_separator() fileMenu.add_command(label="Exit", underline=0, command=self.onExit) menubar.add_cascade(label="File", underline=0, menu=fileMenu) def showResults(self, q, a): self.q = tkinter.Label(self.tk, text=q) self.q.grid(column=2, row=1, sticky=tkinter.NSEW, columnspan=1) self.a = tkinter.Label(self.tk, text=a) self.a.grid(column=2, row=2, sticky=tkinter.NSEW, columnspan=1) def init_content(self): for i, j in enumerate(self.widgets): j.draw(self.tk, i + 3) self.tk.grid_rowconfigure(0, weight=1) self.tk.grid_rowconfigure(len(self.widgets) + 3, weight=1) self.tk.grid_columnconfigure(0, weight=1) self.tk.grid_columnconfigure(len(self.widgets) + 3, weight=1) def init_keybindings(self): self.tk.bind("<F11>", self.toggle_fullscreen) self.tk.bind("<Escape>", self.end_fullscreen) def toggle_fullscreen(self, event=None): self.__is_fullscreen = not self.__is_fullscreen # Just toggling the boolean self.tk.attributes('-fullscreen', self.__is_fullscreen) self.tk.overrideredirect(self.__is_fullscreen) return "break" def end_fullscreen(self, event=None): self.__is_fullscreen = False self.tk.attributes("-fullscreen", False) self.tk.overrideredirect(False) return "break" def save_file(self): filename = tkinter.filedialog.asksaveasfilename() try: file = open(filename, 'w') self.open_files["save"].append(file) except FileNotFoundError: pass def onExit(self): for category in self.open_files: self.open_files[category].close() self.tk.quit() def start(self): self.tk.mainloop() class MainApplication(tkinter.Frame): # pragma no cover def __init__(self, parent, *args, **kwargs): tkinter.Frame.__init__(self, parent, *args, **kwargs) self.parent = parent self.callbacks = {} self.statusbar = StatusBar(self) self.toolbar = ToolBar(parent, self) self.navbar = NavBar(self) self.main = Main(self, "config.yml", "jerome.txt") self.statusbar.pack(side="bottom", fill="x") # self.toolbar.pack(side="top", fill="x") self.navbar.pack(side="bottom", anchor="se") self.main.pack(side="top", expand=True) # fill removed def add_callback(self, name, function): callbacks = self.get_callbacks(name) callbacks.append(function) self.callbacks[name] = callbacks def get_callbacks(self, name): return self.callbacks.get(name, []) def handle_callback(self, name): if self.get_callbacks(name): for i in self.get_callbacks(name): i() else: self.notice("".join(["The event ", name, " has been unhandled!"])) def notice(self, string): logging.log(logging.INFO, string) self.statusbar.variable.set(string) class StatusBar(tkinter.Frame): # pragma no 
cover def __init__(self, master): tkinter.Frame.__init__(self, master) self.variable = tkinter.StringVar() self.label = tkinter.Label(self, bd=1, relief=tkinter.SUNKEN, anchor=tkinter.W, textvariable=self.variable, font=('arial', 10, 'normal')) self.variable.set('Status Bar') self.label.pack(fill=tkinter.X) class NavBar(tkinter.Frame): # pragma no cover def __init__(self, master): tkinter.Frame.__init__(self, master) self.next = tkinter.Button(text="Next >", command=lambda: master.handle_callback("next")) self.prev = tkinter.Button(text="< Previous", command=lambda: master.handle_callback("prev")) self.prev.grid(column=0, row=0, in_=self, pady=5) self.next.grid(column=1, row=0, in_=self, padx=5, pady=5) class ToolBar(tkinter.Menu): # pragma no cover def __init__(self, master, handler): tkinter.Menu.__init__(self, master) master.config(menu=self) fileMenu = tkinter.Menu(self, tearoff=False) fileMenu.add_command(label="Open", command=lambda: handler.handle_callback("open")) fileMenu.add_command(label="Save", command=lambda: handler.handle_callback("save")) fileMenu.add_separator() fileMenu.add_command(label="Exit", underline=0, command=lambda: handler.handle_callback("exit")) self.add_cascade(label="File", underline=0, menu=fileMenu) class Main(tkinter.Frame): # pragma no cover def __init__(self, master, paper, data): tkinter.Frame.__init__(self, master) master.add_callback("next", lambda: Main.get_next(self)) self.master = master # Get paper information ci = None with open(paper) as stream: ci = yaml.load(stream) self.questions = ci["questions"] self.title = ci["title"] self.order = ci["order"] self.show = ci["show"] self.user = ci["user"] # Get Data self.data = persistence.obtain(data) self.infofield = InfoField(self) self.infofield.grid(row=0) self.infofield.title = self.title self.widgetfield = WidgetField(self, {}) self.current_question_index = 0 self.current_answerer_index = 0 self.start() def run(self): questions = [i["text"] for i in self.questions] for i, question in enumerate(self.questions): # Collect answers to code # coded = [] if "out{}.txt".format(i) in os.listdir(os.getcwd()): # coded = persistence.obtain("out{}.txt".format(i)) pass for answerer in self.data: for column in answerer: if column not in questions: pass def start(self): # Pick question + solution # Build and display self.infofield.question = self.questions[self.current_question_index]["text"] self.infofield.answer = self.data[self.current_answerer_index][self.infofield.question] self.widgetfield = WidgetField(self, self.questions[self.current_question_index]["coding"]) self.widgetfield.show() self.widgetfield.grid(row=1) def get_next(self): # store previous used = [i["text"] for i in self.questions] sample = {i: self.data[self.current_answerer_index][i] for i in self.data[self.current_answerer_index] if i not in used} sample["question"] = self.questions[self.current_question_index]["text"] sample.update(self.widgetfield.get_res_dict()) print(sample) persistence.persist("out{}.txt".format(self.current_question_index), sample, "a+") self.current_answerer_index += 1 if self.current_answerer_index >= len(self.data): self.current_answerer_index = 0 self.current_question_index += 1 # Check for resumables if self.current_question_index >= len(self.questions): self.infofield.question = "Finished" self.infofield.answer = "You may now leave" else: self.infofield.question = self.questions[self.current_question_index]["text"] if self.infofield.question in self.data[self.current_answerer_index]: self.infofield.answer = 
self.data[self.current_answerer_index][self.infofield.question] else: best = -1 element = None for i in self.data[self.current_answerer_index]: res = utils.lcs(i, self.infofield.question) if len(res) > best: element = i best = len(res) self.infofield.answer = self.data[self.current_answerer_index][element] self.widgetfield.grid_forget() self.widgetfield.destroy() self.widgetfield = WidgetField(self, self.questions[self.current_question_index]["coding"]) self.widgetfield.show() self.widgetfield.grid(row=1) class InfoField(tkinter.Frame): # pragma no cover def __init__(self, master): font = ("serif", 16) tkinter.Frame.__init__(self, master) self.__titlevar = tkinter.StringVar(self, "Title") self.__title = tkinter.Label(master, textvariable=self.__titlevar, font=("Helvetica", 18), pady=10) self.__questionvar = tkinter.StringVar(self, "Question") self.__question = tkinter.Label(master, textvariable=self.__questionvar, anchor=tkinter.W, font=("serif", 16, "bold"), pady=5) self.__answervar = tkinter.StringVar(self, "Answer") self.__answer = tkinter.Label(master, textvariable=self.__answervar, anchor=tkinter.W, font=("Times", 16), pady=5, relief="groove") self.__lengthvar = tkinter.StringVar(self, "Length") self.__length = tkinter.Label(master, textvariable=self.__lengthvar, anchor=tkinter.W, font=font, pady=5) self.q = tkinter.Label(self, text="Question:", anchor=tkinter.E, font=font, pady=5) self.a = tkinter.Label(self, text="Answer:", anchor=tkinter.E, font=font, pady=10) self.length_label = tkinter.Label(self, text="Length:", anchor=tkinter.E, font=font, pady=5) self.__title.grid(in_=self, row=0, columnspan=2) self.q.grid(in_=self, column=0, row=1) self.__question.grid(in_=self, column=1, row=1) self.a.grid(in_=self, column=0, row=2) self.__answer.grid(in_=self, column=1, row=2) # self.l.grid(in_=self,column=0,row=3) # self.__length.grid(in_=self,column=1,row=3) @property def title(self): return self.__titlevar.get() @title.setter def title(self, value): self.__titlevar.set(value) @property def question(self): return self.__questionvar.get() @question.setter def question(self, value): self.__questionvar.set(value) @property def answer(self): return self.__answervar.get() @answer.setter def answer(self, value): self.__answervar.set(value) self.__lengthvar.set(" ".join(["Symbols", str(len(self.answer)), "Words", str(len(self.answer.split(" ")))])) @property def length(self): return self.__lengthvar.get() @length.setter def length(self, value): self.__lengthvar.set(value) class WidgetField(tkinter.Frame): # pragma no cover def __init__(self, master, criterias): tkinter.Frame.__init__(self, master) self.criterias = criterias self.widgets = [] for i in criterias: self.widgets.append(ScaleWidget(master, i["criteria"], i["min"], i["max"])) def show(self): for i, element in enumerate(self.widgets): element.variables[0].grid(column=0, row=i, in_=self) for i, element in enumerate(self.widgets): element.label.grid(column=1, row=i, in_=self) for i, element in enumerate(self.widgets): index = 2 for k, j in enumerate(element.variables[1:]): j.grid(column=index + k, row=i, in_=self) def get_res_dict(self): return {element.label.cget('text'): element.variables[0].get() for element in self.widgets}
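
# --- Illustrative sketch (not part of msquaredc) -----------------------------
# Main() above expects the "paper" YAML file to provide the keys it reads:
# title, order, show, user and a list of questions, each carrying the question
# text plus the coding criteria consumed by WidgetField/ScaleWidget. The
# structure below is inferred from those lookups and is only an example; the
# concrete values for "order" and "show" are assumptions.

EXAMPLE_PAPER = {
    "title": "Example coding sheet",
    "order": "sequential",   # value format is an assumption
    "show": True,            # value format is an assumption
    "user": "coder-1",
    "questions": [
        {
            "text": "What did you like about the course?",
            "coding": [
                {"criteria": "positivity", "min": 1, "max": 5},
                {"criteria": "specificity", "min": 1, "max": 5},
            ],
        },
    ],
}

# yaml.safe_dump(EXAMPLE_PAPER) written to config.yml would satisfy the
# `with open(paper) as stream: ci = yaml.load(stream)` lookups above.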
# -*- coding: utf-8 -*- from reportlab.graphics.barcode import createBarcodeDrawing from reportlab.lib.pagesizes import A4 from reportlab.lib.units import inch, mm from reportlab.platypus.doctemplate import NextPageTemplate, FrameBreak from reportlab.platypus.flowables import Spacer, HRFlowable, PageBreak, Flowable from reportlab.platypus.frames import Frame from reportlab.platypus.paraparser import tt2ps, ABag from xhtml2pdf import xhtml2pdf_reportlab from xhtml2pdf.util import getColor, getSize, getAlign, dpi96 from xhtml2pdf.xhtml2pdf_reportlab import PmlImage, PmlPageTemplate import copy import logging import re import warnings # Copyright 2010 Dirk Holtwick, holtwick.it # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. log = logging.getLogger("xhtml2pdf") def deprecation(message): warnings.warn("<" + message + "> is deprecated!", DeprecationWarning, stacklevel=2) class pisaTag: """ The default class for a tag definition """ def __init__(self, node, attr): self.node = node self.tag = node.tagName self.attr = attr def start(self, c): pass def end(self, c): pass class pisaTagBODY(pisaTag): """ We can also asume that there is a BODY tag because html5lib adds it for us. Here we take the base font size for later calculations in the FONT tag. """ def start(self, c): c.baseFontSize = c.frag.fontSize # print "base font size", c.baseFontSize class pisaTagTITLE(pisaTag): def end(self, c): c.meta["title"] = c.text c.clearFrag() class pisaTagSTYLE(pisaTag): def start(self, c): c.addPara() def end(self, c): c.clearFrag() class pisaTagMETA(pisaTag): def start(self, c): name = self.attr.name.lower() if name in ("author" , "subject", "keywords"): c.meta[name] = self.attr.content class pisaTagSUP(pisaTag): def start(self, c): c.frag.super = 1 class pisaTagSUB(pisaTag): def start(self, c): c.frag.sub = 1 class pisaTagA(pisaTag): rxLink = re.compile("^(#|[a-z]+\:).*") def start(self, c): attr = self.attr # XXX Also support attr.id ? if attr.name: # Important! Make sure that cbDefn is not inherited by other # fragments because of a bug in Reportlab! 
afrag = c.frag.clone() # These 3 lines are needed to fix an error with non internal fonts afrag.fontName = "Helvetica" afrag.bold = 0 afrag.italic = 0 afrag.cbDefn = ABag( kind="anchor", name=attr.name, label="anchor") c.fragAnchor.append(afrag) c.anchorName.append(attr.name) if attr.href and self.rxLink.match(attr.href): c.frag.link = attr.href def end(self, c): pass class pisaTagFONT(pisaTag): # Source: http://www.w3.org/TR/CSS21/fonts.html#propdef-font-size def start(self, c): if self.attr["color"] is not None: c.frag.textColor = getColor(self.attr["color"]) if self.attr["face"] is not None: c.frag.fontName = c.getFontName(self.attr["face"]) if self.attr["size"] is not None: size = getSize(self.attr["size"], c.frag.fontSize, c.baseFontSize) c.frag.fontSize = max(size, 1.0) def end(self, c): pass class pisaTagP(pisaTag): def start(self, c): # save the type of tag; it's used in PmlBaseDoc.afterFlowable() # to check if we need to add an outline-entry # c.frag.tag = self.tag if self.attr.align is not None: #print self.attr.align, getAlign(self.attr.align) c.frag.alignment = getAlign(self.attr.align) class pisaTagDIV(pisaTagP): pass class pisaTagH1(pisaTagP): pass class pisaTagH2(pisaTagP): pass class pisaTagH3(pisaTagP): pass class pisaTagH4(pisaTagP): pass class pisaTagH5(pisaTagP): pass class pisaTagH6(pisaTagP): pass def listDecimal(c): c.listCounter += 1 return unicode("%d." % c.listCounter) _bullet = u"\u2022" _list_style_type = { "none": u"", "disc": _bullet, "circle": _bullet, # XXX PDF has no equivalent "square": _bullet, # XXX PDF has no equivalent "decimal": listDecimal, "decimal-leading-zero": listDecimal, "lower-roman": listDecimal, "upper-roman": listDecimal, "hebrew": listDecimal, "georgian": listDecimal, "armenian": listDecimal, "cjk-ideographic": listDecimal, "hiragana": listDecimal, "katakana": listDecimal, "hiragana-iroha": listDecimal, "katakana-iroha": listDecimal, "lower-latin": listDecimal, "lower-alpha": listDecimal, "upper-latin": listDecimal, "upper-alpha": listDecimal, "lower-greek": listDecimal, } class pisaTagUL(pisaTagP): def start(self, c): self.counter, c.listCounter = c.listCounter, 0 def end(self, c): c.addPara() # XXX Simulate margin for the moment c.addStory(Spacer(width=1, height=c.fragBlock.spaceAfter)) c.listCounter = self.counter class pisaTagOL(pisaTagUL): pass class pisaTagLI(pisaTag): def start(self, c): lst = _list_style_type.get(c.frag.listStyleType or "disc", _bullet) #log.debug("frag %r", c.copyFrag( # text=lst, # bulletFontName=c.getFontName("helvetica"), # fontName=c.getFontName("helvetica"))) # c.addFrag("") #frag = ParaFrag() #frag.fontName = frag.bulletFontName = c.getFontName("helvetica") #frag.fontSize = c.frag.fontSize #c.frag.fontName = c.getFontName("helvetica") frag = copy.copy(c.frag) #print "###", c.frag.fontName #frag.fontName = "au_00" # c.getFontName("helvetica") #frag.bulletFontName = "au_00" # c.getFontName("helvetica") self.offset = 0 if frag.listStyleImage is not None: frag.text = u"" f = frag.listStyleImage if f and (not f.notFound()): img = PmlImage( f.getData(), width=None, height=None) img.drawHeight *= dpi96 img.drawWidth *= dpi96 img.pisaZoom = frag.zoom img.drawWidth *= img.pisaZoom img.drawHeight *= img.pisaZoom frag.image = img self.offset = max(0, img.drawHeight - c.frag.fontSize) else: if type(lst) == type(u""): frag.text = lst else: # XXX This should be the recent font, but it throws errors in Reportlab! frag.text = lst(c) # XXX This should usually be done in the context!!! 
frag.fontName = frag.bulletFontName = tt2ps(frag.fontName, frag.bold, frag.italic) c.frag.bulletText = [frag] def end(self, c): c.fragBlock.spaceBefore += self.offset #c.fragBlock.bulletText = self.bulletText #print 999, self.bulletText # c.addPara() class pisaTagBR(pisaTag): def start(self, c): # print "BR", c.text[-40:] c.frag.lineBreak = 1 c.addFrag() c.fragStrip = True del c.frag.lineBreak c.force = True class pisaTagIMG(pisaTag): def start(self, c): attr = self.attr if attr.src and (not attr.src.notFound()): try: align = attr.align or c.frag.vAlign or "baseline" # print "align", align, attr.align, c.frag.vAlign width = c.frag.width height = c.frag.height if attr.width: width = attr.width * dpi96 if attr.height: height = attr.height * dpi96 img = PmlImage( attr.src.getData(), width=None, height=None) img.pisaZoom = c.frag.zoom img.drawHeight *= dpi96 img.drawWidth *= dpi96 if (width is None) and (height is not None): factor = getSize(height, relative=img.drawHeight) / img.drawHeight img.drawWidth *= factor img.drawHeight = getSize(height, img.drawHeight) elif (height is None) and (width is not None): factor = getSize(width, relative=img.drawWidth) / img.drawWidth img.drawHeight *= factor img.drawWidth = getSize(width, relative=img.drawWidth) elif (width is not None) and (height is not None): img.drawWidth = getSize(width, relative=img.drawWidth) or img.drawWidth img.drawHeight = getSize(height, relative=img.drawHeight) or img.drawHeight img.drawWidth *= img.pisaZoom img.drawHeight *= img.pisaZoom pw, ph = c.pageSize pw *= 0.8 # some euristic factor ph *= 0.8 # some euristic factor if pw < img.drawWidth or ph < img.drawHeight: factor = min(float(pw) / img.drawWidth, float(ph) / img.drawHeight) img.drawWidth *= factor img.drawHeight *= factor img.spaceBefore = c.frag.spaceBefore img.spaceAfter = c.frag.spaceAfter # print "image", id(img), img.drawWidth, img.drawHeight ''' TODO: - Apply styles - vspace etc. - Borders - Test inside tables ''' c.force = True if align in ["left", "right"]: c.image = img c.imageData = dict(align=align) else: # Important! Make sure that cbDefn is not inherited by other # fragments because of a bug in Reportlab! # afrag = c.frag.clone() valign = align if valign in ["texttop"]: valign = "top" elif valign in ["absmiddle"]: valign = "middle" elif valign in ["absbottom", "baseline"]: valign = "bottom" afrag = c.frag.clone() afrag.text = "" afrag.fontName="Helvetica" # Fix for a nasty bug!!! afrag.cbDefn = ABag( kind="img", image=img, #.getImage(), # XXX Inline? 
valign=valign, fontName="Helvetica", fontSize=img.drawHeight, width=img.drawWidth, height=img.drawHeight) # print "add frag", id(afrag), img.drawWidth, img.drawHeight c.fragList.append(afrag) c.fontSize = img.drawHeight except Exception: # TODO: Kill catch-all log.warn(c.warning("Error in handling image"), exc_info=1) else: log.warn(c.warning("Need a valid file name!")) class pisaTagHR(pisaTag): def start(self, c): c.addPara() width = self.attr.get('width', "100%") or "100%" c.addStory(HRFlowable( color=self.attr.color, thickness=self.attr.size, width=width.replace('"', '').replace('\'', ''), spaceBefore=c.frag.spaceBefore, spaceAfter=c.frag.spaceAfter )) # --- Forms if 0: class pisaTagINPUT(pisaTag): def _render(self, c, attr): width = 10 height = 10 if attr.type == "text": width = 100 height = 12 c.addStory(xhtml2pdf_reportlab.PmlInput(attr.name, type=attr.type, default=attr.value, width=width, height=height, )) def end(self, c): c.addPara() attr = self.attr if attr.name: self._render(c, attr) c.addPara() class pisaTagTEXTAREA(pisaTagINPUT): def _render(self, c, attr): c.addStory(xhtml2pdf_reportlab.PmlInput(attr.name, default="", width=100, height=100)) class pisaTagSELECT(pisaTagINPUT): def start(self, c): c.select_options = ["One", "Two", "Three"] def _render(self, c, attr): c.addStory(xhtml2pdf_reportlab.PmlInput(attr.name, type="select", default=c.select_options[0], options=c.select_options, width=100, height=40)) c.select_options = None class pisaTagOPTION(pisaTag): pass # ============================================ class pisaTagPDFNEXTPAGE(pisaTag): """ <pdf:nextpage name="" /> """ def start(self, c): # deprecation("pdf:nextpage") c.addPara() if self.attr.name: c.addStory(NextPageTemplate(self.attr.name)) c.addStory(PageBreak()) class pisaTagPDFNEXTTEMPLATE(pisaTag): """ <pdf:nexttemplate name="" /> """ def start(self, c): # deprecation("pdf:frame") c.addStory(NextPageTemplate(self.attr["name"])) class pisaTagPDFNEXTFRAME(pisaTag): """ <pdf:nextframe name="" /> """ def start(self, c): c.addPara() c.addStory(FrameBreak()) class pisaTagPDFSPACER(pisaTag): """ <pdf:spacer height="" /> """ def start(self, c): c.addPara() c.addStory(Spacer(1, self.attr.height)) class pisaTagPDFPAGENUMBER(pisaTag): """ <pdf:pagenumber example="" /> """ def start(self, c): c.frag.pageNumber = True c.addFrag(self.attr.example) c.frag.pageNumber = False class pisaTagPDFTOC(pisaTag): """ <pdf:toc /> """ def end(self, c): c.multiBuild = True c.addTOC() class pisaTagPDFFRAME(pisaTag): """ <pdf:frame name="" static box="" /> """ def start(self, c): deprecation("pdf:frame") attrs = self.attr name = attrs["name"] if name is None: name = "frame%d" % c.UID() x, y, w, h = attrs.box self.frame = Frame( x, y, w, h, id=name, leftPadding=0, rightPadding=0, bottomPadding=0, topPadding=0, showBoundary=attrs.border) self.static = False if self.attr.static: self.static = True c.addPara() self.story = c.swapStory() else: c.frameList.append(self.frame) def end(self, c): if self.static: c.addPara() self.frame.pisaStaticStory = c.story c.frameStaticList.append(self.frame) c.swapStory(self.story) class pisaTagPDFTEMPLATE(pisaTag): """ <pdf:template name="" static box="" > <pdf:frame...> </pdf:template> """ def start(self, c): deprecation("pdf:template") attrs = self.attr #print attrs name = attrs["name"] c.frameList = [] c.frameStaticList = [] if c.templateList.has_key(name): log.warn(c.warning("template '%s' has already been defined", name)) ''' self.oldpagesize = A4 # self._pagesize self._pagesize = 
PML_PAGESIZES[attrs.format] if attrs.orientation is not None: if attrs.orientation == "landscape": self._pagesize = landscape(self._pagesize) elif attrs.orientation == "portrait": self._pagesize = portrait(self._pagesize) ''' # self._drawing = PmlPageDrawing(self._pagesize) def end(self, c): attrs = self.attr name = attrs["name"] if len(c.frameList) <= 0: log.warn(c.warning("missing frame definitions for template")) pt = PmlPageTemplate( id=name, frames=c.frameList, pagesize=A4, ) pt.pisaStaticList = c.frameStaticList pt.pisaBackgroundList = c.pisaBackgroundList pt.pisaBackground = self.attr.background # self._pagesize) # pt.pml_statics = self._statics # pt.pml_draw = self._draw # pt.pml_drawing = self._drawing # pt.pml_background = attrs.background # pt.pml_bgstory = self._bgstory c.templateList[name] = pt c.template = None c.frameList = [] c.frameStaticList = [] class pisaTagPDFFONT(pisaTag): """ <pdf:fontembed name="" src="" /> """ def start(self, c): deprecation("pdf:font") c.loadFont(self.attr.name, self.attr.src, self.attr.encoding) class pisaTagPDFBARCODE(pisaTag): _codeName = { "I2OF5": "I2of5", "ITF": "I2of5", "CODE39": "Standard39", "EXTENDEDCODE39": "Extended39", "CODE93": "Standard93", "EXTENDEDCODE93": "Extended93", "MSI": "MSI", "CODABAR": "Codabar", "NW7": "Codabar", "CODE11": "Code11", "FIM": "FIM", "POSTNET": "POSTNET", "USPS4S": "USPS_4State", "CODE128": "Code128", "EAN13": "EAN13", "EAN8": "EAN8", } class _barcodeWrapper(Flowable): """Wrapper for barcode widget """ def __init__(self, codeName="Code128", value="", **kw): self.widget = createBarcodeDrawing(codeName, value=value, **kw) def draw(self, canvas, xoffset=0, **kw): # NOTE: `canvas' is mutable, so canvas.restoreState() is a MUST. canvas.saveState() canvas.translate(xoffset, 0) self.widget.canv = canvas self.widget.draw() canvas.restoreState() def wrap(self, aW, aH): return self.widget.wrap(aW, aH) def start(self, c): attr = self.attr codeName = attr.type or "Code128" codeName = pisaTagPDFBARCODE._codeName[codeName.upper().replace("-", "")] humanReadable = bool(attr.humanreadable) barWidth = attr.barwidth or 0.01*inch barHeight = attr.barheight or 0.5*inch fontName = c.getFontName("OCRB10,OCR-B,OCR B,OCRB") # or "Helvetica" fontSize = 2.75*mm # Assure minimal size. if codeName in ("EAN13", "EAN8"): barWidth = max(barWidth, 0.264*mm) fontSize = max(fontSize, 2.75*mm) else: # Code39 etc. barWidth = max(barWidth, 0.0075*inch) #barHeight = max(barHeight, 25.93*mm) barcode = pisaTagPDFBARCODE._barcodeWrapper( codeName=codeName, value=attr.value, barWidth=barWidth, barHeight=barHeight, humanReadable=humanReadable, fontName=fontName, fontSize=fontSize, ) width, height = barcode.wrap(c.frag.width, c.frag.height) #barcode.spaceBefore = c.frag.spaceBefore #barcode.spaceAfter = c.frag.spaceAfter c.force = True valign = attr.align or c.frag.vAlign or "baseline" if valign in ["texttop"]: valign = "top" elif valign in ["absmiddle"]: valign = "middle" elif valign in ["absbottom", "baseline"]: valign = "bottom" afrag = c.frag.clone() afrag.text = "" afrag.fontName = fontName afrag.cbDefn = ABag( kind="barcode", barcode=barcode, width=width, height=height, valign=valign, ) c.fragList.append(afrag)
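
# --- Illustrative sketch (not part of xhtml2pdf) ------------------------------
# Every handler above follows the same shape: subclass pisaTag and override
# start(c) / end(c), mutating the parser context `c` (most often c.frag or the
# story). A hypothetical handler for a <big> tag, written in that style, might
# look like the class below; the 1.25 scale factor and the tag name are
# assumptions made purely for illustration.

class pisaTagBIG(pisaTag):
    """Hypothetical handler: enlarge the font size of the current fragment."""

    def start(self, c):
        # Mirror pisaTagFONT above: adjust the active fragment on the context.
        c.frag.fontSize = max(c.frag.fontSize * 1.25, 1.0)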
from abc import ABC, abstractmethod from collections import OrderedDict from collections.abc import MutableSequence from copy import deepcopy import numpy as np from .checkvalue import check_type class Region(ABC): """Region of space that can be assigned to a cell. Region is an abstract base class that is inherited by :class:`openmc.Halfspace`, :class:`openmc.Intersection`, :class:`openmc.Union`, and :class:`openmc.Complement`. Each of those respective classes are typically not instantiated directly but rather are created through operators of the Surface and Region classes. """ def __and__(self, other): return Intersection((self, other)) def __or__(self, other): return Union((self, other)) def __invert__(self): return Complement(self) @abstractmethod def __contains__(self, point): pass @abstractmethod def __str__(self): pass def __eq__(self, other): if not isinstance(other, type(self)): return False else: return str(self) == str(other) def get_surfaces(self, surfaces=None): """Recursively find all surfaces referenced by a region and return them Parameters ---------- surfaces: collections.OrderedDict, optional Dictionary mapping surface IDs to :class:`openmc.Surface` instances Returns ------- surfaces: collections.OrderedDict Dictionary mapping surface IDs to :class:`openmc.Surface` instances """ if surfaces is None: surfaces = OrderedDict() for region in self: surfaces = region.get_surfaces(surfaces) return surfaces def remove_redundant_surfaces(self, redundant_surfaces): """Recursively remove all redundant surfaces referenced by this region .. versionadded:: 0.12 Parameters ---------- redundant_surfaces : dict Dictionary mapping redundant surface IDs to class:`openmc.Surface` instances that should replace them. """ for region in self: region.remove_redundant_surfaces(redundant_surfaces) @staticmethod def from_expression(expression, surfaces): """Generate a region given an infix expression. Parameters ---------- expression : str Boolean expression relating surface half-spaces. The possible operators are union '|', intersection ' ', and complement '~'. For example, '(1 -2) | 3 ~(4 -5)'. surfaces : dict Dictionary whose keys are suface IDs that appear in the Boolean expression and whose values are Surface objects. """ # Strip leading and trailing whitespace expression = expression.strip() # Convert the string expression into a list of tokens, i.e., operators # and surface half-spaces, representing the expression in infix # notation. 
i = 0 i_start = -1 tokens = [] while i < len(expression): if expression[i] in '()|~ ': # If special character appears immediately after a non-operator, # create a token with the apporpriate half-space if i_start >= 0: j = int(expression[i_start:i]) if j < 0: tokens.append(-surfaces[abs(j)]) else: tokens.append(+surfaces[abs(j)]) if expression[i] in '()|~': # For everything other than intersection, add the operator # to the list of tokens tokens.append(expression[i]) else: # Find next non-space character while expression[i+1] == ' ': i += 1 # If previous token is a halfspace or right parenthesis and next token # is not a left parenthese or union operator, that implies that the # whitespace is to be interpreted as an intersection operator if (i_start >= 0 or tokens[-1] == ')') and \ expression[i+1] not in ')|': tokens.append(' ') i_start = -1 else: # Check for invalid characters if expression[i] not in '-+0123456789': raise SyntaxError(f"Invalid character '{expression[i]}' in " "expression") # If we haven't yet reached the start of a word, start one if i_start < 0: i_start = i i += 1 # If we've reached the end and we're still in a word, create a # half-space token and add it to the list if i_start >= 0: j = int(expression[i_start:]) if j < 0: tokens.append(-surfaces[abs(j)]) else: tokens.append(+surfaces[abs(j)]) # The functions below are used to apply an operator to operands on the # output queue during the shunting yard algorithm. def can_be_combined(region): return isinstance(region, Complement) or hasattr(region, 'surface') def apply_operator(output, operator): r2 = output.pop() if operator == ' ': r1 = output.pop() if isinstance(r1, Intersection): r1 &= r2 output.append(r1) elif isinstance(r2, Intersection) and can_be_combined(r1): r2.insert(0, r1) output.append(r2) else: output.append(r1 & r2) elif operator == '|': r1 = output.pop() if isinstance(r1, Union): r1 |= r2 output.append(r1) elif isinstance(r2, Union) and can_be_combined(r1): r2.insert(0, r1) output.append(r2) else: output.append(r1 | r2) elif operator == '~': output.append(~r2) # The following is an implementation of the shunting yard algorithm to # generate an abstract syntax tree for the region expression. output = [] stack = [] precedence = {'|': 1, ' ': 2, '~': 3} associativity = {'|': 'left', ' ': 'left', '~': 'right'} for token in tokens: if token in (' ', '|', '~'): # Normal operators while stack: op = stack[-1] if (op not in ('(', ')') and ((associativity[token] == 'right' and precedence[token] < precedence[op]) or (associativity[token] == 'left' and precedence[token] <= precedence[op]))): apply_operator(output, stack.pop()) else: break stack.append(token) elif token == '(': # Left parentheses stack.append(token) elif token == ')': # Right parentheses while stack[-1] != '(': apply_operator(output, stack.pop()) if len(stack) == 0: raise SyntaxError('Mismatched parentheses in ' 'region specification.') stack.pop() else: # Surface halfspaces output.append(token) while stack: if stack[-1] in '()': raise SyntaxError('Mismatched parentheses in region ' 'specification.') apply_operator(output, stack.pop()) # Since we are generating an abstract syntax tree rather than a reverse # Polish notation expression, the output queue should have a single item # at the end return output[0] def clone(self, memo=None): """Create a copy of this region - each of the surfaces in the region's nodes will be cloned and will have new unique IDs. Parameters ---------- memo : dict or None A nested dictionary of previously cloned objects. 
This parameter is used internally and should not be specified by the user. Returns ------- clone : openmc.Region The clone of this region """ if memo is None: memo = {} clone = deepcopy(self) clone[:] = [n.clone(memo) for n in self] return clone def translate(self, vector, memo=None): """Translate region in given direction Parameters ---------- vector : iterable of float Direction in which region should be translated memo : dict or None Dictionary used for memoization. This parameter is used internally and should not be specified by the user. Returns ------- openmc.Region Translated region """ if memo is None: memo = {} return type(self)(n.translate(vector, memo) for n in self) def rotate(self, rotation, pivot=(0., 0., 0.), order='xyz', inplace=False, memo=None): r"""Rotate surface by angles provided or by applying matrix directly. .. versionadded:: 0.12 Parameters ---------- rotation : 3-tuple of float, or 3x3 iterable A 3-tuple of angles :math:`(\phi, \theta, \psi)` in degrees where the first element is the rotation about the x-axis in the fixed laboratory frame, the second element is the rotation about the y-axis in the fixed laboratory frame, and the third element is the rotation about the z-axis in the fixed laboratory frame. The rotations are active rotations. Additionally a 3x3 rotation matrix can be specified directly either as a nested iterable or array. pivot : iterable of float, optional (x, y, z) coordinates for the point to rotate about. Defaults to (0., 0., 0.) order : str, optional A string of 'x', 'y', and 'z' in some order specifying which rotation to perform first, second, and third. Defaults to 'xyz' which means, the rotation by angle :math:`\phi` about x will be applied first, followed by :math:`\theta` about y and then :math:`\psi` about z. This corresponds to an x-y-z extrinsic rotation as well as a z-y'-x'' intrinsic rotation using Tait-Bryan angles :math:`(\phi, \theta, \psi)`. inplace : boolean Whether or not to return a new instance of Surface or to modify the coefficients of this Surface in place. Defaults to False. memo : dict or None Dictionary used for memoization Returns ------- openmc.Region Translated region """ if memo is None: memo = {} return type(self)(n.rotate(rotation, pivot=pivot, order=order, inplace=inplace, memo=memo) for n in self) class Intersection(Region, MutableSequence): r"""Intersection of two or more regions. Instances of Intersection are generally created via the & operator applied to two instances of :class:`openmc.Region`. This is illustrated in the following example: >>> equator = openmc.ZPlane(z0=0.0) >>> earth = openmc.Sphere(r=637.1e6) >>> northern_hemisphere = -earth & +equator >>> southern_hemisphere = -earth & -equator >>> type(northern_hemisphere) <class 'openmc.region.Intersection'> Instances of this class behave like a mutable sequence, e.g., they can be indexed and have an append() method. 
Parameters ---------- nodes : iterable of openmc.Region Regions to take the intersection of Attributes ---------- bounding_box : tuple of numpy.array Lower-left and upper-right coordinates of an axis-aligned bounding box """ def __init__(self, nodes): self._nodes = list(nodes) def __and__(self, other): new = Intersection(self) new &= other return new def __iand__(self, other): if isinstance(other, Intersection): self.extend(other) else: self.append(other) return self # Implement mutable sequence protocol by delegating to list def __getitem__(self, key): return self._nodes[key] def __setitem__(self, key, value): self._nodes[key] = value def __delitem__(self, key): del self._nodes[key] def __len__(self): return len(self._nodes) def insert(self, index, value): self._nodes.insert(index, value) def __contains__(self, point): """Check whether a point is contained in the region. Parameters ---------- point : 3-tuple of float Cartesian coordinates, :math:`(x',y',z')`, of the point Returns ------- bool Whether the point is in the region """ return all(point in n for n in self) def __str__(self): return '(' + ' '.join(map(str, self)) + ')' @property def bounding_box(self): lower_left = np.array([-np.inf, -np.inf, -np.inf]) upper_right = np.array([np.inf, np.inf, np.inf]) for n in self: lower_left_n, upper_right_n = n.bounding_box lower_left[:] = np.maximum(lower_left, lower_left_n) upper_right[:] = np.minimum(upper_right, upper_right_n) return lower_left, upper_right class Union(Region, MutableSequence): r"""Union of two or more regions. Instances of Union are generally created via the | operator applied to two instances of :class:`openmc.Region`. This is illustrated in the following example: >>> s1 = openmc.ZPlane(z0=0.0) >>> s2 = openmc.Sphere(r=637.1e6) >>> type(-s2 | +s1) <class 'openmc.region.Union'> Instances of this class behave like a mutable sequence, e.g., they can be indexed and have an append() method. Parameters ---------- nodes : iterable of openmc.Region Regions to take the union of Attributes ---------- bounding_box : 2-tuple of numpy.array Lower-left and upper-right coordinates of an axis-aligned bounding box """ def __init__(self, nodes): self._nodes = list(nodes) def __or__(self, other): new = Union(self) new |= other return new def __ior__(self, other): if isinstance(other, Union): self.extend(other) else: self.append(other) return self # Implement mutable sequence protocol by delegating to list def __getitem__(self, key): return self._nodes[key] def __setitem__(self, key, value): self._nodes[key] = value def __delitem__(self, key): del self._nodes[key] def __len__(self): return len(self._nodes) def insert(self, index, value): self._nodes.insert(index, value) def __contains__(self, point): """Check whether a point is contained in the region. Parameters ---------- point : 3-tuple of float Cartesian coordinates, :math:`(x',y',z')`, of the point Returns ------- bool Whether the point is in the region """ return any(point in n for n in self) def __str__(self): return '(' + ' | '.join(map(str, self)) + ')' @property def bounding_box(self): lower_left = np.array([np.inf, np.inf, np.inf]) upper_right = np.array([-np.inf, -np.inf, -np.inf]) for n in self: lower_left_n, upper_right_n = n.bounding_box lower_left[:] = np.minimum(lower_left, lower_left_n) upper_right[:] = np.maximum(upper_right, upper_right_n) return lower_left, upper_right class Complement(Region): """Complement of a region. 
The Complement of an existing :class:`openmc.Region` can be created by using the ~ operator as the following example demonstrates: >>> xl = openmc.XPlane(-10.0) >>> xr = openmc.XPlane(10.0) >>> yl = openmc.YPlane(-10.0) >>> yr = openmc.YPlane(10.0) >>> inside_box = +xl & -xr & +yl & -yr >>> outside_box = ~inside_box >>> type(outside_box) <class 'openmc.region.Complement'> Parameters ---------- node : openmc.Region Region to take the complement of Attributes ---------- node : openmc.Region Regions to take the complement of bounding_box : tuple of numpy.array Lower-left and upper-right coordinates of an axis-aligned bounding box """ def __init__(self, node): self.node = node def __contains__(self, point): """Check whether a point is contained in the region. Parameters ---------- point : 3-tuple of float Cartesian coordinates, :math:`(x',y',z')`, of the point Returns ------- bool Whether the point is in the region """ return point not in self.node def __str__(self): return '~' + str(self.node) @property def node(self): return self._node @node.setter def node(self, node): check_type('node', node, Region) self._node = node @property def bounding_box(self): # Use De Morgan's laws to distribute the complement operator so that it # only applies to surface half-spaces, thus allowing us to calculate the # bounding box in the usual recursive manner. if isinstance(self.node, Union): temp_region = Intersection(~n for n in self.node) elif isinstance(self.node, Intersection): temp_region = Union(~n for n in self.node) elif isinstance(self.node, Complement): temp_region = self.node.node else: temp_region = ~self.node return temp_region.bounding_box def get_surfaces(self, surfaces=None): """Recursively find and return all the surfaces referenced by the node Parameters ---------- surfaces: collections.OrderedDict, optional Dictionary mapping surface IDs to :class:`openmc.Surface` instances Returns ------- surfaces: collections.OrderedDict Dictionary mapping surface IDs to :class:`openmc.Surface` instances """ if surfaces is None: surfaces = OrderedDict() for region in self.node: surfaces = region.get_surfaces(surfaces) return surfaces def remove_redundant_surfaces(self, redundant_surfaces): """Recursively remove all redundant surfaces referenced by this region .. versionadded:: 0.12 Parameters ---------- redundant_surfaces : dict Dictionary mapping redundant surface IDs to class:`openmc.Surface` instances that should replace them. """ for region in self.node: region.remove_redundant_surfaces(redundant_surfaces) def clone(self, memo=None): if memo is None: memo = {} clone = deepcopy(self) clone.node = self.node.clone(memo) return clone def translate(self, vector, memo=None): if memo is None: memo = {} return type(self)(self.node.translate(vector, memo)) def rotate(self, rotation, pivot=(0., 0., 0.), order='xyz', inplace=False, memo=None): if memo is None: memo = {} return type(self)(self.node.rotate(rotation, pivot=pivot, order=order, inplace=inplace, memo=memo))
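# Usage sketch (illustrative only): building a region from an infix expression with
# Region.from_expression and querying it.  The surface IDs, radius and test point are
# assumptions chosen for the example.
if __name__ == '__main__':
    import openmc

    sphere = openmc.Sphere(r=10.0, surface_id=1)
    plane = openmc.ZPlane(z0=0.0, surface_id=2)

    # '-1 2': inside surface 1, intersected (whitespace operator) with the
    # positive half-space of surface 2
    region = openmc.Region.from_expression('-1 2', {1: sphere, 2: plane})

    print((0.0, 0.0, 1.0) in region)   # True: inside the sphere and above the plane
    print(region.bounding_box)         # axis-aligned box, clipped at z = 0 by the plane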
r""" Nonlinear solvers ================= .. currentmodule:: scipy.optimize This is a collection of general-purpose nonlinear multidimensional solvers. These solvers find *x* for which *F(x) = 0*. Both *x* and *F* can be multidimensional. Routines -------- Large-scale nonlinear solvers: .. autosummary:: newton_krylov anderson General nonlinear solvers: .. autosummary:: broyden1 broyden2 Simple iterations: .. autosummary:: excitingmixing linearmixing diagbroyden Examples ======== Small problem ------------- >>> def F(x): ... return np.cos(x) + x[::-1] - [1, 2, 3, 4] >>> import scipy.optimize >>> x = scipy.optimize.broyden1(F, [1,1,1,1], f_tol=1e-14) >>> x array([ 4.04674914, 3.91158389, 2.71791677, 1.61756251]) >>> np.cos(x) + x[::-1] array([ 1., 2., 3., 4.]) Large problem ------------- Suppose that we needed to solve the following integrodifferential equation on the square :math:`[0,1]\times[0,1]`: .. math:: \nabla^2 P = 10 \left(\int_0^1\int_0^1\cosh(P)\,dx\,dy\right)^2 with :math:`P(x,1) = 1` and :math:`P=0` elsewhere on the boundary of the square. The solution can be found using the `newton_krylov` solver: .. plot:: import numpy as np from scipy.optimize import newton_krylov from numpy import cosh, zeros_like, mgrid, zeros # parameters nx, ny = 75, 75 hx, hy = 1./(nx-1), 1./(ny-1) P_left, P_right = 0, 0 P_top, P_bottom = 1, 0 def residual(P): d2x = zeros_like(P) d2y = zeros_like(P) d2x[1:-1] = (P[2:] - 2*P[1:-1] + P[:-2]) / hx/hx d2x[0] = (P[1] - 2*P[0] + P_left)/hx/hx d2x[-1] = (P_right - 2*P[-1] + P[-2])/hx/hx d2y[:,1:-1] = (P[:,2:] - 2*P[:,1:-1] + P[:,:-2])/hy/hy d2y[:,0] = (P[:,1] - 2*P[:,0] + P_bottom)/hy/hy d2y[:,-1] = (P_top - 2*P[:,-1] + P[:,-2])/hy/hy return d2x + d2y - 10*cosh(P).mean()**2 # solve guess = zeros((nx, ny), float) sol = newton_krylov(residual, guess, method='lgmres', verbose=1) print 'Residual', abs(residual(sol)).max() # visualize import matplotlib.pyplot as plt x, y = mgrid[0:1:(nx*1j), 0:1:(ny*1j)] plt.pcolor(x, y, sol) plt.colorbar() plt.show() """ # Copyright (C) 2009, Pauli Virtanen <[email protected]> # Distributed under the same license as Scipy. import sys import numpy as np from scipy.linalg import norm, solve, inv, qr, svd, lstsq, LinAlgError from numpy import asarray, dot, vdot if sys.platform != 'cli': import scipy.sparse.linalg import scipy.sparse import scipy.lib.blas as blas import inspect else: print "Warning: scipy.optimize.nonlin package is not supported under IronPython yet." 
from linesearch import scalar_search_wolfe1, scalar_search_armijo __all__ = [ 'broyden1', 'broyden2', 'anderson', 'linearmixing', 'diagbroyden', 'excitingmixing', 'newton_krylov', # Deprecated functions: 'broyden_generalized', 'anderson2', 'broyden3'] #------------------------------------------------------------------------------ # Utility functions #------------------------------------------------------------------------------ class NoConvergence(Exception): pass def maxnorm(x): return np.absolute(x).max() def _as_inexact(x): """Return `x` as an array, of either floats or complex floats""" x = asarray(x) if not np.issubdtype(x.dtype, np.inexact): return asarray(x, dtype=np.float_) return x def _array_like(x, x0): """Return ndarray `x` as same array subclass and shape as `x0`""" x = np.reshape(x, np.shape(x0)) wrap = getattr(x0, '__array_wrap__', x.__array_wrap__) return wrap(x) def _safe_norm(v): if not np.isfinite(v).all(): return np.array(np.inf) return norm(v) #------------------------------------------------------------------------------ # Generic nonlinear solver machinery #------------------------------------------------------------------------------ _doc_parts = dict( params_basic=""" F : function(x) -> f Function whose root to find; should take and return an array-like object. x0 : array-like Initial guess for the solution """.strip(), params_extra=""" iter : int, optional Number of iterations to make. If omitted (default), make as many as required to meet tolerances. verbose : bool, optional Print status to stdout on every iteration. maxiter : int, optional Maximum number of iterations to make. If more are needed to meet convergence, `NoConvergence` is raised. f_tol : float, optional Absolute tolerance (in max-norm) for the residual. If omitted, default is 6e-6. f_rtol : float, optional Relative tolerance for the residual. If omitted, not used. x_tol : float, optional Absolute minimum step size, as determined from the Jacobian approximation. If the step size is smaller than this, optimization is terminated as successful. If omitted, not used. x_rtol : float, optional Relative minimum step size. If omitted, not used. tol_norm : function(vector) -> scalar, optional Norm to use in convergence check. Default is the maximum norm. line_search : {None, 'armijo' (default), 'wolfe'}, optional Which type of a line search to use to determine the step size in the direction given by the Jacobian approximation. Defaults to 'armijo'. callback : function, optional Optional callback function. It is called on every iteration as ``callback(x, f)`` where `x` is the current solution and `f` the corresponding residual. Returns ------- sol : array-like An array (of similar array type as `x0`) containing the final solution. Raises ------ NoConvergence When a solution was not found. """.strip() ) def _set_doc(obj): if obj.__doc__: obj.__doc__ = obj.__doc__ % _doc_parts def nonlin_solve(F, x0, jacobian='krylov', iter=None, verbose=False, maxiter=None, f_tol=None, f_rtol=None, x_tol=None, x_rtol=None, tol_norm=None, line_search='armijo', callback=None): """ Find a root of a function, in a way suitable for large-scale problems. Parameters ---------- %(params_basic)s jacobian : Jacobian A Jacobian approximation: `Jacobian` object or something that `asjacobian` can transform to one. 
Alternatively, a string specifying which of the builtin Jacobian approximations to use: krylov, broyden1, broyden2, anderson diagbroyden, linearmixing, excitingmixing %(params_extra)s See Also -------- asjacobian, Jacobian Notes ----- This algorithm implements the inexact Newton method, with backtracking or full line searches. Several Jacobian approximations are available, including Krylov and Quasi-Newton methods. References ---------- .. [KIM] C. T. Kelley, \"Iterative Methods for Linear and Nonlinear Equations\". Society for Industrial and Applied Mathematics. (1995) http://www.siam.org/books/kelley/ """ condition = TerminationCondition(f_tol=f_tol, f_rtol=f_rtol, x_tol=x_tol, x_rtol=x_rtol, iter=iter, norm=tol_norm) x0 = _as_inexact(x0) func = lambda z: _as_inexact(F(_array_like(z, x0))).flatten() x = x0.flatten() dx = np.inf Fx = func(x) Fx_norm = norm(Fx) jacobian = asjacobian(jacobian) jacobian.setup(x.copy(), Fx, func) if maxiter is None: if iter is not None: maxiter = iter + 1 else: maxiter = 100*(x.size+1) if line_search is True: line_search = 'armijo' elif line_search is False: line_search = None if line_search not in (None, 'armijo', 'wolfe'): raise ValueError("Invalid line search") # Solver tolerance selection gamma = 0.9 eta_max = 0.9999 eta_treshold = 0.1 eta = 1e-3 for n in xrange(maxiter): if condition.check(Fx, x, dx): break # The tolerance, as computed for scipy.sparse.linalg.* routines tol = min(eta, eta*Fx_norm) dx = -jacobian.solve(Fx, tol=tol) if norm(dx) == 0: raise ValueError("Jacobian inversion yielded zero vector. " "This indicates a bug in the Jacobian " "approximation.") # Line search, or Newton step if line_search: s, x, Fx, Fx_norm_new = _nonlin_line_search(func, x, Fx, dx, line_search) else: s = 1.0 x += dx Fx = func(x) Fx_norm_new = norm(Fx) jacobian.update(x.copy(), Fx) if callback: callback(x, Fx) # Adjust forcing parameters for inexact methods eta_A = gamma * Fx_norm_new**2 / Fx_norm**2 if gamma * eta**2 < eta_treshold: eta = min(eta_max, eta_A) else: eta = min(eta_max, max(eta_A, gamma*eta**2)) Fx_norm = Fx_norm_new # Print status if verbose: sys.stdout.write("%d: |F(x)| = %g; step %g; tol %g\n" % ( n, norm(Fx), s, eta)) sys.stdout.flush() else: raise NoConvergence(_array_like(x, x0)) return _array_like(x, x0) _set_doc(nonlin_solve) def _nonlin_line_search(func, x, Fx, dx, search_type='armijo', rdiff=1e-8, smin=1e-2): tmp_s = [0] tmp_Fx = [Fx] tmp_phi = [norm(Fx)**2] s_norm = norm(x) / norm(dx) def phi(s, store=True): if s == tmp_s[0]: return tmp_phi[0] xt = x + s*dx v = func(xt) p = _safe_norm(v)**2 if store: tmp_s[0] = s tmp_phi[0] = p tmp_Fx[0] = v return p def derphi(s): ds = (abs(s) + s_norm + 1) * rdiff return (phi(s+ds, store=False) - phi(s)) / ds if search_type == 'wolfe': s, phi1, phi0 = scalar_search_wolfe1(phi, derphi, tmp_phi[0], xtol=1e-2, amin=smin) elif search_type == 'armijo': s, phi1 = scalar_search_armijo(phi, tmp_phi[0], -tmp_phi[0], amin=smin) if s is None: # XXX: No suitable step length found. Take the full Newton step, # and hope for the best. s = 1.0 x = x + s*dx if s == tmp_s[0]: Fx = tmp_Fx[0] else: Fx = func(x) Fx_norm = norm(Fx) return s, x, Fx, Fx_norm class TerminationCondition(object): """ Termination condition for an iteration. 
It is terminated if - |F| < f_rtol*|F_0|, AND - |F| < f_tol AND - |dx| < x_rtol*|x|, AND - |dx| < x_tol """ def __init__(self, f_tol=None, f_rtol=None, x_tol=None, x_rtol=None, iter=None, norm=maxnorm): if f_tol is None: f_tol = np.finfo(np.float_).eps ** (1./3) if f_rtol is None: f_rtol = np.inf if x_tol is None: x_tol = np.inf if x_rtol is None: x_rtol = np.inf self.x_tol = x_tol self.x_rtol = x_rtol self.f_tol = f_tol self.f_rtol = f_rtol self.norm = maxnorm self.iter = iter self.f0_norm = None self.iteration = 0 def check(self, f, x, dx): self.iteration += 1 f_norm = self.norm(f) x_norm = self.norm(x) dx_norm = self.norm(dx) if self.f0_norm is None: self.f0_norm = f_norm if f_norm == 0: return True if self.iter is not None: # backwards compatibility with Scipy 0.6.0 return self.iteration > self.iter # NB: condition must succeed for rtol=inf even if norm == 0 return ((f_norm <= self.f_tol and f_norm/self.f_rtol <= self.f0_norm) and (dx_norm <= self.x_tol and dx_norm/self.x_rtol <= x_norm)) #------------------------------------------------------------------------------ # Generic Jacobian approximation #------------------------------------------------------------------------------ class Jacobian(object): """ Common interface for Jacobians or Jacobian approximations. The optional methods come useful when implementing trust region etc. algorithms that often require evaluating transposes of the Jacobian. Methods ------- solve Returns J^-1 * v update Updates Jacobian to point `x` (where the function has residual `Fx`) matvec : optional Returns J * v rmatvec : optional Returns A^H * v rsolve : optional Returns A^-H * v matmat : optional Returns A * V, where V is a dense matrix with dimensions (N,K). todense : optional Form the dense Jacobian matrix. Necessary for dense trust region algorithms, and useful for testing. Attributes ---------- shape Matrix dimensions (M, N) dtype Data type of the matrix. func : callable, optional Function the Jacobian corresponds to """ def __init__(self, **kw): names = ["solve", "update", "matvec", "rmatvec", "rsolve", "matmat", "todense", "shape", "dtype"] for name, value in kw.items(): if name not in names: raise ValueError("Unknown keyword argument %s" % name) if value is not None: setattr(self, name, kw[name]) if hasattr(self, 'todense'): self.__array__ = lambda: self.todense() def aspreconditioner(self): return InverseJacobian(self) def solve(self, v, tol=0): raise NotImplementedError def update(self, x, F): pass def setup(self, x, F, func): self.func = func self.shape = (F.size, x.size) self.dtype = F.dtype if self.__class__.setup is Jacobian.setup: # Call on the first point unless overridden self.update(self, x, F) class InverseJacobian(object): def __init__(self, jacobian): self.jacobian = jacobian self.matvec = jacobian.solve self.update = jacobian.update if hasattr(jacobian, 'setup'): self.setup = jacobian.setup if hasattr(jacobian, 'rsolve'): self.rmatvec = jacobian.rsolve @property def shape(self): return self.jacobian.shape @property def dtype(self): return self.jacobian.dtype def asjacobian(J): """ Convert given object to one suitable for use as a Jacobian. 
""" spsolve = scipy.sparse.linalg.spsolve if isinstance(J, Jacobian): return J elif inspect.isclass(J) and issubclass(J, Jacobian): return J() elif isinstance(J, np.ndarray): if J.ndim > 2: raise ValueError('array must have rank <= 2') J = np.atleast_2d(np.asarray(J)) if J.shape[0] != J.shape[1]: raise ValueError('array must be square') return Jacobian(matvec=lambda v: dot(J, v), rmatvec=lambda v: dot(J.conj().T, v), solve=lambda v: solve(J, v), rsolve=lambda v: solve(J.conj().T, v), dtype=J.dtype, shape=J.shape) elif scipy.sparse.isspmatrix(J): if J.shape[0] != J.shape[1]: raise ValueError('matrix must be square') return Jacobian(matvec=lambda v: J*v, rmatvec=lambda v: J.conj().T * v, solve=lambda v: spsolve(J, v), rsolve=lambda v: spsolve(J.conj().T, v), dtype=J.dtype, shape=J.shape) elif hasattr(J, 'shape') and hasattr(J, 'dtype') and hasattr(J, 'solve'): return Jacobian(matvec=getattr(J, 'matvec'), rmatvec=getattr(J, 'rmatvec'), solve=J.solve, rsolve=getattr(J, 'rsolve'), update=getattr(J, 'update'), setup=getattr(J, 'setup'), dtype=J.dtype, shape=J.shape) elif callable(J): # Assume it's a function J(x) that returns the Jacobian class Jac(Jacobian): def update(self, x, F): self.x = x def solve(self, v, tol=0): m = J(self.x) if isinstance(m, np.ndarray): return solve(m, v) elif scipy.sparse.isspmatrix(m): return spsolve(m, v) else: raise ValueError("Unknown matrix type") def matvec(self, v): m = J(self.x) if isinstance(m, np.ndarray): return dot(m, v) elif scipy.sparse.isspmatrix(m): return m*v else: raise ValueError("Unknown matrix type") def rsolve(self, v, tol=0): m = J(self.x) if isinstance(m, np.ndarray): return solve(m.conj().T, v) elif scipy.sparse.isspmatrix(m): return spsolve(m.conj().T, v) else: raise ValueError("Unknown matrix type") def rmatvec(self, v): m = J(self.x) if isinstance(m, np.ndarray): return dot(m.conj().T, v) elif scipy.sparse.isspmatrix(m): return m.conj().T * v else: raise ValueError("Unknown matrix type") return Jac() elif isinstance(J, str): return dict(broyden1=BroydenFirst, broyden2=BroydenSecond, anderson=Anderson, diagbroyden=DiagBroyden, linearmixing=LinearMixing, excitingmixing=ExcitingMixing, krylov=KrylovJacobian)[J]() else: raise TypeError('Cannot convert object to a Jacobian') #------------------------------------------------------------------------------ # Broyden #------------------------------------------------------------------------------ class GenericBroyden(Jacobian): def setup(self, x0, f0, func): Jacobian.setup(self, x0, f0, func) self.last_f = f0 self.last_x = x0 if hasattr(self, 'alpha') and self.alpha is None: # autoscale the initial Jacobian parameter self.alpha = 0.5*max(norm(x0), 1) / norm(f0) def _update(self, x, f, dx, df, dx_norm, df_norm): raise NotImplementedError def update(self, x, f): df = f - self.last_f dx = x - self.last_x self._update(x, f, dx, df, norm(dx), norm(df)) self.last_f = f self.last_x = x class LowRankMatrix(object): r""" A matrix represented as .. math:: \alpha I + \sum_{n=0}^{n=M} c_n d_n^\dagger However, if the rank of the matrix reaches the dimension of the vectors, full matrix representation will be used thereon. 
""" def __init__(self, alpha, n, dtype): self.alpha = alpha self.cs = [] self.ds = [] self.n = n self.dtype = dtype self.collapsed = None @staticmethod def _matvec(v, alpha, cs, ds): axpy, scal, dotc = blas.get_blas_funcs(['axpy', 'scal', 'dotc'], cs[:1] + [v]) w = alpha * v for c, d in zip(cs, ds): a = dotc(d, v) w = axpy(c, w, w.size, a) return w @staticmethod def _solve(v, alpha, cs, ds): """Evaluate w = M^-1 v""" if len(cs) == 0: return v/alpha # (B + C D^H)^-1 = B^-1 - B^-1 C (I + D^H B^-1 C)^-1 D^H B^-1 axpy, dotc = blas.get_blas_funcs(['axpy', 'dotc'], cs[:1] + [v]) c0 = cs[0] A = alpha * np.identity(len(cs), dtype=c0.dtype) for i, d in enumerate(ds): for j, c in enumerate(cs): A[i,j] += dotc(d, c) q = np.zeros(len(cs), dtype=c0.dtype) for j, d in enumerate(ds): q[j] = dotc(d, v) q /= alpha q = solve(A, q) w = v/alpha for c, qc in zip(cs, q): w = axpy(c, w, w.size, -qc) return w def matvec(self, v): """Evaluate w = M v""" if self.collapsed is not None: return np.dot(self.collapsed, v) return LowRankMatrix._matvec(v, self.alpha, self.cs, self.ds) def rmatvec(self, v): """Evaluate w = M^H v""" if self.collapsed is not None: return np.dot(self.collapsed.T.conj(), v) return LowRankMatrix._matvec(v, np.conj(self.alpha), self.ds, self.cs) def solve(self, v, tol=0): """Evaluate w = M^-1 v""" if self.collapsed is not None: return solve(self.collapsed, v) return LowRankMatrix._solve(v, self.alpha, self.cs, self.ds) def rsolve(self, v, tol=0): """Evaluate w = M^-H v""" if self.collapsed is not None: return solve(self.collapsed.T.conj(), v) return LowRankMatrix._solve(v, np.conj(self.alpha), self.ds, self.cs) def append(self, c, d): if self.collapsed is not None: self.collapsed += c[:,None] * d[None,:].conj() return self.cs.append(c) self.ds.append(d) if len(self.cs) > c.size: self.collapse() def __array__(self): if self.collapsed is not None: return self.collapsed Gm = self.alpha*np.identity(self.n, dtype=self.dtype) for c, d in zip(self.cs, self.ds): Gm += c[:,None]*d[None,:].conj() return Gm def collapse(self): """Collapse the low-rank matrix to a full-rank one.""" self.collapsed = np.array(self) self.cs = None self.ds = None self.alpha = None def restart_reduce(self, rank): """ Reduce the rank of the matrix by dropping all vectors. """ if self.collapsed is not None: return assert rank > 0 if len(self.cs) > rank: del self.cs[:] del self.ds[:] def simple_reduce(self, rank): """ Reduce the rank of the matrix by dropping oldest vectors. """ if self.collapsed is not None: return assert rank > 0 while len(self.cs) > rank: del self.cs[0] del self.ds[0] def svd_reduce(self, max_rank, to_retain=None): """ Reduce the rank of the matrix by retaining some SVD components. This corresponds to the \"Broyden Rank Reduction Inverse\" algorithm described in [vR]_. Note that the SVD decomposition can be done by solving only a problem whose size is the effective rank of this matrix, which is viable even for large problems. Parameters ---------- max_rank : int Maximum rank of this matrix after reduction. to_retain : int, optional Number of SVD components to retain when reduction is done (ie. rank > max_rank). Default is ``max_rank - 2``. References ---------- .. [vR] B.A. van der Rotten, PhD thesis, \"A limited memory Broyden method to solve high-dimensional systems of nonlinear equations\". Mathematisch Instituut, Universiteit Leiden, The Netherlands (2003). 
http://www.math.leidenuniv.nl/scripties/Rotten.pdf """ if self.collapsed is not None: return p = max_rank if to_retain is not None: q = to_retain else: q = p - 2 if self.cs: p = min(p, len(self.cs[0])) q = max(0, min(q, p-1)) m = len(self.cs) if m < p: # nothing to do return C = np.array(self.cs).T D = np.array(self.ds).T D, R = qr(D, mode='qr', econ=True) C = dot(C, R.T.conj()) U, S, WH = svd(C, full_matrices=False, compute_uv=True) C = dot(C, inv(WH)) D = dot(D, WH.T.conj()) for k in xrange(q): self.cs[k] = C[:,k].copy() self.ds[k] = D[:,k].copy() del self.cs[q:] del self.ds[q:] _doc_parts['broyden_params'] = """ alpha : float, optional Initial guess for the Jacobian is (-1/alpha). reduction_method : str or tuple, optional Method used in ensuring that the rank of the Broyden matrix stays low. Can either be a string giving the name of the method, or a tuple of the form ``(method, param1, param2, ...)`` that gives the name of the method and values for additional parameters. Methods available: - ``restart``: drop all matrix columns. Has no extra parameters. - ``simple``: drop oldest matrix column. Has no extra parameters. - ``svd``: keep only the most significant SVD components. Extra parameters: - ``to_retain`: number of SVD components to retain when rank reduction is done. Default is ``max_rank - 2``. max_rank : int, optional Maximum rank for the Broyden matrix. Default is infinity (ie., no rank reduction). """.strip() class BroydenFirst(GenericBroyden): r""" Find a root of a function, using Broyden's first Jacobian approximation. This method is also known as \"Broyden's good method\". Parameters ---------- %(params_basic)s %(broyden_params)s %(params_extra)s Notes ----- This algorithm implements the inverse Jacobian Quasi-Newton update .. math:: H_+ = H + (dx - H df) dx^\dagger H / ( dx^\dagger H df) which corresponds to Broyden's first Jacobian update .. math:: J_+ = J + (df - J dx) dx^\dagger / dx^\dagger dx References ---------- .. [vR] B.A. van der Rotten, PhD thesis, \"A limited memory Broyden method to solve high-dimensional systems of nonlinear equations\". Mathematisch Instituut, Universiteit Leiden, The Netherlands (2003). 
http://www.math.leidenuniv.nl/scripties/Rotten.pdf """ def __init__(self, alpha=None, reduction_method='restart', max_rank=None): GenericBroyden.__init__(self) self.alpha = alpha self.Gm = None if max_rank is None: max_rank = np.inf self.max_rank = max_rank if isinstance(reduction_method, str): reduce_params = () else: reduce_params = reduction_method[1:] reduction_method = reduction_method[0] reduce_params = (max_rank - 1,) + reduce_params if reduction_method == 'svd': self._reduce = lambda: self.Gm.svd_reduce(*reduce_params) elif reduction_method == 'simple': self._reduce = lambda: self.Gm.simple_reduce(*reduce_params) elif reduction_method == 'restart': self._reduce = lambda: self.Gm.restart_reduce(*reduce_params) else: raise ValueError("Unknown rank reduction method '%s'" % reduction_method) def setup(self, x, F, func): GenericBroyden.setup(self, x, F, func) self.Gm = LowRankMatrix(-self.alpha, self.shape[0], self.dtype) def todense(self): return inv(self.Gm) def solve(self, f, tol=0): r = self.Gm.matvec(f) if not np.isfinite(r).all(): # singular; reset the Jacobian approximation self.setup(self.last_x, self.last_f, self.func) return self.Gm.matvec(f) def matvec(self, f): return self.Gm.solve(f) def rsolve(self, f, tol=0): return self.Gm.rmatvec(f) def rmatvec(self, f): return self.Gm.rsolve(f) def _update(self, x, f, dx, df, dx_norm, df_norm): self._reduce() # reduce first to preserve secant condition v = self.Gm.rmatvec(dx) c = dx - self.Gm.matvec(df) d = v / vdot(df, v) self.Gm.append(c, d) class BroydenSecond(BroydenFirst): """ Find a root of a function, using Broyden\'s second Jacobian approximation. This method is also known as \"Broyden's bad method\". Parameters ---------- %(params_basic)s %(broyden_params)s %(params_extra)s Notes ----- This algorithm implements the inverse Jacobian Quasi-Newton update .. math:: H_+ = H + (dx - H df) df^\dagger / ( df^\dagger df) corresponding to Broyden's second method. References ---------- .. [vR] B.A. van der Rotten, PhD thesis, \"A limited memory Broyden method to solve high-dimensional systems of nonlinear equations\". Mathematisch Instituut, Universiteit Leiden, The Netherlands (2003). http://www.math.leidenuniv.nl/scripties/Rotten.pdf """ def _update(self, x, f, dx, df, dx_norm, df_norm): self._reduce() # reduce first to preserve secant condition v = df c = dx - self.Gm.matvec(df) d = v / df_norm**2 self.Gm.append(c, d) #------------------------------------------------------------------------------ # Broyden-like (restricted memory) #------------------------------------------------------------------------------ class Anderson(GenericBroyden): """ Find a root of a function, using (extended) Anderson mixing. The Jacobian is formed by for a 'best' solution in the space spanned by last `M` vectors. As a result, only a MxM matrix inversions and MxN multiplications are required. [Ey]_ Parameters ---------- %(params_basic)s alpha : float, optional Initial guess for the Jacobian is (-1/alpha). M : float, optional Number of previous vectors to retain. Defaults to 5. w0 : float, optional Regularization parameter for numerical stability. Compared to unity, good values of the order of 0.01. %(params_extra)s References ---------- .. [Ey] V. Eyert, J. Comp. Phys., 124, 271 (1996). 
""" # Note: # # Anderson method maintains a rank M approximation of the inverse Jacobian, # # J^-1 v ~ -v*alpha + (dX + alpha dF) A^-1 dF^H v # A = W + dF^H dF # W = w0^2 diag(dF^H dF) # # so that for w0 = 0 the secant condition applies for last M iterates, ie., # # J^-1 df_j = dx_j # # for all j = 0 ... M-1. # # Moreover, (from Sherman-Morrison-Woodbury formula) # # J v ~ [ b I - b^2 C (I + b dF^H A^-1 C)^-1 dF^H ] v # C = (dX + alpha dF) A^-1 # b = -1/alpha # # and after simplification # # J v ~ -v/alpha + (dX/alpha + dF) (dF^H dX - alpha W)^-1 dF^H v # def __init__(self, alpha=None, w0=0.01, M=5): GenericBroyden.__init__(self) self.alpha = alpha self.M = M self.dx = [] self.df = [] self.gamma = None self.w0 = w0 def solve(self, f, tol=0): dx = -self.alpha*f n = len(self.dx) if n == 0: return dx df_f = np.empty(n, dtype=f.dtype) for k in xrange(n): df_f[k] = vdot(self.df[k], f) try: gamma = solve(self.a, df_f) except LinAlgError: # singular; reset the Jacobian approximation del self.dx[:] del self.df[:] return dx for m in xrange(n): dx += gamma[m]*(self.dx[m] + self.alpha*self.df[m]) return dx def matvec(self, f): dx = -f/self.alpha n = len(self.dx) if n == 0: return dx df_f = np.empty(n, dtype=f.dtype) for k in xrange(n): df_f[k] = vdot(self.df[k], f) b = np.empty((n, n), dtype=f.dtype) for i in xrange(n): for j in xrange(n): b[i,j] = vdot(self.df[i], self.dx[j]) if i == j and self.w0 != 0: b[i,j] -= vdot(self.df[i], self.df[i])*self.w0**2*self.alpha gamma = solve(b, df_f) for m in xrange(n): dx += gamma[m]*(self.df[m] + self.dx[m]/self.alpha) return dx def _update(self, x, f, dx, df, dx_norm, df_norm): if self.M == 0: return self.dx.append(dx) self.df.append(df) while len(self.dx) > self.M: self.dx.pop(0) self.df.pop(0) n = len(self.dx) a = np.zeros((n, n), dtype=f.dtype) for i in xrange(n): for j in xrange(i, n): if i == j: wd = self.w0**2 else: wd = 0 a[i,j] = (1+wd)*vdot(self.df[i], self.df[j]) a += np.triu(a, 1).T.conj() self.a = a #------------------------------------------------------------------------------ # Simple iterations #------------------------------------------------------------------------------ class DiagBroyden(GenericBroyden): """ Find a root of a function, using diagonal Broyden Jacobian approximation. The Jacobian approximation is derived from previous iterations, by retaining only the diagonal of Broyden matrices. .. warning:: This algorithm may be useful for specific problems, but whether it will work may depend strongly on the problem. Parameters ---------- %(params_basic)s alpha : float, optional Initial guess for the Jacobian is (-1/alpha). %(params_extra)s """ def __init__(self, alpha=None): GenericBroyden.__init__(self) self.alpha = alpha def setup(self, x, F, func): GenericBroyden.setup(self, x, F, func) self.d = np.ones((self.shape[0],), dtype=self.dtype) / self.alpha def solve(self, f, tol=0): return -f / self.d def matvec(self, f): return -f * self.d def rsolve(self, f, tol=0): return -f / self.d.conj() def rmatvec(self, f): return -f * self.d.conj() def todense(self): return np.diag(-self.d) def _update(self, x, f, dx, df, dx_norm, df_norm): self.d -= (df + self.d*dx)*dx/dx_norm**2 class LinearMixing(GenericBroyden): """ Find a root of a function, using a scalar Jacobian approximation. .. warning:: This algorithm may be useful for specific problems, but whether it will work may depend strongly on the problem. Parameters ---------- %(params_basic)s alpha : float, optional The Jacobian approximation is (-1/alpha). 
%(params_extra)s """ def __init__(self, alpha=None): GenericBroyden.__init__(self) self.alpha = alpha def solve(self, f, tol=0): return -f*self.alpha def matvec(self, f): return -f/self.alpha def rsolve(self, f, tol=0): return -f*np.conj(self.alpha) def rmatvec(self, f): return -f/np.conj(self.alpha) def todense(self): return np.diag(-np.ones(self.shape[0])/self.alpha) def _update(self, x, f, dx, df, dx_norm, df_norm): pass class ExcitingMixing(GenericBroyden): """ Find a root of a function, using a tuned diagonal Jacobian approximation. The Jacobian matrix is diagonal and is tuned on each iteration. .. warning:: This algorithm may be useful for specific problems, but whether it will work may depend strongly on the problem. Parameters ---------- %(params_basic)s alpha : float, optional Initial Jacobian approximation is (-1/alpha). alphamax : float, optional The entries of the diagonal Jacobian are kept in the range ``[alpha, alphamax]``. %(params_extra)s """ def __init__(self, alpha=None, alphamax=1.0): GenericBroyden.__init__(self) self.alpha = alpha self.alphamax = alphamax self.beta = None def setup(self, x, F, func): GenericBroyden.setup(self, x, F, func) self.beta = self.alpha * np.ones((self.shape[0],), dtype=self.dtype) def solve(self, f, tol=0): return -f*self.beta def matvec(self, f): return -f/self.beta def rsolve(self, f, tol=0): return -f*self.beta.conj() def rmatvec(self, f): return -f/self.beta.conj() def todense(self): return np.diag(-1/self.beta) def _update(self, x, f, dx, df, dx_norm, df_norm): incr = f*self.last_f > 0 self.beta[incr] += self.alpha self.beta[~incr] = self.alpha np.clip(self.beta, 0, self.alphamax, out=self.beta) #------------------------------------------------------------------------------ # Iterative/Krylov approximated Jacobians #------------------------------------------------------------------------------ class KrylovJacobian(Jacobian): r""" Find a root of a function, using Krylov approximation for inverse Jacobian. This method is suitable for solving large-scale problems. Parameters ---------- %(params_basic)s rdiff : float, optional Relative step size to use in numerical differentiation. method : {'lgmres', 'gmres', 'bicgstab', 'cgs', 'minres'} or function Krylov method to use to approximate the Jacobian. Can be a string, or a function implementing the same interface as the iterative solvers in `scipy.sparse.linalg`. The default is `scipy.sparse.linalg.lgmres`. inner_M : LinearOperator or InverseJacobian Preconditioner for the inner Krylov iteration. Note that you can use also inverse Jacobians as (adaptive) preconditioners. For example, >>> jac = BroydenFirst() >>> kjac = KrylovJacobian(inner_M=jac.inverse). If the preconditioner has a method named 'update', it will be called as ``update(x, f)`` after each nonlinear step, with ``x`` giving the current point, and ``f`` the current function value. inner_tol, inner_maxiter, ... Parameters to pass on to the \"inner\" Krylov solver. See `scipy.sparse.linalg.gmres` for details. outer_k : int, optional Size of the subspace kept across LGMRES nonlinear iterations. See `scipy.sparse.linalg.lgmres` for details. %(params_extra)s See Also -------- scipy.sparse.linalg.gmres scipy.sparse.linalg.lgmres Notes ----- This function implements a Newton-Krylov solver. The basic idea is to compute the inverse of the Jacobian with an iterative Krylov method. These methods require only evaluating the Jacobian-vector products, which are conveniently approximated by numerical differentiation: .. 
math:: J v \approx (f(x + \omega*v/|v|) - f(x)) / \omega Due to the use of iterative matrix inverses, these methods can deal with large nonlinear problems. Scipy's `scipy.sparse.linalg` module offers a selection of Krylov solvers to choose from. The default here is `lgmres`, which is a variant of restarted GMRES iteration that reuses some of the information obtained in the previous Newton steps to invert Jacobians in subsequent steps. For a review on Newton-Krylov methods, see for example [KK]_, and for the LGMRES sparse inverse method, see [BJM]_. References ---------- .. [KK] D.A. Knoll and D.E. Keyes, J. Comp. Phys. 193, 357 (2003). .. [BJM] A.H. Baker and E.R. Jessup and T. Manteuffel, SIAM J. Matrix Anal. Appl. 26, 962 (2005). """ def __init__(self, rdiff=None, method='lgmres', inner_maxiter=20, inner_M=None, outer_k=10, **kw): self.preconditioner = inner_M self.rdiff = rdiff self.method = dict( bicgstab=scipy.sparse.linalg.bicgstab, gmres=scipy.sparse.linalg.gmres, lgmres=scipy.sparse.linalg.lgmres, cgs=scipy.sparse.linalg.cgs, minres=scipy.sparse.linalg.minres, ).get(method, method) self.method_kw = dict(maxiter=inner_maxiter, M=self.preconditioner) if self.method is scipy.sparse.linalg.gmres: # Replace GMRES's outer iteration with Newton steps self.method_kw['restrt'] = inner_maxiter self.method_kw['maxiter'] = 1 elif self.method is scipy.sparse.linalg.lgmres: self.method_kw['outer_k'] = outer_k # Replace LGMRES's outer iteration with Newton steps self.method_kw['maxiter'] = 1 # Carry LGMRES's `outer_v` vectors across nonlinear iterations self.method_kw.setdefault('outer_v', []) # But don't carry the corresponding Jacobian*v products, in case # the Jacobian changes a lot in the nonlinear step # # XXX: some trust-region inspired ideas might be more efficient... # See eg. Brown & Saad. But needs to be implemented separately # since it's not an inexact Newton method. self.method_kw.setdefault('store_outer_Av', False) for key, value in kw.items(): if not key.startswith('inner_'): raise ValueError("Unknown parameter %s" % key) self.method_kw[key[6:]] = value def _update_diff_step(self): mx = abs(self.x0).max() mf = abs(self.f0).max() self.omega = self.rdiff * max(1, mx) / max(1, mf) def matvec(self, v): nv = norm(v) if nv == 0: return 0*v sc = self.omega / nv r = (self.func(self.x0 + sc*v) - self.f0) / sc if not np.all(np.isfinite(r)) and np.all(np.isfinite(v)): raise ValueError('Function returned non-finite results') return r def solve(self, rhs, tol=0): sol, info = self.method(self.op, rhs, tol=tol, **self.method_kw) return sol def update(self, x, f): self.x0 = x self.f0 = f self._update_diff_step() # Update also the preconditioner, if possible if self.preconditioner is not None: if hasattr(self.preconditioner, 'update'): self.preconditioner.update(x, f) def setup(self, x, f, func): Jacobian.setup(self, x, f, func) self.x0 = x self.f0 = f self.op = scipy.sparse.linalg.aslinearoperator(self) if self.rdiff is None: self.rdiff = np.finfo(x.dtype).eps ** (1./2) self._update_diff_step() # Setup also the preconditioner, if possible if self.preconditioner is not None: if hasattr(self.preconditioner, 'setup'): self.preconditioner.setup(x, f, func) #------------------------------------------------------------------------------ # Wrapper functions #------------------------------------------------------------------------------ def _nonlin_wrapper(name, jac): """ Construct a solver wrapper with given name and jacobian approx. 
It inspects the keyword arguments of ``jac.__init__``, and allows to use the same arguments in the wrapper function, in addition to the keyword arguments of `nonlin_solve` """ import inspect args, varargs, varkw, defaults = inspect.getargspec(jac.__init__) kwargs = zip(args[-len(defaults):], defaults) kw_str = ", ".join(["%s=%r" % (k, v) for k, v in kwargs]) if kw_str: kw_str = ", " + kw_str kwkw_str = ", ".join(["%s=%s" % (k, k) for k, v in kwargs]) if kwkw_str: kwkw_str = kwkw_str + ", " # Construct the wrapper function so that it's keyword arguments # are visible in pydoc.help etc. wrapper = """ def %(name)s(F, xin, iter=None %(kw)s, verbose=False, maxiter=None, f_tol=None, f_rtol=None, x_tol=None, x_rtol=None, tol_norm=None, line_search='armijo', callback=None, **kw): jac = %(jac)s(%(kwkw)s **kw) return nonlin_solve(F, xin, jac, iter, verbose, maxiter, f_tol, f_rtol, x_tol, x_rtol, tol_norm, line_search, callback) """ wrapper = wrapper % dict(name=name, kw=kw_str, jac=jac.__name__, kwkw=kwkw_str) ns = {} ns.update(globals()) exec wrapper in ns func = ns[name] func.__doc__ = jac.__doc__ _set_doc(func) return func broyden1 = _nonlin_wrapper('broyden1', BroydenFirst) broyden2 = _nonlin_wrapper('broyden2', BroydenSecond) anderson = _nonlin_wrapper('anderson', Anderson) linearmixing = _nonlin_wrapper('linearmixing', LinearMixing) diagbroyden = _nonlin_wrapper('diagbroyden', DiagBroyden) excitingmixing = _nonlin_wrapper('excitingmixing', ExcitingMixing) newton_krylov = _nonlin_wrapper('newton_krylov', KrylovJacobian) # Deprecated functions @np.deprecate def broyden_generalized(*a, **kw): """Use *anderson(..., w0=0)* instead""" kw.setdefault('w0', 0) return anderson(*a, **kw) @np.deprecate def broyden1_modified(*a, **kw): """Use `broyden1` instead""" return broyden1(*a, **kw) @np.deprecate def broyden_modified(*a, **kw): """Use `anderson` instead""" return anderson(*a, **kw) @np.deprecate def anderson2(*a, **kw): """Use `anderson` instead""" return anderson(*a, **kw) @np.deprecate def broyden3(*a, **kw): """Use `broyden2` instead""" return broyden2(*a, **kw) @np.deprecate def vackar(*a, **kw): """Use `diagbroyden` instead""" return diagbroyden(*a, **kw)
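# Quick usage sketch for the wrapper functions defined above (values are illustrative).
# The system cos(x) - x = 0 has its root near 0.7391 in each component.
if __name__ == '__main__':
    def F(x):
        return np.cos(x) - x

    print(broyden1(F, [1.0, 1.0], f_tol=1e-10))
    print(anderson(F, [1.0, 1.0], f_tol=1e-10))
    print(nonlin_solve(F, [1.0, 1.0], jacobian='broyden2', f_tol=1e-10))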
import logging from flask import request, flash, abort, Response from flask_admin import expose from flask_admin.babel import gettext, ngettext, lazy_gettext from flask_admin.model import BaseModelView from flask_admin.model.form import wrap_fields_in_fieldlist from flask_admin.model.fields import ListEditableFieldList from flask_admin._compat import iteritems, string_types import mongoengine import gridfs from mongoengine.connection import get_db from bson.objectid import ObjectId from flask_admin.actions import action from .filters import FilterConverter, BaseMongoEngineFilter from .form import get_form, CustomModelConverter from .typefmt import DEFAULT_FORMATTERS from .tools import parse_like_term from .helpers import format_error from .ajax import process_ajax_references, create_ajax_loader from .subdoc import convert_subdocuments # Set up logger log = logging.getLogger("flask-admin.mongo") SORTABLE_FIELDS = set(( mongoengine.StringField, mongoengine.IntField, mongoengine.FloatField, mongoengine.BooleanField, mongoengine.DateTimeField, mongoengine.ComplexDateTimeField, mongoengine.ObjectIdField, mongoengine.DecimalField, mongoengine.ReferenceField, mongoengine.EmailField, mongoengine.UUIDField, mongoengine.URLField )) class ModelView(BaseModelView): """ MongoEngine model scaffolding. """ column_filters = None """ Collection of the column filters. Can contain either field names or instances of :class:`flask_admin.contrib.mongoengine.filters.BaseFilter` classes. For example:: class MyModelView(BaseModelView): column_filters = ('user', 'email') or:: class MyModelView(BaseModelView): column_filters = (BooleanEqualFilter(User.name, 'Name')) """ model_form_converter = CustomModelConverter """ Model form conversion class. Use this to implement custom field conversion logic. Custom class should be derived from the `flask_admin.contrib.mongoengine.form.CustomModelConverter`. For example:: class MyModelConverter(AdminModelConverter): pass class MyAdminView(ModelView): model_form_converter = MyModelConverter """ object_id_converter = ObjectId """ Mongodb ``_id`` value conversion function. Default is `bson.ObjectId`. Use this if you are using String, Binary and etc. For example:: class MyModelView(BaseModelView): object_id_converter = int or:: class MyModelView(BaseModelView): object_id_converter = str """ filter_converter = FilterConverter() """ Field to filter converter. Override this attribute to use a non-default converter. """ column_type_formatters = DEFAULT_FORMATTERS """ Customized type formatters for MongoEngine backend """ allowed_search_types = (mongoengine.StringField, mongoengine.URLField, mongoengine.EmailField) """ List of allowed search field types. """ form_subdocuments = None """ Subdocument configuration options. This field accepts dictionary, where key is field name and value is either dictionary or instance of the `flask_admin.contrib.EmbeddedForm`. Consider following example:: class Comment(db.EmbeddedDocument): name = db.StringField(max_length=20, required=True) value = db.StringField(max_length=20) class Post(db.Document): text = db.StringField(max_length=30) data = db.EmbeddedDocumentField(Comment) class MyAdmin(ModelView): form_subdocuments = { 'data': { 'form_columns': ('name',) } } In this example, `Post` model has child `Comment` subdocument. When generating form for `Comment` embedded document, Flask-Admin will only create `name` field. 
It is also possible to use class-based embedded document configuration:: class CommentEmbed(EmbeddedForm): form_columns = ('name',) class MyAdmin(ModelView): form_subdocuments = { 'data': CommentEmbed() } Arbitrary depth nesting is supported:: class SomeEmbed(EmbeddedForm): form_excluded_columns = ('test',) class CommentEmbed(EmbeddedForm): form_columns = ('name',) form_subdocuments = { 'inner': SomeEmbed() } class MyAdmin(ModelView): form_subdocuments = { 'data': CommentEmbed() } There's also support for forms embedded into `ListField`. All you have to do is to create nested rule with `None` as a name. Even though it is slightly confusing, but that's how Flask-MongoEngine creates form fields embedded into ListField:: class Comment(db.EmbeddedDocument): name = db.StringField(max_length=20, required=True) value = db.StringField(max_length=20) class Post(db.Document): text = db.StringField(max_length=30) data = db.ListField(db.EmbeddedDocumentField(Comment)) class MyAdmin(ModelView): form_subdocuments = { 'data': { 'form_subdocuments': { None: { 'form_columns': ('name',) } } } } """ def __init__(self, model, name=None, category=None, endpoint=None, url=None, static_folder=None, menu_class_name=None, menu_icon_type=None, menu_icon_value=None): """ Constructor :param model: Model class :param name: Display name :param category: Display category :param endpoint: Endpoint :param url: Custom URL :param menu_class_name: Optional class name for the menu item. :param menu_icon_type: Optional icon. Possible icon types: - `flask_admin.consts.ICON_TYPE_GLYPH` - Bootstrap glyph icon - `flask_admin.consts.ICON_TYPE_FONT_AWESOME` - Font Awesome icon - `flask_admin.consts.ICON_TYPE_IMAGE` - Image relative to Flask static directory - `flask_admin.consts.ICON_TYPE_IMAGE_URL` - Image with full URL :param menu_icon_value: Icon glyph name or URL, depending on `menu_icon_type` setting """ self._search_fields = [] super(ModelView, self).__init__(model, name, category, endpoint, url, static_folder, menu_class_name=menu_class_name, menu_icon_type=menu_icon_type, menu_icon_value=menu_icon_value) self._primary_key = self.scaffold_pk() def _refresh_cache(self): """ Refresh cache. """ # Process subdocuments if self.form_subdocuments is None: self.form_subdocuments = {} self._form_subdocuments = convert_subdocuments(self.form_subdocuments) # Cache other properties super(ModelView, self)._refresh_cache() def _process_ajax_references(self): """ AJAX endpoint is exposed by top-level admin view class, but subdocuments might have AJAX references too. This method will recursively go over subdocument configuration and will precompute AJAX references for them ensuring that subdocuments can also use AJAX to populate their ReferenceFields. 
""" references = super(ModelView, self)._process_ajax_references() return process_ajax_references(references, self) def _get_model_fields(self, model=None): """ Inspect model and return list of model fields :param model: Model to inspect """ if model is None: model = self.model return sorted(iteritems(model._fields), key=lambda n: n[1].creation_counter) def scaffold_pk(self): # MongoEngine models have predefined 'id' as a key return 'id' def get_pk_value(self, model): """ Return the primary key value from the model instance :param model: Model instance """ return model.pk def scaffold_list_columns(self): """ Scaffold list columns """ columns = [] for n, f in self._get_model_fields(): # Verify type field_class = type(f) if (field_class == mongoengine.ListField and isinstance(f.field, mongoengine.EmbeddedDocumentField)): continue if field_class == mongoengine.EmbeddedDocumentField: continue if self.column_display_pk or field_class != mongoengine.ObjectIdField: columns.append(n) return columns def scaffold_sortable_columns(self): """ Return a dictionary of sortable columns (name, field) """ columns = {} for n, f in self._get_model_fields(): if type(f) in SORTABLE_FIELDS: if self.column_display_pk or type(f) != mongoengine.ObjectIdField: columns[n] = f return columns def init_search(self): """ Init search """ if self.column_searchable_list: for p in self.column_searchable_list: if isinstance(p, string_types): p = self.model._fields.get(p) if p is None: raise Exception('Invalid search field') field_type = type(p) # Check type if (field_type not in self.allowed_search_types): raise Exception('Can only search on text columns. ' + 'Failed to setup search for "%s"' % p) self._search_fields.append(p) return bool(self._search_fields) def scaffold_filters(self, name): """ Return filter object(s) for the field :param name: Either field name or field instance """ if isinstance(name, string_types): attr = self.model._fields.get(name) else: attr = name if attr is None: raise Exception('Failed to find field for filter: %s' % name) # Find name visible_name = None if not isinstance(name, string_types): visible_name = self.get_column_name(attr.name) if not visible_name: visible_name = self.get_column_name(name) # Convert filter type_name = type(attr).__name__ flt = self.filter_converter.convert(type_name, attr, visible_name) return flt def is_valid_filter(self, filter): """ Validate if the provided filter is a valid MongoEngine filter :param filter: Filter object """ return isinstance(filter, BaseMongoEngineFilter) def scaffold_form(self): """ Create form from the model. """ form_class = get_form(self.model, self.model_form_converter(self), base_class=self.form_base_class, only=self.form_columns, exclude=self.form_excluded_columns, field_args=self.form_args, extra_fields=self.form_extra_fields) return form_class def scaffold_list_form(self, custom_fieldlist=ListEditableFieldList, validators=None): """ Create form for the `index_view` using only the columns from `self.column_editable_list`. :param validators: `form_args` dict with only validators {'name': {'validators': [required()]}} :param custom_fieldlist: A WTForm FieldList class. By default, `ListEditableFieldList`. 
""" form_class = get_form(self.model, self.model_form_converter(self), base_class=self.form_base_class, only=self.column_editable_list, field_args=validators) return wrap_fields_in_fieldlist(self.form_base_class, form_class, custom_fieldlist) # AJAX foreignkey support def _create_ajax_loader(self, name, opts): return create_ajax_loader(self.model, name, name, opts) def get_query(self): """ Returns the QuerySet for this view. By default, it returns all the objects for the current model. """ return self.model.objects def _search(self, query, search_term): # TODO: Unfortunately, MongoEngine contains bug which # prevents running complex Q queries and, as a result, # Flask-Admin does not support per-word searching like # in other backends op, term = parse_like_term(search_term) criteria = None for field in self._search_fields: flt = {'%s__%s' % (field.name, op): term} q = mongoengine.Q(**flt) if criteria is None: criteria = q else: criteria |= q return query.filter(criteria) def get_list(self, page, sort_column, sort_desc, search, filters, execute=True): """ Get list of objects from MongoEngine :param page: Page number :param sort_column: Sort column :param sort_desc: Sort descending :param search: Search criteria :param filters: List of applied filters :param execute: Run query immediately or not """ query = self.get_query() # Filters if self._filters: for flt, flt_name, value in filters: f = self._filters[flt] query = f.apply(query, f.clean(value)) # Search if self._search_supported and search: query = self._search(query, search) # Get count count = query.count() if not self.simple_list_pager else None # Sorting if sort_column: query = query.order_by('%s%s' % ('-' if sort_desc else '', sort_column)) else: order = self._get_default_order() if order: query = query.order_by('%s%s' % ('-' if order[1] else '', order[0])) # Pagination if page is not None: query = query.skip(page * self.page_size) query = query.limit(self.page_size) if execute: query = query.all() return count, query def get_one(self, id): """ Return a single model instance by its ID :param id: Model ID """ try: return self.get_query().filter(pk=id).first() except mongoengine.ValidationError as ex: flash(gettext('Failed to get model. %(error)s', error=format_error(ex)), 'error') return None def create_model(self, form): """ Create model helper :param form: Form instance """ try: model = self.model() form.populate_obj(model) self._on_model_change(form, model, True) model.save() except Exception as ex: if not self.handle_view_exception(ex): flash(gettext('Failed to create record. %(error)s', error=format_error(ex)), 'error') log.exception('Failed to create record.') return False else: self.after_model_change(form, model, True) return model def update_model(self, form, model): """ Update model helper :param form: Form instance :param model: Model instance to update """ try: form.populate_obj(model) self._on_model_change(form, model, False) model.save() except Exception as ex: if not self.handle_view_exception(ex): flash(gettext('Failed to update record. %(error)s', error=format_error(ex)), 'error') log.exception('Failed to update record.') return False else: self.after_model_change(form, model, False) return True def delete_model(self, model): """ Delete model helper :param model: Model instance """ try: self.on_model_delete(model) model.delete() except Exception as ex: if not self.handle_view_exception(ex): flash(gettext('Failed to delete record. 
%(error)s', error=format_error(ex)), 'error') log.exception('Failed to delete record.') return False else: self.after_model_delete(model) return True # FileField access API @expose('/api/file/') def api_file_view(self): pk = request.args.get('id') coll = request.args.get('coll') db = request.args.get('db', 'default') if not pk or not coll or not db: abort(404) fs = gridfs.GridFS(get_db(db), coll) data = fs.get(self.object_id_converter(pk)) if not data: abort(404) return Response(data.read(), content_type=data.content_type, headers={ 'Content-Length': data.length }) # Default model actions def is_action_allowed(self, name): # Check delete action permission if name == 'delete' and not self.can_delete: return False return super(ModelView, self).is_action_allowed(name) @action('delete', lazy_gettext('Delete'), lazy_gettext('Are you sure you want to delete selected records?')) def action_delete(self, ids): try: count = 0 all_ids = [self.object_id_converter(pk) for pk in ids] for obj in self.get_query().in_bulk(all_ids).values(): count += self.delete_model(obj) flash(ngettext('Record was successfully deleted.', '%(count)s records were successfully deleted.', count, count=count)) except Exception as ex: if not self.handle_view_exception(ex): flash(gettext('Failed to delete records. %(error)s', error=str(ex)), 'error')
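

# --- Illustrative usage sketch; not part of the original module. ---
# A minimal, hedged example of wiring this ModelView into a Flask application
# with Flask-MongoEngine and Flask-Admin. The application settings, document
# classes and admin class below are hypothetical and mirror the
# `form_subdocuments` examples from the class docstring above.
def _example_admin_setup():
    from flask import Flask
    from flask_admin import Admin
    from flask_mongoengine import MongoEngine

    app = Flask(__name__)
    app.config['SECRET_KEY'] = 'change-me'
    app.config['MONGODB_SETTINGS'] = {'db': 'example_db'}
    db = MongoEngine(app)

    class Comment(db.EmbeddedDocument):
        name = db.StringField(max_length=20, required=True)
        value = db.StringField(max_length=20)

    class Post(db.Document):
        text = db.StringField(max_length=30)
        data = db.ListField(db.EmbeddedDocumentField(Comment))

    class PostAdmin(ModelView):
        # Only expose the `name` field of each embedded Comment, as described
        # in the ListField example in the class docstring.
        form_subdocuments = {
            'data': {
                'form_subdocuments': {
                    None: {
                        'form_columns': ('name',)
                    }
                }
            }
        }

    admin = Admin(app, name='example')
    admin.add_view(PostAdmin(Post))
    return app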
# # Copyright 2009 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """QueueInfo tools. A library for working with QueueInfo records, describing task queue entries for an application. Supports loading the records from queue.yaml. A queue has two required parameters and various optional ones. The required parameters are 'name' (must be unique for an appid) and 'rate' (the rate at which jobs in the queue are run). There is an optional parameter 'bucket_size' that will allow tokens to be 'saved up' (for more on the algorithm, see http://en.wikipedia.org/wiki/Token_Bucket). rate is expressed as number/unit, with number being an int or a float, and unit being one of 's' (seconds), 'm' (minutes), 'h' (hours) or 'd' (days). bucket_size is an integer. An example of the use of bucket_size rate: the free email quota is 2000/d, and the maximum you can send in a single minute is 11. So we can define a queue for sending email like this: queue: - name: mail-queue rate: 2000/d bucket_size: 10 If this queue had been idle for a while before some jobs were submitted to it, the first 10 jobs submitted would be run immediately, then subsequent ones would be run once every 40s or so. The limit of 2000 per day would still apply. Another optional parameter is 'max_concurrent_requests', which pertains to the requests being made by the queue. It specifies the maximum number of requests that may be in-flight at any one time. An example: queue: - name: server-queue rate: 50/s max_concurrent_requests: 5 Each queue has an optional 'mode' parameter with legal values 'push' and 'pull'. If mode is not specified, it defaults to 'push'. Tasks in queues with mode 'push' are invoked (pushed) at the specified rate. Tasks in queues with mode 'pull' are not directly invoked by App Engine. These tasks are leased for a period by client code, and deleted by client code when the task's work is finished. If not deleted before the expiry of the lease, the tasks are available for lease again. Each queue has an optional 'target' parameter. If specified all tasks inserted into the queue will be executed on the specified alternate version/server instance. A queue may also optionally specify retry_parameters. retry_parameters: task_retry_limit: 100 task_age_limit: 1d min_backoff_seconds: 0.1 max_backoff_seconds: 3600 max_doublings: 10 Each task in the queue that fails during execution will be retried using these parameters. All these fields are optional. task_retry_limit: A non-negative integer. Tasks will be retried a maximum of task_retry_limit times before failing permanently. If task_age_limit is also specified, both task_retry_limit and task_age_limit must be exceeded before a task fails permanently. task_age_limit: A non-negative floating point number followed by a suffix s (seconds), m (minutes), h (hours) or d (days). If the time since a task was first tried exceeds task_age_limit, it will fail permanently. 
If task_retry_limit is also specified, both task_retry_limit and task_age_limit must be exceeded before a task fails permanently. min_backoff_seconds: A non-negative floating point number. This is the minimum interval after the first failure and the first retry of a task. If max_backoff_seconds is also specified, min_backoff_seconds must not be greater than max_backoff_seconds. max_backoff_seconds: A non-negative floating point number. This is the maximum allowed interval between successive retries of a failed task. If min_backoff_seconds is also specified, min_backoff_seconds must not be greater than max_backoff_seconds. max_doublings: A non-negative integer. On successive failures, the retry backoff interval will be successively doubled up to max_doublings times, starting at min_backoff_seconds and not exceeding max_backoff_seconds. For retries after max_doublings, the retry backoff will increase by the value of the backoff when doubling ceased. e.g. for min_backoff_seconds of 1 ,max_doublings of 5, we have successive retry backoffs of 1, 2, 4, 8, 16, 32, 64, 96, 128, ... not exceeding max_backoff_seconds. A queue may optionally specify an acl (Access Control List). acl: - user_email: [email protected] - writer_email: [email protected] Each email must correspond to an account hosted by Google. The acl is enforced for queue access from outside AppEngine. An app's queues are also subject to storage quota limits for their stored tasks, i.e. those tasks that have been added to queues but not yet executed. This quota is part of their total storage quota (including datastore and blobstore quota). We allow an app to override the default portion of this quota available for taskqueue storage (100M) with a top level field "total_storage_limit". total_storage_limit: 1.2G If no suffix is specified, the number is interpreted as bytes. Supported suffices are B (bytes), K (kilobytes), M (megabytes), G (gigabytes) and T (terabytes). If total_storage_limit exceeds the total disk storage available to an app, it is clamped. """ __author__ = '[email protected] (Anthony Baxter)' # WARNING: This file is externally viewable by our users. All comments from # this file will be stripped. The docstrings will NOT. Do not put sensitive # information in docstrings. If you must communicate internal information in # this source file, please place them in comments only. from googlecloudsdk.third_party.appengine.api import appinfo from googlecloudsdk.third_party.appengine.api import validation from googlecloudsdk.third_party.appengine.api import yaml_builder from googlecloudsdk.third_party.appengine.api import yaml_listener from googlecloudsdk.third_party.appengine.api import yaml_object from googlecloudsdk.third_party.appengine.api.taskqueue import taskqueue_service_pb # This is exactly the same regex as is in api/taskqueue/taskqueue_service.cc _NAME_REGEX = r'^[A-Za-z0-9-]{0,499}$' _RATE_REGEX = r'^(0|[0-9]+(\.[0-9]*)?/[smhd])' _TOTAL_STORAGE_LIMIT_REGEX = r'^([0-9]+(\.[0-9]*)?[BKMGT]?)' _MODE_REGEX = r'(pull)|(push)' # we don't have to pull that file into python_lib for the taskqueue stub to work # in production. MODULE_ID_RE_STRING = r'(?!-)[a-z\d\-]{1,63}' # NOTE(user): The length here must remain 100 for backwards compatibility, # see b/5485871 for more information. 
MODULE_VERSION_RE_STRING = r'(?!-)[a-z\d\-]{1,100}' _VERSION_REGEX = r'^(?:(?:(%s)\.)?)(%s)$' % (MODULE_VERSION_RE_STRING, MODULE_ID_RE_STRING) QUEUE = 'queue' NAME = 'name' RATE = 'rate' BUCKET_SIZE = 'bucket_size' MODE = 'mode' TARGET = 'target' MAX_CONCURRENT_REQUESTS = 'max_concurrent_requests' TOTAL_STORAGE_LIMIT = 'total_storage_limit' BYTE_SUFFIXES = 'BKMGT' RETRY_PARAMETERS = 'retry_parameters' TASK_RETRY_LIMIT = 'task_retry_limit' TASK_AGE_LIMIT = 'task_age_limit' MIN_BACKOFF_SECONDS = 'min_backoff_seconds' MAX_BACKOFF_SECONDS = 'max_backoff_seconds' MAX_DOUBLINGS = 'max_doublings' ACL = 'acl' USER_EMAIL = 'user_email' WRITER_EMAIL = 'writer_email' class MalformedQueueConfiguration(Exception): """Configuration file for Task Queue is malformed.""" class RetryParameters(validation.Validated): """Retry parameters for a single task queue.""" ATTRIBUTES = { TASK_RETRY_LIMIT: validation.Optional(validation.TYPE_INT), TASK_AGE_LIMIT: validation.Optional(validation.TimeValue()), MIN_BACKOFF_SECONDS: validation.Optional(validation.TYPE_FLOAT), MAX_BACKOFF_SECONDS: validation.Optional(validation.TYPE_FLOAT), MAX_DOUBLINGS: validation.Optional(validation.TYPE_INT), } class Acl(validation.Validated): """Access control list for a single task queue.""" ATTRIBUTES = { USER_EMAIL: validation.Optional(validation.TYPE_STR), WRITER_EMAIL: validation.Optional(validation.TYPE_STR), } class QueueEntry(validation.Validated): """A queue entry describes a single task queue.""" ATTRIBUTES = { NAME: _NAME_REGEX, RATE: validation.Optional(_RATE_REGEX), MODE: validation.Optional(_MODE_REGEX), BUCKET_SIZE: validation.Optional(validation.TYPE_INT), MAX_CONCURRENT_REQUESTS: validation.Optional(validation.TYPE_INT), RETRY_PARAMETERS: validation.Optional(RetryParameters), ACL: validation.Optional(validation.Repeated(Acl)), # and version. TARGET: validation.Optional(_VERSION_REGEX), } class QueueInfoExternal(validation.Validated): """QueueInfoExternal describes all queue entries for an application.""" ATTRIBUTES = { appinfo.APPLICATION: validation.Optional(appinfo.APPLICATION_RE_STRING), TOTAL_STORAGE_LIMIT: validation.Optional(_TOTAL_STORAGE_LIMIT_REGEX), QUEUE: validation.Optional(validation.Repeated(QueueEntry)), } def LoadSingleQueue(queue_info, open_fn=None): """Load a queue.yaml file or string and return a QueueInfoExternal object. Args: queue_info: the contents of a queue.yaml file, as a string. open_fn: Function for opening files. Unused. Returns: A QueueInfoExternal object. """ builder = yaml_object.ObjectBuilder(QueueInfoExternal) handler = yaml_builder.BuilderHandler(builder) listener = yaml_listener.EventListener(handler) listener.Parse(queue_info) queue_info = handler.GetResults() if len(queue_info) < 1: raise MalformedQueueConfiguration('Empty queue configuration.') if len(queue_info) > 1: raise MalformedQueueConfiguration('Multiple queue: sections ' 'in configuration.') return queue_info[0] def ParseRate(rate): """Parses a rate string in the form number/unit, or the literal 0. The unit is one of s (seconds), m (minutes), h (hours) or d (days). Args: rate: the rate string. Returns: a floating point number representing the rate/second. Raises: MalformedQueueConfiguration: if the rate is invalid """ if rate == "0": return 0.0 elements = rate.split('/') if len(elements) != 2: raise MalformedQueueConfiguration('Rate "%s" is invalid.' % rate) number, unit = elements try: number = float(number) except ValueError: raise MalformedQueueConfiguration('Rate "%s" is invalid:' ' "%s" is not a number.' 
% (rate, number)) if unit not in 'smhd': raise MalformedQueueConfiguration('Rate "%s" is invalid:' ' "%s" is not one of s, m, h, d.' % (rate, unit)) if unit == 's': return number if unit == 'm': return number/60 if unit == 'h': return number/(60 * 60) if unit == 'd': return number/(24 * 60 * 60) def ParseTotalStorageLimit(limit): """Parses a string representing the storage bytes limit. Optional limit suffixes are: B (bytes), K (kilobytes), M (megabytes), G (gigabytes), T (terabytes) Args: limit: The storage bytes limit string. Returns: An int representing the storage limit in bytes. Raises: MalformedQueueConfiguration: if the limit argument isn't a valid python double followed by an optional suffix. """ limit = limit.strip() if not limit: raise MalformedQueueConfiguration('Total Storage Limit must not be empty.') try: if limit[-1] in BYTE_SUFFIXES: number = float(limit[0:-1]) for c in BYTE_SUFFIXES: if limit[-1] != c: number = number * 1024 else: return int(number) else: # We won't accept fractional bytes. If someone asks for # 1.1e12 bytes, too bad. return int(limit) except ValueError: raise MalformedQueueConfiguration('Total Storage Limit "%s" is invalid.' % limit) def ParseTaskAgeLimit(age_limit): """Parses a string representing the task's age limit (maximum allowed age). The string must be a non-negative integer or floating point number followed by one of s, m, h, or d (seconds, minutes, hours or days respectively). Args: age_limit: The task age limit string. Returns: An int representing the age limit in seconds. Raises: MalformedQueueConfiguration: if the limit argument isn't a valid python double followed by a required suffix. """ age_limit = age_limit.strip() if not age_limit: raise MalformedQueueConfiguration('Task Age Limit must not be empty.') unit = age_limit[-1] if unit not in "smhd": raise MalformedQueueConfiguration('Task Age_Limit must be in s (seconds), ' 'm (minutes), h (hours) or d (days)') try: number = float(age_limit[0:-1]) if unit == 's': return int(number) if unit == 'm': return int(number * 60) if unit == 'h': return int(number * 3600) if unit == 'd': return int(number * 86400) except ValueError: raise MalformedQueueConfiguration('Task Age_Limit "%s" is invalid.' % age_limit) def TranslateRetryParameters(retry): """Populates a TaskQueueRetryParameters from a queueinfo.RetryParameters. Args: retry: A queueinfo.RetryParameters read from queue.yaml that describes the queue's retry parameters. Returns: A taskqueue_service_pb.TaskQueueRetryParameters proto populated with the data from "retry". Raises: MalformedQueueConfiguration: if the retry parameters are invalid. """ params = taskqueue_service_pb.TaskQueueRetryParameters() if retry.task_retry_limit is not None: params.set_retry_limit(int(retry.task_retry_limit)) if retry.task_age_limit is not None: # This could raise MalformedQueueConfiguration. params.set_age_limit_sec(ParseTaskAgeLimit(retry.task_age_limit)) if retry.min_backoff_seconds is not None: params.set_min_backoff_sec(float(retry.min_backoff_seconds)) if retry.max_backoff_seconds is not None: params.set_max_backoff_sec(float(retry.max_backoff_seconds)) if retry.max_doublings is not None: params.set_max_doublings(int(retry.max_doublings)) # We enforce a couple of friendly rules here with min_backoff_sec and # max_backoff_sec. If only one is set, the other gets a default value. It # is not fair to users if the default (which can change) could cause their # parameters to violate min_backoff_sec() <= max_backoff_sec(). 
  if params.has_min_backoff_sec() and not params.has_max_backoff_sec():
    if params.min_backoff_sec() > params.max_backoff_sec():
      params.set_max_backoff_sec(params.min_backoff_sec())
  if not params.has_min_backoff_sec() and params.has_max_backoff_sec():
    if params.min_backoff_sec() > params.max_backoff_sec():
      params.set_min_backoff_sec(params.max_backoff_sec())

  # Validation.
  if params.has_retry_limit() and params.retry_limit() < 0:
    raise MalformedQueueConfiguration(
        'Task retry limit must not be less than zero.')

  if params.has_age_limit_sec() and not params.age_limit_sec() > 0:
    raise MalformedQueueConfiguration(
        'Task age limit must be greater than zero.')

  if params.has_min_backoff_sec() and params.min_backoff_sec() < 0:
    raise MalformedQueueConfiguration(
        'Min backoff seconds must not be less than zero.')

  if params.has_max_backoff_sec() and params.max_backoff_sec() < 0:
    raise MalformedQueueConfiguration(
        'Max backoff seconds must not be less than zero.')

  if params.has_max_doublings() and params.max_doublings() < 0:
    raise MalformedQueueConfiguration(
        'Max doublings must not be less than zero.')

  if (params.has_min_backoff_sec() and params.has_max_backoff_sec() and
      params.min_backoff_sec() > params.max_backoff_sec()):
    raise MalformedQueueConfiguration(
        'Min backoff sec must not be greater than max backoff sec.')

  return params
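

# --- Illustrative usage sketch; not part of the original module. ---
# A minimal, hedged example of loading a queue.yaml string with
# LoadSingleQueue and converting its fields with the parse helpers above.
# The queue definition is hypothetical; attribute access on the returned
# QueueInfoExternal object is assumed to follow the validation.Validated
# convention of one attribute per ATTRIBUTES key.
def _example_parse_queue_yaml():
  yaml_text = """
queue:
- name: mail-queue
  rate: 2000/d
  bucket_size: 10
total_storage_limit: 1.2G
"""
  info = LoadSingleQueue(yaml_text)
  # '2000/d' -> tasks per second (roughly 0.023).
  rate_per_second = ParseRate(info.queue[0].rate)
  # '1.2G' -> bytes, i.e. int(1.2 * 1024 ** 3).
  storage_bytes = ParseTotalStorageLimit(info.total_storage_limit)
  return rate_per_second, storage_bytes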
from metabotnik.models import Project, File, new_task from dropbox import Dropbox import os import sys import subprocess import traceback import json from django.utils import timezone from django.core.mail import send_mail from django.conf import settings import planodo from PIL import Image from metabotnik.xmp import read_metadata import random import shutil from natsort import natsorted class RetryTaskException(Exception): 'Raise this Exception in a task to have it retried later' pass def execute_task(task): payload = task.get_payload() task_function = globals().get(task.action) # Consider doing a getattr(sys.modules[__name__], action) ? if task_function: # Bit of fun on local development machine if sys.platform == 'darwin': subprocess.call(['say', 'started task %s' % task.pk]) task.status = 'wip' task.time_started = timezone.now() task.save() try: result = task_function(payload) if type(result) is dict: payload.update(result) # consider taking the return value from the task_function, if it is a dict # update the payload with the returned value task.status = 'done' task.time_ended = timezone.now() task.set_payload(payload) if sys.platform == 'darwin': subprocess.call(['say', 'task %s done' % task.pk]) except RetryTaskException, e: payload['error'] = 'Retrying %s' % e task.status = 'new' task.set_payload(payload) except Exception: if sys.platform == 'darwin': subprocess.call(['say', 'error in task %s' % task.pk]) payload['error'] = traceback.format_exc() task.status = 'error' task.set_payload(payload) # mail the error along send_mail('An error occurred in task %s ' % task.pk, '%s\nIt can be viewed at https://metabotnik.com/admin/metabotnik/task/%s/' % (payload['error'],task.pk), '[email protected]', [address for name, address in settings.ADMINS], fail_silently=True) else: task.status = 'error' payload['error'] = 'Task %s is unrecognised' % task.action task.set_payload(payload) # Defined tasks follow here ############################################################ def layout(payload): project = Project.objects.get(pk=payload['project_id']) data = planodo.layout(project) project.layout_data = json.dumps(data) project.save() def makethumbnails(payload): project = Project.objects.get(pk=payload['project_id']) output_filepath = os.path.join(project.storage_path, 'thumbnails') if not os.path.exists(output_filepath): os.mkdir(output_filepath) output_filepath += '/%s.jpg' # this command does not run in a shell, so we need to supply the wildcard arguments input_files = [os.path.join(project.originals_path, x) for x in os.listdir(project.originals_path) if x.lower().endswith('.jpg')] # Due to a file limit bug in vipsthumbnail, do 300 at a time : https://github.com/jcupitt/libvips/issues/182 while input_files: subprocess.call(['%svipsthumbnail'%settings.VIPSBIN_PATH, '-o', output_filepath]+input_files[:300]) input_files = input_files[300:] def makedeepzoom(payload): project = Project.objects.get(pk=payload['project_id']) project.set_status('dzgen') new_nonce = ''.join(random.choice('0123456789abcdef') for i in range(6)) # Call VIPS to make the DZ input_filepath = os.path.join(project.storage_path, 'metabotnik.jpg') output_filepath = os.path.join(project.storage_path, new_nonce) subprocess.call(['%svips'%settings.VIPSBIN_PATH, 'dzsave', input_filepath, output_filepath, '--suffix', '.jpg']) try: # clean up the previous deepzoom folder and dzi file old_path = os.path.join(project.storage_path, project.metabotnik_nonce+'_files') shutil.rmtree(old_path, ignore_errors=True) 
os.remove(os.path.join(project.storage_path, project.metabotnik_nonce+'.dzi')) except (OSError, TypeError): # maybe the files were removed by some other process, just move right along pass # though really, we need to do some better logging and signalling project.metabotnik_nonce = new_nonce project.set_status('done') def generate(payload): # Always first do a layout for the project using the current settings before generation # Especially neded where users do not first do a 'preview' before hitting generate button layout(payload) project = Project.objects.get(pk=payload['project_id']) # Check to see if the number of files retrieved from Dropbox is done yet. # If not, just reset this task to new and return if project.num_files_local < project.num_files_on_dropbox: raise RetryTaskException('Local files < Dropbox files') if payload.get('preview'): filename = os.path.join(project.storage_path, 'preview.jpg') # For Previews, an arbitrary size # We need to add setting preview size to layouter... project.set_status('previewing') else: filename = os.path.join(project.storage_path, 'metabotnik.jpg') project.set_status('generating') error_msgs = planodo.make_bitmap(project, filename) new_task(project.user, { 'action': 'makedeepzoom', 'project_id': project.pk }) if payload.get('sendemail'): send_mail('Your generation task for project %s done' % project, 'It can be viewed at https://metabotnik.com/projects/%s/' % project.pk, '[email protected]', [project.user.email], fail_silently=False) project.set_status('layout') if error_msgs: return {'error': error_msgs} def download_dropboxfiles(payload): # Get the Project project = Project.objects.get(pk=payload['project_id']) project.set_status('downloading') # Check to see what files to download from Dropbox client = Dropbox(project.user.dropboxinfo.access_token) num_files = 0 for x in client.files_list_folder(project.path).entries: if x.path_lower.endswith('.jpg') and x.size > 0: # Download the file from Dropbox to local disk local_filename = os.path.split(x.path_lower)[-1] local_filepath = os.path.join(project.originals_path, local_filename) num_files += 1 if os.path.exists(local_filepath): # and not payload.get('redownload') == True continue client.files_download_to_file(local_filepath, x.path_lower) # Get the metadata as a separate task new_task(project.user, { 'action': 'extract_metadata', 'project_id': project.pk }) # schedule a thumbnail task new_task(project.user, { 'action': 'makethumbnails', 'project_id': project.pk }) # Downloading files can take a long time # In the meantime this Project could have been changed by other tasks # Reload it before setting the status project = Project.objects.get(pk=payload['project_id']) project.num_files_on_dropbox = num_files project.status = 'layout' project.save() return {'downloaded_files_count':num_files} def extract_metadata(payload): project = Project.objects.get(pk=payload['project_id']) current_files = {} for image in project.files.all(): current_files[image.filename] = image # For every file, read the metadata order = 1 for filename in natsorted(os.listdir(project.originals_path)): if not filename.endswith('.jpg'): continue filepath = os.path.join(project.originals_path, filename) image = current_files.get(filename, File(project=project, filename=filename, order=order) ) order += 1 try: tmp = read_metadata(filepath) image.metadata = json.dumps(tmp) except: pass # check the filesize image.size = os.stat(filepath).st_size # check the image size image.width, image.height = Image.open(filepath).size 
        image.save()


def makemetametabotnik(payload):
    'For all the public Projects with a metabotnik, make a thumbnail of them and combine into one giant metabotnik'
    metaproject = Project.objects.get(name='Metametabotnik')
    for p in Project.objects.filter(public=True):
        path = p.metabotnik_path()
        if not path:
            continue
        subprocess.call(['%svipsthumbnail' % settings.VIPSBIN_PATH, '-o', 'preview.jpg', '-s', '1000', path])
        preview_path = os.path.join(p.storage_path, 'preview.jpg')
        os.link(preview_path, os.path.join(metaproject.originals_path, 'project_%s.jpg' % p.pk))
    extract_metadata({'project_id': metaproject.pk})
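

# --- Illustrative sketch; not part of the original module. ---
# execute_task() looks task functions up by name in globals(), passes them the
# task payload dict, and merges any returned dict back into the payload.
# Raising RetryTaskException puts the task back into the 'new' state so it is
# retried later. The function below is a hypothetical example that follows
# those conventions; it is not scheduled anywhere.
def example_check_downloads(payload):
    project = Project.objects.get(pk=payload['project_id'])
    if project.num_files_local < project.num_files_on_dropbox:
        # Not everything has been downloaded yet, ask to be retried.
        raise RetryTaskException('Local files < Dropbox files')
    # Returned values are merged into the task payload by execute_task().
    return {'checked_at': timezone.now().isoformat()}
# It would be queued like the other tasks, e.g.:
# new_task(project.user, {'action': 'example_check_downloads', 'project_id': project.pk})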
#!/usr/bin/env python # -*- coding: utf-8 -*- import argparse import ast # TODO: add to geoocode_db_utils import db import json import logging import psycopg2.extras from psycopg2.extensions import AsIs import urllib import yaml # Load the key we use for geocoding addressed # TODO: have a config file for this geocode_key = None with open("/tmp/geocode_key.txt", 'r') as f: geocode_key = yaml.load(f, Loader=yaml.FullLoader)['key'] #db.parser.add_argument("--related", help="Only compute related table", action="store_true") #db.parser.add_argument("--populate_has_data", help="Updates values so we know for which data we have somthing to show", action="store_true") db = db.db_connect() logging.basicConfig( format='%(asctime)s %(levelname)s: %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p', level=logging.DEBUG) # global variable for tracking number of api call api_calls = 0 # Return dictionary cursor and use mysql.* paths for tables def getCursor(set_search_path='mysql'): global db cur = db.cursor(cursor_factory=psycopg2.extras.DictCursor) if set_search_path: cur.execute("SET search_path = %s", [set_search_path]) return cur def executeAndLog(cur, sql, params=None): command = cur.mogrify(sql, params) logging.info("SQL: " + command) cur.execute(command) # inserts the given dictionary {(column_name: value} into the given table # returns the id of the inserted row # if returnIDColumn is not None, also return 'RETURNIN returnIdColumn' def insertDictionary(table, d, returnIDColumn=None): with getCursor() as cur: columns = d.keys() values = [d[column] for column in columns] values_string = ','.join(["%s"] * len(values)) sql = 'INSERT INTO %s (%s) VALUES (' + values_string + ')' if returnIDColumn is not None: sql += 'RETURNING ' + returnIDColumn executeAndLog(cur, sql, [AsIs(table), AsIs(', '.join(columns))] + values) if returnIDColumn is not None: return cur.fetchone()[0] class IdMaster: # mappings # (has(name), address) -> id # (hash(name), (lat, lng)) -> id # (hash(address)) -> id name_address = {} name_lat_lng = {} address_data = {} def __init__(self): #load table to memory logging.info("Loading ids") cur = getCursor() executeAndLog(cur, "SELECT id, entity_name, address, original_address, lat, lng FROM entities") for row in cur: norm_name = self.normalize(row["entity_name"]) logging.info(norm_name) norm_address = self.normalize(row["address"]) norm_orig_address = self.normalize(row["original_address"]) t_address = (hash(norm_name), hash(norm_address)) t_orig_address = (hash(norm_name), hash(norm_orig_address)) t_lat_lng = (hash(norm_name), hash((str(row["lat"]), str(row["lng"])))) self.name_address[t_address] = row["id"] self.name_address[t_orig_address] = row["id"] self.name_lat_lng[t_lat_lng] = row["id"] self.address_data[hash(norm_orig_address)] = row["id"] cur.close() logging.info("Loading done") # normalize string be removing spaces, dots, commas and turning everything into lower cases # TODO: normalize upper case letters with diacritics def normalize(self, s): return s.lower().replace(" ", "").replace("."," ").replace(",", "") # load json for the provided entities.id def getJSONForId(self, row_id): logging.info("getJSONForId: " + str(row_id)) with getCursor() as cur: executeAndLog(cur, "SELECT json FROM entities WHERE id=%s", [row_id]) return json.loads(cur.fetchone()["json"]) # gecoodes given address. 
# returns json as returned by the GoogleAPI # the json is also stored in the database, so first the method checks whether # the hash of the provided address is stored in self.address_data # if the address cannot be geocoded, the method iteratively strips the first # word until it can geocode the address def geocode(self, address, mock=True): global api_calls, geocode_key logging.info("Geocoding: " + address) if mock: # TODO: fix unicode with open('tmp/address.json') as data_file: text = data_file.read() d = ast.literal_eval(text) return d["results"][0] return "" split = address.split(" ") for i in range(len(split)): # Let's ignore the first i words attempted = " ".join(split[i:]) # lookup if has been already geocoded norm_address = self.normalize(attempted) if (hash(norm_address) in self.address_data): return self.getJSONForId(self.address_data[hash(norm_address)]) # Build a request to the maps api # TODO: also add viewport biasing to slovakia params = { 'address': attempted.encode('utf-8'), 'region': 'sk', 'key': geocode_key } url = "https://maps.googleapis.com/maps/api/geocode/json?" + urllib.urlencode(params) try: response = urllib.urlopen(url) data = json.loads(response.read()) api_calls += 1 if data["status"] == 'OK': return data["results"] except: pass logging.info("Unable to geocode: (" + attempted + ") removing first word") return None # get entities.eid for the given (name, address) # if the pair is already is in the database, return the eid # otherwise create new entry in entities # Note this method tries to normalize the address (by geocoding to lat, lng) # as well as name, byt normalizing the string def getId(self, name, address): def toUnicode(s): if isinstance(s, str): return s.decode("utf8") return s name = toUnicode(name) address = toUnicode(address) norm_name = self.normalize(name) norm_address = self.normalize(address) t_address = (hash(norm_name), hash(norm_address)) if t_address in self.name_address: return self.name_address[t_address] djson = self.geocode(address, False) if djson is None: logging.info("Address " + address + "geocoded to None") return None g = djson[0] if g is None: logging.info("Address " + address + "geocoded to None") return None lat_n = g["geometry"]["location"]["lat"] lng_n = g["geometry"]["location"]["lng"] lat = "%3.7f" % lat_n lng = "%3.7f" % lng_n logging.info(address + " -> " + str(lat) + ", " + str(lng)) t_lat_lng = (hash(norm_name), hash((lat, lng))) if (t_lat_lng) in self.name_lat_lng: return self.name_lat_lng[t_lat_lng] #add to table data = { "address" : g["formatted_address"], "original_address": address, "entity_name": name, "json": json.dumps(djson), "lat": lat, "lng": lng } with getCursor() as cur: # Add entry into entities. As this is new entity, we set its eid=id. # However, before inserting the entry, we don't know id, so initially insert # -1 and then set eid to match. 
data["eid" ] = -1 row_id = int(insertDictionary("entities", data, returnIDColumn='id')) # remember the id for the various representations logging.info("Added id " + str(row_id)) self.address_data[hash(norm_address)] = row_id self.name_address[t_address] = row_id self.name_lat_lng[t_lat_lng] = row_id executeAndLog(cur, "UPDATE entities SET eid=%s WHERE id=%s", [row_id, row_id]) return row_id # returns set(ids) of ids that were already geocoded in the previous rus def getGeocodedIds(table_name): logging.info("getGeocodedIds " + table_name) result = set() with getCursor() as cur: sql = "SELECT orig_id FROM " + table_name executeAndLog(cur, sql) for row in cur: result.add(row["orig_id"]) return result master = None # TODO: document all the params # The master method to gecoode raw data into our format # Take 'input_table' and # 1) geocode it: transforms 'address_column' into lat long, # 2) add new entities to entities table, # 3) column 'id_column' is the column with identifiers in the 'input_table'. # The method creates 'input_table'_geocoded_" table which contains # mappings between ids from 'id_column' and entities.id for the # corresponding new record in entities table # 4) name of the added entities is extracted from 'name_column' # 5) 'extra_columns' = dist{column_name: new_column_name} is a dictionary. # Keys are names of columns from input_table that get also extracted from # the input table. The data are extracted into table called 'new_table_name' # and the column names are the values in the 'extra_columns' dictionary. # As as convention, new_table_name = `source_name`_data # 6) to keep track from which source individual entries in entities table # came from, the method adds new boolean column 'source_name', which is # true if the data came from that particular source. def checkTableExists(table): with getCursor() as cur: executeAndLog(cur, "SELECT * FROM information_schema.tables WHERE table_name=%s", [table]) exists = bool(cur.rowcount) logging.info("Exists %s = %s", table, exists) return exists def getColumnType(table, column): with getCursor() as cur: executeAndLog(cur, "SELECT data_type FROM information_schema.columns " + "WHERE table_name = %s AND column_name = %s", [table, column]) try: column_type = cur.fetchone()[0] except: logging.info("columns does not exist") return None logging.info("Type %s.%s => %s", table, column, column_type) return column_type def geocodeTable( input_table, name_column, address_column, id_column, source_name, new_table_name, extra_columns, max_process=None, address_like=None, address_like_column=None, geocoded_table=None, input_table_search_path='public'): global api_calls, master # Here's to the crazy ones: # Create `input_table'_geocoded if id_column is not None: logging.info("Creating geocoded table") # Default to *_geocoded_, unless provided otherwise. 
geocoded_id_table = input_table + "_geocoded_" \ if geocoded_table is None else geocoded_table if not checkTableExists(geocoded_id_table): with getCursor() as new_cursor: getColumnType(input_table, id_column) executeAndLog(new_cursor, "CREATE TABLE " + geocoded_id_table + "(" "orig_id " + getColumnType(input_table, id_column) + " PRIMARY KEY," "new_id INTEGER)") # Create table with extra column if ((new_table_name is not None) and (not checkTableExists(new_table_name))): logging.info("Creatting table: " + new_table_name) new_table_sql = "CREATE TABLE " + new_table_name + " (id INTEGER" for column in extra_columns: new_table_sql += ( ", " + extra_columns[column] + " " + getColumnType(input_table, column)) new_table_sql += ", FOREIGN KEY (id) REFERENCES entities(id))" new_index_sql = "CREATE INDEX " + new_table_name + "_id_idx" \ " ON " + new_table_name + " (id)" with getCursor() as new_cursor: executeAndLog(new_cursor, new_table_sql) executeAndLog(new_cursor, new_index_sql) # add tag column into entities denoting the source # todo: having columns is quite bad, turn this into a table as it should be. if (getColumnType("entities", source_name) is None): logging.info("Adding column to entities") with getCursor() as new_cursor: executeAndLog(new_cursor, "ALTER TABLE entities ADD " + source_name + " BOOL") # Build select statement, which extracts id, name, addres and extra column # from the input table select_sql = \ "SELECT " + \ (", ".join([id_column, name_column + " as name", address_column + " as address"] + \ extra_columns.keys())) + \ " FROM " + input_table if address_like is not None: select_sql += " WHERE " + \ (address_column if address_like_column is None else address_like_column) + \ " LIKE \"%" + address_like + "%\"" else: # Requires address is not null select_sql += " WHERE " + address_column + " IS NOT NULL" logging.info(select_sql) not_geocoded = 0 geocoded_ids = getGeocodedIds(geocoded_id_table) remaining = max_process if max_process is not None else 100 processed = 0 not_geocoded = 0 skipped = 0 # dirty trick to escape from 'mysql' search_path. Todo: figure out proper # search_paths for different kinds of tables cur = getCursor(set_search_path=input_table_search_path) executeAndLog(cur, select_sql) for row in cur: # For each row, if not procesed already, # geocode, add new row into entities, # copy extra_columns into 'new_table_name' # and mark new record in entities as coming from this source. api_calls_before = api_calls processed += 1 if (processed % 1000 == 0): logging.info("Total number of rows processed: " + str(processed) + ", remaining: " + str(remaining) + ", skipped: " + str(skipped)) logging.info("Not geocoded: " + str(not_geocoded)) if (row[id_column] in geocoded_ids): skipped += 1 continue to_id = master.getId(row["name"], row["address"]) if (to_id is None): not_geocoded += 1 if id_column is not None: insertDictionary(geocoded_id_table, {"orig_id": row[id_column], "new_id": to_id}) if (new_table_name is not None): new_data = {"id": to_id} for column in extra_columns: new_data[extra_columns[column]] = row[column] insertDictionary(new_table_name, new_data) with getCursor() as new_cursor: executeAndLog(new_cursor, "UPDATE entities SET " + source_name + "=TRUE WHERE id=%s", [to_id]) geocoded_ids.add(row[id_column]) remaining -= (api_calls - api_calls_before) # processed requested number of api calls. 
stop if (remaining <= 0): break cur.close() def nullOrEmpty(x): return "CASE WHEN " + x + " is NULL THEN '' ELSE " + x + " END" def getConcat(x, y, z=None): s="concat(" + nullOrEmpty(x) + ", \" \", " + nullOrEmpty(y) if not z is None: s += ", \" \", " + nullOrEmpty(z) s += ")" return s def getConcatList(l): s = ", ' ', ".join([nullOrEmpty(x) for x in l]) return "TRIM(CONCAT(" + s + "))" def getNewId(table_name): return table_name + ".new_id" def getMapping(table_name): result = {} cur = db.getCursor() from_index = 0 batch_size = 33333 while True: sql = "SELECT new_id, orig_id FROM " + table_name + \ " LIMIT " + str(batch_size) + " OFFSET " + str(from_index) db.execute(cur, sql) processed = False; for row in cur.fetchall(): processed = True result[row["orig_id"]] = row["new_id"] if not processed: break from_index += batch_size return result # TODO: rewrite this to postgres def populateRelated(relationship_table, colA, colB, tableA, tableB): print "populateRelated", relationship_table cur = db.getCursor() mapSql = "SELECT id, eid FROM entities" db.execute(cur, mapSql) id_to_eid = {} for row in cur.fetchall(): id_to_eid[row["id"]] = row["eid"] print "loading mapping" mapA = getMapping(tableA) mapB = getMapping(tableB) print "mapping loaded" sql = "SELECT " + colA + ", " + colB + " FROM " + relationship_table cur = db.getCursor() db.execute(cur, sql) index = 0 for row in cur.fetchall(): index += 1 if (index % 50 == 0): print "index", index db.db.commit() valA = row.get(colA, None) valB = row.get(colB, None) if (valA is not None) and (valB is not None) and (valA in mapA) and (valB in mapB): newA = mapA[valA] newB = mapB[valB] if not newA in id_to_eid: logging.info("Missing " + str(newA) + " in id_to_eid") continue if not newB in id_to_eid: logging.info("Missing " + str(newB) + " in id_to_eid") continue db.insertDictionary("related", {"id1": newA, "eid1": id_to_eid[newA], "id2": newB, "eid2": id_to_eid[newB]}) db.db.commit() # TODO: rewrite this to postgres def processRelated(): logging.info("processRelated") cur = db.getCursor() db.execute(cur, "DELETE FROM related") db.db.commit() populateRelated("people_esd", "organization_id", "record_id", "orsresd_geocoded_", "people_esd_geocoded_") populateRelated("orsr_relationships", "id_osoby", "id_firmy", "orsr_persons_geocoded_", "firmy_unified2_geocoded_") populateRelated("relation", "_record_id", "_record_id", "relation_from_geocoded_", "relation_to_geocoded_") def process(limit): afp_original_db_id = "_record_id" esd_address = ( "if(address_formatted is not null, address_formatted, " + \ getConcatList(["address_street", "address_building_number", "address_municipality", "address_postal_code", "address_country"]) + ")" ) geocodeTable("people_esd", "IF(full_name IS NOT NULL, full_name, person_formatted_name)", esd_address, "record_id", "people_esd", "people_esd_data", {"note": "note"}, limit, address_like=None) esd_address = ( "if(formatted_address is not null, formatted_address, " + \ getConcatList(["street", "building_number", "municipality", "postal_code", "country"]) + ")" ) # orsr from ekosystem.slovensko.digital geocodeTable("orsresd", "name", esd_address, "id", "orsresd", "orsresd_data", {"ipo": "ico"}, limit, address_like=None) # Old ORSR # before geocoding this, check whether there's an overlap with the other ORSR # geocodeTable("orsr_persons", # "meno", "adresa", "id_osoby", "orsr_persons", None, {}, limit, address_like=None) # # geocodeTable("firmy_unified2", # "nazov", "adresa", "id", "orsr_companies", "firmy_data", # {"ico": "ico" , 
"pravna_forma": "pravna_forma", "start": "start", "end": "end"}, # limit, address_like=None) #Zivnostentsky register geocodeTable("zrsr", "name", getConcat("address1", "address2"), "id", "zrsr", "zrsr_data", {"ico": "ico", "active": "active"}, limit, address_like=None) return #Uzivatelia vyhody - ludia geocodeTable("uzivatelia_vyhody", "meno", "adresa", "record_id", "uzivatelia_vyhody_clovek", "uzivatelia_vyhody_ludia_data", {"funkcionar": "funkcionar"}, limit, geocoded_table="vyhodia_ludia_geocode", address_like=None) #Uzivatelia vyhody - firmy geocodeTable("uzivatelia_vyhody", "spolocnost", "adresa_spolocnosti", "record_id", "uzivatelia_vyhody_firma", "uzivatelia_vyhody_firmy_data", {"ico": "ico", "forma": "forma"}, limit, geocoded_table="vyhody_firmy_geocode", address_like=None) return # DataNest tables geocodeTable("ds_sponzori_stran", getConcat("meno_darcu", "priezvisko_darcu", "firma_darcu"), "adresa_darcu", afp_original_db_id, "ds_sponzori_stran", "sponzori_stran_data", {"hodnota_daru": "hodnota_daru", "strana": "strana", "rok": "rok"}, limit, address_like=None) geocodeTable("ds_stranicke_clenske_prispevky", getConcat("meno", "priezvisko"), getConcat("adresa", "mesto"), afp_original_db_id, "ds_stranicke_prispevky", "stranicke_prispevky_data", {"strana": "strana", "rok": "rok", "vyska_prispevku": "vyska_prispevku", "mena": "mena"}, limit, address_like=None) geocodeTable("ds_advokati", getConcat("meno_advokata", "priezvisko_advokata"), getConcat("adresa", "mesto", "psc"), "afp_original_db_id", "ds_advokati", "advokati_data", {"telefonne_cislo" : "telefonne_cislo"}, limit, address_like=None) geocodeTable("ds_nadacie", getConcat("meno_spravcu", "priezvisko_spravcu"), "adresa_spravcu", afp_original_db_id, "ds_nadacie_spravca", None, {}, limit, address_like=None, geocoded_table="ds_nadacie_spravca_geocoded_") geocodeTable("ds_nadacie", "nazov_nadacie", "adresa_nadacie", afp_original_db_id, "ds_nadacie", "nadacie_data", {"ico_nadacie": "ico_nadacie", "hodnota_imania": "hodnota_imania", "poznamka": "poznamka", "ucel_nadacie": "ucel_nadacie"}, limit, address_like=None) geocodeTable("ds_dotacie_audiovizfond", getConcat("first_name", "last_name", "company"), getConcat("address", "zip_code", "town"), "_record_id", "ds_dotacie_audiovizfond", "audiovizfond_data", {"amount": "amount", "currency": "currency", "subsidy_subject": "subsidy_subject", "year": "year"}, limit, address_like=None) geocodeTable("ds_auditori", getConcat("meno", "priezvisko", "firma"), getConcat("adresa", "mesto", "psc"), afp_original_db_id, "ds_auditori", "auditori_data", {"cislo_licencie" : "cislo_licencie", "typ_auditora" : "typ_auditora"}, limit, address_like=None) return geocodeTable("ds_danovi_dlznici", getConcat("meno", "priezvisko"), getConcat("adresa", "mesto"), afp_original_db_id, "ds_danovi_dlznici", "danovi_dlznici_data", {"danovy_nedoplatok": "danovy_nedoplatok", "zdroj": "zdroj", "mena": "mena"}, limit, address_like=None) # ORSR data geocodeTable("relation", "rel_name", "rel_address", afp_original_db_id, "new_orsr", None, {}, limit, address_like=None, address_like_column="city", geocoded_table="relation_to_geocoded_") geocodeTable("relation", "name", getConcat("street", "city", "psc"), afp_original_db_id, "new_orsr", "new_orsr_data", {"ico": "ico", "url": "url", "start": "start"}, limit, address_like=None, address_like_column="rel_address", geocoded_table="relation_from_geocoded_") def populateHasDataForTable(table): with open("../verejne/datasources.yaml", "r") as f: data_sources = yaml.load(f, 
Loader=yaml.FullLoader) with getCursor() as cur: print "Populating has data for", table condition = " OR ".join([column + " IS NOT NULL" for column in data_sources[table]]) sql = "UPDATE entities " + \ "SET has_data=1 " + \ "FROM " + table + \ " WHERE entities.id = " + table + ".id " + \ " AND (" + condition + ")" executeAndLog(cur, sql) def populateHasData(): with open("../verejne/datasources.yaml", "r") as f: data_sources = yaml.load(f, Loader=yaml.FullLoader) with getCursor() as cur: #executeAndLog(cur, "UPDATE entities SET has_data = NULL") for table in data_sources: if table == "entities": continue populateHasDataForTable(table) print "Populating has_data based on contracts" sql = "UPDATE entities SET has_data=1 FROM contracts WHERE entities.id = contracts.id" executeAndLog(cur, sql) print "Populating has_data based on related" sql = "UPDATE entities SET has_data=1 FROM related WHERE entities.id = related.id1" executeAndLog(cur, sql) sql = "UPDATE entities SET has_data=1 FROM related WHERE entities.id = related.id2" executeAndLog(cur, sql) #if db.args.related: # logging.info("OnlyComputingRelated") # processRelated() #elif db.args.populate_has_data: # logging.info("populate_has_data") # populateHasData() #else: if False: # Here's an example how to geocode a particular table. master = IdMaster() geocodeTable( input_table='politicians', name_column=getConcatList(["title", "firstname", "surname"]), address_column="address", id_column="id", source_name="politicians", new_table_name="politicians_data", extra_columns={ "title": "title", "firstname": "firstname", "surname": "surname", "email": "email", "office_id": "office_id", "term_start": "term_start", "term_end": "term_end", "party_nom": "party_nom", "party": "party", "source": "source", "picture": "picture"}, max_process=1000000 ) populateHasDataForTable('politicians_data') # Everything done, commit and close the connection logging.info("Commiting!") db.commit() #db.rollback() db.close() logging.info("API Calls: " + str(api_calls))
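

# --- Illustrative sketch; not part of the original script. ---
# A minimal, hedged example of resolving a single (name, address) pair to an
# entities.id with IdMaster and tagging its source column, mirroring what
# geocodeTable() does for every row. The source column name is hypothetical
# and, like geocodeTable(), this assumes the column already exists on the
# entities table and that the database connection is still open.
def exampleGeocodeOne(name, address, source_name="example_source"):
    resolver = IdMaster()
    entity_id = resolver.getId(name, address)
    if entity_id is None:
        logging.info("Could not geocode: " + address)
        return None
    with getCursor() as cur:
        executeAndLog(cur, "UPDATE entities SET " + source_name +
                      "=TRUE WHERE id=%s", [entity_id])
    return entity_id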
""" Test the ColumnTransformer. """ import re import pickle import numpy as np from scipy import sparse import pytest from numpy.testing import assert_allclose from sklearn.utils._testing import assert_array_equal from sklearn.utils._testing import assert_allclose_dense_sparse from sklearn.utils._testing import assert_almost_equal from sklearn.base import BaseEstimator from sklearn.compose import ( ColumnTransformer, make_column_transformer, make_column_selector ) from sklearn.exceptions import NotFittedError from sklearn.preprocessing import FunctionTransformer from sklearn.preprocessing import StandardScaler, Normalizer, OneHotEncoder from sklearn.feature_extraction import DictVectorizer class Trans(BaseEstimator): def fit(self, X, y=None): return self def transform(self, X, y=None): # 1D Series -> 2D DataFrame if hasattr(X, 'to_frame'): return X.to_frame() # 1D array -> 2D array if X.ndim == 1: return np.atleast_2d(X).T return X class DoubleTrans(BaseEstimator): def fit(self, X, y=None): return self def transform(self, X): return 2*X class SparseMatrixTrans(BaseEstimator): def fit(self, X, y=None): return self def transform(self, X, y=None): n_samples = len(X) return sparse.eye(n_samples, n_samples).tocsr() class TransNo2D(BaseEstimator): def fit(self, X, y=None): return self def transform(self, X, y=None): return X class TransRaise(BaseEstimator): def fit(self, X, y=None): raise ValueError("specific message") def transform(self, X, y=None): raise ValueError("specific message") def test_column_transformer(): X_array = np.array([[0, 1, 2], [2, 4, 6]]).T X_res_first1D = np.array([0, 1, 2]) X_res_second1D = np.array([2, 4, 6]) X_res_first = X_res_first1D.reshape(-1, 1) X_res_both = X_array cases = [ # single column 1D / 2D (0, X_res_first), ([0], X_res_first), # list-like ([0, 1], X_res_both), (np.array([0, 1]), X_res_both), # slice (slice(0, 1), X_res_first), (slice(0, 2), X_res_both), # boolean mask (np.array([True, False]), X_res_first), ([True, False], X_res_first), (np.array([True, True]), X_res_both), ([True, True], X_res_both), ] for selection, res in cases: ct = ColumnTransformer([('trans', Trans(), selection)], remainder='drop') assert_array_equal(ct.fit_transform(X_array), res) assert_array_equal(ct.fit(X_array).transform(X_array), res) # callable that returns any of the allowed specifiers ct = ColumnTransformer([('trans', Trans(), lambda x: selection)], remainder='drop') assert_array_equal(ct.fit_transform(X_array), res) assert_array_equal(ct.fit(X_array).transform(X_array), res) ct = ColumnTransformer([('trans1', Trans(), [0]), ('trans2', Trans(), [1])]) assert_array_equal(ct.fit_transform(X_array), X_res_both) assert_array_equal(ct.fit(X_array).transform(X_array), X_res_both) assert len(ct.transformers_) == 2 # test with transformer_weights transformer_weights = {'trans1': .1, 'trans2': 10} both = ColumnTransformer([('trans1', Trans(), [0]), ('trans2', Trans(), [1])], transformer_weights=transformer_weights) res = np.vstack([transformer_weights['trans1'] * X_res_first1D, transformer_weights['trans2'] * X_res_second1D]).T assert_array_equal(both.fit_transform(X_array), res) assert_array_equal(both.fit(X_array).transform(X_array), res) assert len(both.transformers_) == 2 both = ColumnTransformer([('trans', Trans(), [0, 1])], transformer_weights={'trans': .1}) assert_array_equal(both.fit_transform(X_array), 0.1 * X_res_both) assert_array_equal(both.fit(X_array).transform(X_array), 0.1 * X_res_both) assert len(both.transformers_) == 1 def test_column_transformer_dataframe(): pd = 
pytest.importorskip('pandas') X_array = np.array([[0, 1, 2], [2, 4, 6]]).T X_df = pd.DataFrame(X_array, columns=['first', 'second']) X_res_first = np.array([0, 1, 2]).reshape(-1, 1) X_res_both = X_array cases = [ # String keys: label based # scalar ('first', X_res_first), # list (['first'], X_res_first), (['first', 'second'], X_res_both), # slice (slice('first', 'second'), X_res_both), # int keys: positional # scalar (0, X_res_first), # list ([0], X_res_first), ([0, 1], X_res_both), (np.array([0, 1]), X_res_both), # slice (slice(0, 1), X_res_first), (slice(0, 2), X_res_both), # boolean mask (np.array([True, False]), X_res_first), (pd.Series([True, False], index=['first', 'second']), X_res_first), ([True, False], X_res_first), ] for selection, res in cases: ct = ColumnTransformer([('trans', Trans(), selection)], remainder='drop') assert_array_equal(ct.fit_transform(X_df), res) assert_array_equal(ct.fit(X_df).transform(X_df), res) # callable that returns any of the allowed specifiers ct = ColumnTransformer([('trans', Trans(), lambda X: selection)], remainder='drop') assert_array_equal(ct.fit_transform(X_df), res) assert_array_equal(ct.fit(X_df).transform(X_df), res) ct = ColumnTransformer([('trans1', Trans(), ['first']), ('trans2', Trans(), ['second'])]) assert_array_equal(ct.fit_transform(X_df), X_res_both) assert_array_equal(ct.fit(X_df).transform(X_df), X_res_both) assert len(ct.transformers_) == 2 assert ct.transformers_[-1][0] != 'remainder' ct = ColumnTransformer([('trans1', Trans(), [0]), ('trans2', Trans(), [1])]) assert_array_equal(ct.fit_transform(X_df), X_res_both) assert_array_equal(ct.fit(X_df).transform(X_df), X_res_both) assert len(ct.transformers_) == 2 assert ct.transformers_[-1][0] != 'remainder' # test with transformer_weights transformer_weights = {'trans1': .1, 'trans2': 10} both = ColumnTransformer([('trans1', Trans(), ['first']), ('trans2', Trans(), ['second'])], transformer_weights=transformer_weights) res = np.vstack([transformer_weights['trans1'] * X_df['first'], transformer_weights['trans2'] * X_df['second']]).T assert_array_equal(both.fit_transform(X_df), res) assert_array_equal(both.fit(X_df).transform(X_df), res) assert len(both.transformers_) == 2 assert both.transformers_[-1][0] != 'remainder' # test multiple columns both = ColumnTransformer([('trans', Trans(), ['first', 'second'])], transformer_weights={'trans': .1}) assert_array_equal(both.fit_transform(X_df), 0.1 * X_res_both) assert_array_equal(both.fit(X_df).transform(X_df), 0.1 * X_res_both) assert len(both.transformers_) == 1 assert both.transformers_[-1][0] != 'remainder' both = ColumnTransformer([('trans', Trans(), [0, 1])], transformer_weights={'trans': .1}) assert_array_equal(both.fit_transform(X_df), 0.1 * X_res_both) assert_array_equal(both.fit(X_df).transform(X_df), 0.1 * X_res_both) assert len(both.transformers_) == 1 assert both.transformers_[-1][0] != 'remainder' # ensure pandas object is passed through class TransAssert(BaseEstimator): def fit(self, X, y=None): return self def transform(self, X, y=None): assert isinstance(X, (pd.DataFrame, pd.Series)) if isinstance(X, pd.Series): X = X.to_frame() return X ct = ColumnTransformer([('trans', TransAssert(), 'first')], remainder='drop') ct.fit_transform(X_df) ct = ColumnTransformer([('trans', TransAssert(), ['first', 'second'])]) ct.fit_transform(X_df) # integer column spec + integer column names -> still use positional X_df2 = X_df.copy() X_df2.columns = [1, 0] ct = ColumnTransformer([('trans', Trans(), 0)], remainder='drop') 
assert_array_equal(ct.fit_transform(X_df2), X_res_first) assert_array_equal(ct.fit(X_df2).transform(X_df2), X_res_first) assert len(ct.transformers_) == 2 assert ct.transformers_[-1][0] == 'remainder' assert ct.transformers_[-1][1] == 'drop' assert_array_equal(ct.transformers_[-1][2], [1]) @pytest.mark.parametrize("pandas", [True, False], ids=['pandas', 'numpy']) @pytest.mark.parametrize("column_selection", [[], np.array([False, False]), [False, False]], ids=['list', 'bool', 'bool_int']) @pytest.mark.parametrize("callable_column", [False, True]) def test_column_transformer_empty_columns(pandas, column_selection, callable_column): # test case that ensures that the column transformer does also work when # a given transformer doesn't have any columns to work on X_array = np.array([[0, 1, 2], [2, 4, 6]]).T X_res_both = X_array if pandas: pd = pytest.importorskip('pandas') X = pd.DataFrame(X_array, columns=['first', 'second']) else: X = X_array if callable_column: column = lambda X: column_selection # noqa else: column = column_selection ct = ColumnTransformer([('trans1', Trans(), [0, 1]), ('trans2', TransRaise(), column)]) assert_array_equal(ct.fit_transform(X), X_res_both) assert_array_equal(ct.fit(X).transform(X), X_res_both) assert len(ct.transformers_) == 2 assert isinstance(ct.transformers_[1][1], TransRaise) ct = ColumnTransformer([('trans1', TransRaise(), column), ('trans2', Trans(), [0, 1])]) assert_array_equal(ct.fit_transform(X), X_res_both) assert_array_equal(ct.fit(X).transform(X), X_res_both) assert len(ct.transformers_) == 2 assert isinstance(ct.transformers_[0][1], TransRaise) ct = ColumnTransformer([('trans', TransRaise(), column)], remainder='passthrough') assert_array_equal(ct.fit_transform(X), X_res_both) assert_array_equal(ct.fit(X).transform(X), X_res_both) assert len(ct.transformers_) == 2 # including remainder assert isinstance(ct.transformers_[0][1], TransRaise) fixture = np.array([[], [], []]) ct = ColumnTransformer([('trans', TransRaise(), column)], remainder='drop') assert_array_equal(ct.fit_transform(X), fixture) assert_array_equal(ct.fit(X).transform(X), fixture) assert len(ct.transformers_) == 2 # including remainder assert isinstance(ct.transformers_[0][1], TransRaise) def test_column_transformer_output_indices(): # Checks for the output_indices_ attribute X_array = np.arange(6).reshape(3, 2) ct = ColumnTransformer([('trans1', Trans(), [0]), ('trans2', Trans(), [1])]) X_trans = ct.fit_transform(X_array) assert ct.output_indices_ == {'trans1': slice(0, 1), 'trans2': slice(1, 2), 'remainder': slice(0, 0)} assert_array_equal(X_trans[:, [0]], X_trans[:, ct.output_indices_['trans1']]) assert_array_equal(X_trans[:, [1]], X_trans[:, ct.output_indices_['trans2']]) # test with transformer_weights and multiple columns ct = ColumnTransformer([('trans', Trans(), [0, 1])], transformer_weights={'trans': .1}) X_trans = ct.fit_transform(X_array) assert ct.output_indices_ == {'trans': slice(0, 2), 'remainder': slice(0, 0)} assert_array_equal(X_trans[:, [0, 1]], X_trans[:, ct.output_indices_['trans']]) assert_array_equal(X_trans[:, []], X_trans[:, ct.output_indices_['remainder']]) # test case that ensures that the attribute does also work when # a given transformer doesn't have any columns to work on ct = ColumnTransformer([('trans1', Trans(), [0, 1]), ('trans2', TransRaise(), [])]) X_trans = ct.fit_transform(X_array) assert ct.output_indices_ == {'trans1': slice(0, 2), 'trans2': slice(0, 0), 'remainder': slice(0, 0)} assert_array_equal(X_trans[:, [0, 1]], X_trans[:, 
ct.output_indices_['trans1']]) assert_array_equal(X_trans[:, []], X_trans[:, ct.output_indices_['trans2']]) assert_array_equal(X_trans[:, []], X_trans[:, ct.output_indices_['remainder']]) ct = ColumnTransformer([('trans', TransRaise(), [])], remainder='passthrough') X_trans = ct.fit_transform(X_array) assert ct.output_indices_ == {'trans': slice(0, 0), 'remainder': slice(0, 2)} assert_array_equal(X_trans[:, []], X_trans[:, ct.output_indices_['trans']]) assert_array_equal(X_trans[:, [0, 1]], X_trans[:, ct.output_indices_['remainder']]) def test_column_transformer_output_indices_df(): # Checks for the output_indices_ attribute with data frames pd = pytest.importorskip('pandas') X_df = pd.DataFrame(np.arange(6).reshape(3, 2), columns=['first', 'second']) ct = ColumnTransformer([('trans1', Trans(), ['first']), ('trans2', Trans(), ['second'])]) X_trans = ct.fit_transform(X_df) assert ct.output_indices_ == {'trans1': slice(0, 1), 'trans2': slice(1, 2), 'remainder': slice(0, 0)} assert_array_equal(X_trans[:, [0]], X_trans[:, ct.output_indices_['trans1']]) assert_array_equal(X_trans[:, [1]], X_trans[:, ct.output_indices_['trans2']]) assert_array_equal(X_trans[:, []], X_trans[:, ct.output_indices_['remainder']]) ct = ColumnTransformer([('trans1', Trans(), [0]), ('trans2', Trans(), [1])]) X_trans = ct.fit_transform(X_df) assert ct.output_indices_ == {'trans1': slice(0, 1), 'trans2': slice(1, 2), 'remainder': slice(0, 0)} assert_array_equal(X_trans[:, [0]], X_trans[:, ct.output_indices_['trans1']]) assert_array_equal(X_trans[:, [1]], X_trans[:, ct.output_indices_['trans2']]) assert_array_equal(X_trans[:, []], X_trans[:, ct.output_indices_['remainder']]) def test_column_transformer_sparse_array(): X_sparse = sparse.eye(3, 2).tocsr() # no distinction between 1D and 2D X_res_first = X_sparse[:, 0] X_res_both = X_sparse for col in [0, [0], slice(0, 1)]: for remainder, res in [('drop', X_res_first), ('passthrough', X_res_both)]: ct = ColumnTransformer([('trans', Trans(), col)], remainder=remainder, sparse_threshold=0.8) assert sparse.issparse(ct.fit_transform(X_sparse)) assert_allclose_dense_sparse(ct.fit_transform(X_sparse), res) assert_allclose_dense_sparse(ct.fit(X_sparse).transform(X_sparse), res) for col in [[0, 1], slice(0, 2)]: ct = ColumnTransformer([('trans', Trans(), col)], sparse_threshold=0.8) assert sparse.issparse(ct.fit_transform(X_sparse)) assert_allclose_dense_sparse(ct.fit_transform(X_sparse), X_res_both) assert_allclose_dense_sparse(ct.fit(X_sparse).transform(X_sparse), X_res_both) def test_column_transformer_list(): X_list = [ [1, float('nan'), 'a'], [0, 0, 'b'] ] expected_result = np.array([ [1, float('nan'), 1, 0], [-1, 0, 0, 1], ]) ct = ColumnTransformer([ ('numerical', StandardScaler(), [0, 1]), ('categorical', OneHotEncoder(), [2]), ]) assert_array_equal(ct.fit_transform(X_list), expected_result) assert_array_equal(ct.fit(X_list).transform(X_list), expected_result) def test_column_transformer_sparse_stacking(): X_array = np.array([[0, 1, 2], [2, 4, 6]]).T col_trans = ColumnTransformer([('trans1', Trans(), [0]), ('trans2', SparseMatrixTrans(), 1)], sparse_threshold=0.8) col_trans.fit(X_array) X_trans = col_trans.transform(X_array) assert sparse.issparse(X_trans) assert X_trans.shape == (X_trans.shape[0], X_trans.shape[0] + 1) assert_array_equal(X_trans.toarray()[:, 1:], np.eye(X_trans.shape[0])) assert len(col_trans.transformers_) == 2 assert col_trans.transformers_[-1][0] != 'remainder' col_trans = ColumnTransformer([('trans1', Trans(), [0]), ('trans2', SparseMatrixTrans(), 1)], 
sparse_threshold=0.1) col_trans.fit(X_array) X_trans = col_trans.transform(X_array) assert not sparse.issparse(X_trans) assert X_trans.shape == (X_trans.shape[0], X_trans.shape[0] + 1) assert_array_equal(X_trans[:, 1:], np.eye(X_trans.shape[0])) def test_column_transformer_mixed_cols_sparse(): df = np.array([['a', 1, True], ['b', 2, False]], dtype='O') ct = make_column_transformer( (OneHotEncoder(), [0]), ('passthrough', [1, 2]), sparse_threshold=1.0 ) # this shouldn't fail, since boolean can be coerced into a numeric # See: https://github.com/scikit-learn/scikit-learn/issues/11912 X_trans = ct.fit_transform(df) assert X_trans.getformat() == 'csr' assert_array_equal(X_trans.toarray(), np.array([[1, 0, 1, 1], [0, 1, 2, 0]])) ct = make_column_transformer( (OneHotEncoder(), [0]), ('passthrough', [0]), sparse_threshold=1.0 ) with pytest.raises(ValueError, match="For a sparse output, all columns should"): # this fails since strings `a` and `b` cannot be # coerced into a numeric. ct.fit_transform(df) def test_column_transformer_sparse_threshold(): X_array = np.array([['a', 'b'], ['A', 'B']], dtype=object).T # above data has sparsity of 4 / 8 = 0.5 # apply threshold even if all sparse col_trans = ColumnTransformer([('trans1', OneHotEncoder(), [0]), ('trans2', OneHotEncoder(), [1])], sparse_threshold=0.2) res = col_trans.fit_transform(X_array) assert not sparse.issparse(res) assert not col_trans.sparse_output_ # mixed -> sparsity of (4 + 2) / 8 = 0.75 for thres in [0.75001, 1]: col_trans = ColumnTransformer( [('trans1', OneHotEncoder(sparse=True), [0]), ('trans2', OneHotEncoder(sparse=False), [1])], sparse_threshold=thres) res = col_trans.fit_transform(X_array) assert sparse.issparse(res) assert col_trans.sparse_output_ for thres in [0.75, 0]: col_trans = ColumnTransformer( [('trans1', OneHotEncoder(sparse=True), [0]), ('trans2', OneHotEncoder(sparse=False), [1])], sparse_threshold=thres) res = col_trans.fit_transform(X_array) assert not sparse.issparse(res) assert not col_trans.sparse_output_ # if nothing is sparse -> no sparse for thres in [0.33, 0, 1]: col_trans = ColumnTransformer( [('trans1', OneHotEncoder(sparse=False), [0]), ('trans2', OneHotEncoder(sparse=False), [1])], sparse_threshold=thres) res = col_trans.fit_transform(X_array) assert not sparse.issparse(res) assert not col_trans.sparse_output_ def test_column_transformer_error_msg_1D(): X_array = np.array([[0., 1., 2.], [2., 4., 6.]]).T col_trans = ColumnTransformer([('trans', StandardScaler(), 0)]) msg = '1D data passed to a transformer' with pytest.raises(ValueError, match=msg): col_trans.fit(X_array) with pytest.raises(ValueError, match=msg): col_trans.fit_transform(X_array) col_trans = ColumnTransformer([('trans', TransRaise(), 0)]) for func in [col_trans.fit, col_trans.fit_transform]: with pytest.raises(ValueError, match="specific message"): func(X_array) def test_2D_transformer_output(): X_array = np.array([[0, 1, 2], [2, 4, 6]]).T # if one transformer is dropped, test that name is still correct ct = ColumnTransformer([('trans1', 'drop', 0), ('trans2', TransNo2D(), 1)]) msg = "the 'trans2' transformer should be 2D" with pytest.raises(ValueError, match=msg): ct.fit_transform(X_array) # because fit is also doing transform, this raises already on fit with pytest.raises(ValueError, match=msg): ct.fit(X_array) def test_2D_transformer_output_pandas(): pd = pytest.importorskip('pandas') X_array = np.array([[0, 1, 2], [2, 4, 6]]).T X_df = pd.DataFrame(X_array, columns=['col1', 'col2']) # if one transformer is dropped, test that name is 
still correct ct = ColumnTransformer([('trans1', TransNo2D(), 'col1')]) msg = "the 'trans1' transformer should be 2D" with pytest.raises(ValueError, match=msg): ct.fit_transform(X_df) # because fit is also doing transform, this raises already on fit with pytest.raises(ValueError, match=msg): ct.fit(X_df) @pytest.mark.parametrize("remainder", ['drop', 'passthrough']) def test_column_transformer_invalid_columns(remainder): X_array = np.array([[0, 1, 2], [2, 4, 6]]).T # general invalid for col in [1.5, ['string', 1], slice(1, 's'), np.array([1.])]: ct = ColumnTransformer([('trans', Trans(), col)], remainder=remainder) with pytest.raises(ValueError, match="No valid specification"): ct.fit(X_array) # invalid for arrays for col in ['string', ['string', 'other'], slice('a', 'b')]: ct = ColumnTransformer([('trans', Trans(), col)], remainder=remainder) with pytest.raises(ValueError, match="Specifying the columns"): ct.fit(X_array) # transformed n_features does not match fitted n_features col = [0, 1] ct = ColumnTransformer([('trans', Trans(), col)], remainder=remainder) ct.fit(X_array) X_array_more = np.array([[0, 1, 2], [2, 4, 6], [3, 6, 9]]).T msg = ("X has 3 features, but ColumnTransformer is expecting 2 features " "as input.") with pytest.raises(ValueError, match=msg): ct.transform(X_array_more) X_array_fewer = np.array([[0, 1, 2], ]).T err_msg = ("X has 1 features, but ColumnTransformer is expecting 2 " "features as input.") with pytest.raises(ValueError, match=err_msg): ct.transform(X_array_fewer) def test_column_transformer_invalid_transformer(): class NoTrans(BaseEstimator): def fit(self, X, y=None): return self def predict(self, X): return X X_array = np.array([[0, 1, 2], [2, 4, 6]]).T ct = ColumnTransformer([('trans', NoTrans(), [0])]) msg = "All estimators should implement fit and transform" with pytest.raises(TypeError, match=msg): ct.fit(X_array) def test_make_column_transformer(): scaler = StandardScaler() norm = Normalizer() ct = make_column_transformer((scaler, 'first'), (norm, ['second'])) names, transformers, columns = zip(*ct.transformers) assert names == ("standardscaler", "normalizer") assert transformers == (scaler, norm) assert columns == ('first', ['second']) def test_make_column_transformer_pandas(): pd = pytest.importorskip('pandas') X_array = np.array([[0, 1, 2], [2, 4, 6]]).T X_df = pd.DataFrame(X_array, columns=['first', 'second']) norm = Normalizer() ct1 = ColumnTransformer([('norm', Normalizer(), X_df.columns)]) ct2 = make_column_transformer((norm, X_df.columns)) assert_almost_equal(ct1.fit_transform(X_df), ct2.fit_transform(X_df)) def test_make_column_transformer_kwargs(): scaler = StandardScaler() norm = Normalizer() ct = make_column_transformer((scaler, 'first'), (norm, ['second']), n_jobs=3, remainder='drop', sparse_threshold=0.5) assert ct.transformers == make_column_transformer( (scaler, 'first'), (norm, ['second'])).transformers assert ct.n_jobs == 3 assert ct.remainder == 'drop' assert ct.sparse_threshold == 0.5 # invalid keyword parameters should raise an error message msg = re.escape( "make_column_transformer() got an unexpected " "keyword argument 'transformer_weights'" ) with pytest.raises(TypeError, match=msg): make_column_transformer((scaler, 'first'), (norm, ['second']), transformer_weights={'pca': 10, 'Transf': 1}) def test_make_column_transformer_remainder_transformer(): scaler = StandardScaler() norm = Normalizer() remainder = StandardScaler() ct = make_column_transformer((scaler, 'first'), (norm, ['second']), remainder=remainder) assert ct.remainder 
== remainder def test_column_transformer_get_set_params(): ct = ColumnTransformer([('trans1', StandardScaler(), [0]), ('trans2', StandardScaler(), [1])]) exp = {'n_jobs': None, 'remainder': 'drop', 'sparse_threshold': 0.3, 'trans1': ct.transformers[0][1], 'trans1__copy': True, 'trans1__with_mean': True, 'trans1__with_std': True, 'trans2': ct.transformers[1][1], 'trans2__copy': True, 'trans2__with_mean': True, 'trans2__with_std': True, 'transformers': ct.transformers, 'transformer_weights': None, 'verbose': False} assert ct.get_params() == exp ct.set_params(trans1__with_mean=False) assert not ct.get_params()['trans1__with_mean'] ct.set_params(trans1='passthrough') exp = {'n_jobs': None, 'remainder': 'drop', 'sparse_threshold': 0.3, 'trans1': 'passthrough', 'trans2': ct.transformers[1][1], 'trans2__copy': True, 'trans2__with_mean': True, 'trans2__with_std': True, 'transformers': ct.transformers, 'transformer_weights': None, 'verbose': False} assert ct.get_params() == exp def test_column_transformer_named_estimators(): X_array = np.array([[0., 1., 2.], [2., 4., 6.]]).T ct = ColumnTransformer([('trans1', StandardScaler(), [0]), ('trans2', StandardScaler(with_std=False), [1])]) assert not hasattr(ct, 'transformers_') ct.fit(X_array) assert hasattr(ct, 'transformers_') assert isinstance(ct.named_transformers_['trans1'], StandardScaler) assert isinstance(ct.named_transformers_.trans1, StandardScaler) assert isinstance(ct.named_transformers_['trans2'], StandardScaler) assert isinstance(ct.named_transformers_.trans2, StandardScaler) assert not ct.named_transformers_.trans2.with_std # check it are fitted transformers assert ct.named_transformers_.trans1.mean_ == 1. def test_column_transformer_cloning(): X_array = np.array([[0., 1., 2.], [2., 4., 6.]]).T ct = ColumnTransformer([('trans', StandardScaler(), [0])]) ct.fit(X_array) assert not hasattr(ct.transformers[0][1], 'mean_') assert hasattr(ct.transformers_[0][1], 'mean_') ct = ColumnTransformer([('trans', StandardScaler(), [0])]) ct.fit_transform(X_array) assert not hasattr(ct.transformers[0][1], 'mean_') assert hasattr(ct.transformers_[0][1], 'mean_') def test_column_transformer_get_feature_names_raises(): X_array = np.array([[0., 1., 2.], [2., 4., 6.]]).T ct = ColumnTransformer([('trans', Trans(), [0, 1])]) # raise correct error when not fitted with pytest.raises(NotFittedError): ct.get_feature_names() # raise correct error when no feature names are available ct.fit(X_array) msg = r"Transformer trans \(type Trans\) does not provide " \ r"get_feature_names" with pytest.raises(AttributeError, match=msg): ct.get_feature_names() @pytest.mark.parametrize("X, keys", [ (np.array([[{'a': 1, 'b': 2}, {'a': 3, 'b': 4}], [{'c': 5}, {'c': 6}]], dtype=object).T, ('a', 'b', 'c')), (np.array([[{1: 1, 2: 2}, {1: 3, 2: 4}], [{3: 5}, {3: 6}]], dtype=object).T, ('1', '2', '3')), ]) def test_column_transformer_get_feature_names(X, keys): ct = ColumnTransformer( [('col' + str(i), DictVectorizer(), i) for i in range(2)]) ct.fit(X) assert ct.get_feature_names() == [f'col0__{key}' for key in keys[:2]] + \ [f'col1__{keys[2]}'] # drop transformer ct = ColumnTransformer( [('col0', DictVectorizer(), 0), ('col1', 'drop', 1)]) ct.fit(X) assert ct.get_feature_names() == [f'col0__{key}' for key in keys[:2]] # passthrough transformer ct = ColumnTransformer([('trans', 'passthrough', [0, 1])]) ct.fit(X) assert ct.get_feature_names() == ['x0', 'x1'] ct = ColumnTransformer([('trans', DictVectorizer(), 0)], remainder='passthrough') ct.fit(X) assert ct.get_feature_names() == 
[f'trans__{key}' for key in keys[:2]] + \ ['x1'] ct = ColumnTransformer([('trans', 'passthrough', [1])], remainder='passthrough') ct.fit(X) assert ct.get_feature_names() == ['x1', 'x0'] ct = ColumnTransformer([('trans', 'passthrough', lambda x: [1])], remainder='passthrough') ct.fit(X) assert ct.get_feature_names() == ['x1', 'x0'] ct = ColumnTransformer([('trans', 'passthrough', np.array([False, True]))], remainder='passthrough') ct.fit(X) assert ct.get_feature_names() == ['x1', 'x0'] ct = ColumnTransformer([('trans', 'passthrough', slice(1, 2))], remainder='passthrough') ct.fit(X) assert ct.get_feature_names() == ['x1', 'x0'] def test_column_transformer_get_feature_names_dataframe(): # passthough transformer with a dataframe pd = pytest.importorskip('pandas') X = np.array([[{'a': 1, 'b': 2}, {'a': 3, 'b': 4}], [{'c': 5}, {'c': 6}]], dtype=object).T X_df = pd.DataFrame(X, columns=['col0', 'col1']) ct = ColumnTransformer([('trans', 'passthrough', ['col0', 'col1'])]) ct.fit(X_df) assert ct.get_feature_names() == ['col0', 'col1'] ct = ColumnTransformer([('trans', 'passthrough', [0, 1])]) ct.fit(X_df) assert ct.get_feature_names() == ['col0', 'col1'] ct = ColumnTransformer([('col0', DictVectorizer(), 0)], remainder='passthrough') ct.fit(X_df) assert ct.get_feature_names() == ['col0__a', 'col0__b', 'col1'] ct = ColumnTransformer([('trans', 'passthrough', ['col1'])], remainder='passthrough') ct.fit(X_df) assert ct.get_feature_names() == ['col1', 'col0'] ct = ColumnTransformer([('trans', 'passthrough', lambda x: x[['col1']].columns)], remainder='passthrough') ct.fit(X_df) assert ct.get_feature_names() == ['col1', 'col0'] ct = ColumnTransformer([('trans', 'passthrough', np.array([False, True]))], remainder='passthrough') ct.fit(X_df) assert ct.get_feature_names() == ['col1', 'col0'] ct = ColumnTransformer([('trans', 'passthrough', slice(1, 2))], remainder='passthrough') ct.fit(X_df) assert ct.get_feature_names() == ['col1', 'col0'] ct = ColumnTransformer([('trans', 'passthrough', [1])], remainder='passthrough') ct.fit(X_df) assert ct.get_feature_names() == ['col1', 'col0'] def test_column_transformer_special_strings(): # one 'drop' -> ignore X_array = np.array([[0., 1., 2.], [2., 4., 6.]]).T ct = ColumnTransformer( [('trans1', Trans(), [0]), ('trans2', 'drop', [1])]) exp = np.array([[0.], [1.], [2.]]) assert_array_equal(ct.fit_transform(X_array), exp) assert_array_equal(ct.fit(X_array).transform(X_array), exp) assert len(ct.transformers_) == 2 assert ct.transformers_[-1][0] != 'remainder' # all 'drop' -> return shape 0 array ct = ColumnTransformer( [('trans1', 'drop', [0]), ('trans2', 'drop', [1])]) assert_array_equal(ct.fit(X_array).transform(X_array).shape, (3, 0)) assert_array_equal(ct.fit_transform(X_array).shape, (3, 0)) assert len(ct.transformers_) == 2 assert ct.transformers_[-1][0] != 'remainder' # 'passthrough' X_array = np.array([[0., 1., 2.], [2., 4., 6.]]).T ct = ColumnTransformer( [('trans1', Trans(), [0]), ('trans2', 'passthrough', [1])]) exp = X_array assert_array_equal(ct.fit_transform(X_array), exp) assert_array_equal(ct.fit(X_array).transform(X_array), exp) assert len(ct.transformers_) == 2 assert ct.transformers_[-1][0] != 'remainder' # None itself / other string is not valid for val in [None, 'other']: ct = ColumnTransformer( [('trans1', Trans(), [0]), ('trans2', None, [1])]) msg = "All estimators should implement" with pytest.raises(TypeError, match=msg): ct.fit_transform(X_array) with pytest.raises(TypeError, match=msg): ct.fit(X_array) def 
test_column_transformer_remainder(): X_array = np.array([[0, 1, 2], [2, 4, 6]]).T X_res_first = np.array([0, 1, 2]).reshape(-1, 1) X_res_second = np.array([2, 4, 6]).reshape(-1, 1) X_res_both = X_array # default drop ct = ColumnTransformer([('trans1', Trans(), [0])]) assert_array_equal(ct.fit_transform(X_array), X_res_first) assert_array_equal(ct.fit(X_array).transform(X_array), X_res_first) assert len(ct.transformers_) == 2 assert ct.transformers_[-1][0] == 'remainder' assert ct.transformers_[-1][1] == 'drop' assert_array_equal(ct.transformers_[-1][2], [1]) # specify passthrough ct = ColumnTransformer([('trans', Trans(), [0])], remainder='passthrough') assert_array_equal(ct.fit_transform(X_array), X_res_both) assert_array_equal(ct.fit(X_array).transform(X_array), X_res_both) assert len(ct.transformers_) == 2 assert ct.transformers_[-1][0] == 'remainder' assert ct.transformers_[-1][1] == 'passthrough' assert_array_equal(ct.transformers_[-1][2], [1]) # column order is not preserved (passed through added to end) ct = ColumnTransformer([('trans1', Trans(), [1])], remainder='passthrough') assert_array_equal(ct.fit_transform(X_array), X_res_both[:, ::-1]) assert_array_equal(ct.fit(X_array).transform(X_array), X_res_both[:, ::-1]) assert len(ct.transformers_) == 2 assert ct.transformers_[-1][0] == 'remainder' assert ct.transformers_[-1][1] == 'passthrough' assert_array_equal(ct.transformers_[-1][2], [0]) # passthrough when all actual transformers are skipped ct = ColumnTransformer([('trans1', 'drop', [0])], remainder='passthrough') assert_array_equal(ct.fit_transform(X_array), X_res_second) assert_array_equal(ct.fit(X_array).transform(X_array), X_res_second) assert len(ct.transformers_) == 2 assert ct.transformers_[-1][0] == 'remainder' assert ct.transformers_[-1][1] == 'passthrough' assert_array_equal(ct.transformers_[-1][2], [1]) # error on invalid arg ct = ColumnTransformer([('trans1', Trans(), [0])], remainder=1) msg = ( "remainder keyword needs to be one of \'drop\', \'passthrough\', " "or estimator." 
) with pytest.raises(ValueError, match=msg): ct.fit(X_array) with pytest.raises(ValueError, match=msg): ct.fit_transform(X_array) # check default for make_column_transformer ct = make_column_transformer((Trans(), [0])) assert ct.remainder == 'drop' @pytest.mark.parametrize("key", [[0], np.array([0]), slice(0, 1), np.array([True, False])]) def test_column_transformer_remainder_numpy(key): # test different ways that columns are specified with passthrough X_array = np.array([[0, 1, 2], [2, 4, 6]]).T X_res_both = X_array ct = ColumnTransformer([('trans1', Trans(), key)], remainder='passthrough') assert_array_equal(ct.fit_transform(X_array), X_res_both) assert_array_equal(ct.fit(X_array).transform(X_array), X_res_both) assert len(ct.transformers_) == 2 assert ct.transformers_[-1][0] == 'remainder' assert ct.transformers_[-1][1] == 'passthrough' assert_array_equal(ct.transformers_[-1][2], [1]) @pytest.mark.parametrize( "key", [[0], slice(0, 1), np.array([True, False]), ['first'], 'pd-index', np.array(['first']), np.array(['first'], dtype=object), slice(None, 'first'), slice('first', 'first')]) def test_column_transformer_remainder_pandas(key): # test different ways that columns are specified with passthrough pd = pytest.importorskip('pandas') if isinstance(key, str) and key == 'pd-index': key = pd.Index(['first']) X_array = np.array([[0, 1, 2], [2, 4, 6]]).T X_df = pd.DataFrame(X_array, columns=['first', 'second']) X_res_both = X_array ct = ColumnTransformer([('trans1', Trans(), key)], remainder='passthrough') assert_array_equal(ct.fit_transform(X_df), X_res_both) assert_array_equal(ct.fit(X_df).transform(X_df), X_res_both) assert len(ct.transformers_) == 2 assert ct.transformers_[-1][0] == 'remainder' assert ct.transformers_[-1][1] == 'passthrough' assert_array_equal(ct.transformers_[-1][2], [1]) @pytest.mark.parametrize("key", [[0], np.array([0]), slice(0, 1), np.array([True, False, False])]) def test_column_transformer_remainder_transformer(key): X_array = np.array([[0, 1, 2], [2, 4, 6], [8, 6, 4]]).T X_res_both = X_array.copy() # second and third columns are doubled when remainder = DoubleTrans X_res_both[:, 1:3] *= 2 ct = ColumnTransformer([('trans1', Trans(), key)], remainder=DoubleTrans()) assert_array_equal(ct.fit_transform(X_array), X_res_both) assert_array_equal(ct.fit(X_array).transform(X_array), X_res_both) assert len(ct.transformers_) == 2 assert ct.transformers_[-1][0] == 'remainder' assert isinstance(ct.transformers_[-1][1], DoubleTrans) assert_array_equal(ct.transformers_[-1][2], [1, 2]) def test_column_transformer_no_remaining_remainder_transformer(): X_array = np.array([[0, 1, 2], [2, 4, 6], [8, 6, 4]]).T ct = ColumnTransformer([('trans1', Trans(), [0, 1, 2])], remainder=DoubleTrans()) assert_array_equal(ct.fit_transform(X_array), X_array) assert_array_equal(ct.fit(X_array).transform(X_array), X_array) assert len(ct.transformers_) == 1 assert ct.transformers_[-1][0] != 'remainder' def test_column_transformer_drops_all_remainder_transformer(): X_array = np.array([[0, 1, 2], [2, 4, 6], [8, 6, 4]]).T # columns are doubled when remainder = DoubleTrans X_res_both = 2 * X_array.copy()[:, 1:3] ct = ColumnTransformer([('trans1', 'drop', [0])], remainder=DoubleTrans()) assert_array_equal(ct.fit_transform(X_array), X_res_both) assert_array_equal(ct.fit(X_array).transform(X_array), X_res_both) assert len(ct.transformers_) == 2 assert ct.transformers_[-1][0] == 'remainder' assert isinstance(ct.transformers_[-1][1], DoubleTrans) assert_array_equal(ct.transformers_[-1][2], [1, 2]) def 
test_column_transformer_sparse_remainder_transformer(): X_array = np.array([[0, 1, 2], [2, 4, 6], [8, 6, 4]]).T ct = ColumnTransformer([('trans1', Trans(), [0])], remainder=SparseMatrixTrans(), sparse_threshold=0.8) X_trans = ct.fit_transform(X_array) assert sparse.issparse(X_trans) # SparseMatrixTrans creates 3 features for each column. There is # one column in ``transformers``, thus: assert X_trans.shape == (3, 3 + 1) exp_array = np.hstack( (X_array[:, 0].reshape(-1, 1), np.eye(3))) assert_array_equal(X_trans.toarray(), exp_array) assert len(ct.transformers_) == 2 assert ct.transformers_[-1][0] == 'remainder' assert isinstance(ct.transformers_[-1][1], SparseMatrixTrans) assert_array_equal(ct.transformers_[-1][2], [1, 2]) def test_column_transformer_drop_all_sparse_remainder_transformer(): X_array = np.array([[0, 1, 2], [2, 4, 6], [8, 6, 4]]).T ct = ColumnTransformer([('trans1', 'drop', [0])], remainder=SparseMatrixTrans(), sparse_threshold=0.8) X_trans = ct.fit_transform(X_array) assert sparse.issparse(X_trans) # SparseMatrixTrans creates 3 features for each column, thus: assert X_trans.shape == (3, 3) assert_array_equal(X_trans.toarray(), np.eye(3)) assert len(ct.transformers_) == 2 assert ct.transformers_[-1][0] == 'remainder' assert isinstance(ct.transformers_[-1][1], SparseMatrixTrans) assert_array_equal(ct.transformers_[-1][2], [1, 2]) def test_column_transformer_get_set_params_with_remainder(): ct = ColumnTransformer([('trans1', StandardScaler(), [0])], remainder=StandardScaler()) exp = {'n_jobs': None, 'remainder': ct.remainder, 'remainder__copy': True, 'remainder__with_mean': True, 'remainder__with_std': True, 'sparse_threshold': 0.3, 'trans1': ct.transformers[0][1], 'trans1__copy': True, 'trans1__with_mean': True, 'trans1__with_std': True, 'transformers': ct.transformers, 'transformer_weights': None, 'verbose': False} assert ct.get_params() == exp ct.set_params(remainder__with_std=False) assert not ct.get_params()['remainder__with_std'] ct.set_params(trans1='passthrough') exp = {'n_jobs': None, 'remainder': ct.remainder, 'remainder__copy': True, 'remainder__with_mean': True, 'remainder__with_std': False, 'sparse_threshold': 0.3, 'trans1': 'passthrough', 'transformers': ct.transformers, 'transformer_weights': None, 'verbose': False} assert ct.get_params() == exp def test_column_transformer_no_estimators(): X_array = np.array([[0, 1, 2], [2, 4, 6], [8, 6, 4]]).astype('float').T ct = ColumnTransformer([], remainder=StandardScaler()) params = ct.get_params() assert params['remainder__with_mean'] X_trans = ct.fit_transform(X_array) assert X_trans.shape == X_array.shape assert len(ct.transformers_) == 1 assert ct.transformers_[-1][0] == 'remainder' assert ct.transformers_[-1][2] == [0, 1, 2] @pytest.mark.parametrize( ['est', 'pattern'], [(ColumnTransformer([('trans1', Trans(), [0]), ('trans2', Trans(), [1])], remainder=DoubleTrans()), (r'\[ColumnTransformer\].*\(1 of 3\) Processing trans1.* total=.*\n' r'\[ColumnTransformer\].*\(2 of 3\) Processing trans2.* total=.*\n' r'\[ColumnTransformer\].*\(3 of 3\) Processing remainder.* total=.*\n$' )), (ColumnTransformer([('trans1', Trans(), [0]), ('trans2', Trans(), [1])], remainder='passthrough'), (r'\[ColumnTransformer\].*\(1 of 3\) Processing trans1.* total=.*\n' r'\[ColumnTransformer\].*\(2 of 3\) Processing trans2.* total=.*\n' r'\[ColumnTransformer\].*\(3 of 3\) Processing remainder.* total=.*\n$' )), (ColumnTransformer([('trans1', Trans(), [0]), ('trans2', 'drop', [1])], remainder='passthrough'), (r'\[ColumnTransformer\].*\(1 of 2\) 
Processing trans1.* total=.*\n' r'\[ColumnTransformer\].*\(2 of 2\) Processing remainder.* total=.*\n$' )), (ColumnTransformer([('trans1', Trans(), [0]), ('trans2', 'passthrough', [1])], remainder='passthrough'), (r'\[ColumnTransformer\].*\(1 of 3\) Processing trans1.* total=.*\n' r'\[ColumnTransformer\].*\(2 of 3\) Processing trans2.* total=.*\n' r'\[ColumnTransformer\].*\(3 of 3\) Processing remainder.* total=.*\n$' )), (ColumnTransformer([('trans1', Trans(), [0])], remainder='passthrough'), (r'\[ColumnTransformer\].*\(1 of 2\) Processing trans1.* total=.*\n' r'\[ColumnTransformer\].*\(2 of 2\) Processing remainder.* total=.*\n$' )), (ColumnTransformer([('trans1', Trans(), [0]), ('trans2', Trans(), [1])], remainder='drop'), (r'\[ColumnTransformer\].*\(1 of 2\) Processing trans1.* total=.*\n' r'\[ColumnTransformer\].*\(2 of 2\) Processing trans2.* total=.*\n$')), (ColumnTransformer([('trans1', Trans(), [0])], remainder='drop'), (r'\[ColumnTransformer\].*\(1 of 1\) Processing trans1.* total=.*\n$'))]) @pytest.mark.parametrize('method', ['fit', 'fit_transform']) def test_column_transformer_verbose(est, pattern, method, capsys): X_array = np.array([[0, 1, 2], [2, 4, 6], [8, 6, 4]]).T func = getattr(est, method) est.set_params(verbose=False) func(X_array) assert not capsys.readouterr().out, 'Got output for verbose=False' est.set_params(verbose=True) func(X_array) assert re.match(pattern, capsys.readouterr()[0]) def test_column_transformer_no_estimators_set_params(): ct = ColumnTransformer([]).set_params(n_jobs=2) assert ct.n_jobs == 2 def test_column_transformer_callable_specifier(): # assert that function gets the full array X_array = np.array([[0, 1, 2], [2, 4, 6]]).T X_res_first = np.array([[0, 1, 2]]).T def func(X): assert_array_equal(X, X_array) return [0] ct = ColumnTransformer([('trans', Trans(), func)], remainder='drop') assert_array_equal(ct.fit_transform(X_array), X_res_first) assert_array_equal(ct.fit(X_array).transform(X_array), X_res_first) assert callable(ct.transformers[0][2]) assert ct.transformers_[0][2] == [0] def test_column_transformer_callable_specifier_dataframe(): # assert that function gets the full dataframe pd = pytest.importorskip('pandas') X_array = np.array([[0, 1, 2], [2, 4, 6]]).T X_res_first = np.array([[0, 1, 2]]).T X_df = pd.DataFrame(X_array, columns=['first', 'second']) def func(X): assert_array_equal(X.columns, X_df.columns) assert_array_equal(X.values, X_df.values) return ['first'] ct = ColumnTransformer([('trans', Trans(), func)], remainder='drop') assert_array_equal(ct.fit_transform(X_df), X_res_first) assert_array_equal(ct.fit(X_df).transform(X_df), X_res_first) assert callable(ct.transformers[0][2]) assert ct.transformers_[0][2] == ['first'] def test_column_transformer_negative_column_indexes(): X = np.random.randn(2, 2) X_categories = np.array([[1], [2]]) X = np.concatenate([X, X_categories], axis=1) ohe = OneHotEncoder() tf_1 = ColumnTransformer([('ohe', ohe, [-1])], remainder='passthrough') tf_2 = ColumnTransformer([('ohe', ohe, [2])], remainder='passthrough') assert_array_equal(tf_1.fit_transform(X), tf_2.fit_transform(X)) @pytest.mark.parametrize("array_type", [np.asarray, sparse.csr_matrix]) def test_column_transformer_mask_indexing(array_type): # Regression test for #14510 # Boolean array-like does not behave as boolean array with NumPy < 1.12 # and sparse matrices as well X = np.transpose([[1, 2, 3], [4, 5, 6], [5, 6, 7], [8, 9, 10]]) X = array_type(X) column_transformer = ColumnTransformer( [('identity', FunctionTransformer(), [False, True, 
False, True])] ) X_trans = column_transformer.fit_transform(X) assert X_trans.shape == (3, 2) def test_n_features_in(): # make sure n_features_in is what is passed as input to the column # transformer. X = [[1, 2], [3, 4], [5, 6]] ct = ColumnTransformer([('a', DoubleTrans(), [0]), ('b', DoubleTrans(), [1])]) assert not hasattr(ct, 'n_features_in_') ct.fit(X) assert ct.n_features_in_ == 2 @pytest.mark.parametrize('cols, pattern, include, exclude', [ (['col_int', 'col_float'], None, np.number, None), (['col_int', 'col_float'], None, None, object), (['col_int', 'col_float'], None, [int, float], None), (['col_str'], None, [object], None), (['col_str'], None, object, None), (['col_float'], None, float, None), (['col_float'], 'at$', [np.number], None), (['col_int'], None, [int], None), (['col_int'], '^col_int', [np.number], None), (['col_float', 'col_str'], 'float|str', None, None), (['col_str'], '^col_s', None, [int]), ([], 'str$', float, None), (['col_int', 'col_float', 'col_str'], None, [np.number, object], None), ]) def test_make_column_selector_with_select_dtypes(cols, pattern, include, exclude): pd = pytest.importorskip('pandas') X_df = pd.DataFrame({ 'col_int': np.array([0, 1, 2], dtype=int), 'col_float': np.array([0.0, 1.0, 2.0], dtype=float), 'col_str': ["one", "two", "three"], }, columns=['col_int', 'col_float', 'col_str']) selector = make_column_selector( dtype_include=include, dtype_exclude=exclude, pattern=pattern) assert_array_equal(selector(X_df), cols) def test_column_transformer_with_make_column_selector(): # Functional test for column transformer + column selector pd = pytest.importorskip('pandas') X_df = pd.DataFrame({ 'col_int': np.array([0, 1, 2], dtype=int), 'col_float': np.array([0.0, 1.0, 2.0], dtype=float), 'col_cat': ["one", "two", "one"], 'col_str': ["low", "middle", "high"] }, columns=['col_int', 'col_float', 'col_cat', 'col_str']) X_df['col_str'] = X_df['col_str'].astype('category') cat_selector = make_column_selector(dtype_include=['category', object]) num_selector = make_column_selector(dtype_include=np.number) ohe = OneHotEncoder() scaler = StandardScaler() ct_selector = make_column_transformer((ohe, cat_selector), (scaler, num_selector)) ct_direct = make_column_transformer((ohe, ['col_cat', 'col_str']), (scaler, ['col_float', 'col_int'])) X_selector = ct_selector.fit_transform(X_df) X_direct = ct_direct.fit_transform(X_df) assert_allclose(X_selector, X_direct) def test_make_column_selector_error(): selector = make_column_selector(dtype_include=np.number) X = np.array([[0.1, 0.2]]) msg = ("make_column_selector can only be applied to pandas dataframes") with pytest.raises(ValueError, match=msg): selector(X) def test_make_column_selector_pickle(): pd = pytest.importorskip('pandas') X_df = pd.DataFrame({ 'col_int': np.array([0, 1, 2], dtype=int), 'col_float': np.array([0.0, 1.0, 2.0], dtype=float), 'col_str': ["one", "two", "three"], }, columns=['col_int', 'col_float', 'col_str']) selector = make_column_selector(dtype_include=[object]) selector_picked = pickle.loads(pickle.dumps(selector)) assert_array_equal(selector(X_df), selector_picked(X_df)) @pytest.mark.parametrize( 'empty_col', [[], np.array([], dtype=int), lambda x: []], ids=['list', 'array', 'callable'] ) def test_feature_names_empty_columns(empty_col): pd = pytest.importorskip('pandas') df = pd.DataFrame({"col1": ["a", "a", "b"], "col2": ["z", "z", "z"]}) ct = ColumnTransformer( transformers=[ ("ohe", OneHotEncoder(), ["col1", "col2"]), ("empty_features", OneHotEncoder(), empty_col), ], ) ct.fit(df) assert 
ct.get_feature_names() == ['ohe__x0_a', 'ohe__x0_b', 'ohe__x1_z'] @pytest.mark.parametrize('remainder', ["passthrough", StandardScaler()]) def test_sk_visual_block_remainder(remainder): # remainder='passthrough' or an estimator will be shown in repr_html ohe = OneHotEncoder() ct = ColumnTransformer(transformers=[('ohe', ohe, ["col1", "col2"])], remainder=remainder) visual_block = ct._sk_visual_block_() assert visual_block.names == ('ohe', 'remainder') assert visual_block.name_details == (['col1', 'col2'], '') assert visual_block.estimators == (ohe, remainder) def test_sk_visual_block_remainder_drop(): # remainder='drop' is not shown in repr_html ohe = OneHotEncoder() ct = ColumnTransformer(transformers=[('ohe', ohe, ["col1", "col2"])]) visual_block = ct._sk_visual_block_() assert visual_block.names == ('ohe',) assert visual_block.name_details == (['col1', 'col2'],) assert visual_block.estimators == (ohe,) @pytest.mark.parametrize('remainder', ["passthrough", StandardScaler()]) def test_sk_visual_block_remainder_fitted_pandas(remainder): # Remainder shows the columns after fitting pd = pytest.importorskip('pandas') ohe = OneHotEncoder() ct = ColumnTransformer(transformers=[('ohe', ohe, ["col1", "col2"])], remainder=remainder) df = pd.DataFrame({"col1": ["a", "b", "c"], "col2": ["z", "z", "z"], "col3": [1, 2, 3], "col4": [3, 4, 5]}) ct.fit(df) visual_block = ct._sk_visual_block_() assert visual_block.names == ('ohe', 'remainder') assert visual_block.name_details == (['col1', 'col2'], ['col3', 'col4']) assert visual_block.estimators == (ohe, remainder) @pytest.mark.parametrize('remainder', ["passthrough", StandardScaler()]) def test_sk_visual_block_remainder_fitted_numpy(remainder): # Remainder shows the indices after fitting X = np.array([[1, 2, 3], [4, 5, 6]], dtype=float) scaler = StandardScaler() ct = ColumnTransformer(transformers=[('scale', scaler, [0, 2])], remainder=remainder) ct.fit(X) visual_block = ct._sk_visual_block_() assert visual_block.names == ('scale', 'remainder') assert visual_block.name_details == ([0, 2], [1]) assert visual_block.estimators == (scaler, remainder) @pytest.mark.parametrize("explicit_colname", ['first', 'second', 0, 1]) @pytest.mark.parametrize("remainder", [Trans(), 'passthrough', 'drop']) def test_column_transformer_reordered_column_names_remainder(explicit_colname, remainder): """Test the interaction between remainder and column transformer""" pd = pytest.importorskip('pandas') X_fit_array = np.array([[0, 1, 2], [2, 4, 6]]).T X_fit_df = pd.DataFrame(X_fit_array, columns=['first', 'second']) X_trans_array = np.array([[2, 4, 6], [0, 1, 2]]).T X_trans_df = pd.DataFrame(X_trans_array, columns=['second', 'first']) tf = ColumnTransformer([('bycol', Trans(), explicit_colname)], remainder=remainder) tf.fit(X_fit_df) X_fit_trans = tf.transform(X_fit_df) # Changing the order still works X_trans = tf.transform(X_trans_df) assert_allclose(X_trans, X_fit_trans) # extra columns are ignored X_extended_df = X_fit_df.copy() X_extended_df['third'] = [3, 6, 9] X_trans = tf.transform(X_extended_df) assert_allclose(X_trans, X_fit_trans) if isinstance(explicit_colname, str): # Raise error if columns are specified by names but input only allows # to specify by position, e.g. numpy array instead of a pandas df. 
        X_array = X_fit_array.copy()
        err_msg = 'Specifying the columns'
        with pytest.raises(ValueError, match=err_msg):
            tf.transform(X_array)


def test_feature_name_validation_missing_columns_drop_passthough():
    """Test the interaction between {'drop', 'passthrough'} and
    missing column names."""
    pd = pytest.importorskip("pandas")

    X = np.ones(shape=(3, 4))
    df = pd.DataFrame(X, columns=['a', 'b', 'c', 'd'])

    df_dropped = df.drop('c', axis=1)

    # with remainder='passthrough', all columns seen during `fit` must be
    # present
    tf = ColumnTransformer([('bycol', Trans(), [1])], remainder='passthrough')
    tf.fit(df)
    msg = r"columns are missing: {'c'}"
    with pytest.raises(ValueError, match=msg):
        tf.transform(df_dropped)

    # with remainder='drop', it is allowed to have column 'c' missing
    tf = ColumnTransformer([('bycol', Trans(), [1])], remainder='drop')
    tf.fit(df)

    df_dropped_trans = tf.transform(df_dropped)
    df_fit_trans = tf.transform(df)
    assert_allclose(df_dropped_trans, df_fit_trans)

    # bycol drops 'c', thus it is allowed for 'c' to be missing
    tf = ColumnTransformer([('bycol', 'drop', ['c'])],
                           remainder='passthrough')
    tf.fit(df)
    df_dropped_trans = tf.transform(df_dropped)
    df_fit_trans = tf.transform(df)
    assert_allclose(df_dropped_trans, df_fit_trans)


@pytest.mark.parametrize("selector", [[], [False, False]])
def test_get_feature_names_empty_selection(selector):
    """Test that get_feature_names is only called for transformers that
    were selected. Non-regression test for #19550.
    """
    ct = ColumnTransformer([('ohe', OneHotEncoder(drop='first'), selector)])
    ct.fit([[1, 2], [3, 4]])
    assert ct.get_feature_names() == []
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models


class Migration(SchemaMigration):

    def forwards(self, orm):
        # Adding index on 'Comment', fields ['visibility']
        db.create_index('canvas_comment', ['visibility'])

        # Adding index on 'Category', fields ['visibility']
        db.create_index('canvas_category', ['visibility'])

        # Adding index on 'Content', fields ['visibility']
        db.create_index('canvas_content', ['visibility'])

    def backwards(self, orm):
        # Removing index on 'Content', fields ['visibility']
        db.delete_index('canvas_content', ['visibility'])

        # Removing index on 'Category', fields ['visibility']
        db.delete_index('canvas_category', ['visibility'])

        # Removing index on 'Comment', fields ['visibility']
        db.delete_index('canvas_comment', ['visibility'])

    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '254', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'canvas.apiapp': {
            'Meta': {'object_name': 'APIApp'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
        },
        'canvas.apiauthtoken': {
            'Meta': {'unique_together': "(('user', 'app'),)", 'object_name': 'APIAuthToken'},
            'app': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['canvas.APIApp']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'token':
('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}) }, 'canvas.bestof': { 'Meta': {'object_name': 'BestOf'}, 'category': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'best_of'", 'null': 'True', 'blank': 'True', 'to': "orm['canvas.Category']"}), 'chosen_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}), 'comment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'best_of'", 'to': "orm['canvas.Comment']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'timestamp': ('canvas.util.UnixTimestampField', [], {}) }, 'canvas.category': { 'Meta': {'object_name': 'Category'}, 'description': ('django.db.models.fields.CharField', [], {'max_length': '140'}), 'founded': ('django.db.models.fields.FloatField', [], {'default': '1298956320'}), 'founder': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'founded_groups'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'moderators': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'moderated_categories'", 'symmetrical': 'False', 'to': "orm['auth.User']"}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20'}), 'visibility': ('django.db.models.fields.IntegerField', [], {'default': '0', 'db_index': 'True'}) }, 'canvas.comment': { 'Meta': {'object_name': 'Comment'}, 'anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'author': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'comments'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"}), 'category': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'comments'", 'null': 'True', 'blank': 'True', 'to': "orm['canvas.Category']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'ip': ('django.db.models.fields.IPAddressField', [], {'default': "'0.0.0.0'", 'max_length': '15'}), 'judged': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'ot_hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'parent_comment': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'replies'", 'null': 'True', 'blank': 'True', 'to': "orm['canvas.Comment']"}), 'parent_content': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'comments'", 'null': 'True', 'to': "orm['canvas.Content']"}), 'posted_on_quest_of_the_day': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'replied_comment': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['canvas.Comment']", 'null': 'True', 'blank': 'True'}), 'reply_content': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'used_in_comments'", 'null': 'True', 'to': "orm['canvas.Content']"}), 'reply_text': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'blank': 'True'}), 'score': ('django.db.models.fields.FloatField', [], {'default': '0', 'db_index': 'True'}), 'skip_moderation': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'timestamp': ('canvas.util.UnixTimestampField', [], {'default': '0'}), 'title': 
('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}), 'visibility': ('django.db.models.fields.IntegerField', [], {'default': '0', 'db_index': 'True'}) }, 'canvas.commentflag': { 'Meta': {'object_name': 'CommentFlag'}, 'comment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'flags'", 'to': "orm['canvas.Comment']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}), 'timestamp': ('canvas.util.UnixTimestampField', [], {}), 'type_id': ('django.db.models.fields.IntegerField', [], {}), 'undone': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'flags'", 'to': "orm['auth.User']"}) }, 'canvas.commentmoderationlog': { 'Meta': {'object_name': 'CommentModerationLog'}, 'comment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['canvas.Comment']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'moderator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}), 'note': ('django.db.models.fields.TextField', [], {}), 'timestamp': ('canvas.util.UnixTimestampField', [], {}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'moderated_comments_log'", 'to': "orm['auth.User']"}), 'visibility': ('django.db.models.fields.IntegerField', [], {}) }, 'canvas.commentpin': { 'Meta': {'object_name': 'CommentPin'}, 'auto': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'comment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['canvas.Comment']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'timestamp': ('canvas.util.UnixTimestampField', [], {}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}) }, 'canvas.commentsticker': { 'Meta': {'object_name': 'CommentSticker'}, 'comment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'stickers'", 'to': "orm['canvas.Comment']"}), 'epic_message': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '140', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}), 'timestamp': ('canvas.util.UnixTimestampField', [], {}), 'type_id': ('django.db.models.fields.IntegerField', [], {}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}) }, 'canvas.commentstickerlog': { 'Meta': {'object_name': 'CommentStickerLog'}, 'comment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['canvas.Comment']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}) }, 'canvas.content': { 'Meta': {'object_name': 'Content'}, 'alpha': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'animated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'primary_key': 'True'}), 'ip': ('django.db.models.fields.IPAddressField', [], {'default': "'0.0.0.0'", 'max_length': '15'}), 'remix_of': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'remixes'", 
'null': 'True', 'to': "orm['canvas.Content']"}), 'remix_text': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1000', 'blank': 'True'}), 'source_url': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '4000', 'blank': 'True'}), 'stamps_used': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'used_as_stamp'", 'blank': 'True', 'to': "orm['canvas.Content']"}), 'timestamp': ('canvas.util.UnixTimestampField', [], {}), 'url_mapping': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['canvas.ContentUrlMapping']", 'null': 'True', 'blank': 'True'}), 'visibility': ('django.db.models.fields.IntegerField', [], {'default': '0', 'db_index': 'True'}) }, 'canvas.contenturlmapping': { 'Meta': {'object_name': 'ContentUrlMapping'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}) }, 'canvas.emailunsubscribe': { 'Meta': {'object_name': 'EmailUnsubscribe'}, 'email': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}) }, 'canvas.externalcontent': { 'Meta': {'object_name': 'ExternalContent'}, '_data': ('django.db.models.fields.TextField', [], {'default': "'{}'"}), 'content_type': ('django.db.models.fields.CharField', [], {'max_length': '2'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'parent_comment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'external_content'", 'to': "orm['canvas.Comment']"}), 'source_url': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '4000', 'null': 'True', 'blank': 'True'}) }, 'canvas.facebookinvite': { 'Meta': {'object_name': 'FacebookInvite'}, 'fb_message_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'invited_fbid': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'invitee': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'facebook_invited_from'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"}), 'inviter': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'facebook_sent_invites'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"}) }, 'canvas.facebookuser': { 'Meta': {'object_name': 'FacebookUser'}, 'email': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'fb_uid': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'gender': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'last_invited': ('canvas.util.UnixTimestampField', [], {'default': '0'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'null': 'True', 'blank': 'True'}) }, 'canvas.followcategory': { 'Meta': {'unique_together': "(('user', 'category'),)", 'object_name': 'FollowCategory'}, 'category': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'followers'", 'to': "orm['canvas.Category']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'user': 
('django.db.models.fields.related.ForeignKey', [], {'related_name': "'following'", 'to': "orm['auth.User']"}) }, 'canvas.invitecode': { 'Meta': {'object_name': 'InviteCode'}, 'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'invitee': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'invited_from'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"}), 'inviter': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'sent_invites'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"}) }, 'canvas.remixplugin': { 'Meta': {'object_name': 'RemixPlugin'}, 'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 's3md5': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'timestamp': ('canvas.util.UnixTimestampField', [], {'default': '0'}) }, 'canvas.stashcontent': { 'Meta': {'object_name': 'StashContent'}, 'content': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['canvas.Content']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}) }, 'canvas.userinfo': { 'Meta': {'object_name': 'UserInfo'}, 'avatar': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['canvas.Content']", 'null': 'True'}), 'bio_text': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'blank': 'True'}), 'enable_timeline': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'enable_timeline_posts': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'facebook_id': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'free_invites': ('django.db.models.fields.IntegerField', [], {'default': '10'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'invite_bypass': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}), 'is_qa': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'post_anonymously': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'profile_image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['canvas.Comment']", 'null': 'True'}), 'trust_changed': ('canvas.util.UnixTimestampField', [], {'null': 'True', 'blank': 'True'}), 'trusted': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}), 'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'}) }, 'canvas.usermoderationlog': { 'Meta': {'object_name': 'UserModerationLog'}, 'action': ('django.db.models.fields.IntegerField', [], {}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'moderator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}), 'note': ('django.db.models.fields.TextField', [], {}), 'timestamp': ('canvas.util.UnixTimestampField', [], {}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'moderation_log'", 'to': "orm['auth.User']"}) }, 'canvas.userwarning': { 'Meta': {'object_name': 'UserWarning'}, 'comment': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['canvas.Comment']", 'null': 
'True', 'blank': 'True'}), 'confirmed': ('canvas.util.UnixTimestampField', [], {'default': '0'}), 'custom_message': ('django.db.models.fields.TextField', [], {}), 'disable_user': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'issued': ('canvas.util.UnixTimestampField', [], {}), 'stock_message': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_warnings'", 'to': "orm['auth.User']"}), 'viewed': ('canvas.util.UnixTimestampField', [], {'default': '0'}) }, 'canvas.welcomeemailrecipient': { 'Meta': {'object_name': 'WelcomeEmailRecipient'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'recipient': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'}) }, 'canvas_auth.user': { 'Meta': {'object_name': 'User', 'db_table': "'auth_user'", '_ormbases': ['auth.User'], 'proxy': 'True'} }, 'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) } } complete_apps = ['canvas']
#!/usr/bin/python # Copyright (c) 2015 Matthew Earl # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN # NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, # DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE # USE OR OTHER DEALINGS IN THE SOFTWARE. """ See the following post for a description of the code: http://matthewearl.github.io/2015/12/10/gchq-xmas-card/ To run the script you'll need to install `pycosat`. Invoke with: ./gchq-xmas.py Modify the variables below to have the script work on other Nonogram puzzles. """ import pycosat # Problem definition. WIDTH = 25 HEIGHT = 25 ROW_RUNS = [ [7, 3, 1, 1, 7,], [1, 1, 2, 2, 1, 1,], [1, 3, 1, 3, 1, 1, 3, 1,], [1, 3, 1, 1, 6, 1, 3, 1,], [1, 3, 1, 5, 2, 1, 3, 1,], [1, 1, 2, 1, 1,], [7, 1, 1, 1, 1, 1, 7,], [3, 3,], [1, 2, 3, 1, 1, 3, 1, 1, 2,], [1, 1, 3, 2, 1, 1,], [4, 1, 4, 2, 1, 2,], [1, 1, 1, 1, 1, 4, 1, 3,], [2, 1, 1, 1, 2, 5,], [3, 2, 2, 6, 3, 1,], [1, 9, 1, 1, 2, 1,], [2, 1, 2, 2, 3, 1,], [3, 1, 1, 1, 1, 5, 1,], [1, 2, 2, 5,], [7, 1, 2, 1, 1, 1, 3,], [1, 1, 2, 1, 2, 2, 1,], [1, 3, 1, 4, 5, 1,], [1, 3, 1, 3, 10, 2,], [1, 3, 1, 1, 6, 6,], [1, 1, 2, 1, 1, 2,], [7, 2, 1, 2, 5,], ] COL_RUNS = [ [7, 2, 1, 1, 7,], [1, 1, 2, 2, 1, 1,], [1, 3, 1, 3, 1, 3, 1, 3, 1,], [1, 3, 1, 1, 5, 1, 3, 1,], [1, 3, 1, 1, 4, 1, 3, 1,], [1, 1, 1, 2, 1, 1,], [7, 1, 1, 1, 1, 1, 7,], [1, 1, 3,], [2, 1, 2, 1, 8, 2, 1,], [2, 2, 1, 2, 1, 1, 1, 2,], [1, 7, 3, 2, 1,], [1, 2, 3, 1, 1, 1, 1, 1,], [4, 1, 1, 2, 6,], [3, 3, 1, 1, 1, 3, 1,], [1, 2, 5, 2, 2,], [2, 2, 1, 1, 1, 1, 1, 2, 1,], [1, 3, 3, 2, 1, 8, 1,], [6, 2, 1,], [7, 1, 4, 1, 1, 3,], [1, 1, 1, 1, 4,], [1, 3, 1, 3, 7, 1,], [1, 3, 1, 1, 1, 2, 1, 1, 4,], [1, 3, 1, 4, 3, 3,], [1, 1, 2, 2, 2, 6, 1,], [7, 1, 3, 2, 1, 1,], ] GIVENS = [ (3, 3), (3, 4), (3, 12), (3, 13), (3, 21), (8, 6), (8, 7), (8, 10), (8, 14), (8, 15), (8, 18), (16, 6), (16, 11), (16, 16), (16, 20), (21, 3), (21, 4), (21, 9), (21, 10), (21, 15), (21, 20), (21, 21), ] assert len(COL_RUNS) >= WIDTH assert len(ROW_RUNS) >= HEIGHT # Utility classes and functions. 
class Var(object): _num_vars = 0 idx_to_var = {} @staticmethod def _add_var(var): Var._num_vars += 1 idx = Var._num_vars Var.idx_to_var[idx] = var return idx def __init__(self, label): self.idx = self._add_var(self) self.label = label def __repr__(self): return "{}(idx={!r}, name={!r}".format(type(self).__name__, self.idx, self.label) def __str__(self): return self.label class ShadedVar(Var): def __init__(self, row, col): super(ShadedVar, self).__init__("Shaded @ {}, {}".format(row, col)) self.row = row self.col = col def pretty_print_solution(sol): positive_indices = {t for t in sol if t > 0} for row in range(HEIGHT): print "".join(".#"[shaded_vars[row, col].idx in positive_indices] for col in range(WIDTH)) print # Variable definitions. shaded_vars = {(row, col): ShadedVar(row, col) for row in range(HEIGHT) for col in range(WIDTH)} row_run_vars = { (row, run_idx, start_col): Var("Row,run {},{} starts at col {}".format( row, run_idx, start_col)) for row in range(HEIGHT) for run_idx in range(len(ROW_RUNS[row])) for start_col in range(WIDTH) } col_run_vars = { (col, run_idx, start_row): Var("Col,run {},{} starts at row {}".format( col, run_idx, start_row)) for col in range(WIDTH) for run_idx in range(len(COL_RUNS[col])) for start_row in range(HEIGHT) } # Functions for generating clauses. # A row run being present at a particular column implies the corresponding # cells are shaded. def row_run_implies_shaded(): clauses = [] for (row, run_idx, start_col), run_var in row_run_vars.items(): run_len = ROW_RUNS[row][run_idx] for col in range(start_col, min(start_col + run_len, WIDTH)): clauses.append([-run_var.idx, shaded_vars[row, col].idx]) return clauses # Similar for column runs. def col_run_implies_shaded(): clauses = [] for (col, run_idx, start_row), run_var in col_run_vars.items(): run_len = COL_RUNS[col][run_idx] for row in range(start_row, min(start_row + run_len, HEIGHT)): clauses.append([-run_var.idx, shaded_vars[row, col].idx]) return clauses # Conversely, a cell being shaded implies a row run must exist that covers it. def shaded_implies_row_run(): clauses = [] for (row, col), shaded_var in shaded_vars.items(): clause = [-shaded_var.idx] for run_idx, run_len in enumerate(ROW_RUNS[row]): clause += [row_run_vars[row, run_idx, start_col].idx for start_col in range(max(0, col - run_len + 1), col + 1)] clauses.append(clause) return clauses # Similarly for column runs. def shaded_implies_col_run(): clauses = [] for (row, col), shaded_var in shaded_vars.items(): clause = [-shaded_var.idx] for run_idx, run_len in enumerate(COL_RUNS[col]): clause += [col_run_vars[col, run_idx, start_row].idx for start_row in range(max(0, row - run_len + 1), row + 1)] clauses.append(clause) return clauses # A row run being in a particular position means the next row run can't be in # an earlier position. def row_run_ordering(): clauses = [] for (row, run_idx, start_col), run_var in row_run_vars.items(): if run_idx < len(ROW_RUNS[row]) - 1: first_valid_col = start_col + ROW_RUNS[row][run_idx] + 1 for other_start_col in range(min(first_valid_col, WIDTH)): other_run_var = row_run_vars[row, run_idx + 1, other_start_col] clauses.append([-run_var.idx, -other_run_var.idx]) return clauses # Similarly for column runs. 
def col_run_ordering(): clauses = [] for (col, run_idx, start_row), run_var in col_run_vars.items(): if run_idx < len(COL_RUNS[col]) - 1: first_valid_row = start_row + COL_RUNS[col][run_idx] + 1 for other_start_row in range(min(first_valid_row, HEIGHT)): other_run_var = col_run_vars[col, run_idx + 1, other_start_row] clauses.append([-run_var.idx, -other_run_var.idx]) return clauses # A row run can only be in at most one position. def row_run_at_most_one_position(): clauses = [] for (row, run_idx, start_col), run_var in row_run_vars.items(): for other_start_col in range(WIDTH): if other_start_col != start_col: other_run_var = row_run_vars[row, run_idx, other_start_col] clauses.append([-run_var.idx, -other_run_var.idx]) return clauses # Similarly for column runs. def col_run_at_most_one_position(): clauses = [] for (col, run_idx, start_row), run_var in col_run_vars.items(): for other_start_row in range(HEIGHT): if other_start_row != start_row: other_run_var = col_run_vars[col, run_idx, other_start_row] clauses.append([-run_var.idx, -other_run_var.idx]) return clauses # Each row run must be in at least one position. def row_run_at_least_one_position(): clauses = [] for row in range(HEIGHT): for run_idx, run_len in enumerate(ROW_RUNS[row]): clause = [] for start_col in range(WIDTH): clause.append(row_run_vars[row, run_idx, start_col].idx) clauses.append(clause) return clauses # Similarly for column runs. def col_run_at_least_one_position(): clauses = [] for col in range(WIDTH): for run_idx, run_len in enumerate(COL_RUNS[col]): clause = [] for start_row in range(HEIGHT): clause.append(col_run_vars[col, run_idx, start_row].idx) clauses.append(clause) return clauses # Exclude invalid row run positions. def exclude_invalid_row_run_positions(): clauses = [] for row in range(HEIGHT): for run_idx, run_len in enumerate(ROW_RUNS[row]): for start_col in range(WIDTH - run_len + 1, WIDTH): clauses.append([-row_run_vars[row, run_idx, start_col].idx]) return clauses # Similarly for column runs. def exclude_invalid_col_run_positions(): clauses = [] for col in range(WIDTH): for run_idx, run_len in enumerate(COL_RUNS[col]): for start_row in range(HEIGHT - run_len + 1, HEIGHT): clauses.append([-col_run_vars[col, run_idx, start_row].idx]) return clauses # Ensure the given cells are shaded. def fix_givens(): clauses = [] for row, col in GIVENS: clauses.append([shaded_vars[row, col].idx]) return clauses # Put together all the clauses, and then find the results. all_clauses = ( row_run_implies_shaded() + col_run_implies_shaded() + shaded_implies_row_run() + shaded_implies_col_run() + row_run_ordering() + col_run_ordering() + row_run_at_most_one_position() + col_run_at_most_one_position() + row_run_at_least_one_position() + col_run_at_least_one_position() + exclude_invalid_row_run_positions() + exclude_invalid_col_run_positions() + fix_givens() ) for sol_idx, sol in enumerate(pycosat.itersolve(all_clauses)): pretty_print_solution(sol)
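# --- Illustrative sketch (not part of the original script) ------------------
# A tiny demonstration of the pycosat calls used above, so the clause and
# solution conventions are clear without running the full 25x25 puzzle: a
# clause is a list of non-zero ints (positive literal = variable true,
# negative = false), and a satisfying assignment comes back as a list of
# signed ints covering every variable. Call _pycosat_demo() manually to try it.
def _pycosat_demo():
    # (x1 OR x2) AND (NOT x1 OR x2) AND (NOT x2 OR x3): x2 and x3 are forced.
    clauses = [[1, 2], [-1, 2], [-2, 3]]
    solution = pycosat.solve(clauses)
    assert solution != "UNSAT"
    assignment = {abs(v): v > 0 for v in solution}
    assert assignment[2] and assignment[3]
    # itersolve() enumerates every model; the loop above relies on it to show
    # the puzzle has exactly one solution. Here x1 is free, so there are two.
    assert len(list(pycosat.itersolve(clauses))) == 2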
#!/usr/bin/env python # # Copyright 2008 Google Inc. All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided # with the distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import optparse import os from os.path import abspath, join, dirname import sys import subprocess ENABLED_LINT_RULES = """ build/class build/deprecated build/endif_comment build/forward_decl build/include_order build/include_what_you_use build/printf_format build/storage_class legal/copyright readability/boost readability/braces readability/casting readability/check readability/constructors readability/fn_size readability/function readability/multiline_comment readability/multiline_string readability/streams readability/todo readability/utf8 runtime/arrays runtime/casting runtime/deprecated_fn runtime/explicit runtime/int runtime/memset runtime/mutex runtime/nonconf runtime/printf runtime/printf_format runtime/references runtime/rtti runtime/sizeof runtime/string runtime/virtual runtime/vlog whitespace/blank_line whitespace/braces whitespace/comma whitespace/comments whitespace/end_of_line whitespace/ending_newline whitespace/indent whitespace/labels whitespace/line_length whitespace/newline whitespace/operators whitespace/parens whitespace/tab whitespace/todo """.split() class SourceFileProcessor(object): """ Utility class that can run through a directory structure, find all relevant files and invoke a custom check on the files. """ def Run(self, path): all_files = [] for file in self.GetPathsToSearch(): all_files += self.FindFilesIn(join(path, file)) if not self.ProcessFiles(all_files): return False return True def IgnoreDir(self, name): return name.startswith('.') def IgnoreFile(self, name): return name.startswith('.') def FindFilesIn(self, path): result = [] for (root, dirs, files) in os.walk(path): for ignored in [x for x in dirs if self.IgnoreDir(x)]: dirs.remove(ignored) for file in files: if not self.IgnoreFile(file) and self.IsRelevant(file): result.append(join(root, file)) return result class CppLintProcessor(SourceFileProcessor): """ Lint files to check that they follow the google code style. 
""" def IsRelevant(self, name): return name.endswith('.cc') or name.endswith('.h') def IgnoreDir(self, name): return (super(CppLintProcessor, self).IgnoreDir(name) or (name == 'third_party')) def GetPathsToSearch(self): return ['src', 'public', 'samples', join('test', 'cctest')] def ProcessFiles(self, files): filt = '-,' + ",".join(['+' + n for n in ENABLED_LINT_RULES]) command = ['cpplint.py', '--filter', filt] + join(files) process = subprocess.Popen(command) return process.wait() == 0 class CopyrightProcessor(SourceFileProcessor): """ Check that all files include a copyright notice. """ RELEVANT_EXTENSIONS = ['.js', '.cc', '.h', '.py', '.c', 'SConscript', 'SConstruct', '.status'] FILES_TO_IGNORE = ['pcre_chartables.c', 'config.h', 'earley-boyer.js', 'raytrace.js'] def IsRelevant(self, name): if name in CopyrightProcessor.FILES_TO_IGNORE: return False for ext in CopyrightProcessor.RELEVANT_EXTENSIONS: if name.endswith(ext): return True return False def GetPathsToSearch(self): return ['.'] def ProcessContents(self, name, contents): if not 'Copyright' in contents: print "No copyright in %s." % name return False return True def ProcessFiles(self, files): success = True for file in files: try: handle = open(file) contents = handle.read() success = self.ProcessContents(file, contents) and success finally: handle.close() return success def GetOptions(): result = optparse.OptionParser() result.add_option('--no-lint', help="Do not run cpplint", default=False, action="store_true") return result def Main(): workspace = abspath(join(dirname(sys.argv[0]), '..')) parser = GetOptions() (options, args) = parser.parse_args() success = True if not options.no_lint: success = CppLintProcessor().Run(workspace) and success success = CopyrightProcessor().Run(workspace) and success if success: return 0 else: return 1 if __name__ == '__main__': sys.exit(Main())
# Copyright (c) 2014 OpenStack Foundation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils import six from neutron.api.v2 import attributes from neutron.callbacks import events from neutron.callbacks import exceptions from neutron.callbacks import registry from neutron.callbacks import resources from neutron.common import constants as l3_const from neutron.common import exceptions as n_exc from neutron.common import utils as n_utils from neutron.db import l3_attrs_db from neutron.db import l3_db from neutron.db import l3_dvrscheduler_db as l3_dvrsched_db from neutron.db import models_v2 from neutron.extensions import l3 from neutron.extensions import portbindings from neutron.i18n import _LI from neutron import manager from neutron.plugins.common import constants LOG = logging.getLogger(__name__) DEVICE_OWNER_DVR_INTERFACE = l3_const.DEVICE_OWNER_DVR_INTERFACE DEVICE_OWNER_DVR_SNAT = l3_const.DEVICE_OWNER_ROUTER_SNAT FLOATINGIP_AGENT_INTF_KEY = l3_const.FLOATINGIP_AGENT_INTF_KEY DEVICE_OWNER_AGENT_GW = l3_const.DEVICE_OWNER_AGENT_GW SNAT_ROUTER_INTF_KEY = l3_const.SNAT_ROUTER_INTF_KEY router_distributed_opts = [ cfg.BoolOpt('router_distributed', default=False, help=_("System-wide flag to determine the type of router " "that tenants can create. Only admin can override.")), ] cfg.CONF.register_opts(router_distributed_opts) class L3_NAT_with_dvr_db_mixin(l3_db.L3_NAT_db_mixin, l3_attrs_db.ExtraAttributesMixin): """Mixin class to enable DVR support.""" router_device_owners = ( l3_db.L3_NAT_db_mixin.router_device_owners + (DEVICE_OWNER_DVR_INTERFACE, DEVICE_OWNER_DVR_SNAT, DEVICE_OWNER_AGENT_GW)) extra_attributes = ( l3_attrs_db.ExtraAttributesMixin.extra_attributes + [{ 'name': "distributed", 'default': cfg.CONF.router_distributed }]) def _create_router_db(self, context, router, tenant_id): """Create a router db object with dvr additions.""" router['distributed'] = is_distributed_router(router) with context.session.begin(subtransactions=True): router_db = super( L3_NAT_with_dvr_db_mixin, self)._create_router_db( context, router, tenant_id) self._process_extra_attr_router_create(context, router_db, router) return router_db def _validate_router_migration(self, context, router_db, router_res): """Allow centralized -> distributed state transition only.""" if (router_db.extra_attributes.distributed and router_res.get('distributed') is False): LOG.info(_LI("Centralizing distributed router %s " "is not supported"), router_db['id']) raise n_exc.NotSupported(msg=_("Migration from distributed router " "to centralized")) elif (not router_db.extra_attributes.distributed and router_res.get('distributed')): # Notify advanced services of the imminent state transition # for the router. 
try: kwargs = {'context': context, 'router': router_db} registry.notify( resources.ROUTER, events.BEFORE_UPDATE, self, **kwargs) except exceptions.CallbackFailure as e: with excutils.save_and_reraise_exception(): # NOTE(armax): preserve old check's behavior if len(e.errors) == 1: raise e.errors[0].error raise l3.RouterInUse(router_id=router_db['id'], reason=e) def _update_distributed_attr( self, context, router_id, router_db, data, gw_info): """Update the model to support the dvr case of a router.""" if data.get('distributed'): old_owner = l3_const.DEVICE_OWNER_ROUTER_INTF new_owner = DEVICE_OWNER_DVR_INTERFACE for rp in router_db.attached_ports.filter_by(port_type=old_owner): rp.port_type = new_owner rp.port.device_owner = new_owner def _update_router_db(self, context, router_id, data, gw_info): with context.session.begin(subtransactions=True): router_db = super( L3_NAT_with_dvr_db_mixin, self)._update_router_db( context, router_id, data, gw_info) migrating_to_distributed = ( not router_db.extra_attributes.distributed and data.get('distributed') is True) self._validate_router_migration(context, router_db, data) router_db.extra_attributes.update(data) self._update_distributed_attr( context, router_id, router_db, data, gw_info) if migrating_to_distributed: if router_db['gw_port_id']: # If the Legacy router is getting migrated to a DVR # router, make sure to create corresponding # snat interface ports that are to be consumed by # the Service Node. if not self._create_snat_intf_ports_if_not_exists( context.elevated(), router_db): LOG.debug("SNAT interface ports not created: %s", router_db['id']) cur_agents = self.list_l3_agents_hosting_router( context, router_db['id'])['agents'] for agent in cur_agents: self._unbind_router(context, router_db['id'], agent['id']) return router_db def _delete_current_gw_port(self, context, router_id, router, new_network): super(L3_NAT_with_dvr_db_mixin, self)._delete_current_gw_port(context, router_id, router, new_network) if router.extra_attributes.distributed: self.delete_csnat_router_interface_ports( context.elevated(), router) def _create_gw_port(self, context, router_id, router, new_network, ext_ips): super(L3_NAT_with_dvr_db_mixin, self)._create_gw_port(context, router_id, router, new_network, ext_ips) # Make sure that the gateway port exists before creating the # snat interface ports for distributed router. if router.extra_attributes.distributed and router.gw_port: snat_p_list = self._create_snat_intf_ports_if_not_exists( context.elevated(), router) if not snat_p_list: LOG.debug("SNAT interface ports not created: %s", snat_p_list) def _get_device_owner(self, context, router=None): """Get device_owner for the specified router.""" router_is_uuid = isinstance(router, six.string_types) if router_is_uuid: router = self._get_router(context, router) if is_distributed_router(router): return DEVICE_OWNER_DVR_INTERFACE return super(L3_NAT_with_dvr_db_mixin, self)._get_device_owner(context, router) def _get_interface_ports_for_network(self, context, network_id): router_intf_qry = context.session.query(l3_db.RouterPort) router_intf_qry = router_intf_qry.join(models_v2.Port) return router_intf_qry.filter( models_v2.Port.network_id == network_id, l3_db.RouterPort.port_type.in_(l3_const.ROUTER_INTERFACE_OWNERS) ) def _update_fip_assoc(self, context, fip, floatingip_db, external_port): """Override to create and delete floating agent gw port for DVR. Floating IP Agent gateway port will be created when a floatingIP association happens. 
Floating IP Agent gateway port will be deleted when a floatingIP disassociation happens. """ fip_port = fip.get('port_id') unused_fip_agent_gw_port = ( fip_port is None and floatingip_db['fixed_port_id']) if unused_fip_agent_gw_port and floatingip_db.get('router_id'): admin_ctx = context.elevated() router_dict = self.get_router( admin_ctx, floatingip_db['router_id']) # Check if distributed router and then delete the # FloatingIP agent gateway port if router_dict.get('distributed'): self._clear_unused_fip_agent_gw_port( admin_ctx, floatingip_db) super(L3_NAT_with_dvr_db_mixin, self)._update_fip_assoc( context, fip, floatingip_db, external_port) associate_fip = fip_port and floatingip_db['id'] if associate_fip and floatingip_db.get('router_id'): admin_ctx = context.elevated() router_dict = self.get_router( admin_ctx, floatingip_db['router_id']) # Check if distributed router and then create the # FloatingIP agent gateway port if router_dict.get('distributed'): vm_hostid = self._get_vm_port_hostid( context, fip_port) if vm_hostid: # FIXME (Swami): This FIP Agent Gateway port should be # created only once and there should not be a duplicate # for the same host. Until we find a good solution for # augmenting multiple server requests we should use the # existing flow. fip_agent_port = ( self.create_fip_agent_gw_port_if_not_exists( admin_ctx, external_port['network_id'], vm_hostid)) LOG.debug("FIP Agent gateway port: %s", fip_agent_port) def _clear_unused_fip_agent_gw_port( self, context, floatingip_db): """Helper function to check for fip agent gw port and delete. This function checks on compute nodes to make sure if there are any VMs using the FIP agent gateway port. If no VMs are using the FIP agent gateway port, it will go ahead and delete the FIP agent gateway port. If even a single VM is using the port it will not delete. 
""" fip_hostid = self._get_vm_port_hostid( context, floatingip_db['fixed_port_id']) if fip_hostid and self._check_fips_availability_on_host_ext_net( context, fip_hostid, floatingip_db['floating_network_id']): LOG.debug('Deleting the Agent GW Port for ext-net: ' '%s', floatingip_db['floating_network_id']) self._delete_floatingip_agent_gateway_port( context, fip_hostid, floatingip_db['floating_network_id']) def delete_floatingip(self, context, id): floatingip = self._get_floatingip(context, id) if floatingip['fixed_port_id']: admin_ctx = context.elevated() self._clear_unused_fip_agent_gw_port( admin_ctx, floatingip) super(L3_NAT_with_dvr_db_mixin, self).delete_floatingip(context, id) def _get_floatingip_on_port(self, context, port_id=None): """Helper function to retrieve the fip associated with port.""" fip_qry = context.session.query(l3_db.FloatingIP) floating_ip = fip_qry.filter_by(fixed_port_id=port_id) return floating_ip.first() def disassociate_floatingips(self, context, port_id, do_notify=True): """Override disassociate floatingips to delete fip agent gw port.""" with context.session.begin(subtransactions=True): fip = self._get_floatingip_on_port( context, port_id=port_id) if fip: admin_ctx = context.elevated() self._clear_unused_fip_agent_gw_port( admin_ctx, fip) return super(L3_NAT_with_dvr_db_mixin, self).disassociate_floatingips(context, port_id, do_notify=do_notify) def add_router_interface(self, context, router_id, interface_info): add_by_port, add_by_sub = self._validate_interface_info(interface_info) router = self._get_router(context, router_id) device_owner = self._get_device_owner(context, router) # This should be True unless adding an IPv6 prefix to an existing port new_port = True if add_by_port: port, subnets = self._add_interface_by_port( context, router, interface_info['port_id'], device_owner) elif add_by_sub: port, subnets, new_port = self._add_interface_by_subnet( context, router, interface_info['subnet_id'], device_owner) if new_port: with context.session.begin(subtransactions=True): router_port = l3_db.RouterPort( port_id=port['id'], router_id=router.id, port_type=device_owner ) context.session.add(router_port) if router.extra_attributes.distributed and router.gw_port: self._add_csnat_router_interface_port( context.elevated(), router, port['network_id'], port['fixed_ips'][-1]['subnet_id']) router_interface_info = self._make_router_interface_info( router_id, port['tenant_id'], port['id'], subnets[-1]['id'], [subnet['id'] for subnet in subnets]) self.notify_router_interface_action( context, router_interface_info, 'add') return router_interface_info def _port_has_ipv6_address(self, port): """Overridden to return False if DVR SNAT port.""" if port['device_owner'] == DEVICE_OWNER_DVR_SNAT: return False return super(L3_NAT_with_dvr_db_mixin, self)._port_has_ipv6_address(port) def remove_router_interface(self, context, router_id, interface_info): remove_by_port, remove_by_subnet = ( self._validate_interface_info(interface_info, for_removal=True) ) port_id = interface_info.get('port_id') subnet_id = interface_info.get('subnet_id') router = self._get_router(context, router_id) device_owner = self._get_device_owner(context, router) if remove_by_port: port, subnets = self._remove_interface_by_port( context, router_id, port_id, subnet_id, device_owner) # remove_by_subnet is not used here, because the validation logic of # _validate_interface_info ensures that at least one of remote_by_* # is True. 
else: port, subnets = self._remove_interface_by_subnet( context, router_id, subnet_id, device_owner) if router.extra_attributes.distributed: if router.gw_port: self.delete_csnat_router_interface_ports( context.elevated(), router, subnet_id=subnet_id) plugin = manager.NeutronManager.get_service_plugins().get( constants.L3_ROUTER_NAT) l3_agents = plugin.get_l3_agents_hosting_routers(context, [router_id]) for l3_agent in l3_agents: if not plugin.check_ports_exist_on_l3agent(context, l3_agent, router_id): plugin.remove_router_from_l3_agent( context, l3_agent['id'], router_id) router_interface_info = self._make_router_interface_info( router_id, port['tenant_id'], port['id'], subnets[0]['id'], [subnet['id'] for subnet in subnets]) self.notify_router_interface_action( context, router_interface_info, 'remove') return router_interface_info def _get_snat_sync_interfaces(self, context, router_ids): """Query router interfaces that relate to list of router_ids.""" if not router_ids: return [] qry = context.session.query(l3_db.RouterPort) qry = qry.filter( l3_db.RouterPort.router_id.in_(router_ids), l3_db.RouterPort.port_type == DEVICE_OWNER_DVR_SNAT ) interfaces = collections.defaultdict(list) for rp in qry: interfaces[rp.router_id].append( self._core_plugin._make_port_dict(rp.port, None)) LOG.debug("Return the SNAT ports: %s", interfaces) return interfaces def _build_routers_list(self, context, routers, gw_ports): # Perform a single query up front for all routers if not routers: return [] router_ids = [r['id'] for r in routers] snat_binding = l3_dvrsched_db.CentralizedSnatL3AgentBinding query = (context.session.query(snat_binding). filter(snat_binding.router_id.in_(router_ids))).all() bindings = dict((b.router_id, b) for b in query) for rtr in routers: gw_port_id = rtr['gw_port_id'] # Collect gw ports only if available if gw_port_id and gw_ports.get(gw_port_id): rtr['gw_port'] = gw_ports[gw_port_id] if 'enable_snat' in rtr[l3.EXTERNAL_GW_INFO]: rtr['enable_snat'] = ( rtr[l3.EXTERNAL_GW_INFO]['enable_snat']) binding = bindings.get(rtr['id']) if not binding: rtr['gw_port_host'] = None LOG.debug('No snat is bound to router %s', rtr['id']) continue rtr['gw_port_host'] = binding.l3_agent.host return routers def _process_routers(self, context, routers): routers_dict = {} snat_intfs_by_router_id = self._get_snat_sync_interfaces( context, [r['id'] for r in routers]) for router in routers: routers_dict[router['id']] = router if router['gw_port_id']: snat_router_intfs = snat_intfs_by_router_id[router['id']] LOG.debug("SNAT ports returned: %s ", snat_router_intfs) router[SNAT_ROUTER_INTF_KEY] = snat_router_intfs return routers_dict def _process_floating_ips_dvr(self, context, routers_dict, floating_ips, host, agent): fip_sync_interfaces = None LOG.debug("FIP Agent : %s ", agent.id) for floating_ip in floating_ips: router = routers_dict.get(floating_ip['router_id']) if router: router_floatingips = router.get(l3_const.FLOATINGIP_KEY, []) if router['distributed']: if floating_ip.get('host', None) != host: continue LOG.debug("Floating IP host: %s", floating_ip['host']) router_floatingips.append(floating_ip) router[l3_const.FLOATINGIP_KEY] = router_floatingips if not fip_sync_interfaces: fip_sync_interfaces = self._get_fip_sync_interfaces( context, agent.id) LOG.debug("FIP Agent ports: %s", fip_sync_interfaces) router[l3_const.FLOATINGIP_AGENT_INTF_KEY] = ( fip_sync_interfaces) def _get_fip_sync_interfaces(self, context, fip_agent_id): """Query router interfaces that relate to list of router_ids.""" if not 
fip_agent_id: return [] filters = {'device_id': [fip_agent_id], 'device_owner': [DEVICE_OWNER_AGENT_GW]} interfaces = self._core_plugin.get_ports(context.elevated(), filters) LOG.debug("Return the FIP ports: %s ", interfaces) return interfaces def _get_dvr_sync_data(self, context, host, agent, router_ids=None, active=None): routers, interfaces, floating_ips = self._get_router_info_list( context, router_ids=router_ids, active=active, device_owners=l3_const.ROUTER_INTERFACE_OWNERS) port_filter = {portbindings.HOST_ID: [host]} ports = self._core_plugin.get_ports(context, port_filter) port_dict = dict((port['id'], port) for port in ports) # Add the port binding host to the floatingip dictionary for fip in floating_ips: vm_port = port_dict.get(fip['port_id'], None) if vm_port: fip['host'] = self._get_vm_port_hostid(context, fip['port_id'], port=vm_port) routers_dict = self._process_routers(context, routers) self._process_floating_ips_dvr(context, routers_dict, floating_ips, host, agent) ports_to_populate = [] for router in routers_dict.values(): if router.get('gw_port'): ports_to_populate.append(router['gw_port']) if router.get(l3_const.FLOATINGIP_AGENT_INTF_KEY): ports_to_populate += router[l3_const.FLOATINGIP_AGENT_INTF_KEY] if router.get(SNAT_ROUTER_INTF_KEY): ports_to_populate += router[SNAT_ROUTER_INTF_KEY] ports_to_populate += interfaces self._populate_subnets_for_ports(context, ports_to_populate) self._process_interfaces(routers_dict, interfaces) return routers_dict.values() def _get_vm_port_hostid(self, context, port_id, port=None): """Return the portbinding host_id.""" vm_port_db = port or self._core_plugin.get_port(context, port_id) device_owner = vm_port_db['device_owner'] if vm_port_db else "" if (n_utils.is_dvr_serviced(device_owner) or device_owner == DEVICE_OWNER_AGENT_GW): return vm_port_db[portbindings.HOST_ID] def _get_agent_gw_ports_exist_for_network( self, context, network_id, host, agent_id): """Return agent gw port if exist, or None otherwise.""" if not network_id: LOG.debug("Network not specified") return filters = { 'network_id': [network_id], 'device_id': [agent_id], 'device_owner': [DEVICE_OWNER_AGENT_GW] } ports = self._core_plugin.get_ports(context, filters) if ports: return ports[0] def _get_router_ids(self, context): """Function to retrieve router IDs for a context without joins""" query = self._model_query(context, l3_db.Router.id) return [row[0] for row in query] def _check_fips_availability_on_host_ext_net( self, context, host_id, fip_ext_net_id): """Query all floating_ips and filter on host and external net.""" fip_count_on_host = 0 with context.session.begin(subtransactions=True): router_ids = self._get_router_ids(context) floating_ips = self._get_sync_floating_ips(context, router_ids) # Check for the active floatingip in the host for fip in floating_ips: f_host = self._get_vm_port_hostid(context, fip['port_id']) if (f_host == host_id and (fip['floating_network_id'] == fip_ext_net_id)): fip_count_on_host += 1 # If fip_count greater than 1 or equal to zero no action taken # if the fip_count is equal to 1, then this would be last active # fip in the host, so the agent gateway port can be deleted. 
if fip_count_on_host == 1: return True return False def _delete_floatingip_agent_gateway_port( self, context, host_id, ext_net_id): """Function to delete FIP gateway port with given ext_net_id.""" # delete any fip agent gw port device_filter = {'device_owner': [DEVICE_OWNER_AGENT_GW], 'network_id': [ext_net_id]} ports = self._core_plugin.get_ports(context, filters=device_filter) for p in ports: if self._get_vm_port_hostid(context, p['id'], p) == host_id: self._core_plugin.ipam.delete_port(context, p['id']) return def create_fip_agent_gw_port_if_not_exists( self, context, network_id, host): """Function to return the FIP Agent GW port. This function will create a FIP Agent GW port if required. If the port already exists, it will return the existing port and will not create a new one. """ l3_agent_db = self._get_agent_by_type_and_host( context, l3_const.AGENT_TYPE_L3, host) if l3_agent_db: LOG.debug("Agent ID exists: %s", l3_agent_db['id']) f_port = self._get_agent_gw_ports_exist_for_network( context, network_id, host, l3_agent_db['id']) if not f_port: LOG.info(_LI('Agent Gateway port does not exist,' ' so create one: %s'), f_port) agent_port = self._core_plugin.create_port( context, {'port': {'tenant_id': '', 'network_id': network_id, 'mac_address': attributes.ATTR_NOT_SPECIFIED, 'fixed_ips': attributes.ATTR_NOT_SPECIFIED, 'device_id': l3_agent_db['id'], 'device_owner': DEVICE_OWNER_AGENT_GW, 'binding:host_id': host, 'admin_state_up': True, 'name': ''}}) if agent_port: self._populate_subnets_for_ports(context, [agent_port]) return agent_port msg = _("Unable to create the Agent Gateway Port") raise n_exc.BadRequest(resource='router', msg=msg) else: self._populate_subnets_for_ports(context, [f_port]) return f_port def _get_snat_interface_ports_for_router(self, context, router_id): """Return all existing snat_router_interface ports.""" qry = context.session.query(l3_db.RouterPort) qry = qry.filter_by( router_id=router_id, port_type=DEVICE_OWNER_DVR_SNAT ) ports = [self._core_plugin._make_port_dict(rp.port, None) for rp in qry] return ports def _add_csnat_router_interface_port( self, context, router, network_id, subnet_id, do_pop=True): """Add SNAT interface to the specified router and subnet.""" snat_port = self._core_plugin.create_port( context, {'port': {'tenant_id': '', 'network_id': network_id, 'mac_address': attributes.ATTR_NOT_SPECIFIED, 'fixed_ips': [{'subnet_id': subnet_id}], 'device_id': router.id, 'device_owner': DEVICE_OWNER_DVR_SNAT, 'admin_state_up': True, 'name': ''}}) if not snat_port: msg = _("Unable to create the SNAT Interface Port") raise n_exc.BadRequest(resource='router', msg=msg) with context.session.begin(subtransactions=True): router_port = l3_db.RouterPort( port_id=snat_port['id'], router_id=router.id, port_type=DEVICE_OWNER_DVR_SNAT ) context.session.add(router_port) if do_pop: return self._populate_subnets_for_ports(context, [snat_port]) return snat_port def _create_snat_intf_ports_if_not_exists(self, context, router): """Function to return the snat interface port list. This function will return the snat interface port list if it exists. If the port does not exist it will create new ports and then return the list. 
""" port_list = self._get_snat_interface_ports_for_router( context, router.id) if port_list: self._populate_subnets_for_ports(context, port_list) return port_list port_list = [] int_ports = ( rp.port for rp in router.attached_ports.filter_by( port_type=DEVICE_OWNER_DVR_INTERFACE ) ) LOG.info(_LI('SNAT interface port list does not exist,' ' so create one: %s'), port_list) for intf in int_ports: if intf.fixed_ips: # Passing the subnet for the port to make sure the IP's # are assigned on the right subnet if multiple subnet # exists snat_port = self._add_csnat_router_interface_port( context, router, intf['network_id'], intf['fixed_ips'][0]['subnet_id'], do_pop=False) port_list.append(snat_port) if port_list: self._populate_subnets_for_ports(context, port_list) return port_list def dvr_vmarp_table_update(self, context, port_dict, action): """Notify L3 agents of VM ARP table changes. When a VM goes up or down, look for one DVR router on the port's subnet, and send the VM's ARP details to all L3 agents hosting the router. """ # Check this is a valid VM port if ("compute:" not in port_dict['device_owner'] or not port_dict['fixed_ips']): return ip_address = port_dict['fixed_ips'][0]['ip_address'] subnet = port_dict['fixed_ips'][0]['subnet_id'] filters = {'fixed_ips': {'subnet_id': [subnet]}} ports = self._core_plugin.get_ports(context, filters=filters) for port in ports: if port['device_owner'] == DEVICE_OWNER_DVR_INTERFACE: router_id = port['device_id'] router_dict = self._get_router(context, router_id) if router_dict.extra_attributes.distributed: arp_table = {'ip_address': ip_address, 'mac_address': port_dict['mac_address'], 'subnet_id': subnet} if action == "add": notify_action = self.l3_rpc_notifier.add_arp_entry elif action == "del": notify_action = self.l3_rpc_notifier.del_arp_entry notify_action(context, router_id, arp_table) return def delete_csnat_router_interface_ports(self, context, router, subnet_id=None): # Each csnat router interface port is associated # with a subnet, so we need to pass the subnet id to # delete the right ports. # TODO(markmcclain): This is suboptimal but was left to reduce # changeset size since it is late in cycle ports = ( rp.port.id for rp in router.attached_ports.filter_by(port_type=DEVICE_OWNER_DVR_SNAT) if rp.port ) c_snat_ports = self._core_plugin.get_ports( context, filters={'id': ports} ) for p in c_snat_ports: if subnet_id is None: self._core_plugin.delete_port(context, p['id'], l3_port_check=False) else: if p['fixed_ips'][0]['subnet_id'] == subnet_id: LOG.debug("Subnet matches: %s", subnet_id) self._core_plugin.delete_port(context, p['id'], l3_port_check=False) def is_distributed_router(router): """Return True if router to be handled is distributed.""" try: # See if router is a DB object first requested_router_type = router.extra_attributes.distributed except AttributeError: # if not, try to see if it is a request body requested_router_type = router.get('distributed') if attributes.is_attr_set(requested_router_type): return requested_router_type return cfg.CONF.router_distributed
import functools import logging from collections import defaultdict from inspect import signature from warnings import warn from twisted.internet.defer import Deferred, DeferredList from twisted.python.failure import Failure from scrapy.settings import Settings from scrapy.utils.datatypes import SequenceExclude from scrapy.utils.defer import mustbe_deferred, defer_result from scrapy.utils.deprecate import ScrapyDeprecationWarning from scrapy.utils.request import request_fingerprint from scrapy.utils.misc import arg_to_iter from scrapy.utils.log import failure_to_exc_info logger = logging.getLogger(__name__) class MediaPipeline: LOG_FAILED_RESULTS = True class SpiderInfo: def __init__(self, spider): self.spider = spider self.downloading = set() self.downloaded = {} self.waiting = defaultdict(list) def __init__(self, download_func=None, settings=None): self.download_func = download_func self._expects_item = {} if isinstance(settings, dict) or settings is None: settings = Settings(settings) resolve = functools.partial(self._key_for_pipe, base_class_name="MediaPipeline", settings=settings) self.allow_redirects = settings.getbool( resolve('MEDIA_ALLOW_REDIRECTS'), False ) self._handle_statuses(self.allow_redirects) # Check if deprecated methods are being used and make them compatible self._make_compatible() def _handle_statuses(self, allow_redirects): self.handle_httpstatus_list = None if allow_redirects: self.handle_httpstatus_list = SequenceExclude(range(300, 400)) def _key_for_pipe(self, key, base_class_name=None, settings=None): """ >>> MediaPipeline()._key_for_pipe("IMAGES") 'IMAGES' >>> class MyPipe(MediaPipeline): ... pass >>> MyPipe()._key_for_pipe("IMAGES", base_class_name="MediaPipeline") 'MYPIPE_IMAGES' """ class_name = self.__class__.__name__ formatted_key = f"{class_name.upper()}_{key}" if ( not base_class_name or class_name == base_class_name or settings and not settings.get(formatted_key) ): return key return formatted_key @classmethod def from_crawler(cls, crawler): try: pipe = cls.from_settings(crawler.settings) except AttributeError: pipe = cls() pipe.crawler = crawler return pipe def open_spider(self, spider): self.spiderinfo = self.SpiderInfo(spider) def process_item(self, item, spider): info = self.spiderinfo requests = arg_to_iter(self.get_media_requests(item, info)) dlist = [self._process_request(r, info, item) for r in requests] dfd = DeferredList(dlist, consumeErrors=1) return dfd.addCallback(self.item_completed, item, info) def _process_request(self, request, info, item): fp = request_fingerprint(request) cb = request.callback or (lambda _: _) eb = request.errback request.callback = None request.errback = None # Return cached result if request was already seen if fp in info.downloaded: return defer_result(info.downloaded[fp]).addCallbacks(cb, eb) # Otherwise, wait for result wad = Deferred().addCallbacks(cb, eb) info.waiting[fp].append(wad) # Check if request is downloading right now to avoid doing it twice if fp in info.downloading: return wad # Download request checking media_to_download hook output first info.downloading.add(fp) dfd = mustbe_deferred(self.media_to_download, request, info, item=item) dfd.addCallback(self._check_media_to_download, request, info, item=item) dfd.addBoth(self._cache_result_and_execute_waiters, fp, info) dfd.addErrback(lambda f: logger.error( f.value, exc_info=failure_to_exc_info(f), extra={'spider': info.spider}) ) return dfd.addBoth(lambda _: wad) # it must return wad at last def _make_compatible(self): """Make overridable methods of 
MediaPipeline and subclasses backwards compatible""" methods = [ "file_path", "media_to_download", "media_downloaded", "file_downloaded", "image_downloaded", "get_images" ] for method_name in methods: method = getattr(self, method_name, None) if callable(method): setattr(self, method_name, self._compatible(method)) def _compatible(self, func): """Wrapper for overridable methods to allow backwards compatibility""" self._check_signature(func) @functools.wraps(func) def wrapper(*args, **kwargs): if self._expects_item[func.__name__]: return func(*args, **kwargs) kwargs.pop('item', None) return func(*args, **kwargs) return wrapper def _check_signature(self, func): sig = signature(func) self._expects_item[func.__name__] = True if 'item' not in sig.parameters: old_params = str(sig)[1:-1] new_params = old_params + ", *, item=None" warn(f'{func.__name__}(self, {old_params}) is deprecated, ' f'please use {func.__name__}(self, {new_params})', ScrapyDeprecationWarning, stacklevel=2) self._expects_item[func.__name__] = False def _modify_media_request(self, request): if self.handle_httpstatus_list: request.meta['handle_httpstatus_list'] = self.handle_httpstatus_list else: request.meta['handle_httpstatus_all'] = True def _check_media_to_download(self, result, request, info, item): if result is not None: return result if self.download_func: # this ugly code was left only to support tests. TODO: remove dfd = mustbe_deferred(self.download_func, request, info.spider) dfd.addCallbacks( callback=self.media_downloaded, callbackArgs=(request, info), callbackKeywords={'item': item}, errback=self.media_failed, errbackArgs=(request, info)) else: self._modify_media_request(request) dfd = self.crawler.engine.download(request, info.spider) dfd.addCallbacks( callback=self.media_downloaded, callbackArgs=(request, info), callbackKeywords={'item': item}, errback=self.media_failed, errbackArgs=(request, info)) return dfd def _cache_result_and_execute_waiters(self, result, fp, info): if isinstance(result, Failure): # minimize cached information for failure result.cleanFailure() result.frames = [] result.stack = None # This code fixes a memory leak by avoiding to keep references to # the Request and Response objects on the Media Pipeline cache. # # What happens when the media_downloaded callback raises an # exception, for example a FileException('download-error') when # the Response status code is not 200 OK, is that the original # StopIteration exception (which in turn contains the failed # Response and by extension, the original Request) gets encapsulated # within the FileException context. # # Originally, Scrapy was using twisted.internet.defer.returnValue # inside functions decorated with twisted.internet.defer.inlineCallbacks, # encapsulating the returned Response in a _DefGen_Return exception # instead of a StopIteration. # # To avoid keeping references to the Response and therefore Request # objects on the Media Pipeline cache, we should wipe the context of # the encapsulated exception when it is a StopIteration instance # # This problem does not occur in Python 2.7 since we don't have # Exception Chaining (https://www.python.org/dev/peps/pep-3134/). 
context = getattr(result.value, '__context__', None) if isinstance(context, StopIteration): setattr(result.value, '__context__', None) info.downloading.remove(fp) info.downloaded[fp] = result # cache result for wad in info.waiting.pop(fp): defer_result(result).chainDeferred(wad) # Overridable Interface def media_to_download(self, request, info, *, item=None): """Check request before starting download""" pass def get_media_requests(self, item, info): """Returns the media requests to download""" pass def media_downloaded(self, response, request, info, *, item=None): """Handler for success downloads""" return response def media_failed(self, failure, request, info): """Handler for failed downloads""" return failure def item_completed(self, results, item, info): """Called per item when all media requests has been processed""" if self.LOG_FAILED_RESULTS: for ok, value in results: if not ok: logger.error( '%(class)s found errors processing %(item)s', {'class': self.__class__.__name__, 'item': item}, exc_info=failure_to_exc_info(value), extra={'spider': info.spider} ) return item def file_path(self, request, response=None, info=None, *, item=None): """Returns the path where downloaded media should be stored""" pass
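# --- Illustrative sketch (not part of Scrapy itself) ------------------------
# A minimal subclass showing how the overridable interface above fits together:
# get_media_requests() yields the requests to schedule, media_downloaded()
# turns each response into the cached result, and item_completed() folds the
# (success, result) tuples back into the item. The field names 'file_urls' and
# 'downloaded' are hypothetical.
from scrapy import Request


class ExampleDownloadPipeline(MediaPipeline):

    def get_media_requests(self, item, info):
        # One download request per URL listed on the item.
        for url in item.get('file_urls', []):
            yield Request(url)

    def media_downloaded(self, response, request, info, *, item=None):
        # Keep only what is needed; the full Response is not cached.
        return {'url': request.url,
                'status': response.status,
                'size': len(response.body)}

    def item_completed(self, results, item, info):
        # `results` is a list of (success, result_or_failure) tuples, one per
        # media request, in the order they were produced.
        item['downloaded'] = [result for ok, result in results if ok]
        return item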
from __future__ import print_function from __future__ import unicode_literals from inspect import getdoc from operator import attrgetter import logging import re import signal import sys from docker.errors import APIError import dockerpty from .. import __version__, legacy from ..project import NoSuchService, ConfigurationError from ..service import BuildError, CannotBeScaledError, NeedsBuildError from ..config import parse_environment from .command import Command from .docopt_command import NoSuchCommand from .errors import UserError from .formatter import Formatter from .log_printer import LogPrinter from .utils import yesno, get_version_info log = logging.getLogger(__name__) def main(): setup_logging() try: command = TopLevelCommand() command.sys_dispatch() except KeyboardInterrupt: log.error("\nAborting.") sys.exit(1) except (UserError, NoSuchService, ConfigurationError, legacy.LegacyContainersError) as e: log.error(e.msg) sys.exit(1) except NoSuchCommand as e: log.error("No such command: %s", e.command) log.error("") log.error("\n".join(parse_doc_section("commands:", getdoc(e.supercommand)))) sys.exit(1) except APIError as e: log.error(e.explanation) sys.exit(1) except BuildError as e: log.error("Service '%s' failed to build: %s" % (e.service.name, e.reason)) sys.exit(1) except NeedsBuildError as e: log.error("Service '%s' needs to be built, but --no-build was passed." % e.service.name) sys.exit(1) def setup_logging(): console_handler = logging.StreamHandler(sys.stderr) console_handler.setFormatter(logging.Formatter()) console_handler.setLevel(logging.INFO) root_logger = logging.getLogger() root_logger.addHandler(console_handler) root_logger.setLevel(logging.DEBUG) # Disable requests logging logging.getLogger("requests").propagate = False # stolen from docopt master def parse_doc_section(name, source): pattern = re.compile('^([^\n]*' + name + '[^\n]*\n?(?:[ \t].*?(?:\n|$))*)', re.IGNORECASE | re.MULTILINE) return [s.strip() for s in pattern.findall(source)] class TopLevelCommand(Command): """Define and run multi-container applications with Docker. Usage: docker-compose [options] [COMMAND] [ARGS...] docker-compose -h|--help Options: -f, --file FILE Specify an alternate compose file (default: docker-compose.yml) -p, --project-name NAME Specify an alternate project name (default: directory name) --verbose Show more output -v, --version Print version and exit Commands: build Build or rebuild services help Get help on a command kill Kill containers logs View output from containers port Print the public port for a port binding ps List containers pull Pulls service images restart Restart services rm Remove stopped containers run Run a one-off command scale Set number of containers for a service start Start services stop Stop services up Create and start containers migrate-to-labels Recreate containers to add labels version Show the Docker-Compose version information """ def docopt_options(self): options = super(TopLevelCommand, self).docopt_options() options['version'] = get_version_info('compose') return options def build(self, project, options): """ Build or rebuild services. Services are built once and then tagged as `project_service`, e.g. `composetest_db`. If you change a service's `Dockerfile` or the contents of its build directory, you can run `docker-compose build` to rebuild it. Usage: build [options] [SERVICE...] Options: --no-cache Do not use cache when building the image. 
""" no_cache = bool(options.get('--no-cache', False)) project.build(service_names=options['SERVICE'], no_cache=no_cache) def help(self, project, options): """ Get help on a command. Usage: help COMMAND """ command = options['COMMAND'] if not hasattr(self, command): raise NoSuchCommand(command, self) raise SystemExit(getdoc(getattr(self, command))) def kill(self, project, options): """ Force stop service containers. Usage: kill [options] [SERVICE...] Options: -s SIGNAL SIGNAL to send to the container. Default signal is SIGKILL. """ signal = options.get('-s', 'SIGKILL') project.kill(service_names=options['SERVICE'], signal=signal) def logs(self, project, options): """ View output from containers. Usage: logs [options] [SERVICE...] Options: --no-color Produce monochrome output. """ containers = project.containers(service_names=options['SERVICE'], stopped=True) monochrome = options['--no-color'] print("Attaching to", list_containers(containers)) LogPrinter(containers, attach_params={'logs': True}, monochrome=monochrome).run() def port(self, project, options): """ Print the public port for a port binding. Usage: port [options] SERVICE PRIVATE_PORT Options: --protocol=proto tcp or udp (defaults to tcp) --index=index index of the container if there are multiple instances of a service (defaults to 1) """ service = project.get_service(options['SERVICE']) try: container = service.get_container(number=options.get('--index') or 1) except ValueError as e: raise UserError(str(e)) print(container.get_local_port( options['PRIVATE_PORT'], protocol=options.get('--protocol') or 'tcp') or '') def ps(self, project, options): """ List containers. Usage: ps [options] [SERVICE...] Options: -q Only display IDs """ containers = sorted( project.containers(service_names=options['SERVICE'], stopped=True) + project.containers(service_names=options['SERVICE'], one_off=True), key=attrgetter('name')) if options['-q']: for container in containers: print(container.id) else: headers = [ 'Name', 'Command', 'State', 'Ports', ] rows = [] for container in containers: command = container.human_readable_command if len(command) > 30: command = '%s ...' % command[:26] rows.append([ container.name, command, container.human_readable_state, container.human_readable_ports, ]) print(Formatter().table(headers, rows)) def pull(self, project, options): """ Pulls images for services. Usage: pull [options] [SERVICE...] Options: --allow-insecure-ssl Allow insecure connections to the docker registry """ insecure_registry = options['--allow-insecure-ssl'] project.pull( service_names=options['SERVICE'], insecure_registry=insecure_registry ) def rm(self, project, options): """ Remove stopped service containers. Usage: rm [options] [SERVICE...] Options: -f, --force Don't ask to confirm removal -v Remove volumes associated with containers """ all_containers = project.containers(service_names=options['SERVICE'], stopped=True) stopped_containers = [c for c in all_containers if not c.is_running] if len(stopped_containers) > 0: print("Going to remove", list_containers(stopped_containers)) if options.get('--force') \ or yesno("Are you sure? [yN] ", default=False): project.remove_stopped( service_names=options['SERVICE'], v=options.get('-v', False) ) else: print("No stopped containers") def run(self, project, options): """ Run a one-off command on a service. For example: $ docker-compose run web python manage.py shell By default, linked services will be started, unless they are already running. 
If you do not want to start linked services, use `docker-compose run --no-deps SERVICE COMMAND [ARGS...]`. Usage: run [options] [-e KEY=VAL...] SERVICE [COMMAND] [ARGS...] Options: --allow-insecure-ssl Allow insecure connections to the docker registry -d Detached mode: Run container in the background, print new container name. --entrypoint CMD Override the entrypoint of the image. -e KEY=VAL Set an environment variable (can be used multiple times) -u, --user="" Run as specified username or uid --no-deps Don't start linked services. --rm Remove container after run. Ignored in detached mode. --service-ports Run command with the service's ports enabled and mapped to the host. -T Disable pseudo-tty allocation. By default `docker-compose run` allocates a TTY. """ service = project.get_service(options['SERVICE']) insecure_registry = options['--allow-insecure-ssl'] if not options['--no-deps']: deps = service.get_linked_names() if len(deps) > 0: project.up( service_names=deps, start_deps=True, allow_recreate=False, insecure_registry=insecure_registry, ) tty = True if options['-d'] or options['-T'] or not sys.stdin.isatty(): tty = False if options['COMMAND']: command = [options['COMMAND']] + options['ARGS'] else: command = service.options.get('command') container_options = { 'command': command, 'tty': tty, 'stdin_open': not options['-d'], 'detach': options['-d'], } if options['-e']: container_options['environment'] = parse_environment(options['-e']) if options['--entrypoint']: container_options['entrypoint'] = options.get('--entrypoint') if options['--rm']: container_options['restart'] = None if options['--user']: container_options['user'] = options.get('--user') if not options['--service-ports']: container_options['ports'] = [] container = service.create_container( one_off=True, insecure_registry=insecure_registry, **container_options ) if options['-d']: service.start_container(container) print(container.name) else: dockerpty.start(project.client, container.id, interactive=not options['-T']) exit_code = container.wait() if options['--rm']: log.info("Removing %s..." % container.name) project.client.remove_container(container.id) sys.exit(exit_code) def scale(self, project, options): """ Set number of containers to run for a service. Numbers are specified in the form `service=num` as arguments. For example: $ docker-compose scale web=2 worker=3 Usage: scale [SERVICE=NUM...] """ for s in options['SERVICE=NUM']: if '=' not in s: raise UserError('Arguments to scale should be in the form service=num') service_name, num = s.split('=', 1) try: num = int(num) except ValueError: raise UserError('Number of containers for service "%s" is not a ' 'number' % service_name) try: project.get_service(service_name).scale(num) except CannotBeScaledError: raise UserError( 'Service "%s" cannot be scaled because it specifies a port ' 'on the host. If multiple containers for this service were ' 'created, the port would clash.\n\nRemove the ":" from the ' 'port definition in docker-compose.yml so Docker can choose a random ' 'port for each container.' % service_name) def start(self, project, options): """ Start existing containers. Usage: start [SERVICE...] """ project.start(service_names=options['SERVICE']) def stop(self, project, options): """ Stop running containers without removing them. They can be started again with `docker-compose start`. Usage: stop [options] [SERVICE...] Options: -t, --timeout TIMEOUT Specify a shutdown timeout in seconds. 
(default: 10) """ timeout = options.get('--timeout') params = {} if timeout is None else {'timeout': int(timeout)} project.stop(service_names=options['SERVICE'], **params) def restart(self, project, options): """ Restart running containers. Usage: restart [options] [SERVICE...] Options: -t, --timeout TIMEOUT Specify a shutdown timeout in seconds. (default: 10) """ timeout = options.get('--timeout') params = {} if timeout is None else {'timeout': int(timeout)} project.restart(service_names=options['SERVICE'], **params) def up(self, project, options): """ Build, (re)create, start and attach to containers for a service. By default, `docker-compose up` will aggregate the output of each container, and when it exits, all containers will be stopped. If you run `docker-compose up -d`, it'll start the containers in the background and leave them running. If there are existing containers for a service, `docker-compose up` will stop and recreate them (preserving mounted volumes with volumes-from), so that changes in `docker-compose.yml` are picked up. If you do not want existing containers to be recreated, `docker-compose up --no-recreate` will re-use existing containers. Usage: up [options] [SERVICE...] Options: --allow-insecure-ssl Allow insecure connections to the docker registry -d Detached mode: Run containers in the background, print new container names. --no-color Produce monochrome output. --no-deps Don't start linked services. --x-smart-recreate Only recreate containers whose configuration or image needs to be updated. (EXPERIMENTAL) --no-recreate If containers already exist, don't recreate them. --no-build Don't build an image, even if it's missing -t, --timeout TIMEOUT When attached, use this timeout in seconds for the shutdown. (default: 10) """ insecure_registry = options['--allow-insecure-ssl'] detached = options['-d'] monochrome = options['--no-color'] start_deps = not options['--no-deps'] allow_recreate = not options['--no-recreate'] smart_recreate = options['--x-smart-recreate'] service_names = options['SERVICE'] project.up( service_names=service_names, start_deps=start_deps, allow_recreate=allow_recreate, smart_recreate=smart_recreate, insecure_registry=insecure_registry, do_build=not options['--no-build'], ) to_attach = [c for s in project.get_services(service_names) for c in s.containers()] if not detached: print("Attaching to", list_containers(to_attach)) log_printer = LogPrinter(to_attach, attach_params={"logs": True}, monochrome=monochrome) try: log_printer.run() finally: def handler(signal, frame): project.kill(service_names=service_names) sys.exit(0) signal.signal(signal.SIGINT, handler) print("Gracefully stopping... (press Ctrl+C again to force)") timeout = options.get('--timeout') params = {} if timeout is None else {'timeout': int(timeout)} project.stop(service_names=service_names, **params) def migrate_to_labels(self, project, _options): """ Recreate containers to add labels Usage: migrate-to-labels """ legacy.migrate_project_to_labels(project) def version(self, project, options): """ Show version informations Usage: version [--short] Options: --short Shows only Compose's version number. """ if options['--short']: print(__version__) else: print(get_version_info('full')) def list_containers(containers): return ", ".join(c.name for c in containers)
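
# Illustrative sketch (not part of docker-compose): the `scale` command above
# accepts positional arguments of the form `service=num`. A minimal, hypothetical
# helper showing that parsing in isolation:
def _parse_scale_args(args):
    """Turn ['web=2', 'worker=3'] into {'web': 2, 'worker': 3} or raise ValueError."""
    parsed = {}
    for arg in args:
        if '=' not in arg:
            raise ValueError('Arguments to scale should be in the form service=num')
        service_name, num = arg.split('=', 1)
        try:
            parsed[service_name] = int(num)
        except ValueError:
            raise ValueError('Number of containers for service "%s" is not a '
                             'number' % service_name)
    return parsed

# Example: _parse_scale_args(['web=2', 'worker=3']) == {'web': 2, 'worker': 3}
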
#!/usr/bin/python # -*- coding: utf-8 -*- """Tests for the Less Frequently Used (LFU) Windows Registry plugin.""" import unittest from plaso.dfwinreg import definitions as dfwinreg_definitions from plaso.dfwinreg import fake as dfwinreg_fake from plaso.formatters import winreg as _ # pylint: disable=unused-import from plaso.lib import timelib from plaso.parsers.winreg_plugins import lfu from tests.parsers.winreg_plugins import test_lib class TestBootExecutePlugin(test_lib.RegistryPluginTestCase): """Tests for the LFU BootExecute Windows Registry plugin.""" def setUp(self): """Sets up the needed objects used throughout the test.""" self._plugin = lfu.BootExecutePlugin() def _CreateTestKey(self, key_path, time_string): """Creates Registry keys and values for testing. Args: key_path: the Windows Registry key path. time_string: string containing the key last written date and time. Returns: A Windows Registry key (instance of dfwinreg.WinRegistryKey). """ filetime = dfwinreg_fake.Filetime() filetime.CopyFromString(time_string) registry_key = dfwinreg_fake.FakeWinRegistryKey( u'Session Manager', key_path=key_path, last_written_time=filetime.timestamp, offset=153) value_data = u'autocheck autochk *\x00'.encode(u'utf_16_le') registry_value = dfwinreg_fake.FakeWinRegistryValue( u'BootExecute', data=value_data, data_type=dfwinreg_definitions.REG_MULTI_SZ, offset=123) registry_key.AddValue(registry_value) value_data = u'2592000'.encode(u'utf_16_le') registry_value = dfwinreg_fake.FakeWinRegistryValue( u'CriticalSectionTimeout', data=value_data, data_type=dfwinreg_definitions.REG_SZ, offset=153) registry_key.AddValue(registry_value) value_data = u'\x00'.encode(u'utf_16_le') registry_value = dfwinreg_fake.FakeWinRegistryValue( u'ExcludeFromKnownDlls', data=value_data, data_type=dfwinreg_definitions.REG_MULTI_SZ, offset=163) registry_key.AddValue(registry_value) value_data = u'0'.encode(u'utf_16_le') registry_value = dfwinreg_fake.FakeWinRegistryValue( u'GlobalFlag', data=value_data, data_type=dfwinreg_definitions.REG_SZ, offset=173) registry_key.AddValue(registry_value) value_data = u'0'.encode(u'utf_16_le') registry_value = dfwinreg_fake.FakeWinRegistryValue( u'HeapDeCommitFreeBlockThreshold', data=value_data, data_type=dfwinreg_definitions.REG_SZ, offset=183) registry_key.AddValue(registry_value) value_data = u'0'.encode(u'utf_16_le') registry_value = dfwinreg_fake.FakeWinRegistryValue( u'HeapDeCommitTotalFreeThreshold', data=value_data, data_type=dfwinreg_definitions.REG_SZ, offset=203) registry_key.AddValue(registry_value) value_data = u'0'.encode(u'utf_16_le') registry_value = dfwinreg_fake.FakeWinRegistryValue( u'HeapSegmentCommit', data=value_data, data_type=dfwinreg_definitions.REG_SZ, offset=213) registry_key.AddValue(registry_value) value_data = u'0'.encode(u'utf_16_le') registry_value = dfwinreg_fake.FakeWinRegistryValue( u'HeapSegmentReserve', data=value_data, data_type=dfwinreg_definitions.REG_SZ, offset=223) registry_key.AddValue(registry_value) value_data = u'2'.encode(u'utf_16_le') registry_value = dfwinreg_fake.FakeWinRegistryValue( u'NumberOfInitialSessions', data=value_data, data_type=dfwinreg_definitions.REG_SZ, offset=243) registry_key.AddValue(registry_value) return registry_key def testProcess(self): """Tests the Process function.""" key_path = u'\\ControlSet001\\Control\\Session Manager' time_string = u'2012-08-31 20:45:29' registry_key = self._CreateTestKey(key_path, time_string) knowledge_base_values = {u'current_control_set': u'ControlSet001'} event_queue_consumer = 
self._ParseKeyWithPlugin( self._plugin, registry_key, knowledge_base_values=knowledge_base_values) event_objects = self._GetEventObjectsFromQueue(event_queue_consumer) self.assertEqual(len(event_objects), 2) event_object = event_objects[0] # This should just be the plugin name, as we're invoking it directly, # and not through the parser. self.assertEqual(event_object.parser, self._plugin.plugin_name) expected_timestamp = timelib.Timestamp.CopyFromString(time_string) self.assertEqual(event_object.timestamp, expected_timestamp) expected_message = ( u'[{0:s}] BootExecute: autocheck autochk *').format(key_path) self._TestGetMessageStrings( event_object, expected_message, expected_message) event_object = event_objects[1] expected_message = ( u'[{0:s}] ' u'CriticalSectionTimeout: 2592000 ' u'ExcludeFromKnownDlls: [] ' u'GlobalFlag: 0 ' u'HeapDeCommitFreeBlockThreshold: 0 ' u'HeapDeCommitTotalFreeThreshold: 0 ' u'HeapSegmentCommit: 0 ' u'HeapSegmentReserve: 0 ' u'NumberOfInitialSessions: 2').format(key_path) expected_short_message = u'{0:s}...'.format(expected_message[0:77]) self._TestGetMessageStrings( event_object, expected_message, expected_short_message) class TestBootVerificationRegistry(test_lib.RegistryPluginTestCase): """Tests for the LFU BootVerification Windows Registry plugin.""" def setUp(self): """Sets up the needed objects used throughout the test.""" self._plugin = lfu.BootVerificationPlugin() def _CreateTestKey(self, key_path, time_string): """Creates Registry keys and values for testing. Args: key_path: the Windows Registry key path. time_string: string containing the key last written date and time. Returns: A Windows Registry key (instance of dfwinreg.WinRegistryKey). """ filetime = dfwinreg_fake.Filetime() filetime.CopyFromString(time_string) registry_key = dfwinreg_fake.FakeWinRegistryKey( u'BootVerificationProgram', key_path=key_path, last_written_time=filetime.timestamp, offset=153) value_data = u'C:\\WINDOWS\\system32\\googleupdater.exe'.encode( u'utf_16_le') registry_value = dfwinreg_fake.FakeWinRegistryValue( u'ImagePath', data=value_data, data_type=dfwinreg_definitions.REG_SZ, offset=123) registry_key.AddValue(registry_value) return registry_key def testProcess(self): """Tests the Process function.""" key_path = u'\\ControlSet001\\Control\\BootVerificationProgram' time_string = u'2012-08-31 20:45:29' registry_key = self._CreateTestKey(key_path, time_string) knowledge_base_values = {u'current_control_set': u'ControlSet001'} event_queue_consumer = self._ParseKeyWithPlugin( self._plugin, registry_key, knowledge_base_values=knowledge_base_values) event_objects = self._GetEventObjectsFromQueue(event_queue_consumer) self.assertEqual(len(event_objects), 1) event_object = event_objects[0] # This should just be the plugin name, as we're invoking it directly, # and not through the parser. self.assertEqual(event_object.parser, self._plugin.plugin_name) expected_timestamp = timelib.Timestamp.CopyFromString(time_string) self.assertEqual(event_object.timestamp, expected_timestamp) expected_message = ( u'[{0:s}] ' u'ImagePath: C:\\WINDOWS\\system32\\googleupdater.exe').format( key_path) expected_short_message = u'{0:s}...'.format(expected_message[0:77]) self._TestGetMessageStrings( event_object, expected_message, expected_short_message) if __name__ == '__main__': unittest.main()
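
# Side note (independent of plaso): the test above encodes every value with
# 'utf_16_le' because Windows Registry string data is stored as UTF-16
# little-endian, and a REG_MULTI_SZ value is a sequence of NUL-terminated
# strings. A minimal illustration of decoding such data:
value_data = u'autocheck autochk *\x00'.encode('utf_16_le')
first_string = value_data.decode('utf_16_le').split(u'\x00')[0]
assert first_string == u'autocheck autochk *'
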
# django-salesforce # # by Phil Christensen # (c) 2012-2013 Freelancers Union (http://www.freelancersunion.org) # See LICENSE.md for details # """ Salesforce database backend for Django. """ import logging import requests import sys import threading from salesforce import DJANGO_16_PLUS, DJANGO_18_PLUS from django.core.exceptions import ImproperlyConfigured from django.conf import settings from django.db.backends.signals import connection_created if DJANGO_18_PLUS: from django.db.backends.base.base import BaseDatabaseWrapper from django.db.backends.base.features import BaseDatabaseFeatures else: from django.db.backends import BaseDatabaseWrapper, BaseDatabaseFeatures from salesforce.auth import SalesforceAuth from salesforce.backend.client import DatabaseClient from salesforce.backend.creation import DatabaseCreation from salesforce.backend.introspection import DatabaseIntrospection from salesforce.backend.validation import DatabaseValidation from salesforce.backend.operations import DatabaseOperations from salesforce.backend.driver import IntegrityError, DatabaseError from salesforce.backend import driver as Database from salesforce.backend import MAX_RETRIES from salesforce.backend.adapter import SslHttpAdapter try: from urllib.parse import urlparse except ImportError: from urlparse import urlparse log = logging.getLogger(__name__) connect_lock = threading.Lock() class SalesforceError(DatabaseError): """ DatabaseError that usually gets detailed error information from SF response in the second parameter, decoded from REST, that frequently need not to be displayed. """ def __init__(self, message='', data=None, response=None, verbose=False): DatabaseError.__init__(self, message) self.data = data self.response = response self.verbose = verbose if verbose: log.info("Error (debug details) %s\n%s", response.text, response.__dict__) class DatabaseFeatures(BaseDatabaseFeatures): """ Features this database provides. """ allows_group_by_pk = True supports_unspecified_pk = False can_return_id_from_insert = False # TODO If the following would be True, it requires a good relation name resolution supports_select_related = False # Though Salesforce doesn't support transactions, the setting # `supports_transactions` is used only for switching between rollback or # cleaning the database in testrunner after every test and loading fixtures # before it, however SF does not support any of these and all test data must # be loaded and cleaned by the testcase code. From the viewpoint of SF it is # irrelevant, but due to issue #28 it should be True. supports_transactions = True # Never use `interprets_empty_strings_as_nulls=True`. It is an opposite # setting for Oracle, while Salesforce saves nulls as empty strings not vice # versa. class DatabaseWrapper(BaseDatabaseWrapper): """ Core class that provides all DB support. """ vendor = 'salesforce' # Operators [contains, startswithm, endswith] are incorrectly # case insensitive like sqlite3. 
operators = { 'exact': '= %s', 'iexact': 'LIKE %s', 'contains': 'LIKE %s', 'icontains': 'LIKE %s', #'regex': 'REGEXP %s', # unsupported #'iregex': 'REGEXP %s', 'gt': '> %s', 'gte': '>= %s', 'lt': '< %s', 'lte': '<= %s', 'startswith': 'LIKE %s', 'endswith': 'LIKE %s', 'istartswith': 'LIKE %s', 'iendswith': 'LIKE %s', } Database = Database def __init__(self, settings_dict, alias=None): if alias is None: alias = getattr(settings, 'SALESFORCE_DB_ALIAS', 'salesforce') super(DatabaseWrapper, self).__init__(settings_dict, alias) self.validate_settings(settings_dict) self.features = DatabaseFeatures(self) self.ops = DatabaseOperations(self) self.client = DatabaseClient(self) self.creation = DatabaseCreation(self) self.introspection = DatabaseIntrospection(self) self.validation = DatabaseValidation(self) self._sf_session = None self._is_sandbox = None # The SFDC database is connected as late as possible if only tests # are running. Some tests don't require a connection. if not getattr(settings, 'SF_LAZY_CONNECT', 'test' in sys.argv): self.make_session() def make_session(self): """Authenticate and get the name of assigned SFDC data server""" with connect_lock: if self._sf_session is None: sf_session = requests.Session() sf_session.auth = SalesforceAuth(db_alias=self.alias, settings_dict=self.settings_dict) if self.settings_dict['USER'] == 'dynamic auth': sf_instance_url = self._sf_session or self.settings_dict['HOST'] else: sf_instance_url = sf_session.auth.authenticate()['instance_url'] sf_requests_adapter = SslHttpAdapter(max_retries=MAX_RETRIES) sf_session.mount(sf_instance_url, sf_requests_adapter) # Additional header works, but the improvement unmeasurable for me. # (less than SF speed fluctuation) #sf_session.header = {'accept-encoding': 'gzip, deflate', 'connection': 'keep-alive'} self._sf_session = sf_session @property def sf_session(self): if self._sf_session is None: self.make_session() return self._sf_session def get_connection_params(self): settings_dict = self.settings_dict params = settings_dict.copy() params.update(settings_dict['OPTIONS']) return params def get_new_connection(self, conn_params): # only simulated a connection interface without connecting really return Database.connect(**conn_params) def init_connection_state(self): pass # nothing to init def _set_autocommit(self, autocommit): # SF REST API uses autocommit, but until rollback it is not a # serious problem to ignore autocommit off pass def validate_settings(self, d): for k in ('ENGINE', 'CONSUMER_KEY', 'CONSUMER_SECRET', 'USER', 'PASSWORD', 'HOST'): if(k not in d): raise ImproperlyConfigured("Required '%s' key missing from '%s' database settings." % (k, self.alias)) elif not(d[k]): raise ImproperlyConfigured("'%s' key is the empty string in '%s' database settings." % (k, self.alias)) try: urlparse(d['HOST']) except Exception as e: raise ImproperlyConfigured("'HOST' key in '%s' database settings should be a valid URL: %s" % (self.alias, e)) def cursor(self, query=None): """ Return a fake cursor for accessing the Salesforce API with SOQL. """ from salesforce.backend.query import CursorWrapper cursor = CursorWrapper(self, query) # prior to 1.6 you were expected to send this signal # just after the cursor was constructed if not DJANGO_16_PLUS: connection_created.send(self.__class__, connection=self) return cursor def quote_name(self, name): """ Do not quote column and table names in the SOQL dialect. 
""" return name @property def is_sandbox(self): if self._is_sandbox is None: cur = self.cursor() cur.execute("SELECT IsSandbox FROM Organization") self._is_sandbox = cur.fetchone()['IsSandbox'] return self._is_sandbox
# from https://github.com/DJuttmann/SM3E/blob/master/SM3E/Tools/Compression.cs import utils.log class Compressor: def __init__(self, computeLimit=5): self.log = utils.log.get('Compressor') self.computeLimit = computeLimit def _concatBytes(self, b0, b1): return b0 + (b1 << 8) def _nextByte(self): return self.romFile.readByte() def decompress(self, romFile, address): self.romFile = romFile self.romFile.seek(address) startAddress = address curAddress = address output = [] while curAddress < startAddress + 0x8000: curByte = self._nextByte() curAddress += 1 # End of compressed data if curByte == 0xFF: return (curAddress - startAddress, output) command = curByte >> 5 length = (curByte & 0b11111) + 1 self.log.debug("@: {} curByte: {} cmd: {} len: {}".format(curAddress-startAddress-1, curByte, bin(command), length)) while True: isLongLength = False if command == 0b000: # Copy source bytes for i in range(length): output.append(self._nextByte()) curAddress += length self.log.debug("Uncompressed: {}".format(output[-length:])) elif command == 0b001: # Repeat one byte <length> times copyByte = self._nextByte() curAddress += 1 for i in range(length): output.append(copyByte) self.log.debug("Repeat: {}".format(output[-length:])) elif command == 0b010: # Alternate between two bytes <length> times copyByte1 = self._nextByte() copyByte2 = self._nextByte() curAddress += 2 for i in range(length): output.append(copyByte1 if i % 2 == 0 else copyByte2) self.log.debug("Word: {}".format(output[-length:])) elif command == 0b011: # Sequence of increasing bytes copyByte = self._nextByte() curAddress += 1 for i in range(length): output.append(copyByte) copyByte += 1 self.log.debug("Increment: {}".format(output[-length:])) elif command == 0b100: # Copy from output stream outAddress = self._concatBytes(self._nextByte(), self._nextByte()) curAddress += 2 for i in range(length): output.append(output[outAddress + i]) self.log.debug("Copy: {}".format(output[-length:])) elif command == 0b101: # Copy from output stream, flip bits outAddress = self._concatBytes(self._nextByte(), self._nextByte()) curAddress += 2 for i in range(length): output.append(output[outAddress + i] ^ 0xFF) self.log.debug("CopyXOR: {}".format(output[-length:])) elif command == 0b110: # Copy from output stream, relative to current index outAddress = len(output) - self._nextByte() curAddress += 1 for i in range(length): output.append(output[outAddress + i]) self.log.debug("RelativeCopy: {}".format(output[-length:])) elif command == 0b111: # Long length (10 bits) command command = (curByte >> 2) & 0b111; length = ((curByte & 0b11) << 8) + self._nextByte() + 1; curAddress += 1 self.log.debug("Long command") if command == 0b111: # Copy output relative to current index, flip bits outAddress = len(output) - self._nextByte() curAddress += 1 for i in range(length): output.append(output[outAddress + i] ^ 0xFF) self.log.debug("LongRelativeCopyXOR: {}".format(output[-length:])) else: isLongLength = True; if isLongLength == False: break def compress(self, inputData): # compress the data in input array, return array of compressed bytes self.inputData = inputData self.output = [] # brute force all the cases for every byte in the input. # For every inputData address, these arrays save the max number of bytes that can be # compressed with a single chunk, starting at that address. 
self._computeByteFill(inputData) self._computeWordFill(inputData) self._computeByteIncrement(inputData) self._computeCopy(inputData) i = 0 while i < len(inputData): length = max(self.byteFillLengths[i], self.wordFillLengths[i], self.byteIncrementLengths[i], self.copyLengths[i].length) self.log.debug("i:{} bf: {} wf: {} bi: {} c: {}".format(i, self.byteFillLengths[i], self.wordFillLengths[i], self.byteIncrementLengths[i], self.copyLengths[i].length)) if length < 3: j = i while j < len(inputData) and length < 3: length = max(self.byteFillLengths[j], self.wordFillLengths[j], self.byteIncrementLengths[j], self.copyLengths[j].length) j += 1 length = j - i if j == len(inputData) else j - i - 1 self._writeUncompressed(inputData, i, length) elif length == self.byteFillLengths[i]: length = min(length, 1024) self._writeByteFill(inputData[i], length) elif length == self.wordFillLengths[i]: length = min(length, 1024) self._writeWordFill(inputData[i], inputData[i+1], length) elif length == self.byteIncrementLengths[i]: length = min(length, 1024) self._writeByteIncrement(inputData[i], length) elif length == self.copyLengths[i].length: length = min(length, 1024) if i - self.copyLengths[i].address < 0xFF: self._writeNegativeCopy(i, i - self.copyLengths[i].address, length) else: self._writeCopy(self.copyLengths[i].address, length) i += length # end of compressed data marker self.output.append(0xFF) if len(self.output) > len(inputData): print("WARNING !!! len compressed {} > original data {}".format(len(self.output), len(inputData))) print("original: {}".format(inputData)) print("compressed: {}".format(self.output)) return self.output[:] def _writeChunkHeader(self, type, length): length -= 1 if length < 32: # regular command self.output.append(type << 5 | length) self.log.debug("_writeChunkHeader: cmd: {} len: {} value: {}".format(bin(type), length, type << 5 | length)) else: # long command self.output.append(0b11100000 | type << 2 | length >> 8) self.output.append(length & 0xFF) self.log.debug("_writeChunkHeader: long cmd: {} len: {} value: {} {}".format(bin(type), length, 0b11100000 | type << 2 | length >> 8, length & 0xFF)) def _writeUncompressed(self, inputData, index, length): self._writeChunkHeader(0b000, length) self.output += inputData[index:index+length] self.log.debug("_writeUncompressed: len: {} index: {} data: {}".format(length, index, inputData[index:index+length])) def _writeByteFill(self, byte, length): self._writeChunkHeader(0b001, length) self.output.append(byte) self.log.debug("_writeByteFill: len: {} byte: {}: {}".format(length, byte, [byte for i in range(length)])) def _writeWordFill(self, b0, b1, length): self._writeChunkHeader(0b010, length) self.output.append(b0) self.output.append(b1) self.log.debug("_writeWordFill: len: {} b0: {} b1: {}: {}".format(length, b0, b1, [b0 if i%2==0 else b1 for i in range(length)])) def _writeByteIncrement(self, byte, length): self._writeChunkHeader(0b011, length) self.output.append(byte) self.log.debug("_writeByteIncrement: len: {} byte: {}: {}".format(length, byte, [byte+i for i in range(length)])) def _writeCopy(self, address, length): self._writeChunkHeader(0b100, length) self.output.append(address & 0xFF) self.output.append(address >> 8) self.log.debug("_writeCopy: {}".format(self.output[-3:])) self.log.debug("_writeCopy: len: {} address: {}: {}".format(length, address, self.inputData[address:address+length])) def _writeNegativeCopy(self, i, address, length): self._writeChunkHeader(0b110, length) self.output.append(address) 
self.log.debug("_writeNegativeCopy: len: {} address: {}: {}".format(length, address, self.inputData[i-address:i-address+length])) def _computeByteFill(self, inputData): self.byteFillLengths = [] carry = 0 for i in range(len(inputData)): if carry == 0: value = inputData[i] # count how many repeating value we have while i + carry < len(inputData) and inputData[i + carry] == value: carry += 1 self.byteFillLengths.append(carry) carry -= 1 def _computeWordFill(self, inputData): self.wordFillLengths = [] carry = 1 for i in range(len(inputData)-1): if carry == 1: value = (inputData[i], inputData[i+1]) while i + carry < len(inputData) and inputData[i + carry] == value[carry & 1]: carry += 1 if carry < 4: # no compression when replacing [b0, b1, b0] with [cmd, b0, b1] self.wordFillLengths.append(2) else: self.wordFillLengths.append(carry) carry -= 1 # missing last value self.wordFillLengths.append(carry) def _computeByteIncrement(self, inputData): self.byteIncrementLengths = [] carry = 0 for i in range(len(inputData)): if carry == 0: value = inputData[i] while i + carry < len(inputData) and inputData[i + carry] == value: carry += 1 value += 1 self.byteIncrementLengths.append(carry) carry -= 1 class _Interval: def __init__(self, address, length): self.address = address self.length = length def __repr__(self): return "({},{})".format(self.address, self.length) def _computeCopy(self, inputData): self.copyLengths = [] # for each possible value store the positions of the value in the input data start = [[] for i in range(len(inputData))] for i in range(len(inputData)-1): start[inputData[i]].append(i) for i, value in enumerate(inputData, start=0): maxLength = 0 maxAddress = -1 for j, address in enumerate(start[inputData[i]], start=0): # for performance reasons limit the number of addresses if j >= self.computeLimit: break # only in previous addresses if address >= i: break length = self._matchSubSequences(address, i, inputData) if length > maxLength: maxLength = length maxAddress = address self.copyLengths.append(Compressor._Interval(maxAddress, maxLength)) # Find the max length of two matching sequences starting at a and b in Input array. # Make sure that 0 <= a < b, otherwise bad stuff will happen. def _matchSubSequences(self, a, b, inputData): if a >= b: return 0 i = 0 length = len(inputData) while b+i < length and inputData[a+i] == inputData[b+i]: i += 1 #self.log.debug("_matchSubSequences a: {} b: {} i: {}".format(a,b,i)) return i
#!/usr/bin/python # # Copyright (c) 2012 The Chromium OS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Unittests for the gs.py module.""" import functools import os import sys sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname( os.path.abspath(__file__))))) from chromite.lib import cros_build_lib from chromite.lib import cros_build_lib_unittest from chromite.lib import cros_test_lib from chromite.lib import gs from chromite.lib import osutils from chromite.lib import partial_mock # TODO(build): Finish test wrapper (http://crosbug.com/37517). # Until then, this has to be after the chromite imports. import mock def PatchGS(*args, **kwargs): """Convenience method for patching GSContext.""" return mock.patch.object(gs.GSContext, *args, **kwargs) class GSContextMock(partial_mock.PartialCmdMock): """Used to mock out the GSContext class.""" TARGET = 'chromite.lib.gs.GSContext' ATTRS = ('__init__', '_DoCommand', 'DEFAULT_SLEEP_TIME', 'DEFAULT_RETRIES', 'DEFAULT_BOTO_FILE', 'DEFAULT_GSUTIL_BIN', 'DEFAULT_GSUTIL_BUILDER_BIN', 'GSUTIL_URL') DEFAULT_ATTR = '_DoCommand' GSResponsePreconditionFailed = """ [Setting Content-Type=text/x-python] GSResponseError:: status=412, code=PreconditionFailed, reason=Precondition Failed.""" DEFAULT_SLEEP_TIME = 0 DEFAULT_RETRIES = 2 TMP_ROOT = '/tmp/cros_unittest' DEFAULT_BOTO_FILE = '%s/boto_file' % TMP_ROOT DEFAULT_GSUTIL_BIN = '%s/gsutil_bin' % TMP_ROOT DEFAULT_GSUTIL_BUILDER_BIN = DEFAULT_GSUTIL_BIN GSUTIL_URL = None def __init__(self): partial_mock.PartialCmdMock.__init__(self, create_tempdir=True) def _SetGSUtilUrl(self): tempfile = os.path.join(self.tempdir, 'tempfile') osutils.WriteFile(tempfile, 'some content') gsutil_path = os.path.join(self.tempdir, gs.GSContext.GSUTIL_TAR) cros_build_lib.CreateTarball(gsutil_path, self.tempdir, inputs=[tempfile]) self.GSUTIL_URL = 'file://%s' % gsutil_path def PreStart(self): os.environ.pop("BOTO_CONFIG", None) # Set it here for now, instead of mocking out Cached() directly because # python-mock has a bug with mocking out class methods with autospec=True. # TODO(rcui): Change this when this is fixed in PartialMock. 
self._SetGSUtilUrl() def _target__init__(self, *args, **kwargs): with PatchGS('_CheckFile', return_value=True): self.backup['__init__'](*args, **kwargs) def _DoCommand(self, inst, gsutil_cmd, **kwargs): result = self._results['_DoCommand'].LookupResult( (gsutil_cmd,), hook_args=(inst, gsutil_cmd,), hook_kwargs=kwargs) rc_mock = cros_build_lib_unittest.RunCommandMock() rc_mock.AddCmdResult( partial_mock.ListRegex('gsutil'), result.returncode, result.output, result.error) with rc_mock: return self.backup['_DoCommand'](inst, gsutil_cmd, **kwargs) class AbstractGSContextTest(cros_test_lib.MockTempDirTestCase): """Base class for GSContext tests.""" def setUp(self): self.gs_mock = self.StartPatcher(GSContextMock()) self.gs_mock.SetDefaultCmdResult() self.ctx = gs.GSContext() class CopyTest(AbstractGSContextTest): """Tests GSContext.Copy() functionality.""" LOCAL_PATH = '/tmp/file' GIVEN_REMOTE = EXPECTED_REMOTE = 'gs://test/path/file' ACL_FILE = '/my/file/acl' ACL_FILE2 = '/my/file/other' def _Copy(self, ctx, src, dst, **kwargs): return ctx.Copy(src, dst, **kwargs) def Copy(self, ctx=None, **kwargs): if ctx is None: ctx = self.ctx return self._Copy(ctx, self.LOCAL_PATH, self.GIVEN_REMOTE, **kwargs) def testBasic(self): """Simple copy test.""" self.Copy() self.gs_mock.assertCommandContains( ['cp', '--', self.LOCAL_PATH, self.EXPECTED_REMOTE]) def testWithACLFile(self): """ACL specified during init.""" ctx = gs.GSContext(acl_file=self.ACL_FILE) self.Copy(ctx=ctx) self.gs_mock.assertCommandContains(['cp', '-a', self.ACL_FILE]) def testWithACLFile2(self): """ACL specified during invocation.""" self.Copy(acl=self.ACL_FILE) self.gs_mock.assertCommandContains(['cp', '-a', self.ACL_FILE]) def testWithACLFile3(self): """ACL specified during invocation that overrides init.""" ctx = gs.GSContext(acl_file=self.ACL_FILE) self.Copy(ctx=ctx, acl=self.ACL_FILE2) self.gs_mock.assertCommandContains(['cp', '-a', self.ACL_FILE2]) def testVersion(self): """Test version field.""" for version in xrange(7): self.Copy(version=version) self.gs_mock.assertCommandContains( [], headers=['x-goog-if-generation-match:%s' % version]) def testRunCommandError(self): """Test RunCommandError is propagated.""" self.gs_mock.AddCmdResult(partial_mock.In('cp'), returncode=1) self.assertRaises(cros_build_lib.RunCommandError, self.Copy) def testGSContextException(self): """GSContextException is raised properly.""" self.gs_mock.AddCmdResult( partial_mock.In('cp'), returncode=1, error=self.gs_mock.GSResponsePreconditionFailed) self.assertRaises(gs.GSContextException, self.Copy) class CopyIntoTest(CopyTest): """Test CopyInto functionality.""" FILE = 'ooga' GIVEN_REMOTE = 'gs://test/path/file' EXPECTED_REMOTE = '%s/%s' % (GIVEN_REMOTE, FILE) def _Copy(self, ctx, *args, **kwargs): return ctx.CopyInto(*args, filename=self.FILE, **kwargs) #pylint: disable=E1101,W0212 class GSContextInitTest(cros_test_lib.MockTempDirTestCase): """Tests GSContext.__init__() functionality.""" def setUp(self): os.environ.pop("BOTO_CONFIG", None) self.bad_path = os.path.join(self.tempdir, 'nonexistent') file_list = ['gsutil_bin', 'boto_file', 'acl_file'] cros_test_lib.CreateOnDiskHierarchy(self.tempdir, file_list) for f in file_list: setattr(self, f, os.path.join(self.tempdir, f)) self.StartPatcher(PatchGS('DEFAULT_BOTO_FILE', new=self.boto_file)) self.StartPatcher(PatchGS('DEFAULT_GSUTIL_BIN', new=self.gsutil_bin)) def testInitGsutilBin(self): """Test we use the given gsutil binary, erroring where appropriate.""" self.assertEquals(gs.GSContext().gsutil_bin, 
self.gsutil_bin) self.assertRaises(gs.GSContextException, gs.GSContext, gsutil_bin=self.bad_path) def testBadGSUtilBin(self): """Test exception thrown for bad gsutil paths.""" with PatchGS('DEFAULT_GSUTIL_BIN', new=self.bad_path): self.assertRaises(gs.GSContextException, gs.GSContext) def testInitBotoFileEnv(self): os.environ['BOTO_CONFIG'] = self.gsutil_bin self.assertTrue(gs.GSContext().boto_file, self.gsutil_bin) self.assertEqual(gs.GSContext(boto_file=self.acl_file).boto_file, self.acl_file) self.assertRaises(gs.GSContextException, gs.GSContext, boto_file=self.bad_path) def testInitBotoFileEnvError(self): """Boto file through env var error.""" self.assertEquals(gs.GSContext().boto_file, self.boto_file) # Check env usage next; no need to cleanup, teardown handles it, # and we want the env var to persist for the next part of this test. os.environ['BOTO_CONFIG'] = self.bad_path self.assertRaises(gs.GSContextException, gs.GSContext) def testInitBotoFileError(self): """Test bad boto file.""" with PatchGS('DEFAULT_GSUTIL_BIN', self.bad_path): self.assertRaises(gs.GSContextException, gs.GSContext) def testInitAclFile(self): """Test ACL selection logic in __init__.""" self.assertEqual(gs.GSContext().acl_file, None) self.assertEqual(gs.GSContext(acl_file=self.acl_file).acl_file, self.acl_file) self.assertRaises(gs.GSContextException, gs.GSContext, acl_file=self.bad_path) class GSContextTest(AbstractGSContextTest): """Tests for GSContext()""" def _testDoCommand(self, ctx, retries, sleep): with mock.patch.object(cros_build_lib, 'RetryCommand', autospec=True): ctx.Copy('/blah', 'gs://foon') cmd = [self.ctx.gsutil_bin, 'cp', '--', '/blah', 'gs://foon'] cros_build_lib.RetryCommand.assert_called_once_with( mock.ANY, retries, cmd, sleep=sleep, redirect_stderr=True, extra_env={'BOTO_CONFIG': mock.ANY}) def testDoCommandDefault(self): """Verify the internal DoCommand function works correctly.""" self._testDoCommand(self.ctx, retries=self.ctx.DEFAULT_RETRIES, sleep=self.ctx.DEFAULT_SLEEP_TIME) def testDoCommandCustom(self): """Test that retries and sleep parameters are honored.""" ctx = gs.GSContext(retries=4, sleep=1) self._testDoCommand(ctx, retries=4, sleep=1) def testSetAclError(self): """Ensure SetACL blows up if the acl isn't specified.""" self.assertRaises(gs.GSContextException, self.ctx.SetACL, 'gs://abc/3') def testSetDefaultAcl(self): """Test default ACL behavior.""" self.ctx.SetACL('gs://abc/1', 'monkeys') self.gs_mock.assertCommandContains(['setacl', 'monkeys', 'gs://abc/1']) def testSetAcl(self): """Base ACL setting functionality.""" ctx = gs.GSContext(acl_file='/my/file/acl') ctx.SetACL('gs://abc/1') self.gs_mock.assertCommandContains(['setacl', '/my/file/acl', 'gs://abc/1']) def testCreateCached(self): """Test that the function runs through.""" gs.GSContext.Cached(self.tempdir) def testReuseCached(self): """Test that second fetch is a cache hit.""" gs.GSContext.Cached(self.tempdir) gs.GSUTIL_URL = None gs.GSContext.Cached(self.tempdir) class InitBotoTest(AbstractGSContextTest): """Test boto file interactive initialization.""" GS_LS_ERROR = """\ You are attempting to access protected data with no configured credentials. 
Please see http://code.google.com/apis/storage/docs/signup.html for details about activating the Google Cloud Storage service and then run the "gsutil config" command to configure gsutil to use these credentials.""" GS_LS_BENIGN = """\ "GSResponseError: status=400, code=MissingSecurityHeader, reason=Bad Request, detail=A nonempty x-goog-project-id header is required for this request.""" def setUp(self): self.boto_file = os.path.join(self.tempdir, 'boto_file') self.ctx = gs.GSContext(boto_file=self.boto_file) def testInitGSLsSkippableError(self): """Benign GS error.""" self.gs_mock.AddCmdResult(['ls'], returncode=1, error=self.GS_LS_BENIGN) self.ctx._InitBoto() def _WriteBotoFile(self, contents, *_args, **_kwargs): osutils.WriteFile(self.ctx.boto_file, contents) def testInitGSLsFailButSuccess(self): """Invalid GS Config, but we config properly.""" self.gs_mock.AddCmdResult(['ls'], returncode=1, error=self.GS_LS_ERROR) self.ctx._InitBoto() def _AddLsConfigResult(self, side_effect=None): self.gs_mock.AddCmdResult(['ls'], returncode=1, error=self.GS_LS_ERROR) self.gs_mock.AddCmdResult(['config'], returncode=1, side_effect=side_effect) def testGSLsFailAndConfigError(self): """Invalid GS Config, and we fail to config.""" self._AddLsConfigResult( side_effect=functools.partial(self._WriteBotoFile, 'monkeys')) self.assertRaises(cros_build_lib.RunCommandError, self.ctx._InitBoto) def testGSLsFailAndEmptyConfigFile(self): """Invalid GS Config, and we raise error on empty config file.""" self._AddLsConfigResult( side_effect=functools.partial(self._WriteBotoFile, '')) self.assertRaises(gs.GSContextException, self.ctx._InitBoto) if __name__ == '__main__': cros_test_lib.main()
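
# Generic illustration (not chromite-specific) of the pattern used by
# _testDoCommand above: hand the code under test a mock collaborator, run it,
# then assert on the exact command it was invoked with. All names here are
# hypothetical stand-ins; `mock` is already imported at the top of this module.
class FakeContext(object):
    """Reduced stand-in for GSContext, keeping only the retried copy call."""

    gsutil_bin = 'gsutil'

    def Copy(self, src, dst, retry=None):
        cmd = [self.gsutil_bin, 'cp', '--', src, dst]
        retry(2, cmd, sleep=0)


def _example_copy_assertion():
    ctx = FakeContext()
    retry = mock.MagicMock()
    ctx.Copy('/blah', 'gs://foon', retry=retry)
    retry.assert_called_once_with(
        2, ['gsutil', 'cp', '--', '/blah', 'gs://foon'], sleep=0)
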
import logging import warnings from django.contrib.auth.models import User from django.db import models from django.db.models import Q from django.utils import timezone from django.utils.functional import cached_property from django.utils.encoding import python_2_unicode_compatible from django.utils.translation import ugettext_lazy as _ from djblets.db.fields import CounterField, JSONField from djblets.db.query import get_object_or_none from reviewboard.diffviewer.models import DiffSet from reviewboard.reviews.errors import RevokeShipItError from reviewboard.reviews.managers import ReviewManager from reviewboard.reviews.models.base_comment import BaseComment from reviewboard.reviews.models.diff_comment import Comment from reviewboard.reviews.models.file_attachment_comment import \ FileAttachmentComment from reviewboard.reviews.models.general_comment import GeneralComment from reviewboard.reviews.models.review_request import (ReviewRequest, fetch_issue_counts) from reviewboard.reviews.models.screenshot_comment import ScreenshotComment from reviewboard.reviews.signals import (reply_publishing, reply_published, review_publishing, review_published, review_ship_it_revoking, review_ship_it_revoked) logger = logging.getLogger(__name__) @python_2_unicode_compatible class Review(models.Model): """A review of a review request.""" # Constants used in e-mails when a review contains a Ship It designation. # These are explicitly not marked for localization to prevent taking the # submitting user's local into account when generating the e-mail. SHIP_IT_TEXT = 'Ship It!' REVOKED_SHIP_IT_TEXT = '~~Ship It!~~' FIX_IT_THEN_SHIP_IT_TEXT = 'Fix it, then Ship it!' review_request = models.ForeignKey(ReviewRequest, related_name="reviews", verbose_name=_("review request")) user = models.ForeignKey(User, verbose_name=_("user"), related_name="reviews") timestamp = models.DateTimeField(_('timestamp'), default=timezone.now) public = models.BooleanField(_("public"), default=False) ship_it = models.BooleanField( _("ship it"), default=False, help_text=_("Indicates whether the reviewer thinks this code is " "ready to ship.")) base_reply_to = models.ForeignKey( "self", blank=True, null=True, related_name="replies", verbose_name=_("Base reply to"), help_text=_("The top-most review in the discussion thread for " "this review reply.")) email_message_id = models.CharField(_("e-mail message ID"), max_length=255, blank=True, null=True) time_emailed = models.DateTimeField(_("time e-mailed"), null=True, default=None, blank=True) body_top = models.TextField( _("body (top)"), blank=True, help_text=_("The review text shown above the diff and screenshot " "comments.")) body_top_rich_text = models.BooleanField( _("body (top) in rich text"), default=False) body_bottom = models.TextField( _("body (bottom)"), blank=True, help_text=_("The review text shown below the diff and screenshot " "comments.")) body_bottom_rich_text = models.BooleanField( _("body (bottom) in rich text"), default=False) body_top_reply_to = models.ForeignKey( "self", blank=True, null=True, related_name="body_top_replies", verbose_name=_("body (top) reply to"), help_text=_("The review that the body (top) field is in reply to.")) body_bottom_reply_to = models.ForeignKey( "self", blank=True, null=True, related_name="body_bottom_replies", verbose_name=_("body (bottom) reply to"), help_text=_("The review that the body (bottom) field is in reply to.")) comments = models.ManyToManyField(Comment, verbose_name=_("comments"), related_name="review", blank=True) 
screenshot_comments = models.ManyToManyField( ScreenshotComment, verbose_name=_("screenshot comments"), related_name="review", blank=True) file_attachment_comments = models.ManyToManyField( FileAttachmentComment, verbose_name=_("file attachment comments"), related_name="review", blank=True) general_comments = models.ManyToManyField( GeneralComment, verbose_name=_('general comments'), related_name='review', blank=True) extra_data = JSONField(null=True) # Deprecated and no longer used for new reviews as of 2.0.9. rich_text = models.BooleanField(_("rich text"), default=False) # XXX Deprecated. This will be removed in a future release. reviewed_diffset = models.ForeignKey( DiffSet, verbose_name="Reviewed Diff", blank=True, null=True, help_text=_("This field is unused and will be removed in a future " "version.")) # Set this up with a ReviewManager to help prevent race conditions and # to fix duplicate reviews. objects = ReviewManager() @cached_property def ship_it_only(self): """Return if the review only contains a "Ship It!". Returns: bool: ``True`` if the review is only a "Ship It!" and ``False`` otherwise. """ return (self.ship_it and (not self.body_top or self.body_top == Review.SHIP_IT_TEXT) and not (self.body_bottom or self.has_comments(only_issues=False))) def can_user_revoke_ship_it(self, user): """Return whether a given user can revoke a Ship It. Args: user (django.contrib.auth.models.User): The user to check permissions for. Returns: bool: ``True`` if the user has permissions to revoke a Ship It. ``False`` if they don't. """ return (user.is_authenticated and self.public and (user.pk == self.user_id or user.is_superuser or (self.review_request.local_site and self.review_request.local_site.admins.filter( pk=user.pk).exists())) and self.review_request.is_accessible_by(user)) def revoke_ship_it(self, user): """Revoke the Ship It status on this review. The Ship It status will be removed, and the :py:data:`ReviewRequest.shipit_count <reviewboard.reviews.models.review_request.ReviewRequest.shipit_count>` counter will be decremented. If the :py:attr:`body_top` text is equal to :py:attr:`SHIP_IT_TEXT`, then it will replaced with :py:attr:`REVOKED_SHIP_IT_TEXT`. Callers are responsible for checking whether the user has permission to revoke Ship Its by using :py:meth:`can_user_revoke_ship_it`. Raises: reviewboard.reviews.errors.RevokeShipItError: The Ship It could not be revoked. Details will be in the error message. """ if not self.ship_it: raise RevokeShipItError('This review is not marked Ship It!') # This may raise a RevokeShipItError. 
try: review_ship_it_revoking.send(sender=self.__class__, user=user, review=self) except RevokeShipItError: raise except Exception as e: logger.exception('Unexpected error notifying listeners before ' 'revoking a Ship It for review ID=%d: %s', self.pk, e) raise RevokeShipItError(e) if self.extra_data is None: self.extra_data = {} self.extra_data['revoked_ship_it'] = True self.ship_it = False update_fields = ['extra_data', 'ship_it'] if self.body_top == self.SHIP_IT_TEXT: self.body_top = self.REVOKED_SHIP_IT_TEXT self.body_top_rich_text = True update_fields += ['body_top', 'body_top_rich_text'] self.save(update_fields=update_fields) self.review_request.decrement_shipit_count() self.review_request.last_review_activity_timestamp = timezone.now() self.review_request.save( update_fields=['last_review_activity_timestamp']) try: review_ship_it_revoked.send(sender=self.__class__, user=user, review=self) except Exception as e: logger.exception('Unexpected error notifying listeners after ' 'revoking a Ship It for review ID=%d: %s', self.pk, e) @cached_property def all_participants(self): """Return all participants in the review's discussion. This will always contain the user who filed the review, plus every user who has published a reply to the review. The result is cached. Repeated calls will return the same result. Returns: set of django.contrib.auth.models.User: The users who participated in the discussion. """ user_ids = ( self.replies .filter(public=True) .values_list('user_id', flat=True) ) user_id_lookup = set(user_ids) - {self.user.pk} users = {self.user} if user_id_lookup: users.update(User.objects.filter(pk__in=user_id_lookup)) return users def is_accessible_by(self, user): """Returns whether the user can access this review.""" return ((self.public or user.is_superuser or self.user_id == user.pk) and self.review_request.is_accessible_by(user)) def is_mutable_by(self, user): """Returns whether the user can modify this review.""" return ((not self.public and (user.is_superuser or self.user_id == user.pk)) and self.review_request.is_accessible_by(user)) def __str__(self): return "Review of '%s'" % self.review_request def is_reply(self): """Returns whether or not this review is a reply to another review.""" return self.base_reply_to_id is not None is_reply.boolean = True def is_new_for_user(self, user, last_visited): """Return whether this review is new for a user. The review is considered new if their last visited time is older than the review's published timestamp and the user is not the one who created the review. Args: user (django.contrib.auth.models.User): The user accessing the review. last_visited (datetime.datetime): The last time the user accessed a page where the review would be shown. Returns: bool: ``True`` if the review is new to this user. ``False`` if it's older than the last visited time or the user created it. 
""" return user.pk != self.user_id and last_visited < self.timestamp def public_replies(self): """Returns a list of public replies to this review.""" return self.replies.filter(public=True) def public_body_top_replies(self, user=None): """Returns a list of public replies to this review's body top.""" if hasattr(self, '_body_top_replies'): return self._body_top_replies else: q = Q(public=True) if user and user.is_authenticated: q = q | Q(user=user) return self.body_top_replies.filter(q).order_by('timestamp') def public_body_bottom_replies(self, user=None): """Returns a list of public replies to this review's body bottom.""" if hasattr(self, '_body_bottom_replies'): return self._body_bottom_replies else: q = Q(public=True) if user and user.is_authenticated: q = q | Q(user=user) return self.body_bottom_replies.filter(q).order_by('timestamp') def get_pending_reply(self, user): """Returns the pending reply owned by the specified user.""" if user.is_authenticated: return get_object_or_none(Review, user=user, public=False, base_reply_to=self) return None def save(self, **kwargs): if ('update_fields' not in kwargs or 'timestamp' in kwargs['update_fields']): self.timestamp = timezone.now() super(Review, self).save(**kwargs) def publish(self, user=None, trivial=False, to_owner_only=False, request=None): """Publishes this review. This will make the review public and update the timestamps of all contained comments. """ if not user: user = self.user self.public = True if self.is_reply(): reply_publishing.send(sender=self.__class__, user=user, reply=self) else: review_publishing.send(sender=self.__class__, user=user, review=self) self.save() self.comments.update(timestamp=self.timestamp) self.screenshot_comments.update(timestamp=self.timestamp) self.file_attachment_comments.update(timestamp=self.timestamp) self.general_comments.update(timestamp=self.timestamp) # Update the last_updated timestamp and the last review activity # timestamp on the review request. self.review_request.last_review_activity_timestamp = self.timestamp self.review_request.last_updated = self.timestamp self.review_request.save(update_fields=( 'last_review_activity_timestamp', 'last_updated')) if self.is_reply(): reply_published.send(sender=self.__class__, user=user, reply=self, trivial=trivial) else: issue_counts = fetch_issue_counts(self.review_request, Q(pk=self.pk)) # Since we're publishing the review, all filed issues should be # open. assert issue_counts[BaseComment.RESOLVED] == 0 assert issue_counts[BaseComment.DROPPED] == 0 assert issue_counts[BaseComment.VERIFYING_RESOLVED] == 0 assert issue_counts[BaseComment.VERIFYING_DROPPED] == 0 if self.ship_it: ship_it_value = 1 else: ship_it_value = 0 # Atomically update the issue count and Ship It count. CounterField.increment_many( self.review_request, { 'issue_open_count': issue_counts[BaseComment.OPEN], 'issue_dropped_count': 0, 'issue_resolved_count': 0, 'issue_verifying_count': 0, 'shipit_count': ship_it_value, }) review_published.send(sender=self.__class__, user=user, review=self, to_owner_only=to_owner_only, request=request) def delete(self): """Deletes this review. This will enforce that all contained comments are also deleted. 
""" self.comments.all().delete() self.screenshot_comments.all().delete() self.file_attachment_comments.all().delete() self.general_comments.all().delete() super(Review, self).delete() def get_absolute_url(self): return "%s#review%s" % (self.review_request.get_absolute_url(), self.pk) def get_all_comments(self, **kwargs): """Return a list of all contained comments of all types.""" return (list(self.comments.filter(**kwargs)) + list(self.screenshot_comments.filter(**kwargs)) + list(self.file_attachment_comments.filter(**kwargs)) + list(self.general_comments.filter(**kwargs))) def has_comments(self, only_issues=False): """Return whether the review contains any comments/issues. Args: only_issues (bool, optional): Whether or not to check for comments where ``issue_opened`` is ``True``. ``True`` to check for issues, or ``False`` to check for comments only. Defaults to ``False``. Returns: bool: ``True`` if the review contains any comments/issues and ``False`` otherwise. """ qs = [ self.comments, self.file_attachment_comments, self.screenshot_comments, self.general_comments, ] if only_issues: qs = [ q.filter(issue_opened=True) for q in qs ] return any(q.exists() for q in qs) class Meta: app_label = 'reviews' db_table = 'reviews_review' ordering = ['timestamp'] get_latest_by = 'timestamp' verbose_name = _('Review') verbose_name_plural = _('Reviews')
"""Home Assistant auth provider.""" from __future__ import annotations import asyncio import base64 from collections import OrderedDict import logging from typing import Any, Dict, List, Optional, Set, cast import bcrypt import voluptuous as vol from homeassistant.const import CONF_ID from homeassistant.core import HomeAssistant, callback from homeassistant.exceptions import HomeAssistantError from . import AUTH_PROVIDER_SCHEMA, AUTH_PROVIDERS, AuthProvider, LoginFlow from ..models import Credentials, UserMeta STORAGE_VERSION = 1 STORAGE_KEY = "auth_provider.homeassistant" def _disallow_id(conf: Dict[str, Any]) -> Dict[str, Any]: """Disallow ID in config.""" if CONF_ID in conf: raise vol.Invalid("ID is not allowed for the homeassistant auth provider.") return conf CONFIG_SCHEMA = vol.All(AUTH_PROVIDER_SCHEMA, _disallow_id) @callback def async_get_provider(hass: HomeAssistant) -> HassAuthProvider: """Get the provider.""" for prv in hass.auth.auth_providers: if prv.type == "homeassistant": return cast(HassAuthProvider, prv) raise RuntimeError("Provider not found") class InvalidAuth(HomeAssistantError): """Raised when we encounter invalid authentication.""" class InvalidUser(HomeAssistantError): """Raised when invalid user is specified. Will not be raised when validating authentication. """ class Data: """Hold the user data.""" def __init__(self, hass: HomeAssistant) -> None: """Initialize the user data store.""" self.hass = hass self._store = hass.helpers.storage.Store( STORAGE_VERSION, STORAGE_KEY, private=True ) self._data: Optional[Dict[str, Any]] = None # Legacy mode will allow usernames to start/end with whitespace # and will compare usernames case-insensitive. # Remove in 2020 or when we launch 1.0. self.is_legacy = False @callback def normalize_username(self, username: str) -> str: """Normalize a username based on the mode.""" if self.is_legacy: return username return username.strip().casefold() async def async_load(self) -> None: """Load stored data.""" data = await self._store.async_load() if data is None: data = {"users": []} seen: Set[str] = set() for user in data["users"]: username = user["username"] # check if we have duplicates folded = username.casefold() if folded in seen: self.is_legacy = True logging.getLogger(__name__).warning( "Home Assistant auth provider is running in legacy mode " "because we detected usernames that are case-insensitive" "equivalent. Please change the username: '%s'.", username, ) break seen.add(folded) # check if we have unstripped usernames if username != username.strip(): self.is_legacy = True logging.getLogger(__name__).warning( "Home Assistant auth provider is running in legacy mode " "because we detected usernames that start or end in a " "space. Please change the username: '%s'.", username, ) break self._data = data @property def users(self) -> List[Dict[str, str]]: """Return users.""" return self._data["users"] # type: ignore def validate_login(self, username: str, password: str) -> None: """Validate a username and password. Raises InvalidAuth if auth invalid. """ username = self.normalize_username(username) dummy = b"$2b$12$CiuFGszHx9eNHxPuQcwBWez4CwDTOcLTX5CbOpV6gef2nYuXkY7BO" found = None # Compare all users to avoid timing attacks. 
for user in self.users: if self.normalize_username(user["username"]) == username: found = user if found is None: # check a hash to make timing the same as if user was found bcrypt.checkpw(b"foo", dummy) raise InvalidAuth user_hash = base64.b64decode(found["password"]) # bcrypt.checkpw is timing-safe if not bcrypt.checkpw(password.encode(), user_hash): raise InvalidAuth def hash_password( # pylint: disable=no-self-use self, password: str, for_storage: bool = False ) -> bytes: """Encode a password.""" hashed: bytes = bcrypt.hashpw(password.encode(), bcrypt.gensalt(rounds=12)) if for_storage: hashed = base64.b64encode(hashed) return hashed def add_auth(self, username: str, password: str) -> None: """Add a new authenticated user/pass.""" username = self.normalize_username(username) if any( self.normalize_username(user["username"]) == username for user in self.users ): raise InvalidUser self.users.append( { "username": username, "password": self.hash_password(password, True).decode(), } ) @callback def async_remove_auth(self, username: str) -> None: """Remove authentication.""" username = self.normalize_username(username) index = None for i, user in enumerate(self.users): if self.normalize_username(user["username"]) == username: index = i break if index is None: raise InvalidUser self.users.pop(index) def change_password(self, username: str, new_password: str) -> None: """Update the password. Raises InvalidUser if user cannot be found. """ username = self.normalize_username(username) for user in self.users: if self.normalize_username(user["username"]) == username: user["password"] = self.hash_password(new_password, True).decode() break else: raise InvalidUser async def async_save(self) -> None: """Save data.""" await self._store.async_save(self._data) @AUTH_PROVIDERS.register("homeassistant") class HassAuthProvider(AuthProvider): """Auth provider based on a local storage of users in Home Assistant config dir.""" DEFAULT_TITLE = "Home Assistant Local" def __init__(self, *args: Any, **kwargs: Any) -> None: """Initialize an Home Assistant auth provider.""" super().__init__(*args, **kwargs) self.data: Optional[Data] = None self._init_lock = asyncio.Lock() async def async_initialize(self) -> None: """Initialize the auth provider.""" async with self._init_lock: if self.data is not None: return data = Data(self.hass) await data.async_load() self.data = data async def async_login_flow(self, context: Optional[Dict]) -> LoginFlow: """Return a flow to login.""" return HassLoginFlow(self) async def async_validate_login(self, username: str, password: str) -> None: """Validate a username and password.""" if self.data is None: await self.async_initialize() assert self.data is not None await self.hass.async_add_executor_job( self.data.validate_login, username, password ) async def async_add_auth(self, username: str, password: str) -> None: """Call add_auth on data.""" if self.data is None: await self.async_initialize() assert self.data is not None await self.hass.async_add_executor_job(self.data.add_auth, username, password) await self.data.async_save() async def async_remove_auth(self, username: str) -> None: """Call remove_auth on data.""" if self.data is None: await self.async_initialize() assert self.data is not None self.data.async_remove_auth(username) await self.data.async_save() async def async_change_password(self, username: str, new_password: str) -> None: """Call change_password on data.""" if self.data is None: await self.async_initialize() assert self.data is not None await 
self.hass.async_add_executor_job( self.data.change_password, username, new_password ) await self.data.async_save() async def async_get_or_create_credentials( self, flow_result: Dict[str, str] ) -> Credentials: """Get credentials based on the flow result.""" if self.data is None: await self.async_initialize() assert self.data is not None norm_username = self.data.normalize_username username = norm_username(flow_result["username"]) for credential in await self.async_credentials(): if norm_username(credential.data["username"]) == username: return credential # Create new credentials. return self.async_create_credentials({"username": username}) async def async_user_meta_for_credentials( self, credentials: Credentials ) -> UserMeta: """Get extra info for this credential.""" return UserMeta(name=credentials.data["username"], is_active=True) async def async_will_remove_credentials(self, credentials: Credentials) -> None: """When credentials get removed, also remove the auth.""" if self.data is None: await self.async_initialize() assert self.data is not None try: self.data.async_remove_auth(credentials.data["username"]) await self.data.async_save() except InvalidUser: # Can happen if somehow we didn't clean up a credential pass class HassLoginFlow(LoginFlow): """Handler for the login flow.""" async def async_step_init( self, user_input: Optional[Dict[str, str]] = None ) -> Dict[str, Any]: """Handle the step of the form.""" errors = {} if user_input is not None: try: await cast(HassAuthProvider, self._auth_provider).async_validate_login( user_input["username"], user_input["password"] ) except InvalidAuth: errors["base"] = "invalid_auth" if not errors: user_input.pop("password") return await self.async_finish(user_input) schema: Dict[str, type] = OrderedDict() schema["username"] = str schema["password"] = str return self.async_show_form( step_id="init", data_schema=vol.Schema(schema), errors=errors )
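# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the auth provider above): the bcrypt +
# base64 round trip that Data.hash_password() and Data.validate_login() rely
# on.  `base64` and `bcrypt` are already imported at the top of this module;
# the demo_* helper names are hypothetical and exist only for this example.
def demo_hash_for_storage(password: str) -> str:
    """Hash a password and base64-encode it, mirroring hash_password(for_storage=True)."""
    hashed = bcrypt.hashpw(password.encode(), bcrypt.gensalt(rounds=12))
    return base64.b64encode(hashed).decode()


def demo_verify(password: str, stored: str) -> bool:
    """Check a candidate password against a stored base64-encoded bcrypt hash."""
    return bcrypt.checkpw(password.encode(), base64.b64decode(stored))


if __name__ == "__main__":
    stored = demo_hash_for_storage("correct horse battery staple")
    assert demo_verify("correct horse battery staple", stored)
    assert not demo_verify("wrong password", stored)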
# Copyright 2011 OpenStack Foundation # Copyright (c) 2011 X.commerce, a business unit of eBay Inc. # Copyright 2011 Grid Dynamics # Copyright 2011 Eldar Nugaev, Kirill Shileev, Ilya Alekseyev # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from oslo_utils import uuidutils import webob from nova.api.openstack import common from nova.api.openstack.compute.schemas.v3 import floating_ips from nova.api.openstack import extensions from nova.api.openstack import wsgi from nova.api import validation from nova import compute from nova.compute import utils as compute_utils from nova import exception from nova.i18n import _ from nova.i18n import _LW from nova import network LOG = logging.getLogger(__name__) ALIAS = 'os-floating-ips' authorize = extensions.os_compute_authorizer(ALIAS) def _translate_floating_ip_view(floating_ip): result = { 'id': floating_ip['id'], 'ip': floating_ip['address'], 'pool': floating_ip['pool'], } try: result['fixed_ip'] = floating_ip['fixed_ip']['address'] except (TypeError, KeyError, AttributeError): result['fixed_ip'] = None try: result['instance_id'] = floating_ip['fixed_ip']['instance_uuid'] except (TypeError, KeyError, AttributeError): result['instance_id'] = None return {'floating_ip': result} def _translate_floating_ips_view(floating_ips): return {'floating_ips': [_translate_floating_ip_view(ip)['floating_ip'] for ip in floating_ips]} def get_instance_by_floating_ip_addr(self, context, address): try: instance_id =\ self.network_api.get_instance_id_by_floating_address( context, address) except exception.FloatingIpNotFoundForAddress as ex: raise webob.exc.HTTPNotFound(explanation=ex.format_message()) except exception.FloatingIpMultipleFoundForAddress as ex: raise webob.exc.HTTPConflict(explanation=ex.format_message()) if instance_id: return common.get_instance(self.compute_api, context, instance_id) def disassociate_floating_ip(self, context, instance, address): try: self.network_api.disassociate_floating_ip(context, instance, address) except exception.Forbidden: raise webob.exc.HTTPForbidden() except exception.CannotDisassociateAutoAssignedFloatingIP: msg = _('Cannot disassociate auto assigned floating ip') raise webob.exc.HTTPForbidden(explanation=msg) class FloatingIPController(object): """The Floating IPs API controller for the OpenStack API.""" def __init__(self): self.compute_api = compute.API(skip_policy_check=True) self.network_api = network.API(skip_policy_check=True) super(FloatingIPController, self).__init__() @extensions.expected_errors((400, 404)) def show(self, req, id): """Return data about the given floating ip.""" context = req.environ['nova.context'] authorize(context) try: floating_ip = self.network_api.get_floating_ip(context, id) except (exception.NotFound, exception.FloatingIpNotFound): msg = _("Floating ip not found for id %s") % id raise webob.exc.HTTPNotFound(explanation=msg) except exception.InvalidID as e: raise webob.exc.HTTPBadRequest(explanation=e.format_message()) return _translate_floating_ip_view(floating_ip) 
@extensions.expected_errors(()) def index(self, req): """Return a list of floating ips allocated to a project.""" context = req.environ['nova.context'] authorize(context) floating_ips = self.network_api.get_floating_ips_by_project(context) return _translate_floating_ips_view(floating_ips) @extensions.expected_errors((403, 404)) def create(self, req, body=None): context = req.environ['nova.context'] authorize(context) pool = None if body and 'pool' in body: pool = body['pool'] try: address = self.network_api.allocate_floating_ip(context, pool) ip = self.network_api.get_floating_ip_by_address(context, address) except exception.NoMoreFloatingIps: if pool: msg = _("No more floating ips in pool %s.") % pool else: msg = _("No more floating ips available.") raise webob.exc.HTTPNotFound(explanation=msg) except exception.FloatingIpLimitExceeded: if pool: msg = _("IP allocation over quota in pool %s.") % pool else: msg = _("IP allocation over quota.") raise webob.exc.HTTPForbidden(explanation=msg) except exception.FloatingIpPoolNotFound as e: raise webob.exc.HTTPNotFound(explanation=e.format_message()) return _translate_floating_ip_view(ip) @wsgi.response(202) @extensions.expected_errors((400, 403, 404, 409)) def delete(self, req, id): context = req.environ['nova.context'] authorize(context) # get the floating ip object try: floating_ip = self.network_api.get_floating_ip(context, id) except (exception.NotFound, exception.FloatingIpNotFound): msg = _("Floating ip not found for id %s") % id raise webob.exc.HTTPNotFound(explanation=msg) except exception.InvalidID as e: raise webob.exc.HTTPBadRequest(explanation=e.format_message()) address = floating_ip['address'] # get the associated instance object (if any) instance = get_instance_by_floating_ip_addr(self, context, address) try: self.network_api.disassociate_and_release_floating_ip( context, instance, floating_ip) except exception.Forbidden: raise webob.exc.HTTPForbidden() except exception.CannotDisassociateAutoAssignedFloatingIP: msg = _('Cannot disassociate auto assigned floating ip') raise webob.exc.HTTPForbidden(explanation=msg) class FloatingIPActionController(wsgi.Controller): def __init__(self, *args, **kwargs): super(FloatingIPActionController, self).__init__(*args, **kwargs) self.compute_api = compute.API(skip_policy_check=True) self.network_api = network.API(skip_policy_check=True) @extensions.expected_errors((400, 403, 404)) @wsgi.action('addFloatingIp') @validation.schema(floating_ips.add_floating_ip) def _add_floating_ip(self, req, id, body): """Associate floating_ip to an instance.""" context = req.environ['nova.context'] authorize(context) address = body['addFloatingIp']['address'] instance = common.get_instance(self.compute_api, context, id) cached_nwinfo = compute_utils.get_nw_info_for_instance(instance) if not cached_nwinfo: msg = _('No nw_info cache associated with instance') raise webob.exc.HTTPBadRequest(explanation=msg) fixed_ips = cached_nwinfo.fixed_ips() if not fixed_ips: msg = _('No fixed ips associated to instance') raise webob.exc.HTTPBadRequest(explanation=msg) fixed_address = None if 'fixed_address' in body['addFloatingIp']: fixed_address = body['addFloatingIp']['fixed_address'] for fixed in fixed_ips: if fixed['address'] == fixed_address: break else: msg = _('Specified fixed address not assigned to instance') raise webob.exc.HTTPBadRequest(explanation=msg) if not fixed_address: fixed_address = fixed_ips[0]['address'] if len(fixed_ips) > 1: LOG.warning(_LW('multiple fixed_ips exist, using the first: ' '%s'), 
fixed_address) try: self.network_api.associate_floating_ip(context, instance, floating_address=address, fixed_address=fixed_address) except exception.FloatingIpAssociated: msg = _('floating ip is already associated') raise webob.exc.HTTPBadRequest(explanation=msg) except exception.NoFloatingIpInterface: msg = _('l3driver call to add floating ip failed') raise webob.exc.HTTPBadRequest(explanation=msg) except exception.FloatingIpNotFoundForAddress: msg = _('floating ip not found') raise webob.exc.HTTPNotFound(explanation=msg) except exception.Forbidden as e: raise webob.exc.HTTPForbidden(explanation=e.format_message()) except Exception as e: msg = _('Unable to associate floating ip %(address)s to ' 'fixed ip %(fixed_address)s for instance %(id)s. ' 'Error: %(error)s') % ( {'address': address, 'fixed_address': fixed_address, 'id': id, 'error': e}) LOG.exception(msg) raise webob.exc.HTTPBadRequest(explanation=msg) return webob.Response(status_int=202) @extensions.expected_errors((400, 403, 404, 409)) @wsgi.action('removeFloatingIp') @validation.schema(floating_ips.remove_floating_ip) def _remove_floating_ip(self, req, id, body): """Dissociate floating_ip from an instance.""" context = req.environ['nova.context'] authorize(context) address = body['removeFloatingIp']['address'] # get the floating ip object try: floating_ip = self.network_api.get_floating_ip_by_address(context, address) except exception.FloatingIpNotFoundForAddress: msg = _("floating ip not found") raise webob.exc.HTTPNotFound(explanation=msg) # get the associated instance object (if any) instance = get_instance_by_floating_ip_addr(self, context, address) # disassociate if associated if (instance and floating_ip.get('fixed_ip_id') and (uuidutils.is_uuid_like(id) and [instance.uuid == id] or [instance.id == id])[0]): try: disassociate_floating_ip(self, context, instance, address) except exception.FloatingIpNotAssociated: msg = _('Floating ip is not associated') raise webob.exc.HTTPBadRequest(explanation=msg) return webob.Response(status_int=202) else: msg = _("Floating ip %(address)s is not associated with instance " "%(id)s.") % {'address': address, 'id': id} raise webob.exc.HTTPConflict(explanation=msg) class FloatingIps(extensions.V3APIExtensionBase): """Floating IPs support.""" name = "FloatingIps" alias = ALIAS version = 1 def get_resources(self): resource = [extensions.ResourceExtension(ALIAS, FloatingIPController())] return resource def get_controller_extensions(self): controller = FloatingIPActionController() extension = extensions.ControllerExtension(self, 'servers', controller) return [extension]
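# ---------------------------------------------------------------------------
# Illustrative, self-contained sketch (not part of the extension above) of the
# view translation performed by _translate_floating_ip_view(): missing or
# detached fixed IPs fall back to None via the try/except blocks.  The
# demo_translate() helper and the sample dicts are hypothetical.
def demo_translate(floating_ip):
    result = {
        'id': floating_ip['id'],
        'ip': floating_ip['address'],
        'pool': floating_ip['pool'],
    }
    try:
        result['fixed_ip'] = floating_ip['fixed_ip']['address']
    except (TypeError, KeyError):
        result['fixed_ip'] = None
    try:
        result['instance_id'] = floating_ip['fixed_ip']['instance_uuid']
    except (TypeError, KeyError):
        result['instance_id'] = None
    return {'floating_ip': result}


if __name__ == "__main__":
    associated = {'id': '1', 'address': '10.0.0.5', 'pool': 'public',
                  'fixed_ip': {'address': '192.168.0.3',
                               'instance_uuid': 'example-instance-uuid'}}
    unassociated = {'id': '2', 'address': '10.0.0.6', 'pool': 'public',
                    'fixed_ip': None}
    print(demo_translate(associated))
    print(demo_translate(unassociated))   # fixed_ip and instance_id are None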
#------------------------------------------------------------------------------ # # Copyright (c) 2006, Enthought, Inc. # All rights reserved. # # This software is provided without warranty under the terms of the BSD # license included in enthought/LICENSE.txt and may be redistributed only # under the conditions described in the aforementioned license. The license # is also available online at http://www.enthought.com/licenses/BSD.txt # # Thanks for using Enthought open source! # # Authors: Prabhu Ramachandran <[email protected]>, # Dave Peterson <[email protected]> # #------------------------------------------------------------------------------ """ A VTK interactor scene which provides a convenient toolbar that allows the user to set the camera view, turn on the axes indicator, etc. """ # System imports. from os.path import dirname import wx # Enthought library imports. from pyface.api import ImageResource, FileDialog, OK from pyface.action.api import ToolBarManager, Group, Action from tvtk.api import tvtk from traits.api import Instance, false, List, Either # Local imports. from .scene import Scene ########################################################################### # 'DecoratedScene' class ########################################################################### class DecoratedScene(Scene): """A VTK interactor scene which provides a convenient toolbar that allows the user to set the camera view, turn on the axes indicator etc. """ ####################################################################### # Traits ####################################################################### if hasattr(tvtk, 'OrientationMarkerWidget'): # The tvtk orientation marker widget. This only exists in VTK # 5.x. marker = Instance(tvtk.OrientationMarkerWidget, ()) # The tvtk axes that will be shown for the orientation. axes = Instance(tvtk.AxesActor, ()) else: marker = None axes = None # Determine if the orientation axis is shown or not. show_axes = false # The list of actions represented in the toolbar actions = List(Either(Action, Group)) ########################################################################## # `object` interface ########################################################################## def __init__(self, parent, **traits): super(DecoratedScene, self).__init__(parent, **traits) self._setup_axes_marker() def __get_pure_state__(self): """Allows us to pickle the scene.""" # The control attribute is not picklable since it is a VTK # object so we remove it. d = super(DecoratedScene, self).__get_pure_state__() for x in ['_content', '_panel', '_sizer', '_tool_bar', 'actions']: d.pop(x, None) return d ########################################################################## # Non-public interface. ########################################################################## def _create_control(self, parent): """ Create the toolkit-specific control that represents the widget. Overridden to wrap the Scene control within a panel that also contains a toolbar. """ # Create a panel as a wrapper of the scene toolkit control. This # allows us to also add additional controls. self._panel = wx.Panel(parent, -1, style=wx.CLIP_CHILDREN) self._sizer = wx.BoxSizer(wx.VERTICAL) self._panel.SetSizer(self._sizer) # Add our toolbar to the panel. 
tbm = self._get_tool_bar_manager() self._tool_bar = tbm.create_tool_bar(self._panel) self._sizer.Add(self._tool_bar, 0, wx.EXPAND) # Create the actual scene content self._content = super(DecoratedScene, self)._create_control( self._panel) self._sizer.Add(self._content, 1, wx.EXPAND) # Ensure the child controls are laid-out. self._sizer.Layout() return self._panel def _setup_axes_marker(self): axes = self.axes if axes is None: # For VTK versions < 5.0. return axes.set( normalized_tip_length=(0.4, 0.4, 0.4), normalized_shaft_length=(0.6, 0.6, 0.6), shaft_type='cylinder' ) p = axes.x_axis_caption_actor2d.caption_text_property axes.y_axis_caption_actor2d.caption_text_property = p axes.z_axis_caption_actor2d.caption_text_property = p p.set(color=(1,1,1), shadow=False, italic=False) self._background_changed(self.background) self.marker.set(key_press_activation=False) self.marker.orientation_marker = axes def _get_tool_bar_manager(self): """ Returns the tool_bar_manager for this scene. """ tbm = ToolBarManager( *self.actions ) return tbm def _get_image_path(self): """Returns the directory which contains the images used by the toolbar.""" # So that we can find the images. import tvtk.pyface.api return dirname(tvtk.pyface.api.__file__) def _toggle_projection(self): """ Toggle between perspective and parallel projection, this is used for the toolbar. """ if self._panel is not None: self.parallel_projection = not self.parallel_projection def _toggle_axes(self): """Used by the toolbar to turn on/off the axes indicator. """ if self._panel is not None: self.show_axes = not self.show_axes def _save_snapshot(self): """Invoked by the toolbar menu to save a snapshot of the scene to an image. Note that the extension of the filename determines what image type is saved. The default is PNG. """ if self._panel is not None: wildcard = "PNG images (*.png)|*.png|Determine by extension (*.*)|*.*" dialog = FileDialog( parent = self._panel, title = 'Save scene to image', action = 'save as', default_filename = "snapshot.png", wildcard = wildcard ) if dialog.open() == OK: # The extension of the path will determine the actual # image type saved. self.save(dialog.path) def _configure_scene(self): """Invoked when the toolbar icon for configuration is clicked. """ self.edit_traits() ###################################################################### # Trait handlers. ###################################################################### def _show_axes_changed(self): marker = self.marker if (self._vtk_control is not None) and (marker is not None): if not self.show_axes: marker.interactor = None marker.enabled = False else: marker.interactor = self.interactor marker.enabled = True self.render() def _background_changed(self, value): # Depending on the background, this sets the axes text and # outline color to something that should be visible. 
axes = self.axes if (self._vtk_control is not None) and (axes is not None): p = self.axes.x_axis_caption_actor2d.caption_text_property m = self.marker s = value[0] + value[1] + value[2] if s <= 1.0: p.color = (1,1,1) m.set_outline_color(1,1,1) else: p.color = (0,0,0) m.set_outline_color(0,0,0) self.render() def _actions_default(self): return [ Group( Action( image = ImageResource('16x16/x-axis', search_path = [self._get_image_path()], ), tooltip = "View along the -X axis", on_perform = self.x_minus_view, ), Action( image = ImageResource('16x16/x-axis', search_path = [self._get_image_path()], ), tooltip = "View along the +X axis", on_perform = self.x_plus_view, ), Action( image = ImageResource('16x16/y-axis', search_path = [self._get_image_path()], ), tooltip = "View along the -Y axis", on_perform = self.y_minus_view, ), Action( image = ImageResource('16x16/y-axis', search_path = [self._get_image_path()], ), tooltip = "View along the +Y axis", on_perform = self.y_plus_view, ), Action( image = ImageResource('16x16/z-axis', search_path = [self._get_image_path()], ), tooltip = "View along the -Z axis", on_perform = self.z_minus_view, ), Action( image = ImageResource('16x16/z-axis', search_path = [self._get_image_path()], ), tooltip = "View along the +Z axis", on_perform = self.z_plus_view, ), Action( image = ImageResource('16x16/isometric', search_path = [self._get_image_path()], ), tooltip = "Obtain an isometric view", on_perform = self.isometric_view, ), ), Group( Action( image = ImageResource('16x16/parallel', search_path = [self._get_image_path()], ), tooltip = 'Toggle parallel projection', style="toggle", on_perform = self._toggle_projection, checked = self.parallel_projection, ), Action( image = ImageResource('16x16/origin_glyph', search_path = [self._get_image_path()], ), tooltip = 'Toggle axes indicator', style="toggle", enabled=(self.marker is not None), on_perform = self._toggle_axes, checked = self.show_axes, ), Action( image = ImageResource('16x16/fullscreen', search_path = [self._get_image_path()], ), tooltip = 'Full Screen (press "q" or "e" or ESC to exit fullscreen)', style="push", on_perform = self._full_screen_fired, ), ), Group( Action( image = ImageResource('16x16/save', search_path = [self._get_image_path()], ), tooltip = "Save a snapshot of this scene", on_perform = self._save_snapshot, ), Action( image = ImageResource('16x16/configure', search_path = [self._get_image_path()], ), tooltip = 'Configure the scene', style="push", on_perform = self._configure_scene, ), ), ]
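# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the class above): adding one more toolbar
# entry by overriding the `_actions_default` trait initializer.  The subclass
# name is hypothetical; instantiating it still needs a running wx application
# and VTK, exactly like DecoratedScene itself.  `Action` and `Group` are the
# pyface classes already imported at the top of this module.
class SceneWithIsometricButton(DecoratedScene):
    def _actions_default(self):
        actions = super(SceneWithIsometricButton, self)._actions_default()
        actions.append(
            Group(
                Action(
                    tooltip="Reset the camera to the isometric view",
                    on_perform=self.isometric_view,
                ),
            )
        )
        return actions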
import pygame import numpy # import is unused but required or we fail later from pygame.constants import K_DOWN, K_UP, KEYDOWN, KEYUP, QUIT import pygame.surfarray import pygame.key def function_intercept(intercepted_func, intercepting_func): """ Intercepts a method call and calls the supplied intercepting_func with the result of it's call and it's arguments Example: def get_event(result_of_real_event_get, *args, **kwargs): # do work return result_of_real_event_get pygame.event.get = function_intercept(pygame.event.get, get_event) :param intercepted_func: The function we are going to intercept :param intercepting_func: The function that will get called after the intercepted func. It is supplied the return value of the intercepted_func as the first argument and it's args and kwargs. :return: a function that combines the intercepting and intercepted function, should normally be set to the intercepted_functions location """ def wrap(*args, **kwargs): real_results = intercepted_func(*args, **kwargs) # call the function we are intercepting and get it's result intercepted_results = intercepting_func(real_results, *args, **kwargs) # call our own function a return intercepted_results return wrap class PyGamePlayer(object): def __init__(self, force_game_fps=10, run_real_time=False, pass_quit_event=True): """ Abstract class for learning agents, such as running reinforcement learning neural nets against PyGame games. The get_keys_pressed and get_feedback methods must be overriden by a subclass to use Call start method to start playing intercepting PyGame and training our machine :param force_game_fps: Fixes the pygame timer functions so the ai will get input as if it were running at this fps :type force_game_fps: int :param run_real_time: If True the game will actually run at the force_game_fps speed :type run_real_time: bool :param pass_quit_event: If True the ai will be asked for the quit event :type pass_quit_event: bool """ self.force_game_fps = force_game_fps """Fixes the pygame timer functions so the ai will get input as if it were running at this fps""" self.run_real_time = run_real_time """If True the game will actually run at the force_game_fps speed""" self.pass_quit_event = pass_quit_event """Decides whether the quit event should be passed on to the game""" self._keys_pressed = [] self._last_keys_pressed = [] self._playing = False self._default_flip = pygame.display.flip self._default_update = pygame.display.update self._default_event_get = pygame.event.get self._default_time_clock = pygame.time.Clock self._default_get_ticks = pygame.time.get_ticks self._game_time = 0.0 def get_keys_pressed(self, screen_array, feedback, terminal): """ Called whenever the screen buffer is refreshed. returns the keys we want pressed in the next until the next screen refresh :param screen_array: 3d numpy.array of float. screen_width * screen_height * rgb :param feedback: result of call to get_feedback :param terminal: boolean, True if we have reached a terminal state, meaning the next frame will be a restart :return: a list of the integer values of the keys we want pressed. 
            See pygame.constants for values
        """
        raise NotImplementedError("Please override this method")

    def get_feedback(self):
        """
        Overridden method should hook into game events to give feedback to the learning agent

        :return: First = value we want to give as reward/punishment to our learning agent
                 Second = Boolean true if we have reached a terminal state
        :rtype: tuple (float, boolean)
        """
        raise NotImplementedError("Please override this method")

    def start(self):
        """
        Start playing the game. We will now start listening for screen updates calling our play and reward functions
        and returning our intercepted key presses
        """
        if self._playing:
            raise Exception("Already playing")

        pygame.display.flip = function_intercept(pygame.display.flip, self._on_screen_update)
        pygame.display.update = function_intercept(pygame.display.update, self._on_screen_update)
        pygame.event.get = function_intercept(pygame.event.get, self._on_event_get)
        pygame.time.Clock = function_intercept(pygame.time.Clock, self._on_time_clock)
        pygame.time.get_ticks = function_intercept(pygame.time.get_ticks, self.get_game_time_ms)
        # TODO: handle pygame.time.set_timer...

        self._playing = True

    def stop(self):
        """
        Stop playing the game. Will try and return PyGame to the state it was in before we started
        """
        if not self._playing:
            raise Exception("Already stopped")

        pygame.display.flip = self._default_flip
        pygame.display.update = self._default_update
        pygame.event.get = self._default_event_get
        pygame.time.Clock = self._default_time_clock
        pygame.time.get_ticks = self._default_get_ticks

        self._playing = False

    @property
    def playing(self):
        """
        Returns if we are in a state where we are playing/intercepting PyGame calls
        :return: boolean
        """
        return self._playing

    @playing.setter
    def playing(self, value):
        if self._playing == value:
            return
        if self._playing:
            self.stop()
        else:
            self.start()

    def get_ms_per_frame(self):
        return 1000.0 / self.force_game_fps

    def get_game_time_ms(self):
        return self._game_time

    def _on_time_clock(self, real_clock, *args, **kwargs):
        return self._FixedFPSClock(self, real_clock)

    def _on_screen_update(self, _, *args, **kwargs):
        surface_array = pygame.surfarray.array3d(pygame.display.get_surface())
        reward, terminal = self.get_feedback()
        keys = self.get_keys_pressed(surface_array, reward, terminal)
        self._last_keys_pressed = self._keys_pressed
        self._keys_pressed = keys

        # now we have processed a frame increment the game timer
        self._game_time += self.get_ms_per_frame()

    def _on_event_get(self, _, *args, **kwargs):
        key_up_events = []

        if len(self._last_keys_pressed) > 0:
            diff_list = list(set(self._last_keys_pressed) - set(self._keys_pressed))
            key_up_events = [pygame.event.Event(KEYUP, {"key": x}) for x in diff_list]

        key_down_events = [pygame.event.Event(KEYDOWN, {"key": x}) for x in self._keys_pressed]

        result = []

        # have to deal with arg type filters
        if args:
            if hasattr(args[0], "__iter__"):
                args = args[0]

            for type_filter in args:
                if type_filter == QUIT:
                    if self.pass_quit_event:
                        for e in _:
                            if e.type == QUIT:
                                result.append(e)
                    else:
                        pass  # never quit
                elif type_filter == KEYUP:
                    result = result + key_up_events
                elif type_filter == KEYDOWN:
                    result = result + key_down_events
        else:
            result = key_down_events + key_up_events
            if self.pass_quit_event:
                for e in _:
                    if e.type == QUIT:
                        result.append(e)

        return result

    def __enter__(self):
        self.start()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.stop()

    class _FixedFPSClock(object):
        def __init__(self, pygame_player, real_clock):
            self._pygame_player = pygame_player
            self._real_clock = real_clock

        def tick(self, _=None):
            if self._pygame_player.run_real_time:
                return self._real_clock.tick(self._pygame_player.force_game_fps)
            else:
                return self._pygame_player.get_ms_per_frame()

        def tick_busy_loop(self, _=None):
            if self._pygame_player.run_real_time:
                return self._real_clock.tick_busy_loop(self._pygame_player.force_game_fps)
            else:
                return self._pygame_player.get_ms_per_frame()

        def get_time(self):
            return self._pygame_player.get_game_time_ms()

        def get_raw_time(self):
            return self._pygame_player.get_game_time_ms()

        def get_fps(self):
            # frames per second is 1000 ms divided by the ms spent per frame
            # (1.0 / ms_per_frame would always truncate to 0 for any fps <= 1000)
            return int(1000.0 / self._pygame_player.get_ms_per_frame())
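# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the module above): the smallest possible
# agent, overriding the two abstract methods of PyGamePlayer.  The fixed
# "hold the down key" policy and the zero reward are placeholders; a real
# agent would inspect screen_array and wire get_feedback() into the game's
# own scoring.
class HoldDownKeyPlayer(PyGamePlayer):
    def get_keys_pressed(self, screen_array, feedback, terminal):
        # screen_array is width x height x rgb; ignored by this trivial policy
        return [K_DOWN]

    def get_feedback(self):
        # no reward signal, never a terminal state
        return 0.0, False


# Typical use: enter the context manager (which patches PyGame), then run the
# game's normal main loop.  `run_the_game` is a hypothetical entry point.
# with HoldDownKeyPlayer(force_game_fps=10, run_real_time=False) as player:
#     run_the_game()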
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from functools import partial from oslo_config import cfg from oslo_log import log as logging from oslo_utils import uuidutils from karbor.common import constants from karbor import exception from karbor.services.protection.client_factory import ClientFactory from karbor.services.protection import protection_plugin from karbor.services.protection.protection_plugins.pod \ import pod_plugin_schemas from karbor.services.protection.protection_plugins import utils CONF = cfg.CONF LOG = logging.getLogger(__name__) pod_backup_opts = [ cfg.IntOpt( 'poll_interval', default=15, help='Poll interval for Pod backup status' ), ] class ProtectOperation(protection_plugin.Operation): def on_main(self, checkpoint, resource, context, parameters, **kwargs): pod_id = resource.id pod_name = resource.name bank_section = checkpoint.get_resource_bank_section(pod_id) k8s_client = ClientFactory.create_client("k8s", context) resource_definition = {"resource_id": pod_id} LOG.info("Creating pod backup, id: %(pod_id)s) name: " "%(pod_name)s.", {"pod_id": pod_id, "pod_name": pod_name}) try: bank_section.update_object("status", constants.RESOURCE_STATUS_PROTECTING) # get metadata about pod pod_namespace, k8s_pod_name = pod_name.split(":") pod = k8s_client.read_namespaced_pod( k8s_pod_name, pod_namespace) resource_definition["resource_name"] = pod_name resource_definition["namespace"] = pod_namespace mounted_volumes_list = self._get_mounted_volumes( k8s_client, pod, pod_namespace) containers_list = self._get_containers(pod) # save all pod's metadata pod_metadata = { 'apiVersion': pod.api_version, 'kind': pod.kind, 'metadata': { 'labels': pod.metadata.labels, 'name': pod.metadata.name, 'namespace': pod.metadata.namespace, }, 'spec': { 'containers': containers_list, 'volumes': mounted_volumes_list, 'restartPolicy': pod.spec.restart_policy } } resource_definition["pod_metadata"] = pod_metadata LOG.debug("Creating pod backup, pod_metadata: %s.", pod_metadata) bank_section.update_object("metadata", resource_definition) bank_section.update_object("status", constants.RESOURCE_STATUS_AVAILABLE) LOG.info("Finish backup pod, pod_id: %s.", pod_id) except Exception as err: LOG.exception("Create pod backup failed, pod_id: %s.", pod_id) bank_section.update_object("status", constants.RESOURCE_STATUS_ERROR) raise exception.CreateResourceFailed( name="Pod Backup", reason=err, resource_id=pod_id, resource_type=constants.POD_RESOURCE_TYPE) def _get_mounted_volumes(self, k8s_client, pod, pod_namespace): mounted_volumes_list = [] for volume in pod.spec.volumes: volume_pvc = volume.persistent_volume_claim volume_cinder = volume.cinder volume_pvc_name = volume.name if volume_pvc: pvc_name = volume_pvc.claim_name pvc = k8s_client.read_namespaced_persistent_volume_claim( pvc_name, pod_namespace) pv_name = pvc.spec.volume_name if pv_name: pv = k8s_client.read_persistent_volume(pv_name) if pv.spec.cinder: pod_cinder_volume = { 'name': volume_pvc_name, 'cinder': { "volumeID": pv.spec.cinder.volume_id, "fsType": 
pv.spec.cinder.fs_type, "readOnly": pv.spec.cinder.read_only } } mounted_volumes_list.append(pod_cinder_volume) elif volume_cinder: pod_cinder_volume = { 'name': volume_pvc_name, 'cinder': { "volumeID": volume_cinder.volume_id, "fsType": volume_cinder.fs_type, "readOnly": volume_cinder.read_only } } mounted_volumes_list.append(pod_cinder_volume) return mounted_volumes_list def _get_containers(self, pod): containers_list = [] for spec_container in pod.spec.containers: resources = (spec_container.resources.to_dict() if spec_container.resources else None) volume_mounts_list = [] if spec_container.volume_mounts: for spec_volume_mount in spec_container.volume_mounts: if 'serviceaccount' in spec_volume_mount.mount_path: continue volume_mount = { 'name': spec_volume_mount.name, 'mountPath': spec_volume_mount.mount_path, 'readOnly': spec_volume_mount.read_only, } volume_mounts_list.append(volume_mount) container = { 'command': spec_container.command, 'image': spec_container.image, 'name': spec_container.name, 'resources': resources, 'volumeMounts': volume_mounts_list } containers_list.append(container) return containers_list class DeleteOperation(protection_plugin.Operation): def on_main(self, checkpoint, resource, context, parameters, **kwargs): resource_id = resource.id bank_section = checkpoint.get_resource_bank_section(resource_id) LOG.info("Deleting pod backup, pod_id: %s.", resource_id) try: bank_section.update_object("status", constants.RESOURCE_STATUS_DELETING) objects = bank_section.list_objects() for obj in objects: if obj == "status": continue bank_section.delete_object(obj) bank_section.update_object("status", constants.RESOURCE_STATUS_DELETED) LOG.info("Finish delete pod, pod_id: %s.", resource_id) except Exception as err: LOG.error("Delete backup failed, pod_id: %s.", resource_id) bank_section.update_object("status", constants.RESOURCE_STATUS_ERROR) raise exception.DeleteResourceFailed( name="Pod Backup", reason=err, resource_id=resource_id, resource_type=constants.POD_RESOURCE_TYPE) class VerifyOperation(protection_plugin.Operation): def __init__(self): super(VerifyOperation, self).__init__() def on_main(self, checkpoint, resource, context, parameters, **kwargs): original_pod_id = resource.id bank_section = checkpoint.get_resource_bank_section( original_pod_id) LOG.info('Verifying the pod backup, pod_id: %s.', original_pod_id) update_method = partial( utils.update_resource_verify_result, kwargs.get('verify'), resource.type, original_pod_id) backup_status = bank_section.get_object("status") if backup_status == constants.RESOURCE_STATUS_AVAILABLE: update_method(constants.RESOURCE_STATUS_AVAILABLE) else: reason = ('The status of pod backup status is %s.' 
% backup_status) update_method(backup_status, reason) raise exception.VerifyResourceFailed( name="Pod backup", reason=reason, resource_id=original_pod_id, resource_type=resource.type) class RestoreOperation(protection_plugin.Operation): def __init__(self, poll_interval): super(RestoreOperation, self).__init__() self._interval = poll_interval def on_complete(self, checkpoint, resource, context, parameters, **kwargs): original_pod_id = resource.id LOG.info("Restoring pod backup, pod_id: %s.", original_pod_id) update_method = None try: resource_definition = checkpoint.get_resource_bank_section( original_pod_id).get_object("metadata") LOG.debug("Restoring pod backup, metadata: %s.", resource_definition) k8s_client = ClientFactory.create_client("k8s", context) new_resources = kwargs.get("new_resources") # restore pod new_pod_name = self._restore_pod_instance( k8s_client, new_resources, original_pod_id, parameters.get( "restore_name", "karbor-restored-pod-%s" % uuidutils.generate_uuid()), resource_definition) update_method = partial(utils.update_resource_restore_result, kwargs.get('restore'), resource.type, new_pod_name) update_method(constants.RESOURCE_STATUS_RESTORING) pod_namespace = resource_definition["namespace"] self._wait_pod_to_running(k8s_client, new_pod_name, pod_namespace) new_resources[original_pod_id] = new_pod_name update_method(constants.RESOURCE_STATUS_AVAILABLE) LOG.info("Finish restore pod, pod_id: %s.", original_pod_id) except Exception as e: if update_method: update_method(constants.RESOURCE_STATUS_ERROR, str(e)) LOG.exception("Restore pod backup failed, pod_id: %s.", original_pod_id) raise exception.RestoreResourceFailed( name="Pod Backup", reason=e, resource_id=original_pod_id, resource_type=constants.POD_RESOURCE_TYPE ) def _restore_pod_instance(self, k8s_client, new_resources, original_id, restore_name, resource_definition): pod_namespace = resource_definition["namespace"] pod_metadata = resource_definition["pod_metadata"] mounted_volumes_list = pod_metadata['spec'].get("volumes", None) if mounted_volumes_list: for mounted_volume in mounted_volumes_list: cinder_volume = mounted_volume.get("cinder", None) if cinder_volume: original_volume_id = cinder_volume["volumeID"] cinder_volume["volumeID"] = new_resources.get( original_volume_id) pod_metadata["metadata"]["name"] = restore_name pod_manifest = pod_metadata LOG.debug("Restoring pod instance, pod_manifest: %s.", pod_manifest) try: pod = k8s_client.create_namespaced_pod(body=pod_manifest, namespace=pod_namespace) except Exception as ex: LOG.error('Error creating pod (pod_id:%(pod_id)s): ' '%(reason)s', {'server_id': original_id, 'reason': ex}) raise return pod.metadata.name def _wait_pod_to_running(self, k8s_client, pod_name, pod_namespace): def _get_pod_status(): try: pod = k8s_client.read_namespaced_pod(name=pod_name, namespace=pod_namespace) return pod.status.phase except Exception as ex: LOG.error('Fetch pod(%(pod_name)s) failed, ' 'reason: %(reason)s', {'pod_name': pod_name, 'reason': ex}) return 'ERROR' is_success = utils.status_poll( _get_pod_status, interval=self._interval, success_statuses={'Running', }, failure_statuses={'ERROR', 'Failed', 'Unknown'}, ignore_statuses={'Pending'}, ) if not is_success: raise Exception('The pod does not run successfully') class PodProtectionPlugin(protection_plugin.ProtectionPlugin): _SUPPORT_RESOURCE_TYPES = [constants.POD_RESOURCE_TYPE] def __init__(self, config=None): super(PodProtectionPlugin, self).__init__(config) self._config.register_opts(pod_backup_opts, 
'pod_backup_protection_plugin') self._poll_interval = ( self._config.pod_backup_protection_plugin.poll_interval) @classmethod def get_supported_resources_types(cls): return cls._SUPPORT_RESOURCE_TYPES @classmethod def get_options_schema(cls, resource_type): return pod_plugin_schemas.OPTIONS_SCHEMA @classmethod def get_restore_schema(cls, resource_type): return pod_plugin_schemas.RESTORE_SCHEMA @classmethod def get_verify_schema(cls, resources_type): return pod_plugin_schemas.VERIFY_SCHEMA @classmethod def get_saved_info_schema(cls, resource_type): return pod_plugin_schemas.SAVED_INFO_SCHEMA @classmethod def get_saved_info(cls, metadata_store, resource): pass def get_protect_operation(self, resource): return ProtectOperation() def get_restore_operation(self, resource): return RestoreOperation(self._poll_interval) def get_verify_operation(self, resource): return VerifyOperation() def get_delete_operation(self, resource): return DeleteOperation()
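# ---------------------------------------------------------------------------
# Illustrative, self-contained sketch (not karbor's actual utils code) of the
# polling pattern that RestoreOperation._wait_pod_to_running() delegates to
# utils.status_poll(): call a status function at a fixed interval until it
# reports success, failure, or runs out of attempts, skipping over ignorable
# "still pending" states.  demo_status_poll() and max_polls are hypothetical.
import time


def demo_status_poll(get_status, interval, success_statuses, failure_statuses,
                     ignore_statuses, max_polls=120):
    for _ in range(max_polls):
        status = get_status()
        if status in success_statuses:
            return True
        if status in failure_statuses:
            return False
        if status in ignore_statuses:
            time.sleep(interval)
    return False


if __name__ == "__main__":
    phases = iter(['Pending', 'Pending', 'Running'])
    print(demo_status_poll(lambda: next(phases), interval=0,
                           success_statuses={'Running'},
                           failure_statuses={'ERROR', 'Failed', 'Unknown'},
                           ignore_statuses={'Pending'}))   # True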
import numpy as np import matplotlib.pyplot as plt import matplotlib.patches as patches from matplotlib.path import Path class HivePlot(object): """ The HivePlot class will take in the following and return a hive plot: - nodes: a dictionary of nodes, in which there are at most 3 keys in the dictionary, and the nodes are sorted in a pre-specified order. One common grouping is by a node attribute and one possible ordering is by degree centrality. - edges: a dictionary of {group:edgelist}, where each edgelist is a list of (u,v,d) tuples (in NetworkX style), where u and v are the nodes to join, and d are the node attributes. The user will have to pre-sort and pre-group the nodes, and pre-map the edge color groupings. This code will determine the positioning and exact drawing of the edges. Hive plots are non-trivial to construct. These are the most important features one has to consider: - Grouping of nodes: - at most 3 groups. - Ordering of nodes: - must have an ordinal or continuous node attribute - Cross-group edges: - Undirected is easier to draw than directed. - Directed is possible. - Within-group edges: - Requires the duplication of an axis. """ def __init__(self, nodes, edges, node_colormap, edge_colormap=None, linewidth=0.5, is_directed=False, scale=10, ax=None, fig=None): super(HivePlot, self).__init__() self.nodes = nodes # dictionary of {group:[ordered_nodes] list} self.edges = edges # dictionary of {group:[(u,v,d)] tuples list} # boolean of whether graph is supposed to be directed or not self.is_directed = is_directed if fig is None: self.fig = plt.figure(figsize=(8, 8)) else: self.fig = fig if ax is None: self.ax = self.fig.add_subplot(111) else: self.ax = ax self.scale = scale self.dot_radius = self.scale / float(4) self.internal_radius = scale ** 2 self.linewidth = linewidth self.node_colormap = node_colormap # dictionary of node_group:color self.edge_colormap = edge_colormap # dictionary of edge_group:color self.major_angle = 0 self.initialize_major_angle() self.minor_angle = 0 self.initialize_minor_angle() """ Steps in graph drawing: 1. Determine the number of groups. This in turn determines the number of axes to draw, and the major angle between the axes. 2. For each group, determine whether there are edges between members of the same group. a. If True: - Duplicate the axis by shifting off by a minor angle. - Draw each axis line, with length proportional to number of nodes in the group: - One is at major angle + minor angle - One is at major angle - minor angle - Draw in the nodes. b. If False: - Draw the axis line at the major angle. - Length of axis line is proportional to the number of nodes in the group - Draw in the nodes. 3. Determine which node group is at the 0 radians position. The angles that are calculated will have to be adjusted for whether it is at 2*pi radians or at 0 radians, depending on the angle differences. 4. For each edge, determine the radial position of the start node and end node. Compute the middle angle and the mean radius of the start and end nodes. """ def simplified_edges(self): """ A generator for getting all of the edges without consuming extra memory. """ for group, edgelist in self.edges.items(): for u, v, d in edgelist: yield (u, v) def initialize_major_angle(self): """ Computes the major angle: 2pi radians / number of groups. """ num_groups = len(self.nodes.keys()) self.major_angle = 2 * np.pi / num_groups def initialize_minor_angle(self): """ Computes the minor angle: 2pi radians / 3 * number of groups. 
""" num_groups = len(self.nodes.keys()) self.minor_angle = 2 * np.pi / (6 * num_groups) def set_minor_angle(self, angle): """ Sets the major angle of the hive plot. I have restricted this to be less than the major angle. """ assert angle < self.major_angle,\ "Minor angle cannot be greater than the major angle." self.minor_angle = angle def plot_radius(self): """ Computes the plot radius: maximum of length of each list of nodes. """ plot_rad = 0 for group, nodelist in self.nodes.items(): proposed_radius = len(nodelist) * self.scale if proposed_radius > plot_rad: plot_rad = proposed_radius return plot_rad + self.internal_radius def axis_length(self, group): """ Computes the length of the axis for a given group. """ return len(self.nodes[group]) def has_edge_within_group(self, group): """ Checks whether there are within-group edges or not. """ assert group in self.nodes.keys(),\ "{0} not one of the group of nodes".format(group) nodelist = self.nodes[group] for n1, n2 in self.simplified_edges(): if n1 in nodelist and n2 in nodelist: return True def plot_axis(self, rs, theta): """ Renders the axis. """ xs, ys = get_cartesian(rs, theta) self.ax.plot(xs, ys, 'black', alpha=0.3) def plot_nodes(self, nodelist, theta, group): """ Plots nodes to screen. """ for i, node in enumerate(nodelist): r = self.internal_radius + i * self.scale x, y = get_cartesian(r, theta) circle = plt.Circle(xy=(x, y), radius=self.dot_radius, color=self.node_colormap[group], linewidth=0) self.ax.add_patch(circle) def group_theta(self, group): """ Computes the theta along which a group's nodes are aligned. """ for i, g in enumerate(self.nodes.keys()): if g == group: break return i * self.major_angle def add_axes_and_nodes(self): """ Adds the axes (i.e. 2 or 3 axes, not to be confused with matplotlib axes) and the nodes that belong to each axis. """ for i, (group, nodelist) in enumerate(self.nodes.items()): theta = self.group_theta(group) if self.has_edge_within_group(group): theta = theta - self.minor_angle self.plot_nodes(nodelist, theta, group) theta = theta + 2 * self.minor_angle self.plot_nodes(nodelist, theta, group) else: self.plot_nodes(nodelist, theta, group) def find_node_group_membership(self, node): """ Identifies the group for which a node belongs to. """ for group, nodelist in self.nodes.items(): if node in nodelist: return group def get_idx(self, node): """ Finds the index of the node in the sorted list. """ group = self.find_node_group_membership(node) return self.nodes[group].index(node) def node_radius(self, node): """ Computes the radial position of the node. """ return self.get_idx(node) * self.scale + self.internal_radius def node_theta(self, node): """ Convenience function to find the node's theta angle. """ group = self.find_node_group_membership(node) return self.group_theta(group) def draw_edge(self, n1, n2, d, group): """ Renders the given edge (n1, n2) to the plot. 
""" start_radius = self.node_radius(n1) start_theta = self.node_theta(n1) end_radius = self.node_radius(n2) end_theta = self.node_theta(n2) start_theta, end_theta = self.correct_angles(start_theta, end_theta) start_theta, end_theta = self.adjust_angles(n1, start_theta, n2, end_theta) middle1_radius = np.min([start_radius, end_radius]) middle2_radius = np.max([start_radius, end_radius]) if start_radius > end_radius: middle1_radius, middle2_radius = middle2_radius, middle1_radius middle1_theta = np.mean([start_theta, end_theta]) middle2_theta = np.mean([start_theta, end_theta]) startx, starty = get_cartesian(start_radius, start_theta) middle1x, middle1y = get_cartesian(middle1_radius, middle1_theta) middle2x, middle2y = get_cartesian(middle2_radius, middle2_theta) # middlex, middley = get_cartesian(middle_radius, middle_theta) endx, endy = get_cartesian(end_radius, end_theta) verts = [(startx, starty), (middle1x, middle1y), (middle2x, middle2y), (endx, endy)] codes = [Path.MOVETO, Path.CURVE4, Path.CURVE4, Path.CURVE4] path = Path(verts, codes) if self.edge_colormap is None: edgecolor = 'black' else: edgecolor = self.edge_colormap[group] patch = patches.PathPatch(path, lw=self.linewidth, facecolor='none', edgecolor=edgecolor, alpha=0.3) self.ax.add_patch(patch) def add_edges(self): """ Draws all of the edges in the graph. """ for group, edgelist in self.edges.items(): for (u, v, d) in edgelist: self.draw_edge(u, v, d, group) def draw(self): """ The master function that is called that draws everything. """ self.ax.set_xlim(-self.plot_radius(), self.plot_radius()) self.ax.set_ylim(-self.plot_radius(), self.plot_radius()) self.add_axes_and_nodes() self.add_edges() self.ax.axis('off') def adjust_angles(self, start_node, start_angle, end_node, end_angle): """ This function adjusts the start and end angles to correct for duplicated axes. """ start_group = self.find_node_group_membership(start_node) end_group = self.find_node_group_membership(end_node) if start_group == 0 and end_group == len(self.nodes.keys())-1: if self.has_edge_within_group(start_group): start_angle = correct_negative_angle(start_angle - self.minor_angle) if self.has_edge_within_group(end_group): end_angle = correct_negative_angle(end_angle + self.minor_angle) elif start_group == len(self.nodes.keys())-1 and end_group == 0: if self.has_edge_within_group(start_group): start_angle = correct_negative_angle(start_angle + self.minor_angle) if self.has_edge_within_group(end_group): end_angle = correct_negative_angle(end_angle - self.minor_angle) elif start_group < end_group: if self.has_edge_within_group(end_group): end_angle = correct_negative_angle(end_angle - self.minor_angle) if self.has_edge_within_group(start_group): start_angle = correct_negative_angle(start_angle + self.minor_angle) elif end_group < start_group: if self.has_edge_within_group(start_group): start_angle = correct_negative_angle(start_angle - self.minor_angle) if self.has_edge_within_group(end_group): end_angle = correct_negative_angle(end_angle + self.minor_angle) return start_angle, end_angle def correct_angles(self, start_angle, end_angle): """ This function corrects for the following problems in the edges: """ # Edges going the anti-clockwise direction involves angle = 0. 
if start_angle == 0 and (end_angle - start_angle > np.pi): start_angle = np.pi * 2 if end_angle == 0 and (end_angle - start_angle < -np.pi): end_angle = np.pi * 2 # Case when start_angle == end_angle: if start_angle == end_angle: start_angle = start_angle - self.minor_angle end_angle = end_angle + self.minor_angle return start_angle, end_angle """ Global helper functions go here. """ def get_cartesian(r, theta): """ Given a radius and theta, return the cartesian (x, y) coordinates. """ x = r*np.sin(theta) y = r*np.cos(theta) return x, y def correct_negative_angle(angle): """ Corrects a negative angle to a positive one. """ if angle < 0: angle = 2 * np.pi + angle else: pass return angle
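# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the module above): two node groups,
# two cross-group edges, hypothetical colours.  As the class docstring notes,
# grouping and ordering of the nodes are the caller's responsibility.
if __name__ == "__main__":
    nodes = {
        'group1': ['a', 'b', 'c'],
        'group2': ['d', 'e'],
    }
    edges = {
        'group1': [('a', 'd', {}), ('b', 'e', {})],
    }
    node_colormap = {'group1': 'blue', 'group2': 'green'}
    edge_colormap = {'group1': 'black'}

    hive = HivePlot(nodes, edges, node_colormap, edge_colormap)
    hive.draw()
    plt.savefig('hiveplot_demo.png')   # plt is imported at the top of this module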
#!/usr/bin/env python # Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import shutil import time from json import JSONDecodeError from logging import getLogger from pathlib import Path from typing import Dict, List import torch from torch.utils.data import DataLoader from tqdm import tqdm from transformers import AutoModelForSeq2SeqLM, AutoTokenizer from utils import ( Seq2SeqDataset, calculate_bleu, calculate_rouge, chunks, lmap, load_json, parse_numeric_n_bool_cl_kwargs, save_json, use_task_specific_params, write_txt_file, ) logger = getLogger(__name__) def eval_data_dir( data_dir, save_dir: str, model_name: str, bs: int = 8, max_source_length: int = 1024, type_path="val", n_obs=None, fp16=False, task="summarization", local_rank=None, num_return_sequences=1, dataset_kwargs: Dict = None, prefix="", **generate_kwargs, ) -> Dict: """Run evaluation on part of the data for one gpu and save to {save_dir}/rank_{rank}_output.json""" model_name = str(model_name) assert local_rank is not None torch.distributed.init_process_group(backend="nccl", rank=local_rank) save_dir = Path(save_dir) save_path = save_dir.joinpath(f"rank_{local_rank}_output.json") torch.cuda.set_device(local_rank) model = AutoModelForSeq2SeqLM.from_pretrained(model_name).cuda() if fp16: model = model.half() # determine if we need to increase num_beams use_task_specific_params(model, task) # update config with task specific params num_beams = generate_kwargs.pop("num_beams", model.config.num_beams) # AttributeError risk? if num_return_sequences > num_beams: num_beams = num_return_sequences tokenizer = AutoTokenizer.from_pretrained(model_name) logger.info(f"Inferred tokenizer type: {tokenizer.__class__}") # if this is wrong, check config.model_type. if max_source_length is None: max_source_length = tokenizer.model_max_length if prefix is None: prefix = prefix or getattr(model.config, "prefix", "") or "" ds = Seq2SeqDataset( tokenizer, data_dir, max_source_length, max_target_length=1024, type_path=type_path, n_obs=n_obs, prefix=prefix, **dataset_kwargs, ) # I set shuffle=True for a more accurate progress bar. # If all the longest samples are first, the prog bar estimate is too high at the beginning. 
sampler = ds.make_sortish_sampler(bs, distributed=True, add_extra_examples=False, shuffle=True) data_loader = DataLoader(ds, sampler=sampler, batch_size=bs, collate_fn=ds.collate_fn) results = [] for batch in tqdm(data_loader): summaries = model.generate( input_ids=batch["input_ids"].to(model.device), attention_mask=batch["attention_mask"].to(model.device), num_return_sequences=num_return_sequences, num_beams=num_beams, **generate_kwargs, ) preds = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False) ids = batch["ids"] if num_return_sequences > 1: preds = chunks(preds, num_return_sequences) # batch size chunks, each of size num_return_seq for i, pred in enumerate(preds): results.append(dict(pred=pred, id=ids[i].item())) save_json(results, save_path) return results, sampler.num_replicas def run_generate(): parser = argparse.ArgumentParser( epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate" ) parser.add_argument("--data_dir", type=str, help="like cnn_dm/test.source") parser.add_argument( "--model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.", default="sshleifer/distilbart-xsum-12-3", ) parser.add_argument("--save_dir", type=str, help="where to save", default="tmp_gen") parser.add_argument("--max_source_length", type=int, default=None) parser.add_argument( "--type_path", type=str, default="test", help="which subset to evaluate typically train/val/test" ) parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics") parser.add_argument("--bs", type=int, default=8, required=False, help="batch size") parser.add_argument( "--local_rank", type=int, default=-1, required=False, help="should be passed by distributed.launch" ) parser.add_argument( "--n_obs", type=int, default=None, required=False, help="How many observations. Defaults to all." ) parser.add_argument( "--num_return_sequences", type=int, default=1, required=False, help="How many sequences to return" ) parser.add_argument( "--sync_timeout", type=int, default=600, required=False, help="How long should master process wait for other processes to finish.", ) parser.add_argument("--src_lang", type=str, default=None, required=False) parser.add_argument("--tgt_lang", type=str, default=None, required=False) parser.add_argument( "--prefix", type=str, required=False, default=None, help="will be added to the begininng of src examples" ) parser.add_argument("--fp16", action="store_true") parser.add_argument("--debug", action="store_true") start_time = time.time() args, rest = parser.parse_known_args() generate_kwargs = parse_numeric_n_bool_cl_kwargs(rest) if generate_kwargs and args.local_rank <= 0: print(f"parsed the following generate kwargs: {generate_kwargs}") json_save_dir = Path(args.save_dir + "_tmp") Path(json_save_dir).mkdir(exist_ok=True) # this handles locking. intermediate_files = list(json_save_dir.glob("rank_*.json")) if intermediate_files: raise ValueError(f"Found files at {json_save_dir} please move or remove them.") # In theory, a node could finish and save before another node hits this. If this happens, we can address later. 
dataset_kwargs = {} if args.src_lang is not None: dataset_kwargs["src_lang"] = args.src_lang if args.tgt_lang is not None: dataset_kwargs["tgt_lang"] = args.tgt_lang Path(args.save_dir).mkdir(exist_ok=True) results, num_replicas = eval_data_dir( args.data_dir, json_save_dir, args.model_name, type_path=args.type_path, bs=args.bs, fp16=args.fp16, task=args.task, local_rank=args.local_rank, n_obs=args.n_obs, max_source_length=args.max_source_length, num_return_sequences=args.num_return_sequences, prefix=args.prefix, dataset_kwargs=dataset_kwargs, **generate_kwargs, ) if args.local_rank <= 0: save_dir = Path(args.save_dir) save_dir.mkdir(exist_ok=True) partial_results = gather_results_from_each_node(num_replicas, json_save_dir, args.sync_timeout) preds = combine_partial_results(partial_results) if args.num_return_sequences > 1: save_path = save_dir.joinpath("pseudolabel_results.json") print(f"Saving aggregated results at {save_path}, intermediate in {json_save_dir}/") save_json(preds, save_path) return tgt_file = Path(args.data_dir).joinpath(args.type_path + ".target") with open(tgt_file) as f: labels = [x.rstrip() for x in f.readlines()][: len(preds)] # Calculate metrics, save metrics, and save _generations.txt calc_bleu = "translation" in args.task score_fn = calculate_bleu if calc_bleu else calculate_rouge metric_name = "bleu" if calc_bleu else "rouge" metrics: Dict = score_fn(preds, labels) metrics["n_obs"] = len(preds) runtime = time.time() - start_time metrics["seconds_per_sample"] = round(runtime / metrics["n_obs"], 4) metrics["n_gpus"] = num_replicas # TODO(@stas00): add whatever metadata to metrics metrics_save_path = save_dir.joinpath(f"{args.type_path}_{metric_name}.json") save_json(metrics, metrics_save_path, indent=None) print(metrics) write_txt_file(preds, save_dir.joinpath(f"{args.type_path}_generations.txt")) if args.debug: write_txt_file(labels, save_dir.joinpath(f"{args.type_path}.target")) else: shutil.rmtree(json_save_dir) def combine_partial_results(partial_results) -> List: """Concatenate partial results into one file, then sort it by id.""" records = [] for partial_result in partial_results: records.extend(partial_result) records = list(sorted(records, key=lambda x: x["id"])) preds = [x["pred"] for x in records] return preds def gather_results_from_each_node(num_replicas, save_dir, timeout) -> List[Dict[str, List]]: # WAIT FOR lots of .json files start_wait = time.time() logger.info("waiting for all nodes to finish") json_data = None while (time.time() - start_wait) < timeout: json_files = list(save_dir.glob("rank_*.json")) if len(json_files) < num_replicas: continue try: # make sure all json files are fully saved json_data = lmap(load_json, json_files) return json_data except JSONDecodeError: continue else: raise TimeoutError("Rank 0 gave up on waiting for other processes") # Unreachable if __name__ == "__main__": # Usage for MT: run_generate()
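# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the script above): how the per-rank record
# lists written by eval_data_dir() are merged by combine_partial_results() --
# flattened, sorted by example id, then reduced to the prediction strings.
# The sample records and the _demo_* name are hypothetical.
def _demo_combine_partial_results():
    demo_partial_results = [
        [{"pred": "summary for example 1", "id": 1},
         {"pred": "summary for example 3", "id": 3}],   # e.g. written by rank 0
        [{"pred": "summary for example 0", "id": 0},
         {"pred": "summary for example 2", "id": 2}],   # e.g. written by rank 1
    ]
    preds = combine_partial_results(demo_partial_results)
    assert preds == ["summary for example 0", "summary for example 1",
                     "summary for example 2", "summary for example 3"]
    return preds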
# Copyright 2013 Mirantis, Inc. # Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from cinderclient import exceptions as cinder_exception import mock from nova import context from nova import exception from nova import test from nova.volume import cinder class FakeCinderClient(object): class Volumes(object): def get(self, volume_id): return {'id': volume_id} def list(self, detailed): return [{'id': 'id1'}, {'id': 'id2'}] def create(self, *args, **kwargs): return {'id': 'created_id'} def __getattr__(self, item): return None def __init__(self): self.volumes = self.Volumes() self.volume_snapshots = self.volumes class FakeVolume(object): def __init__(self, dict=dict()): self.id = dict.get('id') or '1234' self.status = dict.get('status') or 'available' self.size = dict.get('size') or 1 self.availability_zone = dict.get('availability_zone') or 'cinder' self.created_at = dict.get('created_at') self.attach_time = dict.get('attach_time') self.mountpoint = dict.get('mountpoint') self.display_name = dict.get('display_name') or 'volume-' + self.id self.display_description = dict.get('display_description') or 'fake' self.volume_type_id = dict.get('volume_type_id') self.snapshot_id = dict.get('snapshot_id') self.metadata = dict.get('volume_metadata') or {} class CinderApiTestCase(test.NoDBTestCase): def setUp(self): super(CinderApiTestCase, self).setUp() self.api = cinder.API() self.cinderclient = FakeCinderClient() self.ctx = context.get_admin_context() self.mox.StubOutWithMock(cinder, 'cinderclient') self.mox.StubOutWithMock(cinder, '_untranslate_volume_summary_view') self.mox.StubOutWithMock(cinder, '_untranslate_snapshot_summary_view') self.mox.StubOutWithMock(cinder, 'get_cinder_client_version') def test_get(self): volume_id = 'volume_id1' cinder.cinderclient(self.ctx).AndReturn(self.cinderclient) cinder._untranslate_volume_summary_view(self.ctx, {'id': 'volume_id1'}) self.mox.ReplayAll() self.api.get(self.ctx, volume_id) def test_get_failed(self): volume_id = 'volume_id' cinder.cinderclient(self.ctx).AndRaise(cinder_exception.NotFound('')) cinder.cinderclient(self.ctx).AndRaise(cinder_exception.BadRequest('')) cinder.cinderclient(self.ctx).AndRaise( cinder_exception.ConnectionError('')) self.mox.ReplayAll() self.assertRaises(exception.VolumeNotFound, self.api.get, self.ctx, volume_id) self.assertRaises(exception.InvalidInput, self.api.get, self.ctx, volume_id) self.assertRaises(exception.CinderConnectionFailed, self.api.get, self.ctx, volume_id) def test_create(self): cinder.get_cinder_client_version(self.ctx).AndReturn('2') cinder.cinderclient(self.ctx).AndReturn(self.cinderclient) cinder._untranslate_volume_summary_view(self.ctx, {'id': 'created_id'}) self.mox.ReplayAll() self.api.create(self.ctx, 1, '', '') def test_create_failed(self): cinder.get_cinder_client_version(self.ctx).AndReturn('2') cinder.cinderclient(self.ctx).AndRaise(cinder_exception.BadRequest('')) self.mox.ReplayAll() self.assertRaises(exception.InvalidInput, self.api.create, self.ctx, 1, '', '') 
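    # Descriptive note on the mox-style tests in this class: expectations are recorded on
    # the stubbed cinder.cinderclient and helper functions via AndReturn/AndRaise,
    # self.mox.ReplayAll() switches the stubs into replay mode, and the subsequent
    # self.api.* calls must then match the recorded expectations.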
@mock.patch('nova.volume.cinder.get_cinder_client_version') @mock.patch('nova.volume.cinder.cinderclient') def test_create_over_quota_failed(self, mock_cinderclient, mock_get_version): mock_get_version.return_value = '2' mock_cinderclient.return_value.volumes.create.side_effect = ( cinder_exception.OverLimit(413)) self.assertRaises(exception.OverQuota, self.api.create, self.ctx, 1, '', '') mock_cinderclient.return_value.volumes.create.assert_called_once_with( 1, user_id=None, imageRef=None, availability_zone=None, volume_type=None, description='', snapshot_id=None, name='', project_id=None, metadata=None) def test_get_all(self): cinder.cinderclient(self.ctx).AndReturn(self.cinderclient) cinder._untranslate_volume_summary_view(self.ctx, {'id': 'id1'}).AndReturn('id1') cinder._untranslate_volume_summary_view(self.ctx, {'id': 'id2'}).AndReturn('id2') self.mox.ReplayAll() self.assertEqual(['id1', 'id2'], self.api.get_all(self.ctx)) def test_check_attach_volume_status_error(self): volume = {'status': 'error'} self.assertRaises(exception.InvalidVolume, self.api.check_attach, self.ctx, volume) def test_check_attach_volume_already_attached(self): volume = {'status': 'available'} volume['attach_status'] = "attached" self.assertRaises(exception.InvalidVolume, self.api.check_attach, self.ctx, volume) def test_check_attach_availability_zone_differs(self): volume = {'status': 'available'} volume['attach_status'] = "detached" instance = {'availability_zone': 'zone1', 'host': 'fakehost'} with mock.patch.object(cinder.az, 'get_instance_availability_zone', side_effect=lambda context, instance: 'zone1') as mock_get_instance_az: cinder.CONF.set_override('cinder_cross_az_attach', False) volume['availability_zone'] = 'zone1' self.assertIsNone(self.api.check_attach(self.ctx, volume, instance)) mock_get_instance_az.assert_called_once_with(self.ctx, instance) mock_get_instance_az.reset_mock() volume['availability_zone'] = 'zone2' self.assertRaises(exception.InvalidVolume, self.api.check_attach, self.ctx, volume, instance) mock_get_instance_az.assert_called_once_with(self.ctx, instance) mock_get_instance_az.reset_mock() del instance['host'] volume['availability_zone'] = 'zone1' self.assertIsNone(self.api.check_attach( self.ctx, volume, instance)) self.assertFalse(mock_get_instance_az.called) volume['availability_zone'] = 'zone2' self.assertRaises(exception.InvalidVolume, self.api.check_attach, self.ctx, volume, instance) self.assertFalse(mock_get_instance_az.called) cinder.CONF.reset() def test_check_attach(self): volume = {'status': 'available'} volume['attach_status'] = "detached" volume['availability_zone'] = 'zone1' instance = {'availability_zone': 'zone1', 'host': 'fakehost'} cinder.CONF.set_override('cinder_cross_az_attach', False) with mock.patch.object(cinder.az, 'get_instance_availability_zone', side_effect=lambda context, instance: 'zone1'): self.assertIsNone(self.api.check_attach( self.ctx, volume, instance)) cinder.CONF.reset() def test_check_detach(self): volume = {'status': 'available'} self.assertRaises(exception.InvalidVolume, self.api.check_detach, self.ctx, volume) volume['status'] = 'non-available' self.assertIsNone(self.api.check_detach(self.ctx, volume)) def test_reserve_volume(self): cinder.cinderclient(self.ctx).AndReturn(self.cinderclient) self.mox.StubOutWithMock(self.cinderclient.volumes, 'reserve') self.cinderclient.volumes.reserve('id1') self.mox.ReplayAll() self.api.reserve_volume(self.ctx, 'id1') def test_unreserve_volume(self): 
cinder.cinderclient(self.ctx).AndReturn(self.cinderclient) self.mox.StubOutWithMock(self.cinderclient.volumes, 'unreserve') self.cinderclient.volumes.unreserve('id1') self.mox.ReplayAll() self.api.unreserve_volume(self.ctx, 'id1') def test_begin_detaching(self): cinder.cinderclient(self.ctx).AndReturn(self.cinderclient) self.mox.StubOutWithMock(self.cinderclient.volumes, 'begin_detaching') self.cinderclient.volumes.begin_detaching('id1') self.mox.ReplayAll() self.api.begin_detaching(self.ctx, 'id1') def test_roll_detaching(self): cinder.cinderclient(self.ctx).AndReturn(self.cinderclient) self.mox.StubOutWithMock(self.cinderclient.volumes, 'roll_detaching') self.cinderclient.volumes.roll_detaching('id1') self.mox.ReplayAll() self.api.roll_detaching(self.ctx, 'id1') @mock.patch('nova.volume.cinder.cinderclient') def test_attach(self, mock_cinderclient): mock_volumes = mock.MagicMock() mock_cinderclient.return_value = mock.MagicMock(volumes=mock_volumes) self.api.attach(self.ctx, 'id1', 'uuid', 'point') mock_cinderclient.assert_called_once_with(self.ctx) mock_volumes.attach.assert_called_once_with('id1', 'uuid', 'point', mode='rw') @mock.patch('nova.volume.cinder.cinderclient') def test_attach_with_mode(self, mock_cinderclient): mock_volumes = mock.MagicMock() mock_cinderclient.return_value = mock.MagicMock(volumes=mock_volumes) self.api.attach(self.ctx, 'id1', 'uuid', 'point', mode='ro') mock_cinderclient.assert_called_once_with(self.ctx) mock_volumes.attach.assert_called_once_with('id1', 'uuid', 'point', mode='ro') def test_detach(self): cinder.cinderclient(self.ctx).AndReturn(self.cinderclient) self.mox.StubOutWithMock(self.cinderclient.volumes, 'detach') self.cinderclient.volumes.detach('id1') self.mox.ReplayAll() self.api.detach(self.ctx, 'id1') def test_initialize_connection(self): cinder.cinderclient(self.ctx).AndReturn(self.cinderclient) self.mox.StubOutWithMock(self.cinderclient.volumes, 'initialize_connection') self.cinderclient.volumes.initialize_connection('id1', 'connector') self.mox.ReplayAll() self.api.initialize_connection(self.ctx, 'id1', 'connector') def test_terminate_connection(self): cinder.cinderclient(self.ctx).AndReturn(self.cinderclient) self.mox.StubOutWithMock(self.cinderclient.volumes, 'terminate_connection') self.cinderclient.volumes.terminate_connection('id1', 'connector') self.mox.ReplayAll() self.api.terminate_connection(self.ctx, 'id1', 'connector') def test_delete(self): cinder.cinderclient(self.ctx).AndReturn(self.cinderclient) self.mox.StubOutWithMock(self.cinderclient.volumes, 'delete') self.cinderclient.volumes.delete('id1') self.mox.ReplayAll() self.api.delete(self.ctx, 'id1') def test_update(self): self.assertRaises(NotImplementedError, self.api.update, self.ctx, '', '') def test_get_snapshot(self): snapshot_id = 'snapshot_id' cinder.cinderclient(self.ctx).AndReturn(self.cinderclient) cinder._untranslate_snapshot_summary_view(self.ctx, {'id': snapshot_id}) self.mox.ReplayAll() self.api.get_snapshot(self.ctx, snapshot_id) def test_get_snapshot_failed(self): snapshot_id = 'snapshot_id' cinder.cinderclient(self.ctx).AndRaise(cinder_exception.NotFound('')) cinder.cinderclient(self.ctx).AndRaise( cinder_exception.ConnectionError('')) self.mox.ReplayAll() self.assertRaises(exception.SnapshotNotFound, self.api.get_snapshot, self.ctx, snapshot_id) self.assertRaises(exception.CinderConnectionFailed, self.api.get_snapshot, self.ctx, snapshot_id) def test_get_all_snapshots(self): cinder.cinderclient(self.ctx).AndReturn(self.cinderclient) 
cinder._untranslate_snapshot_summary_view(self.ctx, {'id': 'id1'}).AndReturn('id1') cinder._untranslate_snapshot_summary_view(self.ctx, {'id': 'id2'}).AndReturn('id2') self.mox.ReplayAll() self.assertEqual(['id1', 'id2'], self.api.get_all_snapshots(self.ctx)) def test_create_snapshot(self): cinder.cinderclient(self.ctx).AndReturn(self.cinderclient) cinder._untranslate_snapshot_summary_view(self.ctx, {'id': 'created_id'}) self.mox.ReplayAll() self.api.create_snapshot(self.ctx, {'id': 'id1'}, '', '') def test_create_force(self): cinder.cinderclient(self.ctx).AndReturn(self.cinderclient) cinder._untranslate_snapshot_summary_view(self.ctx, {'id': 'created_id'}) self.mox.ReplayAll() self.api.create_snapshot_force(self.ctx, {'id': 'id1'}, '', '') def test_delete_snapshot(self): cinder.cinderclient(self.ctx).AndReturn(self.cinderclient) self.mox.StubOutWithMock(self.cinderclient.volume_snapshots, 'delete') self.cinderclient.volume_snapshots.delete('id1') self.mox.ReplayAll() self.api.delete_snapshot(self.ctx, 'id1') @mock.patch('nova.volume.cinder.cinderclient') def test_get_volume_metadata(self, mock_cinderclient): volume_id = 'id1' metadata = {'key1': 'value1', 'key2': 'value2'} volume = FakeVolume({'id': volume_id, 'volume_metadata': metadata}) mock_volumes = mock.MagicMock() mock_volumes.get.return_value = volume mock_cinderclient.return_value = mock.MagicMock(volumes=mock_volumes) results = self.api.get_volume_metadata(self.ctx, volume_id) mock_cinderclient.assert_called_once_with(self.ctx) mock_volumes.get.assert_called_once_with(volume_id) self.assertEqual(results, metadata) @mock.patch('nova.volume.cinder.cinderclient') def test_get_volume_metadata_value(self, mock_cinderclient): volume_id = 'id1' metadata = {'key1': 'value1'} volume = FakeVolume({'id': volume_id, 'volume_metadata': metadata}) mock_volumes = mock.MagicMock() mock_volumes.get.return_value = volume mock_cinderclient.return_value = mock.MagicMock(volumes=mock_volumes) results = self.api.get_volume_metadata_value(self.ctx, volume_id, 'key1') mock_cinderclient.assert_called_once_with(self.ctx) mock_volumes.get.assert_called_once_with(volume_id) self.assertEqual(results, 'value1') @mock.patch('nova.volume.cinder.cinderclient') def test_delete_volume_metadata(self, mock_cinderclient): volume_id = 'id1' keys = ['key1', 'key2', 'key3'] mock_volumes = mock.MagicMock() mock_cinderclient.return_value = mock.MagicMock(volumes=mock_volumes) self.api.delete_volume_metadata(self.ctx, volume_id, keys) mock_cinderclient.assert_called_once_with(self.ctx) mock_volumes.delete_metadata.assert_called_once_with(volume_id, keys) @mock.patch('nova.volume.cinder.cinderclient') def test_update_volume_metadata(self, mock_cinderclient): volume_id = 'id1' metadata = {'key1': 'value1'} mock_volumes = mock.MagicMock() mock_volumes.set_metadata.return_value = metadata mock_cinderclient.return_value = mock.MagicMock(volumes=mock_volumes) updated_meta = self.api.update_volume_metadata(self.ctx, volume_id, metadata) mock_cinderclient.assert_called_once_with(self.ctx) self.assertFalse(mock_volumes.update_all_metadata.called) mock_volumes.set_metadata.assert_called_once_with(volume_id, metadata) self.assertEqual(metadata, updated_meta) @mock.patch('nova.volume.cinder.cinderclient') def test_update_volume_metadata_delete(self, mock_cinderclient): volume_id = 'id1' metadata = {'key1': 'value1', 'key2': 'value2'} mock_volumes = mock.MagicMock() mock_volumes.update_all_metadata.return_value = metadata mock_cinderclient.return_value = 
mock.MagicMock(volumes=mock_volumes) updated_meta = self.api.update_volume_metadata(self.ctx, volume_id, metadata, delete=True) mock_cinderclient.assert_called_once_with(self.ctx) mock_volumes.update_all_metadata.assert_called_once_with(volume_id, metadata) self.assertFalse(mock_volumes.set_metadata.called) self.assertEqual(metadata, updated_meta) def test_update_snapshot_status(self): cinder.cinderclient(self.ctx).AndReturn(self.cinderclient) self.mox.StubOutWithMock(self.cinderclient.volume_snapshots, 'update_snapshot_status') self.cinderclient.volume_snapshots.update_snapshot_status( 'id1', {'status': 'error', 'progress': '90%'}) self.mox.ReplayAll() self.api.update_snapshot_status(self.ctx, 'id1', 'error') def test_get_volume_encryption_metadata(self): cinder.cinderclient(self.ctx).AndReturn(self.cinderclient) self.mox.StubOutWithMock(self.cinderclient.volumes, 'get_encryption_metadata') self.cinderclient.volumes.\ get_encryption_metadata({'encryption_key_id': 'fake_key'}) self.mox.ReplayAll() self.api.get_volume_encryption_metadata(self.ctx, {'encryption_key_id': 'fake_key'})
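    # Hedged, illustrative sketch (not part of the original suite): the mox-based
    # test_detach above could also be written with mock.patch, in the same style as
    # test_attach; the assertions mirror what the mox expectations encode.
    @mock.patch('nova.volume.cinder.cinderclient')
    def example_detach_with_mock_patch(self, mock_cinderclient):
        mock_volumes = mock.MagicMock()
        mock_cinderclient.return_value = mock.MagicMock(volumes=mock_volumes)

        self.api.detach(self.ctx, 'id1')

        mock_cinderclient.assert_called_once_with(self.ctx)
        mock_volumes.detach.assert_called_once_with('id1')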
# coding: utf-8 """ Wavefront REST API Documentation <p>The Wavefront REST API enables you to interact with Wavefront servers using standard REST API tools. You can use the REST API to automate commonly executed operations such as automatically tagging sources.</p><p>When you make REST API calls outside the Wavefront REST API documentation you must add the header \"Authorization: Bearer &lt;&lt;API-TOKEN&gt;&gt;\" to your HTTP requests.</p> # noqa: E501 OpenAPI spec version: v2 Contact: [email protected] Generated by: https://github.com/swagger-api/swagger-codegen.git """ import pprint import re # noqa: F401 import six from wavefront_api_client.configuration import Configuration class FacetSearchRequestContainer(object): """NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'facet_query': 'str', 'facet_query_matching_method': 'str', 'limit': 'int', 'offset': 'int', 'query': 'list[SearchQuery]' } attribute_map = { 'facet_query': 'facetQuery', 'facet_query_matching_method': 'facetQueryMatchingMethod', 'limit': 'limit', 'offset': 'offset', 'query': 'query' } def __init__(self, facet_query=None, facet_query_matching_method=None, limit=None, offset=None, query=None, _configuration=None): # noqa: E501 """FacetSearchRequestContainer - a model defined in Swagger""" # noqa: E501 if _configuration is None: _configuration = Configuration() self._configuration = _configuration self._facet_query = None self._facet_query_matching_method = None self._limit = None self._offset = None self._query = None self.discriminator = None if facet_query is not None: self.facet_query = facet_query if facet_query_matching_method is not None: self.facet_query_matching_method = facet_query_matching_method if limit is not None: self.limit = limit if offset is not None: self.offset = offset if query is not None: self.query = query @property def facet_query(self): """Gets the facet_query of this FacetSearchRequestContainer. # noqa: E501 A string against which facet results are compared. If the facet result CONTAINs, STARTSWITH, or is an EXACT match for this value, as specified by facetQueryMatchingMethod, then it is returned. # noqa: E501 :return: The facet_query of this FacetSearchRequestContainer. # noqa: E501 :rtype: str """ return self._facet_query @facet_query.setter def facet_query(self, facet_query): """Sets the facet_query of this FacetSearchRequestContainer. A string against which facet results are compared. If the facet result CONTAINs, STARTSWITH, or is an EXACT match for this value, as specified by facetQueryMatchingMethod, then it is returned. # noqa: E501 :param facet_query: The facet_query of this FacetSearchRequestContainer. # noqa: E501 :type: str """ self._facet_query = facet_query @property def facet_query_matching_method(self): """Gets the facet_query_matching_method of this FacetSearchRequestContainer. # noqa: E501 The matching method used to filter when 'facetQuery' is used. Defaults to CONTAINS. # noqa: E501 :return: The facet_query_matching_method of this FacetSearchRequestContainer. # noqa: E501 :rtype: str """ return self._facet_query_matching_method @facet_query_matching_method.setter def facet_query_matching_method(self, facet_query_matching_method): """Sets the facet_query_matching_method of this FacetSearchRequestContainer. 
The matching method used to filter when 'facetQuery' is used. Defaults to CONTAINS. # noqa: E501 :param facet_query_matching_method: The facet_query_matching_method of this FacetSearchRequestContainer. # noqa: E501 :type: str """ allowed_values = ["CONTAINS", "STARTSWITH", "EXACT", "TAGPATH"] # noqa: E501 if (self._configuration.client_side_validation and facet_query_matching_method not in allowed_values): raise ValueError( "Invalid value for `facet_query_matching_method` ({0}), must be one of {1}" # noqa: E501 .format(facet_query_matching_method, allowed_values) ) self._facet_query_matching_method = facet_query_matching_method @property def limit(self): """Gets the limit of this FacetSearchRequestContainer. # noqa: E501 The number of results to return. Default: 100, Maximum allowed: 1000 # noqa: E501 :return: The limit of this FacetSearchRequestContainer. # noqa: E501 :rtype: int """ return self._limit @limit.setter def limit(self, limit): """Sets the limit of this FacetSearchRequestContainer. The number of results to return. Default: 100, Maximum allowed: 1000 # noqa: E501 :param limit: The limit of this FacetSearchRequestContainer. # noqa: E501 :type: int """ if (self._configuration.client_side_validation and limit is not None and limit > 1000): # noqa: E501 raise ValueError("Invalid value for `limit`, must be a value less than or equal to `1000`") # noqa: E501 if (self._configuration.client_side_validation and limit is not None and limit < 1): # noqa: E501 raise ValueError("Invalid value for `limit`, must be a value greater than or equal to `1`") # noqa: E501 self._limit = limit @property def offset(self): """Gets the offset of this FacetSearchRequestContainer. # noqa: E501 The number of results to skip before returning values. Default: 0 # noqa: E501 :return: The offset of this FacetSearchRequestContainer. # noqa: E501 :rtype: int """ return self._offset @offset.setter def offset(self, offset): """Sets the offset of this FacetSearchRequestContainer. The number of results to skip before returning values. Default: 0 # noqa: E501 :param offset: The offset of this FacetSearchRequestContainer. # noqa: E501 :type: int """ self._offset = offset @property def query(self): """Gets the query of this FacetSearchRequestContainer. # noqa: E501 A list of queries by which to limit the search results. Entities that match ALL queries in the list are returned # noqa: E501 :return: The query of this FacetSearchRequestContainer. # noqa: E501 :rtype: list[SearchQuery] """ return self._query @query.setter def query(self, query): """Sets the query of this FacetSearchRequestContainer. A list of queries by which to limit the search results. Entities that match ALL queries in the list are returned # noqa: E501 :param query: The query of this FacetSearchRequestContainer. 
# noqa: E501 :type: list[SearchQuery] """ self._query = query def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(FacetSearchRequestContainer, dict): for key, value in self.items(): result[key] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, FacetSearchRequestContainer): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, FacetSearchRequestContainer): return True return self.to_dict() != other.to_dict()
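# Hedged usage sketch (illustrative values, not part of the generated model): building a
# request container and relying on the client-side validation implemented in the setters.
def _example_facet_search_request():
    container = FacetSearchRequestContainer(
        facet_query="prod",
        facet_query_matching_method="STARTSWITH",  # must be CONTAINS, STARTSWITH, EXACT or TAGPATH
        limit=100,   # validated to lie within [1, 1000]
        offset=0,
    )
    return container.to_dict()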
""" Defines different IO utility methods and classes. Author: O.Z. """ # imports import os import sys import shutil from pathlib import Path __all__ = ['has_attribute', 'compare_paths', 'is_subfolder', 'extract_file_name', 'get_current_package_path', 'select_all_scripts', 'safe_write', 'safe_write_log', 'TemporaryFolder', 'TemporaryFile', 'TemporaryDirectoryTree', 'TemporaryFoldersManager'] # methods class TemporaryPathsEntry(object): """Temporary entry to the sys.paths variable. """ def __init__(self, entry_value): self.__entry_value = entry_value def __str__(self): return self.__entry_value def __enter__(self): sys.path.insert(0, self.__entry_value) return self def __exit__(self, exc_type, exc_value, traceback): for i in range(0, len(sys.path)): if sys.path[i] == self.__entry_value: sys.path.pop(i) return @property def value(self): """Gets entry value. """ return self.__entry_value def is_already_in_paths(self): """Returns True if the entry is already in the sys.path list. """ for path in sys.path: if path == self.value: return True return False def has_attribute(module_path, attribute): """Checks if the given module has a specific attribute. Args: module_path: a string, containing path to the module. attribute: an attribute to check. Returns: True if there's such attribute, False otherwise. Raises: OSError, ValueError. """ module = Path(module_path) parent = module.parent with TemporaryFile.from_path(str(parent / "temp_module.py")) as temp: temp.fill(module_path) module_name = "temp_module" with TemporaryPathsEntry(str(parent)) as paths_entry: try: __import__(module_name) return hasattr(sys.modules[module_name], attribute) except ImportError as err: raise ValueError("failed to import from " + str(paths_entry) + " module " + module_name + " - " + str(err)) def compare_paths(first, second): """Checks if two given paths are equal. Paths can be non-existing. Non-sensitive to the case. Args: first: a string (or object with overriden __str__ method), containing the first path. second: a string (or object with overriden __str__method), containing the second path. Returns: True if paths are equal, False otherwise. Raises: nothing. """ try: abs_first = os.path.abspath(str(first)) abs_second = os.path.abspath(str(second)) return abs_first.lower() == abs_second.lower() except (OSError, ValueError) as err: safe_write(sys.stderr, "ioutils.compare_paths error: " + str(err)) return str(first).lower() == str(second).lower() def is_subfolder(subfolder_path, parent_path): """Tries to check if given path is a subfolder of the given parent path. Args: subfolder_path: a string, which contains a path to check. parent_path: the parent path string. Returns: True if the given path is a subfolder. Raises: OSError, ValueError. """ subfolder_parents = list(Path(os.path.abspath(subfolder_path)).parents) parent_parents = [Path(os.path.abspath(parent_path))] parent_parents.extend(list(parent_parents[0].parents)) subfolder_parents.reverse() parent_parents.reverse() if len(subfolder_parents) < len(parent_parents): return False for i in range(0, len(parent_parents)): if not compare_paths(parent_parents[i], subfolder_parents[i]): return False return True def extract_file_name(file_path, error_stream=sys.stderr): """Tries to extract a file name from the given path string. The file should exist. Args: file_path: a path string to get the file name from. error_stream: a stream to write error messages to. Returns: a string, containing file name if succeeded, an empty string otherwise. Raises: nothing. 
""" try: file_object = Path(file_path) if not file_object.is_file(): safe_write(error_stream, "ioutils.extract_file_name error: " + file_path + " is not a file") return "" return str(file_object.relative_to(str(file_object.parent))) except (OSError, ValueError) as err: safe_write(error_stream, "ioutils.extract_file_name error: " + str(err)) return "" def get_current_package_path(error_stream=sys.stderr): """Tries to get current executed scripts package path. Args: error_stream: a stream to write error messages to. Returns: a string, containing package path if succeeded, an empty string otherwise. Raises: nothing. """ try: current_module_path = Path(os.path.join(os.getcwd(), sys.argv[0])) if not current_module_path.exists(): safe_write(error_stream, "ioutils.get_current_package_path error") return "" if current_module_path.is_file(): return os.path.abspath(str(current_module_path.parent)) return os.path.abspath(str(current_module_path)) except OSError as err: safe_write(error_stream, "ioutils.get_current_package_path error: " + str(err)) return "" return "" def select_all_scripts(path_string, error_stream=sys.stderr): """Gets the list of the scripts inside the specified package. The package is specified by the path to the folder with scripts. The method seeks the scripts recursively. The result is a list of strings, each entry is a relative path to the script (including file name). Args: path_string: a path string to the package, which scripts are selected. error_stream: a stream to write error to. Returns: list of strings, each entry is a relative to the specified package path (with file name) to the script. Returns an empty list in case of the error. Raises: nothing. """ scripts = [] try: path_object = Path(path_string) script_objects = list(path_object.glob("**/*.py")) for script in script_objects: scripts.append(str(Path(str(script)).relative_to(path_string))) except (OSError, ValueError): safe_write(error_stream, "ioutils.select_all_scripts error.") return scripts def safe_write(file_object, string_buffer): """Writes a string buffer using 'write' method of the given file object. Doesn't raise exceptions, but returns False if wasn't successful (and True otherwise). Args: file_object: the file object to use for output. string_buffer: a string buffer to write. Returns: True if succeeded. Raises: nothing. """ try: file_object.write(string_buffer) return True except ValueError: if not file_object == sys.stderr: safe_write(sys.stderr, "ioutils.safe_write: ValueError.\n") return False def safe_write_log(log_file_name, logs_folder, string_buffer, error_stream=sys.stderr): """Writes specified string buffer into the log file. It is assumed, that the log files should be placed into the specified folder. If the folder doesn't exist, it is created. If the log file already exists, it is rewritten. Args: log_file_name: the name of the log file. logs_folder: the folder, where the log file should be placed into. string_buffer: the string buffer to write. error_stream: an object with 'write' method, to write erros to. Returns: True if succeeded. Raises: nothing. 
""" try: if not os.path.exists(logs_folder): os.mkdir(logs_folder) except OSError: safe_write(error_stream, "ioutils.safe_write_log: failed to create " + logs_folder) return False log_path = "" try: log_path = os.path.join(logs_folder, log_file_name) except OSError: safe_write(error_stream, "ioutils.safe_write_log: failed to join file path.") return False try: if not os.path.exists(log_path): os.close(os.open(log_path, os.O_CREAT)) except OSError: safe_write(error_stream, "ioutils.safe_write_log: failed to create " + log_file_name) return False try: log_object = os.open(log_path, os.O_WRONLY) os.write(log_object, string_buffer) os.close(log_object) except OSError: safe_write(error_stream, "ioutils.safe_write_log: failed to write " + log_file_name) return False return True # classes class TemporaryFolder(object): """Temporary folder object, which manages (creates and removes) specified by the path folder. The object can be used only if the path specifies the folder inside existing directory. The folder shouldn't exist before the object is instantiated. Doesn't require manager. Attributes: __path: a string, which contains path to the folder. __folder: a pathlib.Path object for the folder. """ def __init__(self, path): self.__path = os.path.abspath(path) self.__folder = Path(self.__path) self.__folder.mkdir() def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): self.cleanup() def cleanup(self): """Removes the temporary directory. Returns: nothing. Raises: nothing. """ try: self.__folder.rmdir() except OSError as err: safe_write(sys.stderr, "TemporaryFolder.cleanup error: " + str(err)) @property def full_path(self): """Gets the full path to the folder. Returns: string, which contains absolute full path to the folder. Raises: nothing. """ return self.__path @property def folder(self): """Gets a pathlib.Path object, related to the folder. Returns: a pathlib.Path object. Raises: nothing. """ return self.__folder class TemporaryFile(object): """Temporary file object, which manages its disk representation. The file is created inside existing folder. The file shouldn't exist before the object is instantiated. Doesn't require manager. Attributes: __path: a string, which contains the path to the file. __file: a pathlib.Path object for the file. __name: a string, which contains the file name. """ def __init__(self, folder, file_name): self.__path = os.path.join(str(folder), file_name) self.__file = Path(self.__path) self.__name = file_name self.__file.touch() def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): self.cleanup() @staticmethod def from_path(file_path): """Creates a temporary file, using the given full path. Args: file_path: a string, containing the full path to the temp file (or object with a __str__ method). Returns: a temporary file object. Raises: OSError, ValueError. """ file_obj = Path(str(file_path)) return TemporaryFile(file_obj.parent, str(file_obj.relative_to(str(file_obj.parent)))) def fill(self, another_file_name): """Copies the content of the specified file into the temporary file. Args: another_file_name: a string, containing the path to the file with a content. Returns: nothing. Raises: OSError, ValueError. """ with open(another_file_name, "r") as another_file: with open(self.__path, "w") as temp_file: for line in another_file: temp_file.write(line) def cleanup(self): """Removes the temporary file. In case of error, writes to the sys.stderr the message. Returns: nothing. Raises: nothing. 
""" try: self.__file.unlink() except OSError as err: safe_write(sys.stderr, "TemporaryFile.cleanup error: " + str(err)) @property def full_path(self): """Gets the full path to the file. Returns: string, which contains absolute full path to the file. Raises: nothing. """ return self.__path @property def file(self): """Gets a pathlib.Path object, related to the file. Returns: a pathlib.Path object. Raises: nothing. """ return self.__file @property def name(self): """Gets the file name. Returns: a string, which contains the file name. Raises: nothing. """ return self.__name class TemporaryDirectoryTree(object): """An object, which creates and manages a directory tree, specified in constructor. The directory is specified by the dictionary object with two keys: name and subs. 'name' specifies the name of the directory tree node and 'subs' is a list of similar dictionary objects (nodes of the directory tree, which are children of current node). Attributes: __tree_path: a string, containing path to the tree. __cleanup_list: a list of TemporaryFolder objects sorted in order for the cleanup. """ def __init__(self, root_node, root_path=os.getcwd()): self.__tree_path = os.path.abspath(root_path) self.__cleanup_list = [] creation_stack = [root_node] while len(creation_stack) > 0: cur_node = creation_stack[0] folder_path = os.path.join(root_path, cur_node['name']) self.__cleanup_list.append(TemporaryFolder(folder_path)) for child in cur_node['subs']: child_path = os.path.join(cur_node['name'], child['name']) creation_stack.append(dict(name=child_path, subs=child['subs'])) creation_stack.pop(0) self.__cleanup_list.reverse() def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): self.cleanup() def cleanup(self): """Removes the temporary directory tree from the disk. In case of error, writes message to the sys.stderr. Returns: nothing. Raises: nothing. """ try: for folder in self.__cleanup_list: folder.cleanup() self.__cleanup_list = [] except (OSError, ValueError) as err: safe_write(sys.stderr, "TemporaryDirectoryTree.cleanup error: " + str(err)) @property def root_path(self): """Gets a tree root path. Returns: a string, containing path to the root of the tree. Raises: nothing. """ return self.__tree_path class ManagedTemporaryFolder(object): """Temporary folder object, which is managed by the temporary folders manager. Should be instantiated by the manager. Attributes: __path: a string, which contains path to the folder. __folder: a pathlib.Path object for the folder. __manager: a manager, which has tracks the folder. """ def __init__(self, path, manager): self.__path = os.path.abspath(path) self.__folder = Path(self.__path) self.__manager = manager if not os.path.exists(str(self.__folder.parent)): manager.get_folder(str(self.__folder.parent)) if not self.__folder.exists(): self.__folder.mkdir() @property def absolute_path(self): """Gets an absolute path to the folder. Returns: a string, which contains an absolute path to the folder. Raises: nothing. """ return self.__path @property def folder(self): """Gets a pathlib Path object for the folder. Returns: a pathlib Path object for the folder. Raises: OSError, ValueError """ return self.__folder @property def parent(self): """Gets a parent directory of the folder. Returns: a temp folder object, corresponding to the parent directory. Raises: nothing. """ return self.__manager.get_folder(str(self.__folder.parent)) def create_file(self, file_name): """Asks the manager to create a temporary file inside the folder. 
Args: file_name: the name of the created file. Returns: nothing. Raises: OSError, ValueError. """ temp_file = self.__folder / file_name self.__manager.track_file(temp_file.path) class TemporaryFoldersManager(object): """Manager class of the temporary folders. It is a class, which manages created folders and tracks them. When the 'cleanup' method is called (or the manager is been destroyed), these folders are removed. Attributes: __folders: list of temp folders objects. __files: list of paths to the temp files. """ def __init__(self): self.__folders = [] self.__files = [] self.__force_remove = False def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): try: self.cleanup(self.__force_remove) except (OSError, ValueError) as err: safe_write(sys.stderr, str(err)) @staticmethod def from_list(paths, force_remove=False): """Creates temporary folders manager from the list of entries with 'path' and 'isdir' keys. Args: paths: list of dicts with 'path' and 'isdir' keys. force_remove: value of the force_remove of newly created manager. Returns: a temporary folders manager. Raises: OSError, ValueError. """ temp_manager = TemporaryFoldersManager() for path in paths: if path['isdir']: temp_manager.get_folder(path['path']) else: temp_manager.track_file(path['path']) temp_manager.set_force_remove(force_remove) return temp_manager def set_force_remove(self, force_remove=True): """Sets the force_remove flag. Args: force_remove: new value of the flag. Returns: nothing. Raises: nothing. """ self.__force_remove = force_remove def get_folder(self, folder_path): """Gets a temporary folder object for the specified path. If the temporary folder already exists, returns the registered object. Otherwise creates the folder, registers it and returns the object. Args: folder_path: a string, which contains a path to the folder. Returns: a temporary folder object. Raises: OSError, ValueError. """ folder_abs_path = os.path.abspath(folder_path) for temp_folder in self.__folders: if temp_folder.absolute_path.upper() == folder_abs_path.upper(): return temp_folder folder = Path(folder_path) if folder.exists(): if not folder.is_dir(): raise ValueError() return ManagedTemporaryFolder(folder_path, self) result = ManagedTemporaryFolder(folder_path, self) self.__folders.append(result) return result def is_temporary(self, path): """Checks if given folder or file is registered as temporary. Args: path: a string, which contains the path. Returns: True if the path is temporary. Raises: OSError. """ folder = Path(path) if not folder.exists(): return False ref_paths = [] if folder.is_file(): ref_paths = [temp for temp in self.__files] else: ref_paths = [temp.absolute_path for temp in self.__folders] for ref_path in ref_paths: if compare_paths(ref_path, str(folder)): return True return False def track_file(self, file_path): """Registers the given file as temporary. If the file doesn't exist, it will be created. Args: file_path: a string, which contains path to the file. Returns: nothing. Raises: OSError. """ file_object = Path(file_path) for temp_file in self.__files: if compare_paths(file_path, temp_file): return self.get_folder(str(file_object.parent)) if not file_object.exists(): file_object.touch() self.__files.append(os.path.abspath(file_path)) def cleanup(self, force_remove=False): """Removing all temporary folders and files. Args: force_remove: determines if non-registered subfolders should be removed. Returns: nothing. Raises: OSError, ValueError. 
""" for temp_file_path in self.__files: temp_file = Path(temp_file_path) if temp_file.exists() and temp_file.is_file(): temp_file.unlink() self.__files = [] cleanup_queue = [] sorting = [temp for temp in self.__folders if temp.folder.exists()] while len(sorting) > 0: not_parent = None for i in range(0, len(sorting)): is_not_parent = True for j in range(0, len(sorting)): if i == j: continue if is_subfolder(sorting[j].absolute_path, sorting[i].absolute_path): is_not_parent = False break if is_not_parent: not_parent = i break assert not not_parent is None cleanup_queue.append(sorting[not_parent]) sorting.pop(not_parent) for temp in cleanup_queue: if not force_remove: temp.folder.rmdir() else: shutil.rmtree(temp.absolute_path) # testing def main(): """Checking and Testing method """ pass if __name__ == '__main__': main()
from __future__ import print_function, division, absolute_import import collections import itertools import numpy as np from numba import unittest_support as unittest from numba.compiler import compile_isolated from numba import jit, types, errors from .support import TestCase, MemoryLeakMixin, tag Rect = collections.namedtuple('Rect', ('width', 'height')) Point = collections.namedtuple('Point', ('x', 'y', 'z')) Empty = collections.namedtuple('Empty', ()) def tuple_return_usecase(a, b): return a, b def tuple_first(tup): a, b = tup return a def tuple_second(tup): a, b = tup return b def tuple_index(tup, idx): return tup[idx] def tuple_index_static(tup): # Note the negative index return tup[-2] def tuple_slice2(tup): return tup[1:-1] def tuple_slice3(tup): return tup[1::2] def len_usecase(tup): return len(tup) def add_usecase(a, b): return a + b def eq_usecase(a, b): return a == b def ne_usecase(a, b): return a != b def gt_usecase(a, b): return a > b def ge_usecase(a, b): return a >= b def lt_usecase(a, b): return a < b def le_usecase(a, b): return a <= b def in_usecase(a, b): return a in b def bool_usecase(tup): return bool(tup), (3 if tup else 2) def getattr_usecase(tup): return tup.z, tup.y, tup.x def make_point(a, b, c): return Point(a, b, c) def make_point_kws(a, b, c): return Point(z=c, y=b, x=a) def make_point_nrt(n): r = Rect(list(range(n)), np.zeros(n + 1)) # This also exercises attribute access p = Point(r, len(r.width), len(r.height)) return p def type_usecase(tup, *args): return type(tup)(*args) def identity(tup): return tup class TestTupleReturn(TestCase): @tag('important') def test_array_tuple(self): aryty = types.Array(types.float64, 1, 'C') cres = compile_isolated(tuple_return_usecase, (aryty, aryty)) a = b = np.arange(5, dtype='float64') ra, rb = cres.entry_point(a, b) self.assertTrue((ra == a).all()) self.assertTrue((rb == b).all()) del a, b self.assertTrue((ra == rb).all()) def test_scalar_tuple(self): scalarty = types.float32 cres = compile_isolated(tuple_return_usecase, (scalarty, scalarty)) a = b = 1 ra, rb = cres.entry_point(a, b) self.assertEqual(ra, a) self.assertEqual(rb, b) @tag('important') def test_hetero_tuple(self): alltypes = [] allvalues = [] alltypes.append((types.int32, types.int64)) allvalues.append((1, 2)) alltypes.append((types.float32, types.float64)) allvalues.append((1.125, .25)) alltypes.append((types.int32, types.float64)) allvalues.append((1231, .5)) for (ta, tb), (a, b) in zip(alltypes, allvalues): cres = compile_isolated(tuple_return_usecase, (ta, tb)) ra, rb = cres.entry_point(a, b) self.assertPreciseEqual((ra, rb), (a, b)) class TestTuplePassing(TestCase): @tag('important') def test_unituple(self): tuple_type = types.UniTuple(types.int32, 2) cr_first = compile_isolated(tuple_first, (tuple_type,)) cr_second = compile_isolated(tuple_second, (tuple_type,)) self.assertPreciseEqual(cr_first.entry_point((4, 5)), 4) self.assertPreciseEqual(cr_second.entry_point((4, 5)), 5) @tag('important') def test_hetero_tuple(self): tuple_type = types.Tuple((types.int64, types.float32)) cr_first = compile_isolated(tuple_first, (tuple_type,)) cr_second = compile_isolated(tuple_second, (tuple_type,)) self.assertPreciseEqual(cr_first.entry_point((2**61, 1.5)), 2**61) self.assertPreciseEqual(cr_second.entry_point((2**61, 1.5)), 1.5) def test_size_mismatch(self): # Issue #1638: tuple size should be checked when unboxing tuple_type = types.UniTuple(types.int32, 2) cr = compile_isolated(tuple_first, (tuple_type,)) with self.assertRaises(ValueError) as raises: 
cr.entry_point((4, 5, 6)) self.assertEqual(str(raises.exception), "size mismatch for tuple, expected 2 element(s) but got 3") class TestOperations(TestCase): @tag('important') def test_len(self): pyfunc = len_usecase cr = compile_isolated(pyfunc, [types.Tuple((types.int64, types.float32))]) self.assertPreciseEqual(cr.entry_point((4, 5)), 2) cr = compile_isolated(pyfunc, [types.UniTuple(types.int64, 3)]) self.assertPreciseEqual(cr.entry_point((4, 5, 6)), 3) @tag('important') def test_index(self): pyfunc = tuple_index cr = compile_isolated(pyfunc, [types.UniTuple(types.int64, 3), types.int64]) tup = (4, 3, 6) for i in range(len(tup)): self.assertPreciseEqual(cr.entry_point(tup, i), tup[i]) # With a compile-time static index (the code generation path is different) pyfunc = tuple_index_static for typ in (types.UniTuple(types.int64, 4), types.Tuple((types.int64, types.int32, types.int64, types.int32))): cr = compile_isolated(pyfunc, (typ,)) tup = (4, 3, 42, 6) self.assertPreciseEqual(cr.entry_point(tup), pyfunc(tup)) typ = types.UniTuple(types.int64, 1) with self.assertTypingError(): cr = compile_isolated(pyfunc, (typ,)) def test_in(self): pyfunc = in_usecase cr = compile_isolated(pyfunc, [types.int64, types.UniTuple(types.int64, 3)]) tup = (4, 1, 5) for i in range(5): self.assertPreciseEqual(cr.entry_point(i, tup), pyfunc(i, tup)) def check_slice(self, pyfunc): tup = (4, 5, 6, 7) cr = compile_isolated(pyfunc, [types.UniTuple(types.int64, 4)]) self.assertPreciseEqual(cr.entry_point(tup), pyfunc(tup)) cr = compile_isolated( pyfunc, [types.Tuple((types.int64, types.int32, types.int64, types.int32))]) self.assertPreciseEqual(cr.entry_point(tup), pyfunc(tup)) def test_slice2(self): self.check_slice(tuple_slice2) def test_slice3(self): self.check_slice(tuple_slice3) def test_bool(self): pyfunc = bool_usecase cr = compile_isolated(pyfunc, [types.Tuple((types.int64, types.int32))]) args = ((4, 5),) self.assertPreciseEqual(cr.entry_point(*args), pyfunc(*args)) cr = compile_isolated(pyfunc, [types.UniTuple(types.int64, 3)]) args = ((4, 5, 6),) self.assertPreciseEqual(cr.entry_point(*args), pyfunc(*args)) cr = compile_isolated(pyfunc, [types.Tuple(())]) self.assertPreciseEqual(cr.entry_point(()), pyfunc(())) @tag('important') def test_add(self): pyfunc = add_usecase samples = [(types.Tuple(()), ()), (types.UniTuple(types.int32, 0), ()), (types.UniTuple(types.int32, 1), (42,)), (types.Tuple((types.int64, types.float32)), (3, 4.5)), ] for (ta, a), (tb, b) in itertools.product(samples, samples): cr = compile_isolated(pyfunc, (ta, tb)) expected = pyfunc(a, b) got = cr.entry_point(a, b) self.assertPreciseEqual(got, expected, msg=(ta, tb)) def _test_compare(self, pyfunc): def eq(pyfunc, cfunc, args): self.assertIs(cfunc(*args), pyfunc(*args), "mismatch for arguments %s" % (args,)) # Same-sized tuples argtypes = [types.Tuple((types.int64, types.float32)), types.UniTuple(types.int32, 2)] for ta, tb in itertools.product(argtypes, argtypes): cr = compile_isolated(pyfunc, (ta, tb)) cfunc = cr.entry_point for args in [((4, 5), (4, 5)), ((4, 5), (4, 6)), ((4, 6), (4, 5)), ((4, 5), (5, 4))]: eq(pyfunc, cfunc, args) # Different-sized tuples argtypes = [types.Tuple((types.int64, types.float32)), types.UniTuple(types.int32, 3)] cr = compile_isolated(pyfunc, tuple(argtypes)) cfunc = cr.entry_point for args in [((4, 5), (4, 5, 6)), ((4, 5), (4, 4, 6)), ((4, 5), (4, 6, 7))]: eq(pyfunc, cfunc, args) @tag('important') def test_eq(self): self._test_compare(eq_usecase) @tag('important') def test_ne(self): 
self._test_compare(ne_usecase) @tag('important') def test_gt(self): self._test_compare(gt_usecase) @tag('important') def test_ge(self): self._test_compare(ge_usecase) @tag('important') def test_lt(self): self._test_compare(lt_usecase) @tag('important') def test_le(self): self._test_compare(le_usecase) class TestNamedTuple(TestCase, MemoryLeakMixin): def test_unpack(self): def check(p): for pyfunc in tuple_first, tuple_second: cfunc = jit(nopython=True)(pyfunc) self.assertPreciseEqual(cfunc(p), pyfunc(p)) # Homogenous check(Rect(4, 5)) # Heterogenous check(Rect(4, 5.5)) def test_len(self): def check(p): pyfunc = len_usecase cfunc = jit(nopython=True)(pyfunc) self.assertPreciseEqual(cfunc(p), pyfunc(p)) # Homogenous check(Rect(4, 5)) check(Point(4, 5, 6)) # Heterogenous check(Rect(4, 5.5)) check(Point(4, 5.5, 6j)) def test_index(self): pyfunc = tuple_index cfunc = jit(nopython=True)(pyfunc) p = Point(4, 5, 6) for i in range(len(p)): self.assertPreciseEqual(cfunc(p, i), pyfunc(p, i)) def test_bool(self): def check(p): pyfunc = bool_usecase cfunc = jit(nopython=True)(pyfunc) self.assertPreciseEqual(cfunc(p), pyfunc(p)) # Homogenous check(Rect(4, 5)) # Heterogenous check(Rect(4, 5.5)) check(Empty()) def _test_compare(self, pyfunc): def eq(pyfunc, cfunc, args): self.assertIs(cfunc(*args), pyfunc(*args), "mismatch for arguments %s" % (args,)) cfunc = jit(nopython=True)(pyfunc) # Same-sized named tuples for a, b in [((4, 5), (4, 5)), ((4, 5), (4, 6)), ((4, 6), (4, 5)), ((4, 5), (5, 4))]: eq(pyfunc, cfunc, (Rect(*a), Rect(*b))) # Different-sized named tuples for a, b in [((4, 5), (4, 5, 6)), ((4, 5), (4, 4, 6)), ((4, 5), (4, 6, 7))]: eq(pyfunc, cfunc, (Rect(*a), Point(*b))) @tag('important') def test_eq(self): self._test_compare(eq_usecase) @tag('important') def test_ne(self): self._test_compare(ne_usecase) def test_gt(self): self._test_compare(gt_usecase) def test_ge(self): self._test_compare(ge_usecase) def test_lt(self): self._test_compare(lt_usecase) def test_le(self): self._test_compare(le_usecase) @tag('important') def test_getattr(self): pyfunc = getattr_usecase cfunc = jit(nopython=True)(pyfunc) for args in (4, 5, 6), (4, 5.5, 6j): p = Point(*args) self.assertPreciseEqual(cfunc(p), pyfunc(p)) @tag('important') def test_construct(self): def check(pyfunc): cfunc = jit(nopython=True)(pyfunc) for args in (4, 5, 6), (4, 5.5, 6j): expected = pyfunc(*args) got = cfunc(*args) self.assertIs(type(got), type(expected)) self.assertPreciseEqual(got, expected) check(make_point) check(make_point_kws) def test_type(self): # Test the type() built-in on named tuples pyfunc = type_usecase cfunc = jit(nopython=True)(pyfunc) arg_tuples = [(4, 5, 6), (4, 5.5, 6j)] for tup_args, args in itertools.product(arg_tuples, arg_tuples): tup = Point(*tup_args) expected = pyfunc(tup, *args) got = cfunc(tup, *args) self.assertIs(type(got), type(expected)) self.assertPreciseEqual(got, expected) class TestNamedTupleNRT(TestCase, MemoryLeakMixin): def test_return(self): # Check returning a namedtuple with a list inside it pyfunc = make_point_nrt cfunc = jit(nopython=True)(pyfunc) for arg in (3, 0): expected = pyfunc(arg) got = cfunc(arg) self.assertIs(type(got), type(expected)) self.assertPreciseEqual(got, expected) class TestConversions(TestCase): """ Test implicit conversions between tuple types. 
""" def check_conversion(self, fromty, toty, val): pyfunc = identity cr = compile_isolated(pyfunc, (fromty,), toty) cfunc = cr.entry_point res = cfunc(val) self.assertEqual(res, val) def test_conversions(self): check = self.check_conversion fromty = types.UniTuple(types.int32, 2) check(fromty, types.UniTuple(types.float32, 2), (4, 5)) check(fromty, types.Tuple((types.float32, types.int16)), (4, 5)) aty = types.UniTuple(types.int32, 0) bty = types.Tuple(()) check(aty, bty, ()) check(bty, aty, ()) with self.assertRaises(errors.TypingError) as raises: check(fromty, types.Tuple((types.float32,)), (4, 5)) self.assertIn("No conversion from (int32 x 2) to (float32 x 1)", str(raises.exception)) if __name__ == '__main__': unittest.main()
# ============================================================================= # periscope-ps (blipp) # # Copyright (c) 2013-2016, Trustees of Indiana University, # All rights reserved. # # This software may be modified and distributed under the terms of the BSD # license. See the COPYING file for details. # # This software was created at the Indiana University Center for Research in # Extreme Scale Technologies (CREST). # ============================================================================= SAMPLE_CONFIG = \ { "status": "ON", "ttl": 300, "name": "blipp", # "id": "1234567890", "serviceType": "http://some_schema_domain/blipp", "runningOn": { "href": "http://dev.incntre.iu.edu/nodes/anode", "rel": "full"}, "properties": { "configurations": { "unis_url":"http://dev.incntre.iu.edu", "use_ssl": "", "ssl_cert": "cert_file", "ssl_key": "key_file", "ssl_cafile": "ca_file", "domain":"testdomain.net", "probe_defaults": { "collection_schedule":"simple", "schedule_params": {"every": 10}, "reporting_schedule":"simple|num_measurements...etc", "reporting_params":["arg1", 2], "collection_size":10000000, "collection_ttl":1500000, "ms_url":"http://dev.incntre.iu.edu"}, "probes": { "ping1": { "probe_module": "ping", "collection_schedule": "simple", "schedule_params": {"every": 5}, "reporting_schedule": "simple", "reporting_params": [6], "ms_url": "someurl", "collection_ttl": 30000, "kwargs": {"remote_host": "129.62.33.22", "timeout": 3, "packet_size": 56, "byte_pattern": "0xAAAA"} }, "ping2": { "probe_module": "ping", "collection_schedule": "simple", "schedule_params": {"every": 10}, "reporting_schedule": "simple", "reporting_params": [7], "ms_url": "someurl", "kwargs": {"remote_host": "bing.com", "timeout": 2, "packet_size": 56, "byte_pattern": "0xAAAA"} }, "cpu": { "probe_module": "cpu", "collection_schedule": "simple", "schedule_params": 1, "kwargs": {"proc_dir": "/proc"} }, "net": { "probe_module": "net", "status": "off", "kwargs": {"proc_dir": "/proc", "unis_url": "http://www.dev.incntre.iu.edu", "subject": "http://www.dev.incntre.iu.edu/nodes/hikerbear"} } } } } } SAMPLE_STRIPPED = { "status": "ON", "ttl": 300, "name": "blipp", # "id": "1234567890", "serviceType": "http://some_schema_domain/blipp", "runningOn": { "href": "http://dev.incntre.iu.edu/nodes/anode", "rel": "full"}, "properties": { "configurations": { "unis_url":"http://dev.incntre.iu.edu", "use_ssl": "", "ssl_cert": "cert_file", "ssl_key": "key_file", "ssl_cafile": "ca_file", "domain":"testdomain.net" } } } STRIPPED_PROBES = { "ping1": { "collection_size":10000000, "probe_module": "ping", "collection_schedule": "simple", "schedule_params": {"every": 5}, "reporting_schedule": "simple", "reporting_params": [6], "ms_url": "someurl", "collection_ttl": 30000, "kwargs": {"remote_host": "129.62.33.22", "timeout": 3, "packet_size": 56, "byte_pattern": "0xAAAA"} }, "ping2": { "collection_size":10000000, "collection_ttl":1500000, "probe_module": "ping", "collection_schedule": "simple", "schedule_params": {"every": 10}, "reporting_schedule": "simple", "reporting_params": [7], "ms_url": "someurl", "kwargs": {"remote_host": "bing.com", "timeout": 2, "packet_size": 56, "byte_pattern": "0xAAAA"} }, "cpu": { "reporting_schedule":"simple|num_measurements...etc", "reporting_params":["arg1", 2], "collection_size":10000000, "collection_ttl":1500000, "ms_url":"http://dev.incntre.iu.edu", "probe_module": "cpu", "collection_schedule": "simple", "schedule_params": 1, "kwargs": {"proc_dir": "/proc"} }, "net": { "collection_schedule":"simple", 
"schedule_params": {"every": 10}, "reporting_schedule":"simple|num_measurements...etc", "reporting_params":["arg1", 2], "collection_size":10000000, "collection_ttl":1500000, "ms_url":"http://dev.incntre.iu.edu", "probe_module": "net", "status": "off", "kwargs": {"proc_dir": "/proc", "unis_url": "http://www.dev.incntre.iu.edu", "subject": "http://www.dev.incntre.iu.edu/nodes/hikerbear"} } } PING_SCHEMA = { "name": "pingschema", "address": "iu.edu", u"probe_module": u"cmd_line_probe", "domain":"testdomain.net", "unis_url":"http://dev.incntre.iu.edu", "runningOn": {"href": "http://dev.incntre.iu.edu/nodes/anode", "rel": "full"}, "collection_schedule": "simple", "schedule_params": {"every": 10}, "reporting_schedule": "simple|num_measurements...etc", "reporting_params": ['arg1', 2], "collection_size": 10000000, "collection_ttl": 1500000, "ms_url": "http://dev.incntre.iu.edu", "use_ssl": "", "ssl_cert": "cert_file", "ssl_key": "key_file", "ssl_cafile": "ca_file", "properties": {}, '$schema': 'file://ping-schema.json', u'command': u'ping -W $TIMEOUT -s $PACKET_SIZE -t $TTL -p $PATTERN -M $HINT -Q $TOS $EXTRAARGS $ADDRESS', u'regex': u'ttl=(?P<ttl>\\d+).*time=(?P<rtt>\\d+\\.\\d+) ', u'eventTypes': { u'ttl': u'ps:tools:blipp:linux:net:ping:ttl', u'rtt': u'ps:tools:blipp:linux:net:ping:rtt' }, u'timeout': 2, u'packet_size': 56, u'ttl': 60, u'pattern': u'00', u'hint': u'dont', u'tos': u'0', u'extraargs': u'', } PING_1 = {"name": "ping1", "probe_module": "ping", "domain":"testdomain.net", "unis_url":"http://dev.incntre.iu.edu", "runningOn": {"href": "http://dev.incntre.iu.edu/nodes/anode", "rel": "full"}, "collection_schedule": "simple", "schedule_params": {"every": 5}, "reporting_schedule": "simple", "reporting_params": [6], "collection_size": 10000000, "collection_ttl": 30000, "ms_url": "someurl", "use_ssl": "", "ssl_cert": "cert_file", "ssl_key": "key_file", "ssl_cafile": "ca_file", "properties": {}, "kwargs": {"remote_host": "129.62.33.22", "timeout": 3, "packet_size": 56, "byte_pattern": "0xAAAA"}} PING_2= {"name": "ping2", "probe_module": "ping", "domain":"testdomain.net", "unis_url":"http://dev.incntre.iu.edu", "runningOn": {"href": "http://dev.incntre.iu.edu/nodes/anode", "rel": "full"}, "collection_schedule": "simple", "schedule_params": {"every": 10}, "reporting_schedule": "simple", "reporting_params": [7], "collection_size": 10000000, "collection_ttl": 1500000, "use_ssl": "", "ssl_cert": "cert_file", "ssl_key": "key_file", "ssl_cafile": "ca_file", "ms_url": "someurl", "properties":{}, "kwargs": {"remote_host": "bing.com", "timeout": 2, "packet_size": 56, "byte_pattern": "0xAAAA"}} big_config_dict = {"not_in_unis": "file_val", "in_unis": "file_val", "name": "blipp", "id": 1234567890, "runningOn": {"href": "http://dev.incntre.iu.edu/nodes/anode", "rel": "full"}, "probes": {"ping": {"collection_schedule": "simple|other poss?", "schedule_params": ["arg1", 2, "arg3"], "reporting_schedule": "simple|num_measurements...etc", "reporting_params": ["arg1", 2], "collection_size": 10000000, "collection_ttl": 1500000, "ms_url": "someurl", "kwargs": {"remote_host": "google.com", "timeout": 2, "packet_size": 56, "byte_pattern": "0xAAAA"}, "targets": [{"kwargs": {"remote_host": "129.62.33.22", "timeout": 3}, "collection_ttl": 30000}, {"kwargs": {"remote_host": "bing.com"}}]}, "cpu": {"collection_schedule": "simple", "schedule_params": 1, "kwargs": {"proc_dir": "/proc"}}, "net": {"status": "off", "kwargs": {"proc_dir": "/proc", "unis_url": "http://www.dev.incntre.iu.edu", "subject": 
"http://www.dev.incntre.iu.edu/nodes/hikerbear"} }}} bootstrap_local_config = { "status": "ON", "$schema": 'http://unis.incntre.iu.edu/schema/20140214/service#', "serviceType": "http://some_schema_domain/blipp", "name": "blipp", "ttl": 100000, "location":{"institution": "blipp unit test inst"}, "description": "blipp unit test", "properties": { "configurations": { "unis_url":"http://dev.incntre.iu.edu", "probe_defaults": {"collection_schedule":"builtins.simple", "schedule_params":{"every": 20}, "reporting_params":7, "collection_size":10000000, "collection_ttl":1500000, "ms_url":"http://dev.incntre.iu.edu"}, "domain":"blippunittest", "hostname":"unittest", "host_urn": "urn:ogf:network:domain=blippunittest:node=unittest", "probes":{ "ping":{ "collection_schedule":"simple", "kwargs":{"remote_host":"google.com", "timeout":2, "packet_size":56, "byte_pattern":"0xAAAA"}, "targets":[{"kwargs":{"remote_host":"127.0.0.1", "timeout":3}, "collection_ttl":30000}, {"kwargs":{"remote_host":"131.253.13.32"}}] }, "cpu":{ "collection_schedule":"simple", "schedule_params":{"every": 5}, "kwargs":{"proc_dir":"/proc"} }, "net":{ "status": "OFF", "kwargs": {"proc_dir":"/proc", "unis_url": "http://dev.incntre.iu.edu"} } } }, "summary": { "metadata": [] } } } meminfo = """ MemTotal: 8127964 kB MemFree: 525460 kB Buffers: 358700 kB Cached: 4390548 kB SwapCached: 768 kB Active: 4015276 kB Inactive: 3044436 kB Active(anon): 1950768 kB Inactive(anon): 367044 kB Active(file): 2064508 kB Inactive(file): 2677392 kB Unevictable: 0 kB Mlocked: 0 kB SwapTotal: 5105656 kB SwapFree: 5104120 kB Dirty: 96 kB Writeback: 0 kB AnonPages: 2309808 kB Mapped: 173400 kB Shmem: 7360 kB Slab: 401292 kB SReclaimable: 371660 kB SUnreclaim: 29632 kB KernelStack: 4672 kB PageTables: 42628 kB NFS_Unstable: 0 kB Bounce: 0 kB WritebackTmp: 0 kB CommitLimit: 9169636 kB Committed_AS: 5341024 kB VmallocTotal: 34359738367 kB VmallocUsed: 330620 kB VmallocChunk: 34359380988 kB HardwareCorrupted: 0 kB HugePages_Total: 0 HugePages_Free: 0 HugePages_Rsvd: 0 HugePages_Surp: 0 Hugepagesize: 2048 kB DirectMap4k: 2021376 kB DirectMap2M: 6301696 kB""" user_beancounters = """Version: 2.5 uid resource held maxheld barrier limit failcnt 7: kmemsize 61925780 64507904 9223372036854775807 9223372036854775807 0 lockedpages 0 0 9223372036854775807 9223372036854775807 0 privvmpages 312066 321621 9223372036854775807 9223372036854775807 0 shmpages 640 1296 9223372036854775807 9223372036854775807 0 dummy 0 0 0 0 0 numproc 34 84 9223372036854775807 9223372036854775807 0 physpages 1173286 1180545 9223372036854775807 9223372036854775807 0 vmguarpages 0 0 9223372036854775807 9223372036854775807 0 oomguarpages 12375 17889 9223372036854775807 9223372036854775807 0 numtcpsock 10 74 9223372036854775807 9223372036854775807 0 numflock 2 10 9223372036854775807 9223372036854775807 0 numpty 1 2 9223372036854775807 9223372036854775807 0 numsiginfo 0 27 9223372036854775807 9223372036854775807 0 tcpsndbuf 182080 1398784 9223372036854775807 9223372036854775807 0 tcprcvbuf 163840 7966208 9223372036854775807 9223372036854775807 0 othersockbuf 14016 41184 9223372036854775807 9223372036854775807 0 dgramrcvbuf 0 8768 9223372036854775807 9223372036854775807 0 numothersock 94 99 9223372036854775807 9223372036854775807 0 dcachesize 57694551 57798312 60817408 67108864 0 numfile 360 600 9223372036854775807 9223372036854775807 0 dummy 0 0 0 0 0 dummy 0 0 0 0 0 dummy 0 0 0 0 0 numiptent 20 20 9223372036854775807 9223372036854775807 0"""
#!/usr/bin/env python # encoding: utf-8 import os from types import NoneType from xmlrpclib import DateTime import mock from nose.tools import * # flake8: noqa from tests.base import OsfTestCase from osf_tests.factories import (UserFactory, ProjectFactory, NodeFactory, AuthFactory, RegistrationFactory, PrivateLinkFactory) from framework.auth import Auth from website.util import rubeus from website.util.rubeus import sort_by_name from osf.utils import sanitize class TestRubeus(OsfTestCase): def setUp(self): super(TestRubeus, self).setUp() self.project = ProjectFactory.create() self.consolidated_auth = Auth(user=self.project.creator) self.non_authenticator = UserFactory() self.project.save() self.project.add_contributor( contributor=self.non_authenticator, auth=self.consolidated_auth, ) self.project.add_addon('s3', self.consolidated_auth) self.project.creator.add_addon('s3', self.consolidated_auth) self.node_settings = self.project.get_addon('s3') self.user_settings = self.project.creator.get_addon('s3') self.user_settings.access_key = 'We-Will-Rock-You' self.user_settings.secret_key = 'Idontknowanyqueensongs' self.node_settings.bucket = 'Sheer-Heart-Attack' self.node_settings.user_settings = self.user_settings self.node_settings.save() def test_hgrid_dummy(self): node_settings = self.node_settings node = self.project user = Auth(self.project.creator) # FIXME: These tests are very brittle. expected = { 'isPointer': False, 'provider': 's3', 'addonFullname': node_settings.config.full_name, 'iconUrl': node_settings.config.icon_url, 'name': 'Amazon S3: {0}'.format( node_settings.bucket ), 'kind': 'folder', 'accept': { 'maxSize': node_settings.config.max_file_size, 'acceptedFiles': node_settings.config.accept_extensions }, 'isAddonRoot': True, 'extra': None, 'buttons': None, 'nodeId': node._id, 'nodeUrl': node.url, 'nodeApiUrl': node.api_url, } permissions = { 'view': node.can_view(user), 'edit': node.can_edit(user) and not node.is_registration, } expected['permissions'] = permissions actual = rubeus.build_addon_root(node_settings, node_settings.bucket, permissions=permissions) assert actual['urls']['fetch'] assert actual['urls']['upload'] del actual['urls'] assert_equals(actual, expected) def test_build_addon_root_has_correct_upload_limits(self): self.node_settings.config.max_file_size = 10 self.node_settings.config.high_max_file_size = 20 node = self.project user = self.project.creator auth = Auth(user) permissions = { 'view': node.can_view(auth), 'edit': node.can_edit(auth) and not node.is_registration, } result = rubeus.build_addon_root( self.node_settings, self.node_settings.bucket, permissions=permissions, user=user ) assert_equal(result['accept']['maxSize'], self.node_settings.config.max_file_size) # user now has elevated upload limit user.add_system_tag('high_upload_limit') user.save() result = rubeus.build_addon_root( self.node_settings, self.node_settings.bucket, permissions=permissions, user=user ) assert_equal( result['accept']['maxSize'], self.node_settings.config.high_max_file_size ) def test_build_addon_root_for_anonymous_vols_hides_path(self): private_anonymous_link = PrivateLinkFactory(anonymous=True) private_anonymous_link.nodes.add(self.project) private_anonymous_link.save() project_viewer = UserFactory() result = rubeus.build_addon_root( self.node_settings, self.node_settings.bucket, user=project_viewer, private_key=private_anonymous_link.key ) assert result['name'] == 'Amazon S3' def test_build_addon_root_for_anonymous_vols_shows_path(self): private_link = 
PrivateLinkFactory() private_link.nodes.add(self.project) private_link.save() project_viewer = UserFactory() result = rubeus.build_addon_root( self.node_settings, self.node_settings.bucket, user=project_viewer, private_key=private_link.key ) assert result['name'] == 'Amazon S3: {0}'.format( self.node_settings.bucket ) def test_hgrid_dummy_fail(self): node_settings = self.node_settings node = self.project user = Auth(self.project.creator) rv = { 'isPointer': False, 'addon': 's3', 'addonFullname': node_settings.config.full_name, 'iconUrl': node_settings.config.icon_url, 'name': 'Amazon S3: {0}'.format( node_settings.bucket ), 'kind': 'folder', 'permissions': { 'view': node.can_view(user), 'edit': node.can_edit(user) and not node.is_registration, }, 'urls': { 'fetch': node.api_url + 's3/hgrid/', 'upload': node.api_url + 's3/upload/' }, 'accept': { 'maxSize': node_settings.config.max_file_size, 'acceptedFiles': node_settings.config.accept_extensions }, 'isAddonRoot': True, 'nodeId': node._id, 'nodeUrl': node.url, 'nodeApiUrl': node.api_url, } permissions = { 'view': node.can_view(user), 'edit': node.can_edit(user) and not node.is_registration, } assert_not_equals(rubeus.build_addon_root( node_settings, node_settings.bucket, permissions=permissions), rv) def test_hgrid_dummy_overrides(self): node_settings = self.node_settings node = self.project user = Auth(self.project.creator) expected = { 'isPointer': False, 'provider': 's3', 'addonFullname': node_settings.config.full_name, 'iconUrl': node_settings.config.icon_url, 'name': 'Amazon S3: {0}'.format( node_settings.bucket ), 'kind': 'folder', 'permissions': { 'view': node.can_view(user), 'edit': node.can_edit(user) and not node.is_registration, }, 'urls': {}, 'accept': { 'maxSize': node_settings.config.max_file_size, 'acceptedFiles': node_settings.config.accept_extensions }, 'isAddonRoot': True, 'extra': None, 'buttons': None, 'nodeId': node._id, 'nodeUrl': node.url, 'nodeApiUrl': node.api_url, } permissions = { 'view': node.can_view(user), 'edit': node.can_edit(user) and not node.is_registration, } assert_equal( rubeus.build_addon_root( node_settings, node_settings.bucket, permissions=permissions, urls={} ), expected ) def test_get_nodes_deleted_component(self): node = NodeFactory(creator=self.project.creator, parent=self.project) node.is_deleted = True collector = rubeus.NodeFileCollector( self.project, Auth(user=UserFactory()) ) nodes = collector._get_nodes(self.project) assert_equal(len(nodes['children']), 0) def test_serialized_pointer_has_flag_indicating_its_a_pointer(self): project = ProjectFactory(creator=self.consolidated_auth.user) pointed_project = ProjectFactory(is_public=True) project.add_pointer(pointed_project, auth=self.consolidated_auth) serializer = rubeus.NodeFileCollector(node=project, auth=self.consolidated_auth) ret = serializer._get_nodes(project) child = ret['children'][1] # first child is OSFStorage, second child is pointer assert_true(child['isPointer']) def test_private_components_not_shown(self): user = UserFactory() public_project = ProjectFactory(creator=user, is_public=True) private_child = NodeFactory(parent=public_project, creator=user, is_public=False) public_grandchild = NodeFactory(parent=private_child, creator=user, is_public=True) private_greatgrandchild = NodeFactory(parent=public_grandchild, creator=user, is_public=False) public_greatgreatgranchild = NodeFactory(parent=private_greatgrandchild, creator=user, is_public=True) serializer = rubeus.NodeFileCollector(node=public_project, 
auth=Auth(user=UserFactory())) ret = serializer.to_hgrid() children = ret[0]['children'] assert 'osfstorage' == children[0]['provider'] assert public_grandchild._id == children[1]['nodeID'] assert public_grandchild.title == children[1]['name'] assert False == children[1]['permissions']['edit'] assert public_greatgreatgranchild._id == children[1]['children'][1]['nodeID'] assert public_greatgreatgranchild.title == children[1]['children'][1]['name'] assert False == children[1]['children'][1]['permissions']['edit'] assert 'Private Component' not in ret def test_private_components_shown(self): user = UserFactory() public_project = ProjectFactory(creator=user, is_public=True) private_child = NodeFactory(parent=public_project, creator=user, is_public=False) public_grandchild = NodeFactory(parent=private_child, creator=user, is_public=True) serializer = rubeus.NodeFileCollector(node=public_project, auth=Auth(user)) ret = serializer.to_hgrid() children = ret[0]['children'] assert 'osfstorage' == children[0]['provider'] assert private_child._id == children[1]['nodeID'] assert private_child.title == children[1]['name'] assert True == children[1]['permissions']['edit'] assert public_grandchild._id == children[1]['children'][1]['nodeID'] assert public_grandchild.title == children[1]['children'][1]['name'] assert True == children[1]['children'][1]['permissions']['edit'] assert 'Private Component' not in ret # TODO: Make this more reusable across test modules mock_addon = mock.Mock() serialized = { 'addon': 'mockaddon', 'name': 'Mock Addon', 'isAddonRoot': True, 'extra': '', 'permissions': {'view': True, 'edit': True}, 'urls': { 'fetch': '/fetch', 'delete': '/delete' } } mock_addon.config.get_hgrid_data.return_value = [serialized] class TestSerializingNodeWithAddon(OsfTestCase): def setUp(self): super(TestSerializingNodeWithAddon, self).setUp() self.auth = AuthFactory() self.project = ProjectFactory(creator=self.auth.user) self.project.get_addons = mock.Mock() self.project.get_addons.return_value = [mock_addon] self.serializer = rubeus.NodeFileCollector(node=self.project, auth=self.auth) def test_collect_addons(self): ret = self.serializer._collect_addons(self.project) assert_equal(ret, [serialized]) def test_sort_by_name(self): files = [ {'name': 'F.png'}, {'name': 'd.png'}, {'name': 'B.png'}, {'name': 'a.png'}, {'name': 'c.png'}, {'name': 'e.png'}, {'name': 'g.png'}, ] sorted_files = [ {'name': 'a.png'}, {'name': 'B.png'}, {'name': 'c.png'}, {'name': 'd.png'}, {'name': 'e.png'}, {'name': 'F.png'}, {'name': 'g.png'}, ] ret = sort_by_name(files) for index, value in enumerate(ret): assert_equal(value['name'], sorted_files[index]['name']) def test_sort_by_name_none(self): files = None sorted_files = None ret = sort_by_name(files) assert_equal(ret, sorted_files) def test_serialize_node(self): ret = self.serializer._get_nodes(self.project) assert_equal( len(ret['children']), len(self.project.get_addons.return_value) + len(list(self.project.nodes)) ) assert_equal(ret['kind'], rubeus.FOLDER) assert_equal(ret['name'], self.project.title) assert_equal( ret['permissions'], { 'view': True, 'edit': True, } ) assert_equal( ret['urls'], { 'upload': None, 'fetch': None, }, )
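# The sort_by_name tests above pin down two behaviours: items are ordered by
# their 'name' key case-insensitively, and a None input is returned unchanged.
# A minimal sketch of a function with that behaviour (an assumption for
# illustration, not the website.util.rubeus implementation):
def sort_by_name_sketch(hgrid_data):
    if hgrid_data is None:
        return None
    return sorted(hgrid_data, key=lambda item: item['name'].lower())

# sort_by_name_sketch([{'name': 'F.png'}, {'name': 'a.png'}, {'name': 'B.png'}])
# -> [{'name': 'a.png'}, {'name': 'B.png'}, {'name': 'F.png'}]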
# Copyright 2013: Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import ddt import mock from rally import exceptions as rally_exceptions from rally.plugins.openstack.scenarios.nova import servers from tests.unit import fakes from tests.unit import test NOVA_SERVERS_MODULE = "rally.plugins.openstack.scenarios.nova.servers" NOVA_SERVERS = NOVA_SERVERS_MODULE + ".NovaServers" @ddt.ddt class NovaServersTestCase(test.ScenarioTestCase): def test_boot_rescue_unrescue(self): actions = [{"rescue_unrescue": 5}] fake_server = mock.MagicMock() scenario = servers.NovaServers(self.context) scenario._boot_server = mock.MagicMock(return_value=fake_server) scenario.generate_random_name = mock.MagicMock(return_value="name") scenario._rescue_server = mock.MagicMock() scenario._unrescue_server = mock.MagicMock() scenario._delete_server = mock.MagicMock() scenario.boot_and_bounce_server("img", 1, actions=actions) scenario._boot_server.assert_called_once_with("img", 1) server_calls = [] for i in range(5): server_calls.append(mock.call(fake_server)) self.assertEqual(5, scenario._rescue_server.call_count, "Rescue not called 5 times") self.assertEqual(5, scenario._unrescue_server.call_count, "Unrescue not called 5 times") scenario._rescue_server.assert_has_calls(server_calls) scenario._unrescue_server.assert_has_calls(server_calls) scenario._delete_server.assert_called_once_with(fake_server, force=False) def test_boot_stop_start(self): actions = [{"stop_start": 5}] fake_server = mock.MagicMock() scenario = servers.NovaServers(self.context) scenario._boot_server = mock.MagicMock(return_value=fake_server) scenario.generate_random_name = mock.MagicMock(return_value="name") scenario._start_server = mock.MagicMock() scenario._stop_server = mock.MagicMock() scenario._delete_server = mock.MagicMock() scenario.boot_and_bounce_server("img", 1, actions=actions) scenario._boot_server.assert_called_once_with("img", 1) server_calls = [] for i in range(5): server_calls.append(mock.call(fake_server)) self.assertEqual(5, scenario._stop_server.call_count, "Stop not called 5 times") self.assertEqual(5, scenario._start_server.call_count, "Start not called 5 times") scenario._stop_server.assert_has_calls(server_calls) scenario._start_server.assert_has_calls(server_calls) scenario._delete_server.assert_called_once_with(fake_server, force=False) def test_multiple_bounce_actions(self): actions = [{"hard_reboot": 5}, {"stop_start": 8}] fake_server = mock.MagicMock() scenario = servers.NovaServers(self.context) scenario._boot_server = mock.MagicMock(return_value=fake_server) scenario._delete_server = mock.MagicMock() scenario._reboot_server = mock.MagicMock() scenario._stop_and_start_server = mock.MagicMock() scenario.generate_random_name = mock.MagicMock(return_value="name") scenario.boot_and_bounce_server("img", 1, actions=actions) scenario._boot_server.assert_called_once_with("img", 1) server_calls = [] for i in range(5): server_calls.append(mock.call(fake_server)) self.assertEqual(5, 
scenario._reboot_server.call_count, "Reboot not called 5 times") scenario._reboot_server.assert_has_calls(server_calls) server_calls = [] for i in range(8): server_calls.append(mock.call(fake_server)) self.assertEqual(8, scenario._stop_and_start_server.call_count, "Stop/Start not called 8 times") scenario._stop_and_start_server.assert_has_calls(server_calls) scenario._delete_server.assert_called_once_with(fake_server, force=False) def test_boot_lock_unlock_and_delete(self): server = fakes.FakeServer() image = fakes.FakeImage() flavor = fakes.FakeFlavor() scenario = servers.NovaServers(self.context) scenario._boot_server = mock.Mock(return_value=server) scenario._lock_server = mock.Mock(side_effect=lambda s: s.lock()) scenario._unlock_server = mock.Mock(side_effect=lambda s: s.unlock()) scenario._delete_server = mock.Mock( side_effect=lambda s, **kwargs: self.assertFalse(getattr(s, "OS-EXT-STS:locked", False))) scenario.boot_lock_unlock_and_delete(image, flavor, fakearg="fakearg") scenario._boot_server.assert_called_once_with(image, flavor, fakearg="fakearg") scenario._lock_server.assert_called_once_with(server) scenario._unlock_server.assert_called_once_with(server) scenario._delete_server.assert_called_once_with(server, force=False) def test_validate_actions(self): actions = [{"hardd_reboot": 6}] scenario = servers.NovaServers(self.context) self.assertRaises(rally_exceptions.InvalidConfigException, scenario.boot_and_bounce_server, 1, 1, actions=actions) actions = [{"hard_reboot": "no"}] self.assertRaises(rally_exceptions.InvalidConfigException, scenario.boot_and_bounce_server, 1, 1, actions=actions) actions = {"hard_reboot": 6} self.assertRaises(rally_exceptions.InvalidConfigException, scenario.boot_and_bounce_server, 1, 1, actions=actions) actions = {"hard_reboot": -1} self.assertRaises(rally_exceptions.InvalidConfigException, scenario.boot_and_bounce_server, 1, 1, actions=actions) actions = {"hard_reboot": 0} self.assertRaises(rally_exceptions.InvalidConfigException, scenario.boot_and_bounce_server, 1, 1, actions=actions) def _verify_reboot(self, soft=True): actions = [{"soft_reboot" if soft else "hard_reboot": 5}] fake_server = mock.MagicMock() scenario = servers.NovaServers(self.context) scenario._reboot_server = mock.MagicMock() scenario._soft_reboot_server = mock.MagicMock() scenario._boot_server = mock.MagicMock(return_value=fake_server) scenario._delete_server = mock.MagicMock() scenario.generate_random_name = mock.MagicMock(return_value="name") scenario.boot_and_bounce_server("img", 1, actions=actions) scenario._boot_server.assert_called_once_with("img", 1) server_calls = [] for i in range(5): server_calls.append(mock.call(fake_server)) if soft: self.assertEqual(5, scenario._soft_reboot_server.call_count, "Reboot not called 5 times") scenario._soft_reboot_server.assert_has_calls(server_calls) else: self.assertEqual(5, scenario._reboot_server.call_count, "Reboot not called 5 times") scenario._reboot_server.assert_has_calls(server_calls) scenario._delete_server.assert_called_once_with(fake_server, force=False) def test_boot_soft_reboot(self): self._verify_reboot(soft=True) def test_boot_hard_reboot(self): self._verify_reboot(soft=False) def test_boot_and_delete_server(self): fake_server = object() scenario = servers.NovaServers(self.context) scenario.generate_random_name = mock.MagicMock(return_value="name") scenario._boot_server = mock.MagicMock(return_value=fake_server) scenario._delete_server = mock.MagicMock() scenario.sleep_between = mock.MagicMock() 
scenario.boot_and_delete_server("img", 0, 10, 20, fakearg="fakearg") scenario._boot_server.assert_called_once_with("img", 0, fakearg="fakearg") scenario.sleep_between.assert_called_once_with(10, 20) scenario._delete_server.assert_called_once_with(fake_server, force=False) def test_boot_and_delete_multiple_servers(self): scenario = servers.NovaServers(self.context) scenario._boot_servers = mock.Mock() scenario._delete_servers = mock.Mock() scenario.sleep_between = mock.Mock() scenario.boot_and_delete_multiple_servers("img", "flavor", count=15, min_sleep=10, max_sleep=20, fakearg="fakearg") scenario._boot_servers.assert_called_once_with("img", "flavor", 1, instances_amount=15, fakearg="fakearg") scenario.sleep_between.assert_called_once_with(10, 20) scenario._delete_servers.assert_called_once_with( scenario._boot_servers.return_value, force=False) def test_boot_and_list_server(self): scenario = servers.NovaServers(self.context) scenario.generate_random_name = mock.MagicMock(return_value="name") scenario._boot_server = mock.MagicMock() scenario._list_servers = mock.MagicMock() scenario.boot_and_list_server("img", 0, fakearg="fakearg") scenario._boot_server.assert_called_once_with("img", 0, fakearg="fakearg") scenario._list_servers.assert_called_once_with(True) def test_suspend_and_resume_server(self): fake_server = object() scenario = servers.NovaServers(self.context) scenario.generate_random_name = mock.MagicMock(return_value="name") scenario._boot_server = mock.MagicMock(return_value=fake_server) scenario._suspend_server = mock.MagicMock() scenario._resume_server = mock.MagicMock() scenario._delete_server = mock.MagicMock() scenario.suspend_and_resume_server("img", 0, fakearg="fakearg") scenario._boot_server.assert_called_once_with("img", 0, fakearg="fakearg") scenario._suspend_server.assert_called_once_with(fake_server) scenario._resume_server.assert_called_once_with(fake_server) scenario._delete_server.assert_called_once_with(fake_server, force=False) def test_pause_and_unpause_server(self): fake_server = object() scenario = servers.NovaServers(self.context) scenario.generate_random_name = mock.MagicMock(return_value="name") scenario._boot_server = mock.MagicMock(return_value=fake_server) scenario._pause_server = mock.MagicMock() scenario._unpause_server = mock.MagicMock() scenario._delete_server = mock.MagicMock() scenario.pause_and_unpause_server("img", 0, fakearg="fakearg") scenario._boot_server.assert_called_once_with("img", 0, fakearg="fakearg") scenario._pause_server.assert_called_once_with(fake_server) scenario._unpause_server.assert_called_once_with(fake_server) scenario._delete_server.assert_called_once_with(fake_server, force=False) def test_shelve_and_unshelve_server(self): fake_server = mock.MagicMock() scenario = servers.NovaServers(self.context) scenario._boot_server = mock.MagicMock(return_value=fake_server) scenario._shelve_server = mock.MagicMock() scenario._unshelve_server = mock.MagicMock() scenario._delete_server = mock.MagicMock() scenario.shelve_and_unshelve_server("img", 0, fakearg="fakearg") scenario._boot_server.assert_called_once_with("img", 0, fakearg="fakearg") scenario._shelve_server.assert_called_once_with(fake_server) scenario._unshelve_server.assert_called_once_with(fake_server) scenario._delete_server.assert_called_once_with(fake_server, force=False) def test_list_servers(self): scenario = servers.NovaServers(self.context) scenario._list_servers = mock.MagicMock() scenario.list_servers(True) scenario._list_servers.assert_called_once_with(True) def 
test_boot_server_from_volume_and_delete(self): fake_server = object() scenario = servers.NovaServers(self.context) scenario._boot_server = mock.MagicMock(return_value=fake_server) scenario.generate_random_name = mock.MagicMock(return_value="name") scenario.sleep_between = mock.MagicMock() scenario._delete_server = mock.MagicMock() fake_volume = fakes.FakeVolumeManager().create() fake_volume.id = "volume_id" scenario._create_volume = mock.MagicMock(return_value=fake_volume) scenario.boot_server_from_volume_and_delete("img", 0, 5, 10, 20, fakearg="f") scenario._create_volume.assert_called_once_with(5, imageRef="img") scenario._boot_server.assert_called_once_with( "img", 0, block_device_mapping={"vda": "volume_id:::1"}, fakearg="f") scenario.sleep_between.assert_called_once_with(10, 20) scenario._delete_server.assert_called_once_with(fake_server, force=False) def _prepare_boot(self, nic=None, assert_nic=False): fake_server = mock.MagicMock() scenario = servers.NovaServers(self.context) scenario._boot_server = mock.MagicMock(return_value=fake_server) scenario.generate_random_name = mock.MagicMock(return_value="name") kwargs = {"fakearg": "f"} expected_kwargs = {"fakearg": "f"} assert_nic = nic or assert_nic if nic: kwargs["nics"] = nic if assert_nic: self.clients("nova").networks.create("net-1") expected_kwargs["nics"] = nic or [{"net-id": "net-2"}] return scenario, kwargs, expected_kwargs def _verify_boot_server(self, nic=None, assert_nic=False): scenario, kwargs, expected_kwargs = self._prepare_boot( nic=nic, assert_nic=assert_nic) scenario.boot_server("img", 0, **kwargs) scenario._boot_server.assert_called_once_with( "img", 0, auto_assign_nic=False, **expected_kwargs) def test_boot_server_no_nics(self): self._verify_boot_server(nic=None, assert_nic=False) def test_boot_server_with_nic(self): self._verify_boot_server(nic=[{"net-id": "net-1"}], assert_nic=True) def test_snapshot_server(self): fake_server = object() fake_image = fakes.FakeImageManager()._create() fake_image.id = "image_id" scenario = servers.NovaServers(self.context) scenario.generate_random_name = mock.MagicMock(return_value="name") scenario._boot_server = mock.MagicMock(return_value=fake_server) scenario._create_image = mock.MagicMock(return_value=fake_image) scenario._delete_server = mock.MagicMock() scenario._delete_image = mock.MagicMock() scenario.snapshot_server("i", 0, fakearg=2) scenario._boot_server.assert_has_calls([ mock.call("i", 0, fakearg=2), mock.call("image_id", 0, fakearg=2)]) scenario._create_image.assert_called_once_with(fake_server) scenario._delete_server.assert_has_calls([ mock.call(fake_server, force=False), mock.call(fake_server, force=False)]) scenario._delete_image.assert_called_once_with(fake_image) def _test_resize(self, confirm=False): fake_server = object() fake_image = fakes.FakeImageManager()._create() fake_image.id = "image_id" flavor = mock.MagicMock() to_flavor = mock.MagicMock() scenario = servers.NovaServers(self.context) scenario.generate_random_name = mock.MagicMock(return_value="name") scenario._boot_server = mock.MagicMock(return_value=fake_server) scenario._resize_confirm = mock.MagicMock() scenario._resize_revert = mock.MagicMock() scenario._resize = mock.MagicMock() scenario._delete_server = mock.MagicMock() kwargs = {"confirm": confirm} scenario.resize_server(fake_image, flavor, to_flavor, **kwargs) scenario._resize.assert_called_once_with(fake_server, to_flavor) if confirm: scenario._resize_confirm.assert_called_once_with(fake_server) else: 
scenario._resize_revert.assert_called_once_with(fake_server) def test_resize_with_confirm(self): self._test_resize(confirm=True) def test_resize_with_revert(self): self._test_resize(confirm=False) @ddt.data({"confirm": True, "do_delete": True}, {"confirm": False, "do_delete": True}) @ddt.unpack def test_boot_server_attach_created_volume_and_resize(self, confirm=False, do_delete=False): fake_volume = mock.MagicMock() fake_server = mock.MagicMock() flavor = mock.MagicMock() to_flavor = mock.MagicMock() scenario = servers.NovaServers(self.context) scenario.generate_random_name = mock.MagicMock(return_value="name") scenario._boot_server = mock.MagicMock(return_value=fake_server) scenario._create_volume = mock.MagicMock(return_value=fake_volume) scenario._attach_volume = mock.MagicMock() scenario._resize_confirm = mock.MagicMock() scenario._resize_revert = mock.MagicMock() scenario._resize = mock.MagicMock() scenario._detach_volume = mock.MagicMock() scenario._delete_volume = mock.MagicMock() scenario._delete_server = mock.MagicMock() scenario.sleep_between = mock.MagicMock() volume_size = 10 scenario.boot_server_attach_created_volume_and_resize( "img", flavor, to_flavor, volume_size, min_sleep=10, max_sleep=20, confirm=confirm, do_delete=do_delete) scenario._boot_server.assert_called_once_with("img", flavor) scenario._create_volume.assert_called_once_with(volume_size) scenario._attach_volume.assert_called_once_with(fake_server, fake_volume) scenario._detach_volume.assert_called_once_with(fake_server, fake_volume) scenario.sleep_between.assert_called_once_with(10, 20) scenario._resize.assert_called_once_with(fake_server, to_flavor) if confirm: scenario._resize_confirm.assert_called_once_with(fake_server) else: scenario._resize_revert.assert_called_once_with(fake_server) if do_delete: scenario._detach_volume.assert_called_once_with(fake_server, fake_volume) scenario._delete_volume.assert_called_once_with(fake_volume) scenario._delete_server.assert_called_once_with(fake_server, force=False) @ddt.data({"confirm": True, "do_delete": True}, {"confirm": False, "do_delete": True}) @ddt.unpack def test_boot_server_from_volume_and_resize(self, confirm=False, do_delete=False): fake_server = object() flavor = mock.MagicMock() to_flavor = mock.MagicMock() scenario = servers.NovaServers(self.context) scenario._boot_server = mock.MagicMock(return_value=fake_server) scenario.generate_random_name = mock.MagicMock(return_value="name") scenario._resize_confirm = mock.MagicMock() scenario._resize_revert = mock.MagicMock() scenario._resize = mock.MagicMock() scenario.sleep_between = mock.MagicMock() scenario._delete_server = mock.MagicMock() fake_volume = fakes.FakeVolumeManager().create() fake_volume.id = "volume_id" scenario._create_volume = mock.MagicMock(return_value=fake_volume) volume_size = 10 scenario.boot_server_from_volume_and_resize( "img", flavor, to_flavor, volume_size, min_sleep=10, max_sleep=20, confirm=confirm, do_delete=do_delete) scenario._create_volume.assert_called_once_with(10, imageRef="img") scenario._boot_server.assert_called_once_with( "img", flavor, block_device_mapping={"vda": "volume_id:::1"}) scenario.sleep_between.assert_called_once_with(10, 20) scenario._resize.assert_called_once_with(fake_server, to_flavor) if confirm: scenario._resize_confirm.assert_called_once_with(fake_server) else: scenario._resize_revert.assert_called_once_with(fake_server) if do_delete: scenario._delete_server.assert_called_once_with(fake_server, force=False) def test_boot_and_live_migrate_server(self): 
fake_server = mock.MagicMock() scenario = servers.NovaServers(self.context) scenario.generate_random_name = mock.MagicMock(return_value="name") scenario._boot_server = mock.MagicMock(return_value=fake_server) scenario.sleep_between = mock.MagicMock() scenario._find_host_to_migrate = mock.MagicMock( return_value="host_name") scenario._live_migrate = mock.MagicMock() scenario._delete_server = mock.MagicMock() scenario.boot_and_live_migrate_server("img", 0, min_sleep=10, max_sleep=20, fakearg="fakearg") scenario._boot_server.assert_called_once_with("img", 0, fakearg="fakearg") scenario.sleep_between.assert_called_once_with(10, 20) scenario._find_host_to_migrate.assert_called_once_with(fake_server) scenario._live_migrate.assert_called_once_with(fake_server, "host_name", False, False) scenario._delete_server.assert_called_once_with(fake_server) def test_boot_server_from_volume_and_live_migrate(self): fake_server = mock.MagicMock() scenario = servers.NovaServers(self.context) scenario.generate_random_name = mock.MagicMock(return_value="name") scenario._boot_server = mock.MagicMock(return_value=fake_server) scenario.sleep_between = mock.MagicMock() scenario._find_host_to_migrate = mock.MagicMock( return_value="host_name") scenario._live_migrate = mock.MagicMock() scenario._delete_server = mock.MagicMock() fake_volume = fakes.FakeVolumeManager().create() fake_volume.id = "volume_id" scenario._create_volume = mock.MagicMock(return_value=fake_volume) scenario.boot_server_from_volume_and_live_migrate("img", 0, 5, min_sleep=10, max_sleep=20, fakearg="f") scenario._create_volume.assert_called_once_with(5, imageRef="img") scenario._boot_server.assert_called_once_with( "img", 0, block_device_mapping={"vda": "volume_id:::1"}, fakearg="f") scenario.sleep_between.assert_called_once_with(10, 20) scenario._find_host_to_migrate.assert_called_once_with(fake_server) scenario._live_migrate.assert_called_once_with(fake_server, "host_name", False, False) scenario._delete_server.assert_called_once_with(fake_server, force=False) def test_boot_server_attach_created_volume_and_live_migrate(self): fake_volume = mock.MagicMock() fake_server = mock.MagicMock() scenario = servers.NovaServers(self.context) scenario._attach_volume = mock.MagicMock() scenario._detach_volume = mock.MagicMock() scenario.sleep_between = mock.MagicMock() scenario._find_host_to_migrate = mock.MagicMock( return_value="host_name") scenario._live_migrate = mock.MagicMock() scenario._boot_server = mock.MagicMock(return_value=fake_server) scenario._delete_server = mock.MagicMock() scenario._create_volume = mock.MagicMock(return_value=fake_volume) scenario._delete_volume = mock.MagicMock() image = "img" flavor = "flavor" size = 5 boot_kwargs = {"some_var": "asd"} scenario.boot_server_attach_created_volume_and_live_migrate( image, flavor, size, min_sleep=10, max_sleep=20, boot_server_kwargs=boot_kwargs) scenario._boot_server.assert_called_once_with(image, flavor, **boot_kwargs) scenario._create_volume.assert_called_once_with(size) scenario._attach_volume.assert_called_once_with(fake_server, fake_volume) scenario._detach_volume.assert_called_once_with(fake_server, fake_volume) scenario.sleep_between.assert_called_once_with(10, 20) scenario._live_migrate.assert_called_once_with(fake_server, "host_name", False, False) scenario._delete_volume.assert_called_once_with(fake_volume) scenario._delete_server.assert_called_once_with(fake_server) def _test_boot_and_migrate_server(self, confirm=False): fake_server = mock.MagicMock() scenario = 
servers.NovaServers(self.context) scenario.generate_random_name = mock.MagicMock(return_value="name") scenario._boot_server = mock.MagicMock(return_value=fake_server) scenario._stop_server = mock.MagicMock() scenario._migrate = mock.MagicMock() scenario._resize_confirm = mock.MagicMock() scenario._resize_revert = mock.MagicMock() scenario._delete_server = mock.MagicMock() kwargs = {"confirm": confirm} scenario.boot_and_migrate_server("img", 0, fakearg="fakearg", **kwargs) scenario._boot_server.assert_called_once_with("img", 0, fakearg="fakearg", confirm=confirm) scenario._stop_server.assert_called_once_with(fake_server) scenario._migrate.assert_called_once_with(fake_server) if confirm: scenario._resize_confirm.assert_called_once_with(fake_server, status="SHUTOFF") else: scenario._resize_revert.assert_called_once_with(fake_server, status="SHUTOFF") scenario._delete_server.assert_called_once_with(fake_server) def test_boot_and_migrate_server_with_confirm(self): self._test_boot_and_migrate_server(confirm=True) def test_boot_and_migrate_server_with_revert(self): self._test_boot_and_migrate_server(confirm=False) def test_boot_and_rebuild_server(self): scenario = servers.NovaServers(self.context) scenario._boot_server = mock.Mock() scenario._rebuild_server = mock.Mock() scenario._delete_server = mock.Mock() from_image = "img1" to_image = "img2" flavor = "flavor" scenario.boot_and_rebuild_server(from_image, to_image, flavor, fakearg="fakearg") scenario._boot_server.assert_called_once_with(from_image, flavor, fakearg="fakearg") server = scenario._boot_server.return_value scenario._rebuild_server.assert_called_once_with(server, to_image) scenario._delete_server.assert_called_once_with(server) def test_boot_and_show_server(self): server = fakes.FakeServer() image = fakes.FakeImage() flavor = fakes.FakeFlavor() scenario = servers.NovaServers(self.context) scenario._boot_server = mock.MagicMock(return_value=server) scenario._show_server = mock.MagicMock() scenario.boot_and_show_server(image, flavor, fakearg="fakearg") scenario._boot_server.assert_called_once_with(image, flavor, fakearg="fakearg") scenario._show_server.assert_called_once_with(server) @ddt.data({"length": None}, {"length": 10}) @ddt.unpack def test_boot_and_get_console_server(self, length): server = fakes.FakeServer() image = fakes.FakeImage() flavor = fakes.FakeFlavor() kwargs = {"fakearg": "fakearg"} scenario = servers.NovaServers(self.context) scenario._boot_server = mock.MagicMock(return_value=server) scenario._get_server_console_output = mock.MagicMock() scenario.boot_and_get_console_output(image, flavor, length, **kwargs) scenario._boot_server.assert_called_once_with(image, flavor, **kwargs) scenario._get_server_console_output.assert_called_once_with(server, length) @mock.patch(NOVA_SERVERS_MODULE + ".network_wrapper.wrap") def test_boot_and_associate_floating_ip(self, mock_wrap): scenario = servers.NovaServers(self.context) server = mock.Mock() scenario._boot_server = mock.Mock(return_value=server) scenario._associate_floating_ip = mock.Mock() image = "img" flavor = "flavor" scenario.boot_and_associate_floating_ip(image, flavor, fakearg="fakearg") scenario._boot_server.assert_called_once_with(image, flavor, fakearg="fakearg") net_wrap = mock_wrap.return_value net_wrap.create_floating_ip.assert_called_once_with( tenant_id=server.tenant_id) scenario._associate_floating_ip.assert_called_once_with( server, net_wrap.create_floating_ip.return_value["ip"])
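# Nearly every scenario exercised above follows the same boot -> act -> delete
# shape that the mocks assert: boot once, repeat each requested action the
# given number of times, then delete the server with force=False. A rough
# sketch of that control flow for boot_and_bounce_server (an illustration of
# what the assertions encode, not the rally implementation; the
# action-to-method mapping shown covers only two of the supported actions):
def boot_and_bounce_sketch(scenario, image, flavor, actions):
    server = scenario._boot_server(image, flavor)
    for action in actions:
        for name, count in action.items():
            for _ in range(count):
                if name == "hard_reboot":
                    scenario._reboot_server(server)
                elif name == "rescue_unrescue":
                    scenario._rescue_server(server)
                    scenario._unrescue_server(server)
    scenario._delete_server(server, force=False)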
# -*- coding: utf-8 -*- from __future__ import absolute_import, print_function from argparse import Namespace import tensorflow as tf import numpy as np from niftynet.evaluation.pairwise_measures import PairwiseMeasures from niftynet.utilities.util_common import MorphologyOps from niftynet.evaluation.segmentation_evaluator import SegmentationEvaluator import niftynet.evaluation.segmentation_evaluations as segmentation_evaluations import niftynet.evaluation.regression_evaluations as regression_evaluations from niftynet.evaluation.classification_evaluator import ClassificationEvaluator from niftynet.evaluation.regression_evaluator import RegressionEvaluator TEST_CASES = {0: {'seg_img': np.array([1, 0, 0, 0]), 'ref_img': np.array([1, 0, 0, 0])}, 1: {'seg_img': np.array([1, 0, 1, 0]), 'ref_img': np.array([1, 0, 0, 0])}, 2: {'seg_img': np.array([3, 2, 0, 0]), 'ref_img': np.array([1, 2, 0, 0])}, 3: {'seg_img': np.array([1, 0, 0.5, 0]), 'ref_img': np.array([1, 0, 0, 0])}, 4: {'seg_img': np.reshape([1, 1, 1, 0, 0, 0, 0, 0],[2,2,2,1,1]), 'ref_img': np.reshape([0, 0, 0, 0, 0, 0, 1, 1],[2,2,2,1,1])}, } class BinaryCheckTest(np.testing.TestCase): def test_binary_check_for_labels(self): pairwise_measures = PairwiseMeasures(seg_img=TEST_CASES[2]['seg_img'], ref_img=TEST_CASES[2]['ref_img']) self.assertRaises(ValueError, pairwise_measures.check_binary) def test_binary_check_for_probabilities(self): pairwise_measures = PairwiseMeasures(seg_img=TEST_CASES[3]['seg_img'], ref_img=TEST_CASES[3]['ref_img']) self.assertRaises(ValueError, pairwise_measures.check_binary) class PairwiseTests(np.testing.TestCase): def test_dice_score(self): pairwise_measures = PairwiseMeasures(seg_img=TEST_CASES[0]['seg_img'], ref_img=TEST_CASES[0]['ref_img']) self.assertEqual(pairwise_measures.dice_score(), 1.0) def test_true_positive(self): pairwise_measures = PairwiseMeasures(seg_img=TEST_CASES[1]['seg_img'], ref_img=TEST_CASES[1]['ref_img']) self.assertEqual(pairwise_measures.tp(), 1.0) def test_faulty_inputs(self): pairwise_measures = PairwiseMeasures(seg_img=TEST_CASES[3]['seg_img'], ref_img=TEST_CASES[3]['ref_img']) self.assertRaises(ValueError, pairwise_measures.tp) def test_true_negative(self): pairwise_measures = PairwiseMeasures(seg_img=TEST_CASES[1]['seg_img'], ref_img=TEST_CASES[1]['ref_img']) self.assertEqual(pairwise_measures.tn(), 2.) def test_n_negative(self): pairwise_measures = PairwiseMeasures(seg_img=TEST_CASES[1]['seg_img'], ref_img=TEST_CASES[1]['ref_img']) self.assertEqual(pairwise_measures.n_neg_ref(), 3.) self.assertEqual(pairwise_measures.n_neg_seg(), 2.) def test_union(self): pairwise_measures = PairwiseMeasures(seg_img=TEST_CASES[1]['seg_img'], ref_img=TEST_CASES[1]['ref_img']) self.assertEqual(pairwise_measures.n_union(), 2.) def test_intersection(self): pairwise_measures = PairwiseMeasures(seg_img=TEST_CASES[1]['seg_img'], ref_img=TEST_CASES[1]['ref_img']) self.assertEqual(pairwise_measures.n_intersection(), 1.) def test_sensitivity(self): pairwise_measures = PairwiseMeasures(seg_img=TEST_CASES[1]['seg_img'], ref_img=TEST_CASES[1]['ref_img']) self.assertEqual(pairwise_measures.sensitivity(), 1.) def test_specificity(self): pairwise_measures = PairwiseMeasures(seg_img=TEST_CASES[1]['seg_img'], ref_img=TEST_CASES[1]['ref_img']) self.assertEqual(pairwise_measures.specificity(), 2. / 3) def test_accuracy(self): pairwise_measures = PairwiseMeasures(seg_img=TEST_CASES[1]['seg_img'], ref_img=TEST_CASES[1]['ref_img']) self.assertEqual(pairwise_measures.accuracy(), 3. 
/ 4) def test_false_positive_rate(self): pairwise_measures = PairwiseMeasures(seg_img=TEST_CASES[1]['seg_img'], ref_img=TEST_CASES[1]['ref_img']) self.assertEqual(pairwise_measures.false_positive_rate(), 1. / 3) def test_positive_predictive_value(self): pairwise_measures = PairwiseMeasures(seg_img=TEST_CASES[1]['seg_img'], ref_img=TEST_CASES[1]['ref_img']) self.assertEqual(pairwise_measures.positive_predictive_values(), 1. / 2) def test_negative_predictive_value(self): pairwise_measures = PairwiseMeasures(seg_img=TEST_CASES[1]['seg_img'], ref_img=TEST_CASES[1]['ref_img']) self.assertEqual(pairwise_measures.negative_predictive_values(), 1.) def test_intersection_over_union(self): pairwise_measures = PairwiseMeasures(seg_img=TEST_CASES[1]['seg_img'], ref_img=TEST_CASES[1]['ref_img']) self.assertEqual(pairwise_measures.intersection_over_union(), 1. / 2) def test_jaccard(self): pairwise_measures = PairwiseMeasures(seg_img=TEST_CASES[1]['seg_img'], ref_img=TEST_CASES[1]['ref_img']) self.assertEqual(pairwise_measures.jaccard(), 1. / 2) def test_informedness(self): # true positive rate - false positive rate pairwise_measures = PairwiseMeasures(seg_img=TEST_CASES[1]['seg_img'], ref_img=TEST_CASES[1]['ref_img']) self.assertAlmostEqual(pairwise_measures.informedness(), 2. / 3) def test_markedness(self): pairwise_measures = PairwiseMeasures(seg_img=TEST_CASES[1]['seg_img'], ref_img=TEST_CASES[1]['ref_img']) self.assertEqual(pairwise_measures.markedness(), 1. / 2) def test_centre_of_mass(self): pairwise_measures = PairwiseMeasures(seg_img=TEST_CASES[1]['seg_img'], ref_img=TEST_CASES[1]['ref_img'], pixdim=[2]) self.assertListEqual(list(pairwise_measures.com_ref()), [0.0]) self.assertListEqual(list(pairwise_measures.com_seg()), [1.0]) self.assertEqual(pairwise_measures.com_dist(), 2.) def test_vol_diff(self): pairwise_measures = PairwiseMeasures(seg_img=TEST_CASES[1]['seg_img'], ref_img=TEST_CASES[1]['ref_img'], pixdim=[2]) self.assertEqual(pairwise_measures.vol_diff(), 1.) 
class MorphologyTests(np.testing.TestCase): def test_2d_offset(self): test_img = np.concatenate([np.zeros([3, 3]), np.ones([3, 3])]) # expected border -- everywhere the test_img is 1, except the centre of it expected_border = np.zeros([6, 3]) expected_border[3:][:] = 1 expected_border[4, 1] = 0 self.assertRaises(AssertionError, MorphologyOps, test_img, 8) #calculated_border = MorphologyOps(test_img, 8).border_map() #self.assertTrue(np.array_equal(calculated_border, expected_border)) def test_3d_offset(self): test_img = np.zeros([10, 10, 10]) test_img[5, 5, 5] = 1 # border is the same as the test image -- just the one positive voxel calculated_border = MorphologyOps(test_img, 8).border_map() self.assertTrue(np.array_equal(test_img, calculated_border)) def test_1d_error(self): test_img = np.zeros([1]) self.assertRaises(AssertionError, MorphologyOps, test_img, 8) #self.assertRaises(ValueError, MorphologyOps(test_img, 8).border_map) class RegressionEvaluationTests(np.testing.TestCase): def build_data(self): ref = np.reshape([1., .2, 2., 1., .9, .2, 3., 2.], [2, 2, 2, 1, 1]) out = np.reshape([1., .3, 2., 1., .9, .2, 3., 2.], [2, 2, 2, 1, 1]) return ref, out def test_mse(self): rd = regression_evaluations.mse(None, None, None).metric( *self.build_data()) self.assertAlmostEqual(rd, 0.00125, 3) def test_rmse(self): rd = regression_evaluations.rmse(None, None, None).metric( *self.build_data()) self.assertAlmostEqual(rd, 0.03535, 3) def test_mae(self): rd = regression_evaluations.mae(None, None, None).metric( *self.build_data()) self.assertAlmostEqual(rd, 0.0125, 3) class SegmentationEvaluationTests(np.testing.TestCase): def metric(self, cls, case): return cls(None, None, None).metric_from_binarized( seg=TEST_CASES[case]['seg_img'], ref=TEST_CASES[case]['ref_img']) def test_average_distance(self): self.assertAlmostEqual(self.metric( segmentation_evaluations.average_distance, 4), 1.2485,3) def test_hausdorff_distance(self): self.assertAlmostEqual(self.metric( segmentation_evaluations.hausdorff_distance, 4), 1.414,3) def test_hausdorff95_distance(self): self.assertAlmostEqual(self.metric( segmentation_evaluations.hausdorff95_distance, 4), 1.414,3) def test_dice_score(self): self.assertEqual(self.metric(segmentation_evaluations.dice, 0), 1.0) def test_true_positive(self): self.assertEqual(self.metric(segmentation_evaluations.tp, 1), 1.0) def test_true_negative(self): self.assertEqual(self.metric(segmentation_evaluations.tn, 1), 2.) def test_n_negative(self): self.assertEqual(self.metric( segmentation_evaluations.n_neg_ref, 1), 3.0) self.assertEqual(self.metric( segmentation_evaluations.n_neg_seg, 1), 2.0) def test_union(self): self.assertEqual(self.metric(segmentation_evaluations.n_union, 1), 2.0) def test_intersection(self): self.assertEqual(self.metric( segmentation_evaluations.n_intersection,1), 1.0) def test_sensitivity(self): self.assertEqual(self.metric( segmentation_evaluations.sensitivity, 1), 1.0) def test_specificity(self): self.assertEqual(self.metric( segmentation_evaluations.specificity, 1), 2. / 3) def test_accuracy(self): self.assertEqual(self.metric( segmentation_evaluations.accuracy, 1), 3. / 4) def test_false_positive_rate(self): self.assertEqual(self.metric( segmentation_evaluations.false_positive_rate, 1), 1. / 3) def test_positive_predictive_value(self): self.assertEqual(self.metric( segmentation_evaluations.positive_predictive_values, 1), 1. 
/ 2) def test_negative_predictive_value(self): self.assertEqual(self.metric( segmentation_evaluations.negative_predictive_values, 1), 1.0) def test_intersection_over_union(self): self.assertEqual(self.metric( segmentation_evaluations.intersection_over_union, 1), 1. / 2) def test_jaccard(self): self.assertEqual(self.metric( segmentation_evaluations.jaccard, 1), 1. / 2) def test_informedness(self): self.assertAlmostEqual(self.metric( segmentation_evaluations.informedness, 1), 2. / 3) def test_markedness(self): self.assertEqual(self.metric( segmentation_evaluations.markedness,1), 1. /2) class ClassificationEvaluationTests(np.testing.TestCase): def data1(self): raw_data = [[0,.12],[0,.24],[1,.36], [0,.45], [0,.61],[1,.28],[1,.99], [1,.89]] formatted_data = [{'label':np.reshape(datum[0],[1,1,1,1,1]), 'inferred':np.reshape([1-datum[1],datum[1]],[1,1,1,1,2])} for datum in raw_data] return formatted_data def data2(self): raw_data = [[0,0],[0,0],[1,0],[0,0], [0,1],[1,0],[1,1],[1,1]] formatted_data = [{'label':np.reshape(datum[0],[1,1,1,1,1]), 'inferred':np.reshape([datum[1]],[1,1,1,1,1])} for datum in raw_data] return formatted_data def generator(self, data): interp_orders = {'label':0,'inferred':-1} for idx, datum in enumerate(data): yield ('test'+str(idx), datum,interp_orders) def evaluator(self, eval_str, output_prob=True): class NS(object): def __init__(self, dict): self.__dict__.update(dict) classification_param=NS({'num_classes':2, 'output_prob':output_prob}) eval_param=NS({'evaluations':eval_str}) return ClassificationEvaluator(None, classification_param, eval_param) def test_accuracy_output_prob(self): data = self.data1() evl = self.evaluator('niftynet.evaluation.classification_evaluations.accuracy') result_dict = evl.evaluate_from_generator(self.generator(data)) self.assertIn((None,), result_dict) by_threshold = result_dict[(None,)].to_dict('index') self.assertEqual(by_threshold, {0: {'accuracy': 0.625}}) def test_accuracy_output_label(self): data = self.data2() evl = self.evaluator('niftynet.evaluation.classification_evaluations.accuracy', False) result_dict = evl.evaluate_from_generator(self.generator(data)) self.assertIn((None,), result_dict) by_threshold = result_dict[(None,)].to_dict('index') self.assertEqual(by_threshold, {0: {'accuracy': 0.625}}) def test_contrib_roc(self): data = self.data1() evl = self.evaluator('niftynet.contrib.evaluation.classification_evaluations.roc') result_dict = evl.evaluate_from_generator(self.generator(data)) self.assertIn(('thresholds',), result_dict) by_threshold = result_dict[('thresholds',)].to_dict('index') get_key = lambda x: [k for k in by_threshold.keys() if np.abs(k-x)<.01][0] sample = by_threshold[get_key(0.444)] self.assertEqual(sample['fp'],2) self.assertEqual(sample['spec'],0.5) self.assertEqual(sample['sens'],0.5) # FPF: 0.0000 0.0000 0.3333 0.3333 1.0000 # TPF: 0.0000 0.6667 0.6667 1.0000 1.0000 # #AREA UNDER ROC CURVE: # Area under fitted curve (Az) = 0.9043 # Estimated std. 
error = 0.1260 # Trapezoidal (Wilcoxon) area = 0.8889 def test_contrib_roc_auc(self): data = self.data1() evl = self.evaluator('niftynet.contrib.evaluation.classification_evaluations.roc_auc') result_dict = evl.evaluate_from_generator(self.generator(data)) self.assertIn((None,), result_dict) print(result_dict[(None,)].to_dict('index')) self.assertEqual(result_dict[(None,)].to_dict('index'), {0: {'roc_auc': 0.71875}}) class SegmentationEvaluatorTests(np.testing.TestCase): """ Tests that evaluator - evaluations integration works """ class ReaderStub(): def __init__(self): self.count = 0 sz=[2,2,2,1,1] self.data=((0, {'label': np.reshape([1, 0, 0, 0, 1, 0, 0, 0], sz), 'inferred': np.reshape([1, 0, 0, 0, 1, 0, 0, 0], sz)}, None), (1, {'label': np.reshape([1, 1, 0, 0, 1, 0, 0, 0], sz), 'inferred': np.reshape([1, 0, 0, 0, 1, 0, 0, 0], sz)}, None), (-1, None, None)) def __call__(self, shuffle): return_value = self.data[self.count] self.count += 1 return return_value def get_subject_id(self, image_id): return ['foo','bar'][image_id] def test_segmentation_evaluator(self): app_param = Namespace(evaluation_units='label,cc',output_prob=False, num_classes=2) eval_param = Namespace(evaluations='Dice,Jaccard,average_distance') evalu = SegmentationEvaluator(SegmentationEvaluatorTests.ReaderStub(), app_param, eval_param) result_dict = evalu.evaluate() self.assertIn(('subject_id', 'cc_id'), result_dict) self.assertIn(('subject_id', 'label'), result_dict) group_cc = result_dict[('subject_id', 'cc_id')] group_l = result_dict[('subject_id', 'label')] self.assertIn('jaccard', list(group_l.columns)) self.assertIn('dice', list(group_l.columns)) self.assertIn('jaccard', list(group_cc.columns)) self.assertIn('dice', list(group_cc.columns)) self.assertIn('average_distance', list(group_cc.columns)) self.assertIn(('foo','r1_s1'), list(group_cc.index)) self.assertIn(('bar','r1_s1'), list(group_cc.index)) self.assertIn(('foo',1), list(group_l.index)) self.assertIn(('bar',1), list(group_l.index)) class RegressionEvaluatorTests(np.testing.TestCase): """ Tests that evaluator - evaluations integration works """ class ReaderStub(): def __init__(self): self.count = 0 sz = [2, 2, 2, 1, 1] self.data = ((0, {'output': np.reshape([1, 0, 0, 0, 1, 0, 0, 0], sz), 'inferred': np.reshape([1, 0, 0, 0, 1, 0, 0, 0], sz)}, None), (1, {'output': np.reshape([1, 1, 0, 0, 1, 0, 0, 0], sz), 'inferred': np.reshape([1, 0, 0, 0, 1, 0, 0, 0], sz)}, None), (-1, None, None)) def __call__(self, shuffle): return_value = self.data[self.count] self.count += 1 return return_value def get_subject_id(self, image_id): return ['foo', 'bar'][image_id] def test_regression_evaluator(self): app_param = Namespace() eval_param = Namespace(evaluations='rmse,mse') evalu = RegressionEvaluator(RegressionEvaluatorTests.ReaderStub(), app_param, eval_param) result_dict = evalu.evaluate() self.assertIn(('subject_id',), result_dict) group = result_dict[('subject_id',)] self.assertEqual(('subject_id',), group.index.names) self.assertIn('mse', list(group.columns)) self.assertIn('rmse', list(group.columns)) self.assertIn('foo', list(group.index)) self.assertIn('bar', list(group.index)) if __name__ == '__main__': tf.test.main()
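# The expected values asserted for TEST_CASES[1] (seg=[1,0,1,0], ref=[1,0,0,0])
# all fall out of one confusion matrix. Spelled out with plain numpy as a
# sanity check on the numbers above (standard definitions, not NiftyNet code):
seg = np.array([1, 0, 1, 0])
ref = np.array([1, 0, 0, 0])
tp = float(np.sum((seg == 1) & (ref == 1)))  # 1.0
fp = float(np.sum((seg == 1) & (ref == 0)))  # 1.0
tn = float(np.sum((seg == 0) & (ref == 0)))  # 2.0
fn = float(np.sum((seg == 0) & (ref == 1)))  # 0.0
sensitivity = tp / (tp + fn)                 # 1.0
specificity = tn / (tn + fp)                 # 2/3
accuracy = (tp + tn) / seg.size              # 3/4
iou = tp / (tp + fp + fn)                    # 1/2 (same as jaccard here)
informedness = sensitivity + specificity - 1.0      # 2/3
markedness = tp / (tp + fp) + tn / (tn + fn) - 1.0  # 1/2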
"""extra utility functions""" import os import sys import logging import random import operator import numpy as np import math import tensorflow.keras import tensorflow as tf from tensorflow.python.keras.layers import Dropout from tensorflow.keras.callbacks import Callback from sklearn.metrics import f1_score, precision_score, recall_score import socket import datetime import collections from collections import OrderedDict def str2bool(v): if isinstance(v, bool): return v if v.lower() in ('yes', 'true', 't', 'y', '1'): return True elif v.lower() in ('no', 'false', 'f', 'n', '0'): return False else: raise argparse.ArgumentTypeError('Boolean value expected.') def log(args): """Create logging directory structure according to args.""" if hasattr(args, "checkpoint") and args.checkpoint: return _log_from_checkpoint(args) else: stamp = datetime.date.strftime(datetime.datetime.now(), "%Y.%m.%d-%Hh%Mm%Ss") + "_{}".format(socket.gethostname()) full_logdir = os.path.join(args.log_dir, args.log_name, stamp) os.makedirs(full_logdir, exist_ok=True) args.log_dir = "{}:{}".format(socket.gethostname(), full_logdir) _log_args(full_logdir, args) return full_logdir, 0 def _log_from_checkpoint(args): """Infer logging directory from checkpoint file.""" int_dir, checkpoint_name = os.path.split(args.checkpoint) logdir = os.path.dirname(int_dir) checkpoint_num = int(checkpoint_name.split('_')[1]) _log_args(logdir, args, modified_iter=checkpoint_num) return logdir, checkpoint_num def _log_args(logdir, args, modified_iter=0): """Write log of current arguments to text.""" keys = sorted(arg for arg in dir(args) if not arg.startswith("_")) args_dict = {key: getattr(args, key) for key in keys} with open(os.path.join(logdir, "config.log"), "a") as f: f.write("Values at iteration {}\n".format(modified_iter)) for k in keys: s = ": ".join([k,str(args_dict[k])]) + "\n" f.write(s) def tokenize_annotations(annotations): """Function to tokenize & convert a list of genes GO term annotations to their equivalent list of GO term annotation ids""" go_terms = [] for annotation in annotations: go_terms.extend(annotation.split()) go_terms_freq = OrderedDict({k: v for k, v in sorted(collections.Counter(go_terms).items(), key=lambda item: item[1], reverse=True)}) # example output: OrderedDict([('GO0006810', 804), ('GO0006351', 512), ('GO0006355', 497), ..., ('GO0006351', 56), ('GO0006873', 13), ('GO0034427', 2)]) go_term_indeces = {go_term:indx+1 for indx, go_term in enumerate(go_terms_freq)} # each index represents a one-hot vector for its assiciate GO term annotations_to_annotation_ids = [] for annotation in annotations: annotations_to_annotation_ids.append([go_term_indeces[go_term] for go_term in annotation.split()]) return annotations_to_annotation_ids, go_term_indeces def pad_annotations(annotations, maxlen): """Function to zero-pad a lists of annotations (the ids), each annotation belongs to one gene""" return np.array([[0]*(maxlen-len(annotation))+annotation for annotation in annotations]) def exp_decay(epoch, initial_lrate): """Function to apply exponential decay to the initial learning rate""" k = 0.1 lrate = initial_lrate * math.exp(-k*epoch) return lrate def save_gene_pairs(logdir, model_id, train_pair, test_pair, train_gene, test_gene): """Function to store the gene pairs used for training and testing in different models""" path="{}/gene_pairs/model_{}".format(logdir, model_id+1) os.makedirs(path, exist_ok=True) with open("{}/train_pair.txt".format(path), "w") as fw: fw.write("\n".join(train_pair)) with 
open("{}/test_pair.txt".format(path), "w") as fw: fw.write("\n".join(test_pair)) with open("{}/train_gene.txt".format(path), "w") as fw: fw.write("\n".join(train_gene)) with open("{}/test_gene.txt".format(path), "w") as fw: fw.write("\n".join(test_gene)) def save_model(path, models, epoch, verbose=True): """Function to save deepSimDEF models (checkpointing)""" path = "{}/model_checkpoints/epoch_{}".format(path, epoch) os.makedirs(path, exist_ok=True) # create the directory if it does not exist for ind in range(len(models)): if verbose: print("Saving model {} to disk ...".format(ind+1)) model_json = models[ind].to_json() with open("{}/model_{}.json".format(path, ind+1, epoch), "w") as json_file: json_file.write(model_json) models[ind].save_weights("{}/model_{}.h5".format(path, ind+1, epoch)) if verbose: print("The model and its weights are saved!!") def save_embeddings(path, models, go_term_indeces, sub_ontology_interested, embedding_save, epoch, verbose=True): """Function to save deepSimDEF updated embeddings""" path = "{}/model_embeddings_updated/epoch_{}".format(path, epoch) for ind in range(len(models)): names = [weight.name for layer in models[ind].layers for weight in layer.weights] weights = models[ind].get_weights() for sbo in sub_ontology_interested: sbo_path = "{}/{}".format(path, sbo) os.makedirs(sbo_path, exist_ok=True) # create the directory if it does not exist for name, weight in zip(names, weights): if "embedding_{}_{}".format(sbo, ind) in name: embeddings=weight go_ids = [i for i, _ in sorted(go_term_indeces[sbo].items(), key=operator.itemgetter(1))] # list of all GO terms in this particular ontology, sorted by the numbers of their annotations with open("{}/{}_Model_{}.emb".format(sbo_path, embedding_save[sbo], ind+1), "w") as file_writer: for i in range(len(go_ids)): file_writer.write((go_ids[i] + " ").replace("\r", "\\r")) file_writer.write(" ".join([str(j) for j in embeddings[i+1]])+"\n") if verbose: print("The GO term embeddings for model {} are Saved!!".format(ind+1)) class DropAnnotation(Dropout): """Timestep Dropout. This version performs the same function as Dropout, however it drops entire timesteps (e.g., words embeddings in NLP tasks) instead of individual elements (features). # Arguments rate: float between 0 and 1. Fraction of the timesteps to drop. 
# Input shape 3D tensor with shape: `(samples, timesteps, channels)` # Output shape Same as input # References - A Theoretically Grounded Application of Dropout in Recurrent Neural Networks (https://arxiv.org/pdf/1512.05287) """ def __init__(self, rate, **kwargs): super(DropAnnotation, self).__init__(rate, **kwargs) self.input_spec = InputSpec(ndim=3) def _get_noise_shape(self, inputs): input_shape = K.shape(inputs) noise_shape = (input_shape[0], input_shape[1], 1) return noise_shape def partial_shuffle(array, percent=0.0): """Function for partial shuffling of (annotations of) ground truth (for 'Negative Control' experiments)""" # which characters are to be shuffled: idx_todo = random.sample(range(len(array)), int(len(array) * percent)) # what are the new positions of these to-be-shuffled characters: idx_target = idx_todo[:] random.shuffle(idx_target) # map all "normal" character positions {0:0, 1:1, 2:2, ...} mapper = dict((i, i) for i in range(len(array))) # update with all shuffles in the string: {old_pos:new_pos, old_pos:new_pos, ...} mapper.update(zip(idx_todo, idx_target)) # use mapper to modify the string: return [array[mapper[i]] for i in range(len(array))] def cal_iter_time(former_iteration_endpoint, e, args, tz): """Calculating 'Computation Time' for this round of iteration""" current_iteration_endpoint = datetime.datetime.now(tz) current_iteration_elapsed = str(current_iteration_endpoint - former_iteration_endpoint).split(".")[0] expected_running_time_left = str((current_iteration_endpoint - former_iteration_endpoint)*(args.nb_epoch-(e+1))).split(".")[0] temp = current_iteration_elapsed.split(":") if int(temp[0])==0 and int(temp[1])==0: current_iteration_elapsed = temp[2] elif int(temp[0])==0: current_iteration_elapsed = temp[1]+":"+temp[2] former_iteration_endpoint = current_iteration_endpoint print("This epoch took {} seconds to run; the expected running time left to go through all epochs is {}!\n".format( current_iteration_elapsed, expected_running_time_left)) return former_iteration_endpoint, current_iteration_elapsed def log_model_result_for_sequence_homology(epoch, model, lr, best_pearson, progress_pearson, pearson, best_spearman, progress_spearman, spearman, logdir): """logging the results of each model (within each epoch)""" with open('{}/result.log'.format(logdir), 'a') as fw: fw.write('| epoch {:3d} | model {:2d} | lr {:.5f} ' '| best-pearson {:.4f} | progress-prs {:+.4f} | pearson {:.4f} ' '| best-spearman {:.4f} | progress-spr {:+.4f} | spearman {:.4f}\n'.format( epoch, model, lr, best_pearson, progress_pearson, pearson, best_spearman, progress_spearman, spearman)) def log_epoch_result_for_sequence_homology(epoch, epochs, time, best_pearson, pearson, best_epoch_pearson, best_spearman, spearman, best_epoch_spearman, logdir): """logging the results of the given epoch (aggregated across all models)""" with open('{}/result.log'.format(logdir), 'a') as fw: fw.write("-"*161+"\n") fw.write("| epoch result {:3d}/{} | time: {}s | best-pearson {:.4f} | pearson {:.4f} | best epoch prs {:3d} | best-spearman {:.4f} | spearman {:.4f} | best epoch spr {:3d}\n".format( epoch, epochs, time, best_pearson, pearson, best_epoch_pearson, best_spearman, spearman, best_epoch_spearman)) fw.write("-"*161+"\n") def log_model_result_for_gene_expression(epoch, model, lr, best_pearson, progress_pearson, pearson, best_spearman, progress_spearman, spearman, logdir): """logging the results of each model (within each epoch)""" with open('{}/result.log'.format(logdir), 'a') as fw: fw.write('| epoch 
{:3d} | model {:2d} | lr {:.5f} ' '| best-pearson {:.4f} | progress-prs {:+.4f} | pearson {:.4f} ' '| best-spearman {:.4f} | progress-spr {:+.4f} | spearman {:.4f}\n'.format( epoch, model, lr, best_pearson, progress_pearson, pearson, best_spearman, progress_spearman, spearman)) def log_epoch_result_for_gene_expression(epoch, epochs, time, best_pearson, pearson, best_epoch_pearson, best_spearman, spearman, best_epoch_spearman, logdir): """logging the results of the given epoch (aggregated across all models)""" with open('{}/result.log'.format(logdir), 'a') as fw: fw.write("-"*161+"\n") fw.write("| epoch result {:3d}/{} | time: {}s | best-pearson {:.4f} | pearson {:.4f} | best epoch prs {:3d} | best-spearman {:.4f} | spearman {:.4f} | best epoch spr {:3d}\n".format( epoch, epochs, time, best_pearson, pearson, best_epoch_pearson, best_spearman, spearman, best_epoch_spearman)) fw.write("-"*161+"\n") def log_model_result_for_ppi(epoch, model, lr, best_f1_measure, progress_f1_measure, current_f1_measure, logdir): """logging the results of each model (within each epoch)""" with open('{}/result.log'.format(logdir), 'a') as fw: fw.write('| epoch {:3d} | model {:2d} | lr {:.5f} ' '| best-F1score {:.2f} | progress-F1score {:+.2f} | F1score {:.2f}\n'.format( epoch, model, lr, best_f1_measure, progress_f1_measure, current_f1_measure)) def log_epoch_result_for_ppi(epoch, epochs, time, best_cv_f1_measure, f1_res, best_epoch, logdir): """logging the results of the given epoch (aggregated across all models)""" with open('{}/result.log'.format(logdir), 'a') as fw: fw.write("-"*97+"\n") fw.write("| epoch result {:3d}/{} | time: {}s | best-F1score {:.2f} | F1score {:.2f} | best epoch {:3d}\n".format( epoch, epochs, time, best_cv_f1_measure, f1_res, best_epoch)) fw.write("-"*97+"\n") class F1Score(Callback): """F1Score should be computed across all data (not in single batches defined in metrics in model.compile)""" def __init__(self, validation_data=None): super(F1Score, self).__init__() self.validation_data = validation_data def on_train_begin(self, logs={}): self.val_f1s = [] self.val_recalls = [] self.val_precisions = [] def on_epoch_end(self, epoch, logs={}): val_predict = (np.asarray(self.model.predict(self.validation_data[0]))).round() val_targ = self.validation_data[1] _val_f1 = f1_score(val_targ, val_predict) _val_recall = recall_score(val_targ, val_predict) _val_precision = precision_score(val_targ, val_predict) self.val_f1s.append(_val_f1) self.val_recalls.append(_val_recall) self.val_precisions.append(_val_precision) print(" val_f1: {:.5f}".format(_val_f1)) def extract_annotation_1st_form(sub_ontology_all, gene_annotations_dir, include_electronic_annotation=False, verbose=True): """Function to extract annotation information from gene-annotation files (for every subontolgy)""" max_ann_len = {} # maximum annotation length (for every subontolgy) max_ann_len_indx = {} # index of the gene with maximum annotation length (for every subontolgy) gene_indeces = {} # indeces of every genes (for every subontolgy) gene_annotations = {} # genes annotations; ie. 
their one-hot vector ids (for every subontolgy) go_term_indeces = {} # indeces of GO terms (for every subontolgy); assigned based on their higher annotation frequencies to the genes for sbo in sub_ontology_all: gene_indeces[sbo] = {} max_ann_len[sbo] = 0 without_iea_genes = [] # first, for our experiments genes should have at least one IEA- annotation with open("{}/gene_protein_GO_terms_without_IEA.{}".format(gene_annotations_dir, sbo)) as fr: for gene in [line.split()[0] for line in fr.readlines()]: without_iea_genes.append(gene) if include_electronic_annotation: file_reader = open("{}/gene_protein_GO_terms_with_IEA.{}".format(gene_annotations_dir, sbo)) else: file_reader = open("{}/gene_protein_GO_terms_without_IEA.{}".format(gene_annotations_dir, sbo)) index_counter = 1 annotations = [] for line in file_reader: #values = line.rstrip().replace(':', '').split() values = line.rstrip().split() if values[0] not in without_iea_genes: continue # making sure to experiment "without IEA" and "with IEA" we work with same data gene_indeces[sbo][values[0]] = index_counter if len(values[2:]) > max_ann_len[sbo]: max_ann_len[sbo] = len(values[2:]) max_ann_len_indx[sbo] = index_counter annotations.append(' '.join(values[2:])) index_counter += 1 gene_annotations[sbo], go_term_indeces[sbo] = tokenize_annotations(annotations) if verbose: print("Found {} annotating GO terms from {}".format(len(go_term_indeces[sbo]), sbo)) most_freq_count = 10 print("Top {} most frequent GO terms annotating genes in {}:".format(most_freq_count, sbo)) for GO_ID, indx in sorted(go_term_indeces[sbo].items(), key=operator.itemgetter(1))[:most_freq_count]: print(" >>> {} {}".format(GO_ID, indx)) print("Number of annotated gene products by '{}' terms: {}".format(sbo, len(gene_annotations[sbo]))) print("Maximum annotation length of one gene product ('{}' sub-ontology): {}".format(sbo, max_ann_len[sbo])) print("Index of the gene with the maximum annotations ('{}' sub-ontology): {}\n".format(sbo, max_ann_len_indx[sbo])) file_reader.close() return gene_indeces, gene_annotations, go_term_indeces, max_ann_len, max_ann_len_indx def extract_annotation_2nd_form(sub_ontology_all, gene_annotations_dir, include_electronic_annotation=False, verbose=True): """Function to extract annotation information from gene-annotation files (for every subontolgy)""" max_ann_len = {} # maximum annotation length (for every subontolgy) max_ann_len_indx = {} # index of the gene with maximum annotation length (for every subontolgy) gene_indeces = {} # indeces of every genes (for every subontolgy) gene_annotations = {} # genes annotations; ie. 
their one-hot vector ids (for every subontolgy) go_term_indeces = {} # indeces of GO terms (for every subontolgy); assigned based on their higher annotation frequencies to the genes for sbo in sub_ontology_all: gene_indeces[sbo] = {} max_ann_len[sbo] = 0 without_iea_genes = [] # first, for our experiments genes should have at least one IEA- annotation with open("{}/gene_protein_GO_terms_without_IEA.{}".format(gene_annotations_dir, sbo)) as fr: for gene in [line.split()[1] for line in fr.readlines()]: without_iea_genes.append(gene) if include_electronic_annotation: file_reader = open("{}/gene_protein_GO_terms_with_IEA.{}".format(gene_annotations_dir, sbo)) else: file_reader = open("{}/gene_protein_GO_terms_without_IEA.{}".format(gene_annotations_dir, sbo)) index_counter = 1 annotations = [] for line in file_reader: #values = line.rstrip().replace(':', '').split() values = line.rstrip().split() if values[1] not in without_iea_genes: continue # making sure to experiment "without IEA" and "with IEA" we work with same data gene_indeces[sbo][values[1]] = index_counter if len(values[2:]) > max_ann_len[sbo]: max_ann_len[sbo] = len(values[2:]) max_ann_len_indx[sbo] = index_counter annotations.append(' '.join(values[2:])) index_counter += 1 gene_annotations[sbo], go_term_indeces[sbo] = tokenize_annotations(annotations) if verbose: print("Found {} annotating GO terms from {}".format(len(go_term_indeces[sbo]), sbo)) most_freq_count = 10 print("Top {} most frequent GO terms annotating genes in {}:".format(most_freq_count, sbo)) for GO_ID, indx in sorted(go_term_indeces[sbo].items(), key=operator.itemgetter(1))[:most_freq_count]: print(" >>> {} {}".format(GO_ID, indx)) print("Number of annotated gene products by '{}' terms: {}".format(sbo, len(gene_annotations[sbo]))) print("Maximum annotation length of one gene product ('{}' sub-ontology): {}".format(sbo, max_ann_len[sbo])) print("Index of the gene with the maximum annotations ('{}' sub-ontology): {}\n".format(sbo, max_ann_len_indx[sbo])) file_reader.close() return gene_indeces, gene_annotations, go_term_indeces, max_ann_len, max_ann_len_indx def make_reproducible(seed): """makes sure the results are going to be as reproducible as possible (on GPUs might notice slight differences) for a fixed seed""" os.environ['PYTHONHASHSEED'] = str(seed) random.seed(seed) np.random.seed(seed) tf.random.set_seed(seed) session_conf = tf.compat.v1.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1) sess = tf.compat.v1.Session(graph=tf.compat.v1.get_default_graph(), config=session_conf) tf.compat.v1.keras.backend.set_session(sess)
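# ---------------------------------------------------------------------------
# Hedged usage sketch (illustrative only, not part of the deepSimDEF
# pipeline): shows how tokenize_annotations() assigns ids by descending
# GO-term frequency and how pad_annotations() left-pads each gene's id list
# to a fixed length. The toy annotation strings are made up for demonstration.
if __name__ == "__main__":
    toy_annotations = ["GO0006810 GO0006351", "GO0006351"]
    ids, go_index = tokenize_annotations(toy_annotations)
    # GO0006351 occurs twice so it gets id 1; GO0006810 occurs once, id 2.
    print(go_index)                      # {'GO0006351': 1, 'GO0006810': 2}
    print(pad_annotations(ids, maxlen=3))
    # [[0 2 1]
    #  [0 0 1]]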
''' This module holds the constants used for specifying the states of the debugger. ''' from __future__ import nested_scopes STATE_RUN = 1 STATE_SUSPEND = 2 PYTHON_SUSPEND = 1 DJANGO_SUSPEND = 2 JINJA2_SUSPEND = 3 try: __setFalse = False except: import __builtin__ setattr(__builtin__, 'True', 1) setattr(__builtin__, 'False', 0) class DebugInfoHolder: #we have to put it here because it can be set through the command line (so, the #already imported references would not have it). DEBUG_RECORD_SOCKET_READS = False DEBUG_TRACE_LEVEL = -1 DEBUG_TRACE_BREAKPOINTS = -1 #Hold a reference to the original _getframe (because psyco will change that as soon as it's imported) import sys #Note: the sys import must be here anyways (others depend on it) try: get_frame = sys._getframe except AttributeError: def get_frame(): raise AssertionError('sys._getframe not available (possible causes: enable -X:Frames on IronPython?)') #Used to determine the maximum size of each variable passed to eclipse -- having a big value here may make #the communication slower -- as the variables are being gathered lazily in the latest version of eclipse, #this value was raised from 200 to 1000. MAXIMUM_VARIABLE_REPRESENTATION_SIZE = 1000 # Prefix for saving functions return values in locals RETURN_VALUES_DICT = '__pydevd_ret_val_dict' import os from _pydevd_bundle import pydevd_vm_type IS_JYTHON = pydevd_vm_type.get_vm_type() == pydevd_vm_type.PydevdVmType.JYTHON IS_JYTH_LESS25 = False if IS_JYTHON: if sys.version_info[0] == 2 and sys.version_info[1] < 5: IS_JYTH_LESS25 = True IS_PYTHON_STACKLESS = "stackless" in sys.version.lower() CYTHON_SUPPORTED = False try: import platform python_implementation = platform.python_implementation() except: pass else: if python_implementation == 'CPython' and not IS_PYTHON_STACKLESS: # Only available for CPython! if ( (sys.version_info[0] == 2 and sys.version_info[1] >= 7) or (sys.version_info[0] == 3 and sys.version_info[1] >= 3) or (sys.version_info[0] > 3) ): # Supported in 2.7 or 3.3 onwards (32 or 64) CYTHON_SUPPORTED = True #======================================================================================================================= # Python 3? 
#======================================================================================================================= IS_PY3K = False IS_PY34_OR_GREATER = False IS_PY36_OR_GREATER = False IS_PY2 = True IS_PY27 = False IS_PY24 = False try: if sys.version_info[0] >= 3: IS_PY3K = True IS_PY2 = False if (sys.version_info[0] == 3 and sys.version_info[1] >= 4) or sys.version_info[0] > 3: IS_PY34_OR_GREATER = True if (sys.version_info[0] == 3 and sys.version_info[1] >= 6) or sys.version_info[0] > 3: IS_PY36_OR_GREATER = True elif sys.version_info[0] == 2 and sys.version_info[1] == 7: IS_PY27 = True elif sys.version_info[0] == 2 and sys.version_info[1] == 4: IS_PY24 = True except AttributeError: pass #Not all versions have sys.version_info try: SUPPORT_GEVENT = os.getenv('GEVENT_SUPPORT', 'False') == 'True' except: # Jython 2.1 doesn't accept that construct SUPPORT_GEVENT = False # At the moment gevent supports Python >= 2.6 and Python >= 3.3 USE_LIB_COPY = SUPPORT_GEVENT and \ ((not IS_PY3K and sys.version_info[1] >= 6) or (IS_PY3K and sys.version_info[1] >= 3)) INTERACTIVE_MODE_AVAILABLE = sys.platform in ('darwin', 'win32') or os.getenv('DISPLAY') is not None IS_PYCHARM = True def protect_libraries_from_patching(): """ In this function we delete some modules from `sys.modules` dictionary and import them again inside `_pydev_saved_modules` in order to save their original copies there. After that we can use these saved modules within the debugger to protect them from patching by external libraries (e.g. gevent). """ patched = ['threading', 'thread', '_thread', 'time', 'socket', 'Queue', 'queue', 'select', 'xmlrpclib', 'SimpleXMLRPCServer', 'BaseHTTPServer', 'SocketServer', 'xmlrpc.client', 'xmlrpc.server', 'http.server', 'socketserver'] for name in patched: try: __import__(name) except: pass patched_modules = dict([(k, v) for k, v in sys.modules.items() if k in patched]) for name in patched_modules: del sys.modules[name] # import for side effects import _pydev_imps._pydev_saved_modules for name in patched_modules: sys.modules[name] = patched_modules[name] if USE_LIB_COPY: protect_libraries_from_patching() from _pydev_imps._pydev_saved_modules import thread _nextThreadIdLock = thread.allocate_lock() #======================================================================================================================= # Jython? 
#======================================================================================================================= try: dict_contains = dict.has_key except: try: #Py3k does not have has_key anymore, and older versions don't have __contains__ dict_contains = dict.__contains__ except: try: dict_contains = dict.has_key except NameError: def dict_contains(d, key): return d.has_key(key) if IS_PY3K: def dict_keys(d): return list(d.keys()) def dict_values(d): return list(d.values()) dict_iter_values = dict.values def dict_iter_items(d): return d.items() def dict_items(d): return list(d.items()) else: dict_keys = None try: dict_keys = dict.keys except: pass if IS_JYTHON or not dict_keys: def dict_keys(d): return d.keys() try: dict_iter_values = dict.itervalues except: try: dict_iter_values = dict.values #Older versions don't have the itervalues except: def dict_iter_values(d): return d.values() try: dict_values = dict.values except: def dict_values(d): return d.values() def dict_iter_items(d): try: return d.iteritems() except: return d.items() def dict_items(d): return d.items() try: xrange = xrange except: #Python 3k does not have it xrange = range try: import itertools izip = itertools.izip except: izip = zip try: object except NameError: class object: pass import __builtin__ setattr(__builtin__, 'object', object) try: enumerate except: def enumerate(lst): ret = [] i = 0 for element in lst: ret.append((i, element)) i += 1 return ret #======================================================================================================================= # StringIO #======================================================================================================================= try: from StringIO import StringIO except: from io import StringIO #======================================================================================================================= # get_pid #======================================================================================================================= def get_pid(): try: return os.getpid() except AttributeError: try: #Jython does not have it! import java.lang.management.ManagementFactory #@UnresolvedImport -- just for jython pid = java.lang.management.ManagementFactory.getRuntimeMXBean().getName() return pid.replace('@', '_') except: #ok, no pid available (will be unable to debug multiple processes) return '000001' def clear_cached_thread_id(thread): try: del thread.__pydevd_id__ except AttributeError: pass #======================================================================================================================= # get_thread_id #======================================================================================================================= def get_thread_id(thread): try: tid = thread.__pydevd_id__ if tid is None: # Fix for https://sw-brainwy.rhcloud.com/tracker/PyDev/645 # if __pydevd_id__ is None, recalculate it... also, use an heuristic # that gives us always the same id for the thread (using thread.ident or id(thread)). raise AttributeError() except AttributeError: _nextThreadIdLock.acquire() try: #We do a new check with the lock in place just to be sure that nothing changed tid = getattr(thread, '__pydevd_id__', None) if tid is None: pid = get_pid() try: tid = thread.__pydevd_id__ = 'pid_%s_id_%s' % (pid, thread.get_ident()) except: # thread.ident isn't always there... (use id(thread) instead if it's not there). 
tid = thread.__pydevd_id__ = 'pid_%s_id_%s' % (pid, id(thread)) finally: _nextThreadIdLock.release() return tid #=============================================================================== # Null #=============================================================================== class Null: """ Gotten from: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/68205 """ def __init__(self, *args, **kwargs): return None def __call__(self, *args, **kwargs): return self def __getattr__(self, mname): if len(mname) > 4 and mname[:2] == '__' and mname[-2:] == '__': # Don't pretend to implement special method names. raise AttributeError(mname) return self def __setattr__(self, name, value): return self def __delattr__(self, name): return self def __repr__(self): return "<Null>" def __str__(self): return "Null" def __len__(self): return 0 def __getitem__(self): return self def __setitem__(self, *args, **kwargs): pass def write(self, *args, **kwargs): pass def __nonzero__(self): return 0 def __iter__(self): return iter(()) def call_only_once(func): ''' To be used as a decorator @call_only_once def func(): print 'Calling func only this time' Actually, in PyDev it must be called as: func = call_only_once(func) to support older versions of Python. ''' def new_func(*args, **kwargs): if not new_func._called: new_func._called = True return func(*args, **kwargs) new_func._called = False return new_func if __name__ == '__main__': if Null(): sys.stdout.write('here\n')
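# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of pydevd itself): the Null object above can
# stand in for an optional collaborator, and call_only_once() turns a setup
# function into a one-shot. The helper name _demo_null_and_call_only_once is
# illustrative only.
def _demo_null_and_call_only_once():
    log = Null()
    log.debug("ignored")    # any attribute access or call just returns the Null
    assert not log          # Null is falsy (__len__/__nonzero__ return 0)

    calls = []
    setup = call_only_once(lambda: calls.append("ran"))
    setup()
    setup()                 # second call is a no-op
    assert calls == ["ran"]
    return calls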
import unittest import unittest.mock import random import os import time import pickle import warnings import test.support from functools import partial from math import log, exp, pi, fsum, sin, factorial from test import support from fractions import Fraction from collections import Counter class TestBasicOps: # Superclass with tests common to all generators. # Subclasses must arrange for self.gen to retrieve the Random instance # to be tested. def randomlist(self, n): """Helper function to make a list of random numbers""" return [self.gen.random() for i in range(n)] def test_autoseed(self): self.gen.seed() state1 = self.gen.getstate() time.sleep(0.1) self.gen.seed() # different seeds at different times state2 = self.gen.getstate() self.assertNotEqual(state1, state2) def test_saverestore(self): N = 1000 self.gen.seed() state = self.gen.getstate() randseq = self.randomlist(N) self.gen.setstate(state) # should regenerate the same sequence self.assertEqual(randseq, self.randomlist(N)) def test_seedargs(self): # Seed value with a negative hash. class MySeed(object): def __hash__(self): return -1729 for arg in [None, 0, 1, -1, 10**20, -(10**20), False, True, 3.14, 'a']: self.gen.seed(arg) for arg in [1+2j, tuple('abc'), MySeed()]: with self.assertWarns(DeprecationWarning): self.gen.seed(arg) for arg in [list(range(3)), dict(one=1)]: with self.assertWarns(DeprecationWarning): self.assertRaises(TypeError, self.gen.seed, arg) self.assertRaises(TypeError, self.gen.seed, 1, 2, 3, 4) self.assertRaises(TypeError, type(self.gen), []) @unittest.mock.patch('random._urandom') # os.urandom def test_seed_when_randomness_source_not_found(self, urandom_mock): # Random.seed() uses time.time() when an operating system specific # randomness source is not found. To test this on machines where it # exists, run the above test, test_seedargs(), again after mocking # os.urandom() so that it raises the exception expected when the # randomness source is not available. urandom_mock.side_effect = NotImplementedError self.test_seedargs() def test_shuffle(self): shuffle = self.gen.shuffle lst = [] shuffle(lst) self.assertEqual(lst, []) lst = [37] shuffle(lst) self.assertEqual(lst, [37]) seqs = [list(range(n)) for n in range(10)] shuffled_seqs = [list(range(n)) for n in range(10)] for shuffled_seq in shuffled_seqs: shuffle(shuffled_seq) for (seq, shuffled_seq) in zip(seqs, shuffled_seqs): self.assertEqual(len(seq), len(shuffled_seq)) self.assertEqual(set(seq), set(shuffled_seq)) # The above tests all would pass if the shuffle was a # no-op. The following non-deterministic test covers that. It # asserts that the shuffled sequence of 1000 distinct elements # must be different from the original one. Although there is # mathematically a non-zero probability that this could # actually happen in a genuinely random shuffle, it is # completely negligible, given that the number of possible # permutations of 1000 objects is 1000! (factorial of 1000), # which is considerably larger than the number of atoms in the # universe... lst = list(range(1000)) shuffled_lst = list(range(1000)) shuffle(shuffled_lst) self.assertTrue(lst != shuffled_lst) shuffle(lst) self.assertTrue(lst != shuffled_lst) self.assertRaises(TypeError, shuffle, (1, 2, 3)) def test_shuffle_random_argument(self): # Test random argument to shuffle. 
shuffle = self.gen.shuffle mock_random = unittest.mock.Mock(return_value=0.5) seq = bytearray(b'abcdefghijk') with self.assertWarns(DeprecationWarning): shuffle(seq, mock_random) mock_random.assert_called_with() def test_choice(self): choice = self.gen.choice with self.assertRaises(IndexError): choice([]) self.assertEqual(choice([50]), 50) self.assertIn(choice([25, 75]), [25, 75]) def test_sample(self): # For the entire allowable range of 0 <= k <= N, validate that # the sample is of the correct length and contains only unique items N = 100 population = range(N) for k in range(N+1): s = self.gen.sample(population, k) self.assertEqual(len(s), k) uniq = set(s) self.assertEqual(len(uniq), k) self.assertTrue(uniq <= set(population)) self.assertEqual(self.gen.sample([], 0), []) # test edge case N==k==0 # Exception raised if size of sample exceeds that of population self.assertRaises(ValueError, self.gen.sample, population, N+1) self.assertRaises(ValueError, self.gen.sample, [], -1) def test_sample_distribution(self): # For the entire allowable range of 0 <= k <= N, validate that # sample generates all possible permutations n = 5 pop = range(n) trials = 10000 # large num prevents false negatives without slowing normal case for k in range(n): expected = factorial(n) // factorial(n-k) perms = {} for i in range(trials): perms[tuple(self.gen.sample(pop, k))] = None if len(perms) == expected: break else: self.fail() def test_sample_inputs(self): # SF bug #801342 -- population can be any iterable defining __len__() self.gen.sample(range(20), 2) self.gen.sample(range(20), 2) self.gen.sample(str('abcdefghijklmnopqrst'), 2) self.gen.sample(tuple('abcdefghijklmnopqrst'), 2) def test_sample_on_dicts(self): self.assertRaises(TypeError, self.gen.sample, dict.fromkeys('abcdef'), 2) def test_sample_on_sets(self): with self.assertWarns(DeprecationWarning): population = {10, 20, 30, 40, 50, 60, 70} self.gen.sample(population, k=5) def test_sample_with_counts(self): sample = self.gen.sample # General case colors = ['red', 'green', 'blue', 'orange', 'black', 'brown', 'amber'] counts = [500, 200, 20, 10, 5, 0, 1 ] k = 700 summary = Counter(sample(colors, counts=counts, k=k)) self.assertEqual(sum(summary.values()), k) for color, weight in zip(colors, counts): self.assertLessEqual(summary[color], weight) self.assertNotIn('brown', summary) # Case that exhausts the population k = sum(counts) summary = Counter(sample(colors, counts=counts, k=k)) self.assertEqual(sum(summary.values()), k) for color, weight in zip(colors, counts): self.assertLessEqual(summary[color], weight) self.assertNotIn('brown', summary) # Case with population size of 1 summary = Counter(sample(['x'], counts=[10], k=8)) self.assertEqual(summary, Counter(x=8)) # Case with all counts equal. 
nc = len(colors) summary = Counter(sample(colors, counts=[10]*nc, k=10*nc)) self.assertEqual(summary, Counter(10*colors)) # Test error handling with self.assertRaises(TypeError): sample(['red', 'green', 'blue'], counts=10, k=10) # counts not iterable with self.assertRaises(ValueError): sample(['red', 'green', 'blue'], counts=[-3, -7, -8], k=2) # counts are negative with self.assertRaises(ValueError): sample(['red', 'green', 'blue'], counts=[0, 0, 0], k=2) # counts are zero with self.assertRaises(ValueError): sample(['red', 'green'], counts=[10, 10], k=21) # population too small with self.assertRaises(ValueError): sample(['red', 'green', 'blue'], counts=[1, 2], k=2) # too few counts with self.assertRaises(ValueError): sample(['red', 'green', 'blue'], counts=[1, 2, 3, 4], k=2) # too many counts def test_sample_counts_equivalence(self): # Test the documented strong equivalence to a sample with repeated elements. # We run this test on random.Random() which makes deterministic selections # for a given seed value. sample = random.sample seed = random.seed colors = ['red', 'green', 'blue', 'orange', 'black', 'amber'] counts = [500, 200, 20, 10, 5, 1 ] k = 700 seed(8675309) s1 = sample(colors, counts=counts, k=k) seed(8675309) expanded = [color for (color, count) in zip(colors, counts) for i in range(count)] self.assertEqual(len(expanded), sum(counts)) s2 = sample(expanded, k=k) self.assertEqual(s1, s2) pop = 'abcdefghi' counts = [10, 9, 8, 7, 6, 5, 4, 3, 2] seed(8675309) s1 = ''.join(sample(pop, counts=counts, k=30)) expanded = ''.join([letter for (letter, count) in zip(pop, counts) for i in range(count)]) seed(8675309) s2 = ''.join(sample(expanded, k=30)) self.assertEqual(s1, s2) def test_choices(self): choices = self.gen.choices data = ['red', 'green', 'blue', 'yellow'] str_data = 'abcd' range_data = range(4) set_data = set(range(4)) # basic functionality for sample in [ choices(data, k=5), choices(data, range(4), k=5), choices(k=5, population=data, weights=range(4)), choices(k=5, population=data, cum_weights=range(4)), ]: self.assertEqual(len(sample), 5) self.assertEqual(type(sample), list) self.assertTrue(set(sample) <= set(data)) # test argument handling with self.assertRaises(TypeError): # missing arguments choices(2) self.assertEqual(choices(data, k=0), []) # k == 0 self.assertEqual(choices(data, k=-1), []) # negative k behaves like ``[0] * -1`` with self.assertRaises(TypeError): choices(data, k=2.5) # k is a float self.assertTrue(set(choices(str_data, k=5)) <= set(str_data)) # population is a string sequence self.assertTrue(set(choices(range_data, k=5)) <= set(range_data)) # population is a range with self.assertRaises(TypeError): choices(set_data, k=2) # population is not a sequence self.assertTrue(set(choices(data, None, k=5)) <= set(data)) # weights is None self.assertTrue(set(choices(data, weights=None, k=5)) <= set(data)) with self.assertRaises(ValueError): choices(data, [1,2], k=5) # len(weights) != len(population) with self.assertRaises(TypeError): choices(data, 10, k=5) # non-iterable weights with self.assertRaises(TypeError): choices(data, [None]*4, k=5) # non-numeric weights for weights in [ [15, 10, 25, 30], # integer weights [15.1, 10.2, 25.2, 30.3], # float weights [Fraction(1, 3), Fraction(2, 6), Fraction(3, 6), Fraction(4, 6)], # fractional weights [True, False, True, False] # booleans (include / exclude) ]: self.assertTrue(set(choices(data, weights, k=5)) <= set(data)) with self.assertRaises(ValueError): choices(data, cum_weights=[1,2], k=5) # len(weights) != 
len(population) with self.assertRaises(TypeError): choices(data, cum_weights=10, k=5) # non-iterable cum_weights with self.assertRaises(TypeError): choices(data, cum_weights=[None]*4, k=5) # non-numeric cum_weights with self.assertRaises(TypeError): choices(data, range(4), cum_weights=range(4), k=5) # both weights and cum_weights for weights in [ [15, 10, 25, 30], # integer cum_weights [15.1, 10.2, 25.2, 30.3], # float cum_weights [Fraction(1, 3), Fraction(2, 6), Fraction(3, 6), Fraction(4, 6)], # fractional cum_weights ]: self.assertTrue(set(choices(data, cum_weights=weights, k=5)) <= set(data)) # Test weight focused on a single element of the population self.assertEqual(choices('abcd', [1, 0, 0, 0]), ['a']) self.assertEqual(choices('abcd', [0, 1, 0, 0]), ['b']) self.assertEqual(choices('abcd', [0, 0, 1, 0]), ['c']) self.assertEqual(choices('abcd', [0, 0, 0, 1]), ['d']) # Test consistency with random.choice() for empty population with self.assertRaises(IndexError): choices([], k=1) with self.assertRaises(IndexError): choices([], weights=[], k=1) with self.assertRaises(IndexError): choices([], cum_weights=[], k=5) def test_choices_subnormal(self): # Subnormal weights would occasionally trigger an IndexError # in choices() when the value returned by random() was large # enough to make `random() * total` round up to the total. # See https://bugs.python.org/msg275594 for more detail. choices = self.gen.choices choices(population=[1, 2], weights=[1e-323, 1e-323], k=5000) def test_choices_with_all_zero_weights(self): # See issue #38881 with self.assertRaises(ValueError): self.gen.choices('AB', [0.0, 0.0]) def test_gauss(self): # Ensure that the seed() method initializes all the hidden state. In # particular, through 2.2.1 it failed to reset a piece of state used # by (and only by) the .gauss() method. for seed in 1, 12, 123, 1234, 12345, 123456, 654321: self.gen.seed(seed) x1 = self.gen.random() y1 = self.gen.gauss(0, 1) self.gen.seed(seed) x2 = self.gen.random() y2 = self.gen.gauss(0, 1) self.assertEqual(x1, x2) self.assertEqual(y1, y2) def test_getrandbits(self): # Verify ranges for k in range(1, 1000): self.assertTrue(0 <= self.gen.getrandbits(k) < 2**k) self.assertEqual(self.gen.getrandbits(0), 0) # Verify all bits active getbits = self.gen.getrandbits for span in [1, 2, 3, 4, 31, 32, 32, 52, 53, 54, 119, 127, 128, 129]: all_bits = 2**span-1 cum = 0 cpl_cum = 0 for i in range(100): v = getbits(span) cum |= v cpl_cum |= all_bits ^ v self.assertEqual(cum, all_bits) self.assertEqual(cpl_cum, all_bits) # Verify argument checking self.assertRaises(TypeError, self.gen.getrandbits) self.assertRaises(TypeError, self.gen.getrandbits, 1, 2) self.assertRaises(ValueError, self.gen.getrandbits, -1) self.assertRaises(TypeError, self.gen.getrandbits, 10.1) def test_pickling(self): for proto in range(pickle.HIGHEST_PROTOCOL + 1): state = pickle.dumps(self.gen, proto) origseq = [self.gen.random() for i in range(10)] newgen = pickle.loads(state) restoredseq = [newgen.random() for i in range(10)] self.assertEqual(origseq, restoredseq) @test.support.cpython_only def test_bug_41052(self): # _random.Random should not be allowed to serialization import _random for proto in range(pickle.HIGHEST_PROTOCOL + 1): r = _random.Random() self.assertRaises(TypeError, pickle.dumps, r, proto) def test_bug_1727780(self): # verify that version-2-pickles can be loaded # fine, whether they are created on 32-bit or 64-bit # platforms, and that version-3-pickles load fine. 
files = [("randv2_32.pck", 780), ("randv2_64.pck", 866), ("randv3.pck", 343)] for file, value in files: with open(support.findfile(file),"rb") as f: r = pickle.load(f) self.assertEqual(int(r.random()*1000), value) def test_bug_9025(self): # Had problem with an uneven distribution in int(n*random()) # Verify the fix by checking that distributions fall within expectations. n = 100000 randrange = self.gen.randrange k = sum(randrange(6755399441055744) % 3 == 2 for i in range(n)) self.assertTrue(0.30 < k/n < .37, (k/n)) def test_randbytes(self): # Verify ranges for n in range(1, 10): data = self.gen.randbytes(n) self.assertEqual(type(data), bytes) self.assertEqual(len(data), n) self.assertEqual(self.gen.randbytes(0), b'') # Verify argument checking self.assertRaises(TypeError, self.gen.randbytes) self.assertRaises(TypeError, self.gen.randbytes, 1, 2) self.assertRaises(ValueError, self.gen.randbytes, -1) self.assertRaises(TypeError, self.gen.randbytes, 1.0) try: random.SystemRandom().random() except NotImplementedError: SystemRandom_available = False else: SystemRandom_available = True @unittest.skipUnless(SystemRandom_available, "random.SystemRandom not available") class SystemRandom_TestBasicOps(TestBasicOps, unittest.TestCase): gen = random.SystemRandom() def test_autoseed(self): # Doesn't need to do anything except not fail self.gen.seed() def test_saverestore(self): self.assertRaises(NotImplementedError, self.gen.getstate) self.assertRaises(NotImplementedError, self.gen.setstate, None) def test_seedargs(self): # Doesn't need to do anything except not fail self.gen.seed(100) def test_gauss(self): self.gen.gauss_next = None self.gen.seed(100) self.assertEqual(self.gen.gauss_next, None) def test_pickling(self): for proto in range(pickle.HIGHEST_PROTOCOL + 1): self.assertRaises(NotImplementedError, pickle.dumps, self.gen, proto) def test_53_bits_per_float(self): # This should pass whenever a C double has 53 bit precision. span = 2 ** 53 cum = 0 for i in range(100): cum |= int(self.gen.random() * span) self.assertEqual(cum, span-1) def test_bigrand(self): # The randrange routine should build-up the required number of bits # in stages so that all bit positions are active. 
span = 2 ** 500 cum = 0 for i in range(100): r = self.gen.randrange(span) self.assertTrue(0 <= r < span) cum |= r self.assertEqual(cum, span-1) def test_bigrand_ranges(self): for i in [40,80, 160, 200, 211, 250, 375, 512, 550]: start = self.gen.randrange(2 ** (i-2)) stop = self.gen.randrange(2 ** i) if stop <= start: continue self.assertTrue(start <= self.gen.randrange(start, stop) < stop) def test_rangelimits(self): for start, stop in [(-2,0), (-(2**60)-2,-(2**60)), (2**60,2**60+2)]: self.assertEqual(set(range(start,stop)), set([self.gen.randrange(start,stop) for i in range(100)])) def test_randrange_nonunit_step(self): rint = self.gen.randrange(0, 10, 2) self.assertIn(rint, (0, 2, 4, 6, 8)) rint = self.gen.randrange(0, 2, 2) self.assertEqual(rint, 0) def test_randrange_errors(self): raises = partial(self.assertRaises, ValueError, self.gen.randrange) # Empty range raises(3, 3) raises(-721) raises(0, 100, -12) # Non-integer start/stop raises(3.14159) raises(0, 2.71828) # Zero and non-integer step raises(0, 42, 0) raises(0, 42, 3.14159) def test_randbelow_logic(self, _log=log, int=int): # check bitcount transition points: 2**i and 2**(i+1)-1 # show that: k = int(1.001 + _log(n, 2)) # is equal to or one greater than the number of bits in n for i in range(1, 1000): n = 1 << i # check an exact power of two numbits = i+1 k = int(1.00001 + _log(n, 2)) self.assertEqual(k, numbits) self.assertEqual(n, 2**(k-1)) n += n - 1 # check 1 below the next power of two k = int(1.00001 + _log(n, 2)) self.assertIn(k, [numbits, numbits+1]) self.assertTrue(2**k > n > 2**(k-2)) n -= n >> 15 # check a little farther below the next power of two k = int(1.00001 + _log(n, 2)) self.assertEqual(k, numbits) # note the stronger assertion self.assertTrue(2**k > n > 2**(k-1)) # note the stronger assertion class MersenneTwister_TestBasicOps(TestBasicOps, unittest.TestCase): gen = random.Random() def test_guaranteed_stable(self): # These sequences are guaranteed to stay the same across versions of python self.gen.seed(3456147, version=1) self.assertEqual([self.gen.random().hex() for i in range(4)], ['0x1.ac362300d90d2p-1', '0x1.9d16f74365005p-1', '0x1.1ebb4352e4c4dp-1', '0x1.1a7422abf9c11p-1']) self.gen.seed("the quick brown fox", version=2) self.assertEqual([self.gen.random().hex() for i in range(4)], ['0x1.1239ddfb11b7cp-3', '0x1.b3cbb5c51b120p-4', '0x1.8c4f55116b60fp-1', '0x1.63eb525174a27p-1']) def test_bug_27706(self): # Verify that version 1 seeds are unaffected by hash randomization self.gen.seed('nofar', version=1) # hash('nofar') == 5990528763808513177 self.assertEqual([self.gen.random().hex() for i in range(4)], ['0x1.8645314505ad7p-1', '0x1.afb1f82e40a40p-5', '0x1.2a59d2285e971p-1', '0x1.56977142a7880p-6']) self.gen.seed('rachel', version=1) # hash('rachel') == -9091735575445484789 self.assertEqual([self.gen.random().hex() for i in range(4)], ['0x1.0b294cc856fcdp-1', '0x1.2ad22d79e77b8p-3', '0x1.3052b9c072678p-2', '0x1.578f332106574p-3']) self.gen.seed('', version=1) # hash('') == 0 self.assertEqual([self.gen.random().hex() for i in range(4)], ['0x1.b0580f98a7dbep-1', '0x1.84129978f9c1ap-1', '0x1.aeaa51052e978p-2', '0x1.092178fb945a6p-2']) def test_bug_31478(self): # There shouldn't be an assertion failure in _random.Random.seed() in # case the argument has a bad __abs__() method. 
class BadInt(int): def __abs__(self): return None try: self.gen.seed(BadInt()) except TypeError: pass def test_bug_31482(self): # Verify that version 1 seeds are unaffected by hash randomization # when the seeds are expressed as bytes rather than strings. # The hash(b) values listed are the Python2.7 hash() values # which were used for seeding. self.gen.seed(b'nofar', version=1) # hash('nofar') == 5990528763808513177 self.assertEqual([self.gen.random().hex() for i in range(4)], ['0x1.8645314505ad7p-1', '0x1.afb1f82e40a40p-5', '0x1.2a59d2285e971p-1', '0x1.56977142a7880p-6']) self.gen.seed(b'rachel', version=1) # hash('rachel') == -9091735575445484789 self.assertEqual([self.gen.random().hex() for i in range(4)], ['0x1.0b294cc856fcdp-1', '0x1.2ad22d79e77b8p-3', '0x1.3052b9c072678p-2', '0x1.578f332106574p-3']) self.gen.seed(b'', version=1) # hash('') == 0 self.assertEqual([self.gen.random().hex() for i in range(4)], ['0x1.b0580f98a7dbep-1', '0x1.84129978f9c1ap-1', '0x1.aeaa51052e978p-2', '0x1.092178fb945a6p-2']) b = b'\x00\x20\x40\x60\x80\xA0\xC0\xE0\xF0' self.gen.seed(b, version=1) # hash(b) == 5015594239749365497 self.assertEqual([self.gen.random().hex() for i in range(4)], ['0x1.52c2fde444d23p-1', '0x1.875174f0daea4p-2', '0x1.9e9b2c50e5cd2p-1', '0x1.fa57768bd321cp-2']) def test_setstate_first_arg(self): self.assertRaises(ValueError, self.gen.setstate, (1, None, None)) def test_setstate_middle_arg(self): start_state = self.gen.getstate() # Wrong type, s/b tuple self.assertRaises(TypeError, self.gen.setstate, (2, None, None)) # Wrong length, s/b 625 self.assertRaises(ValueError, self.gen.setstate, (2, (1,2,3), None)) # Wrong type, s/b tuple of 625 ints self.assertRaises(TypeError, self.gen.setstate, (2, ('a',)*625, None)) # Last element s/b an int also self.assertRaises(TypeError, self.gen.setstate, (2, (0,)*624+('a',), None)) # Last element s/b between 0 and 624 with self.assertRaises((ValueError, OverflowError)): self.gen.setstate((2, (1,)*624+(625,), None)) with self.assertRaises((ValueError, OverflowError)): self.gen.setstate((2, (1,)*624+(-1,), None)) # Failed calls to setstate() should not have changed the state. bits100 = self.gen.getrandbits(100) self.gen.setstate(start_state) self.assertEqual(self.gen.getrandbits(100), bits100) # Little trick to make "tuple(x % (2**32) for x in internalstate)" # raise ValueError. I cannot think of a simple way to achieve this, so # I am opting for using a generator as the middle argument of setstate # which attempts to cast a NaN to integer. state_values = self.gen.getstate()[1] state_values = list(state_values) state_values[-1] = float('nan') state = (int(x) for x in state_values) self.assertRaises(TypeError, self.gen.setstate, (2, state, None)) def test_referenceImplementation(self): # Compare the python implementation with results from the original # code. Create 2000 53-bit precision random floats. Compare only # the last ten entries to show that the independent implementations # are tracking. 
Here is the main() function needed to create the # list of expected random numbers: # void main(void){ # int i; # unsigned long init[4]={61731, 24903, 614, 42143}, length=4; # init_by_array(init, length); # for (i=0; i<2000; i++) { # printf("%.15f ", genrand_res53()); # if (i%5==4) printf("\n"); # } # } expected = [0.45839803073713259, 0.86057815201978782, 0.92848331726782152, 0.35932681119782461, 0.081823493762449573, 0.14332226470169329, 0.084297823823520024, 0.53814864671831453, 0.089215024911993401, 0.78486196105372907] self.gen.seed(61731 + (24903<<32) + (614<<64) + (42143<<96)) actual = self.randomlist(2000)[-10:] for a, e in zip(actual, expected): self.assertAlmostEqual(a,e,places=14) def test_strong_reference_implementation(self): # Like test_referenceImplementation, but checks for exact bit-level # equality. This should pass on any box where C double contains # at least 53 bits of precision (the underlying algorithm suffers # no rounding errors -- all results are exact). from math import ldexp expected = [0x0eab3258d2231f, 0x1b89db315277a5, 0x1db622a5518016, 0x0b7f9af0d575bf, 0x029e4c4db82240, 0x04961892f5d673, 0x02b291598e4589, 0x11388382c15694, 0x02dad977c9e1fe, 0x191d96d4d334c6] self.gen.seed(61731 + (24903<<32) + (614<<64) + (42143<<96)) actual = self.randomlist(2000)[-10:] for a, e in zip(actual, expected): self.assertEqual(int(ldexp(a, 53)), e) def test_long_seed(self): # This is most interesting to run in debug mode, just to make sure # nothing blows up. Under the covers, a dynamically resized array # is allocated, consuming space proportional to the number of bits # in the seed. Unfortunately, that's a quadratic-time algorithm, # so don't make this horribly big. seed = (1 << (10000 * 8)) - 1 # about 10K bytes self.gen.seed(seed) def test_53_bits_per_float(self): # This should pass whenever a C double has 53 bit precision. span = 2 ** 53 cum = 0 for i in range(100): cum |= int(self.gen.random() * span) self.assertEqual(cum, span-1) def test_bigrand(self): # The randrange routine should build-up the required number of bits # in stages so that all bit positions are active. span = 2 ** 500 cum = 0 for i in range(100): r = self.gen.randrange(span) self.assertTrue(0 <= r < span) cum |= r self.assertEqual(cum, span-1) def test_bigrand_ranges(self): for i in [40,80, 160, 200, 211, 250, 375, 512, 550]: start = self.gen.randrange(2 ** (i-2)) stop = self.gen.randrange(2 ** i) if stop <= start: continue self.assertTrue(start <= self.gen.randrange(start, stop) < stop) def test_rangelimits(self): for start, stop in [(-2,0), (-(2**60)-2,-(2**60)), (2**60,2**60+2)]: self.assertEqual(set(range(start,stop)), set([self.gen.randrange(start,stop) for i in range(100)])) def test_getrandbits(self): super().test_getrandbits() # Verify cross-platform repeatability self.gen.seed(1234567) self.assertEqual(self.gen.getrandbits(100), 97904845777343510404718956115) def test_randrange_uses_getrandbits(self): # Verify use of getrandbits by randrange # Use same seed as in the cross-platform repeatability test # in test_getrandbits above. self.gen.seed(1234567) # If randrange uses getrandbits, it should pick getrandbits(100) # when called with a 100-bits stop argument. 
self.assertEqual(self.gen.randrange(2**99), 97904845777343510404718956115) def test_randbelow_logic(self, _log=log, int=int): # check bitcount transition points: 2**i and 2**(i+1)-1 # show that: k = int(1.001 + _log(n, 2)) # is equal to or one greater than the number of bits in n for i in range(1, 1000): n = 1 << i # check an exact power of two numbits = i+1 k = int(1.00001 + _log(n, 2)) self.assertEqual(k, numbits) self.assertEqual(n, 2**(k-1)) n += n - 1 # check 1 below the next power of two k = int(1.00001 + _log(n, 2)) self.assertIn(k, [numbits, numbits+1]) self.assertTrue(2**k > n > 2**(k-2)) n -= n >> 15 # check a little farther below the next power of two k = int(1.00001 + _log(n, 2)) self.assertEqual(k, numbits) # note the stronger assertion self.assertTrue(2**k > n > 2**(k-1)) # note the stronger assertion def test_randbelow_without_getrandbits(self): # Random._randbelow() can only use random() when the built-in one # has been overridden but no new getrandbits() method was supplied. maxsize = 1<<random.BPF with warnings.catch_warnings(): warnings.simplefilter("ignore", UserWarning) # Population range too large (n >= maxsize) self.gen._randbelow_without_getrandbits( maxsize+1, maxsize=maxsize ) self.gen._randbelow_without_getrandbits(5640, maxsize=maxsize) # issue 33203: test that _randbelow returns zero on # n == 0 also in its getrandbits-independent branch. x = self.gen._randbelow_without_getrandbits(0, maxsize=maxsize) self.assertEqual(x, 0) # This might be going too far to test a single line, but because of our # noble aim of achieving 100% test coverage we need to write a case in # which the following line in Random._randbelow() gets executed: # # rem = maxsize % n # limit = (maxsize - rem) / maxsize # r = random() # while r >= limit: # r = random() # <== *This line* <==< # # Therefore, to guarantee that the while loop is executed at least # once, we need to mock random() so that it returns a number greater # than 'limit' the first time it gets called. n = 42 epsilon = 0.01 limit = (maxsize - (maxsize % n)) / maxsize with unittest.mock.patch.object(random.Random, 'random') as random_mock: random_mock.side_effect = [limit + epsilon, limit - epsilon] self.gen._randbelow_without_getrandbits(n, maxsize=maxsize) self.assertEqual(random_mock.call_count, 2) def test_randrange_bug_1590891(self): start = 1000000000000 stop = -100000000000000000000 step = -200 x = self.gen.randrange(start, stop, step) self.assertTrue(stop < x <= start) self.assertEqual((x+stop)%step, 0) def test_choices_algorithms(self): # The various ways of specifying weights should produce the same results choices = self.gen.choices n = 104729 self.gen.seed(8675309) a = self.gen.choices(range(n), k=10000) self.gen.seed(8675309) b = self.gen.choices(range(n), [1]*n, k=10000) self.assertEqual(a, b) self.gen.seed(8675309) c = self.gen.choices(range(n), cum_weights=range(1, n+1), k=10000) self.assertEqual(a, c) # American Roulette population = ['Red', 'Black', 'Green'] weights = [18, 18, 2] cum_weights = [18, 36, 38] expanded_population = ['Red'] * 18 + ['Black'] * 18 + ['Green'] * 2 self.gen.seed(9035768) a = self.gen.choices(expanded_population, k=10000) self.gen.seed(9035768) b = self.gen.choices(population, weights, k=10000) self.assertEqual(a, b) self.gen.seed(9035768) c = self.gen.choices(population, cum_weights=cum_weights, k=10000) self.assertEqual(a, c) def test_randbytes(self): super().test_randbytes() # Mersenne Twister randbytes() is deterministic # and does not depend on the endian and bitness. 
seed = 8675309 expected = b'3\xa8\xf9f\xf4\xa4\xd06\x19\x8f\x9f\x82\x02oe\xf0' self.gen.seed(seed) self.assertEqual(self.gen.randbytes(16), expected) # randbytes(0) must not consume any entropy self.gen.seed(seed) self.assertEqual(self.gen.randbytes(0), b'') self.assertEqual(self.gen.randbytes(16), expected) # Four randbytes(4) calls give the same output than randbytes(16) self.gen.seed(seed) self.assertEqual(b''.join([self.gen.randbytes(4) for _ in range(4)]), expected) # Each randbytes(1), randbytes(2) or randbytes(3) call consumes # 4 bytes of entropy self.gen.seed(seed) expected1 = expected[3::4] self.assertEqual(b''.join(self.gen.randbytes(1) for _ in range(4)), expected1) self.gen.seed(seed) expected2 = b''.join(expected[i + 2: i + 4] for i in range(0, len(expected), 4)) self.assertEqual(b''.join(self.gen.randbytes(2) for _ in range(4)), expected2) self.gen.seed(seed) expected3 = b''.join(expected[i + 1: i + 4] for i in range(0, len(expected), 4)) self.assertEqual(b''.join(self.gen.randbytes(3) for _ in range(4)), expected3) def test_randbytes_getrandbits(self): # There is a simple relation between randbytes() and getrandbits() seed = 2849427419 gen2 = random.Random() self.gen.seed(seed) gen2.seed(seed) for n in range(9): self.assertEqual(self.gen.randbytes(n), gen2.getrandbits(n * 8).to_bytes(n, 'little')) def gamma(z, sqrt2pi=(2.0*pi)**0.5): # Reflection to right half of complex plane if z < 0.5: return pi / sin(pi*z) / gamma(1.0-z) # Lanczos approximation with g=7 az = z + (7.0 - 0.5) return az ** (z-0.5) / exp(az) * sqrt2pi * fsum([ 0.9999999999995183, 676.5203681218835 / z, -1259.139216722289 / (z+1.0), 771.3234287757674 / (z+2.0), -176.6150291498386 / (z+3.0), 12.50734324009056 / (z+4.0), -0.1385710331296526 / (z+5.0), 0.9934937113930748e-05 / (z+6.0), 0.1659470187408462e-06 / (z+7.0), ]) class TestDistributions(unittest.TestCase): def test_zeroinputs(self): # Verify that distributions can handle a series of zero inputs' g = random.Random() x = [g.random() for i in range(50)] + [0.0]*5 g.random = x[:].pop; g.uniform(1,10) g.random = x[:].pop; g.paretovariate(1.0) g.random = x[:].pop; g.expovariate(1.0) g.random = x[:].pop; g.weibullvariate(1.0, 1.0) g.random = x[:].pop; g.vonmisesvariate(1.0, 1.0) g.random = x[:].pop; g.normalvariate(0.0, 1.0) g.random = x[:].pop; g.gauss(0.0, 1.0) g.random = x[:].pop; g.lognormvariate(0.0, 1.0) g.random = x[:].pop; g.vonmisesvariate(0.0, 1.0) g.random = x[:].pop; g.gammavariate(0.01, 1.0) g.random = x[:].pop; g.gammavariate(1.0, 1.0) g.random = x[:].pop; g.gammavariate(200.0, 1.0) g.random = x[:].pop; g.betavariate(3.0, 3.0) g.random = x[:].pop; g.triangular(0.0, 1.0, 1.0/3.0) def test_avg_std(self): # Use integration to test distribution average and standard deviation. 
# Only works for distributions which do not consume variates in pairs g = random.Random() N = 5000 x = [i/float(N) for i in range(1,N)] for variate, args, mu, sigmasqrd in [ (g.uniform, (1.0,10.0), (10.0+1.0)/2, (10.0-1.0)**2/12), (g.triangular, (0.0, 1.0, 1.0/3.0), 4.0/9.0, 7.0/9.0/18.0), (g.expovariate, (1.5,), 1/1.5, 1/1.5**2), (g.vonmisesvariate, (1.23, 0), pi, pi**2/3), (g.paretovariate, (5.0,), 5.0/(5.0-1), 5.0/((5.0-1)**2*(5.0-2))), (g.weibullvariate, (1.0, 3.0), gamma(1+1/3.0), gamma(1+2/3.0)-gamma(1+1/3.0)**2) ]: g.random = x[:].pop y = [] for i in range(len(x)): try: y.append(variate(*args)) except IndexError: pass s1 = s2 = 0 for e in y: s1 += e s2 += (e - mu) ** 2 N = len(y) self.assertAlmostEqual(s1/N, mu, places=2, msg='%s%r' % (variate.__name__, args)) self.assertAlmostEqual(s2/(N-1), sigmasqrd, places=2, msg='%s%r' % (variate.__name__, args)) def test_constant(self): g = random.Random() N = 100 for variate, args, expected in [ (g.uniform, (10.0, 10.0), 10.0), (g.triangular, (10.0, 10.0), 10.0), (g.triangular, (10.0, 10.0, 10.0), 10.0), (g.expovariate, (float('inf'),), 0.0), (g.vonmisesvariate, (3.0, float('inf')), 3.0), (g.gauss, (10.0, 0.0), 10.0), (g.lognormvariate, (0.0, 0.0), 1.0), (g.lognormvariate, (-float('inf'), 0.0), 0.0), (g.normalvariate, (10.0, 0.0), 10.0), (g.paretovariate, (float('inf'),), 1.0), (g.weibullvariate, (10.0, float('inf')), 10.0), (g.weibullvariate, (0.0, 10.0), 0.0), ]: for i in range(N): self.assertEqual(variate(*args), expected) def test_von_mises_range(self): # Issue 17149: von mises variates were not consistently in the # range [0, 2*PI]. g = random.Random() N = 100 for mu in 0.0, 0.1, 3.1, 6.2: for kappa in 0.0, 2.3, 500.0: for _ in range(N): sample = g.vonmisesvariate(mu, kappa) self.assertTrue( 0 <= sample <= random.TWOPI, msg=("vonmisesvariate({}, {}) produced a result {} out" " of range [0, 2*pi]").format(mu, kappa, sample)) def test_von_mises_large_kappa(self): # Issue #17141: vonmisesvariate() was hang for large kappas random.vonmisesvariate(0, 1e15) random.vonmisesvariate(0, 1e100) def test_gammavariate_errors(self): # Both alpha and beta must be > 0.0 self.assertRaises(ValueError, random.gammavariate, -1, 3) self.assertRaises(ValueError, random.gammavariate, 0, 2) self.assertRaises(ValueError, random.gammavariate, 2, 0) self.assertRaises(ValueError, random.gammavariate, 1, -3) # There are three different possibilities in the current implementation # of random.gammavariate(), depending on the value of 'alpha'. What we # are going to do here is to fix the values returned by random() to # generate test cases that provide 100% line coverage of the method. @unittest.mock.patch('random.Random.random') def test_gammavariate_alpha_greater_one(self, random_mock): # #1: alpha > 1.0. # We want the first random number to be outside the # [1e-7, .9999999] range, so that the continue statement executes # once. The values of u1 and u2 will be 0.5 and 0.3, respectively. random_mock.side_effect = [1e-8, 0.5, 0.3] returned_value = random.gammavariate(1.1, 2.3) self.assertAlmostEqual(returned_value, 2.53) @unittest.mock.patch('random.Random.random') def test_gammavariate_alpha_equal_one(self, random_mock): # #2.a: alpha == 1. # The execution body of the while loop executes once. # Then random.random() returns 0.45, # which causes while to stop looping and the algorithm to terminate. 
random_mock.side_effect = [0.45] returned_value = random.gammavariate(1.0, 3.14) self.assertAlmostEqual(returned_value, 1.877208182372648) @unittest.mock.patch('random.Random.random') def test_gammavariate_alpha_equal_one_equals_expovariate(self, random_mock): # #2.b: alpha == 1. # It must be equivalent of calling expovariate(1.0 / beta). beta = 3.14 random_mock.side_effect = [1e-8, 1e-8] gammavariate_returned_value = random.gammavariate(1.0, beta) expovariate_returned_value = random.expovariate(1.0 / beta) self.assertAlmostEqual(gammavariate_returned_value, expovariate_returned_value) @unittest.mock.patch('random.Random.random') def test_gammavariate_alpha_between_zero_and_one(self, random_mock): # #3: 0 < alpha < 1. # This is the most complex region of code to cover, # as there are multiple if-else statements. Let's take a look at the # source code, and determine the values that we need accordingly: # # while 1: # u = random() # b = (_e + alpha)/_e # p = b*u # if p <= 1.0: # <=== (A) # x = p ** (1.0/alpha) # else: # <=== (B) # x = -_log((b-p)/alpha) # u1 = random() # if p > 1.0: # <=== (C) # if u1 <= x ** (alpha - 1.0): # <=== (D) # break # elif u1 <= _exp(-x): # <=== (E) # break # return x * beta # # First, we want (A) to be True. For that we need that: # b*random() <= 1.0 # r1 = random() <= 1.0 / b # # We now get to the second if-else branch, and here, since p <= 1.0, # (C) is False and we take the elif branch, (E). For it to be True, # so that the break is executed, we need that: # r2 = random() <= _exp(-x) # r2 <= _exp(-(p ** (1.0/alpha))) # r2 <= _exp(-((b*r1) ** (1.0/alpha))) _e = random._e _exp = random._exp _log = random._log alpha = 0.35 beta = 1.45 b = (_e + alpha)/_e epsilon = 0.01 r1 = 0.8859296441566 # 1.0 / b r2 = 0.3678794411714 # _exp(-((b*r1) ** (1.0/alpha))) # These four "random" values result in the following trace: # (A) True, (E) False --> [next iteration of while] # (A) True, (E) True --> [while loop breaks] random_mock.side_effect = [r1, r2 + epsilon, r1, r2] returned_value = random.gammavariate(alpha, beta) self.assertAlmostEqual(returned_value, 1.4499999999997544) # Let's now make (A) be False. If this is the case, when we get to the # second if-else 'p' is greater than 1, so (C) evaluates to True. We # now encounter a second if statement, (D), which in order to execute # must satisfy the following condition: # r2 <= x ** (alpha - 1.0) # r2 <= (-_log((b-p)/alpha)) ** (alpha - 1.0) # r2 <= (-_log((b-(b*r1))/alpha)) ** (alpha - 1.0) r1 = 0.8959296441566 # (1.0 / b) + epsilon -- so that (A) is False r2 = 0.9445400408898141 # And these four values result in the following trace: # (B) and (C) True, (D) False --> [next iteration of while] # (B) and (C) True, (D) True [while loop breaks] random_mock.side_effect = [r1, r2 + epsilon, r1, r2] returned_value = random.gammavariate(alpha, beta) self.assertAlmostEqual(returned_value, 1.5830349561760781) @unittest.mock.patch('random.Random.gammavariate') def test_betavariate_return_zero(self, gammavariate_mock): # betavariate() returns zero when the Gamma distribution # that it uses internally returns this same value. 
gammavariate_mock.return_value = 0.0 self.assertEqual(0.0, random.betavariate(2.71828, 3.14159)) class TestRandomSubclassing(unittest.TestCase): def test_random_subclass_with_kwargs(self): # SF bug #1486663 -- this used to erroneously raise a TypeError class Subclass(random.Random): def __init__(self, newarg=None): random.Random.__init__(self) Subclass(newarg=1) def test_subclasses_overriding_methods(self): # Subclasses with an overridden random, but only the original # getrandbits method should not rely on getrandbits in for randrange, # but should use a getrandbits-independent implementation instead. # subclass providing its own random **and** getrandbits methods # like random.SystemRandom does => keep relying on getrandbits for # randrange class SubClass1(random.Random): def random(self): called.add('SubClass1.random') return random.Random.random(self) def getrandbits(self, n): called.add('SubClass1.getrandbits') return random.Random.getrandbits(self, n) called = set() SubClass1().randrange(42) self.assertEqual(called, {'SubClass1.getrandbits'}) # subclass providing only random => can only use random for randrange class SubClass2(random.Random): def random(self): called.add('SubClass2.random') return random.Random.random(self) called = set() SubClass2().randrange(42) self.assertEqual(called, {'SubClass2.random'}) # subclass defining getrandbits to complement its inherited random # => can now rely on getrandbits for randrange again class SubClass3(SubClass2): def getrandbits(self, n): called.add('SubClass3.getrandbits') return random.Random.getrandbits(self, n) called = set() SubClass3().randrange(42) self.assertEqual(called, {'SubClass3.getrandbits'}) # subclass providing only random and inherited getrandbits # => random takes precedence class SubClass4(SubClass3): def random(self): called.add('SubClass4.random') return random.Random.random(self) called = set() SubClass4().randrange(42) self.assertEqual(called, {'SubClass4.random'}) # Following subclasses don't define random or getrandbits directly, # but inherit them from classes which are not subclasses of Random class Mixin1: def random(self): called.add('Mixin1.random') return random.Random.random(self) class Mixin2: def getrandbits(self, n): called.add('Mixin2.getrandbits') return random.Random.getrandbits(self, n) class SubClass5(Mixin1, random.Random): pass called = set() SubClass5().randrange(42) self.assertEqual(called, {'Mixin1.random'}) class SubClass6(Mixin2, random.Random): pass called = set() SubClass6().randrange(42) self.assertEqual(called, {'Mixin2.getrandbits'}) class SubClass7(Mixin1, Mixin2, random.Random): pass called = set() SubClass7().randrange(42) self.assertEqual(called, {'Mixin1.random'}) class SubClass8(Mixin2, Mixin1, random.Random): pass called = set() SubClass8().randrange(42) self.assertEqual(called, {'Mixin2.getrandbits'}) class TestModule(unittest.TestCase): def testMagicConstants(self): self.assertAlmostEqual(random.NV_MAGICCONST, 1.71552776992141) self.assertAlmostEqual(random.TWOPI, 6.28318530718) self.assertAlmostEqual(random.LOG4, 1.38629436111989) self.assertAlmostEqual(random.SG_MAGICCONST, 2.50407739677627) def test__all__(self): # tests validity but not completeness of the __all__ list self.assertTrue(set(random.__all__) <= set(dir(random))) @unittest.skipUnless(hasattr(os, "fork"), "fork() required") def test_after_fork(self): # Test the global Random instance gets reseeded in child r, w = os.pipe() pid = os.fork() if pid == 0: # child process try: val = random.getrandbits(128) with open(w, 
"w") as f: f.write(str(val)) finally: os._exit(0) else: # parent process os.close(w) val = random.getrandbits(128) with open(r, "r") as f: child_val = eval(f.read()) self.assertNotEqual(val, child_val) support.wait_process(pid, exitcode=0) if __name__ == "__main__": unittest.main()
import json import struct import os import sys import subprocess import time import collections import io import cv2 from math import radians import numpy as np import shm red = (0, 0, 255) green = (0, 255, 0) blue = (255, 0, 0) white = (255, 255, 255) cyan = (255, 255, 0) yellow = (0, 255, 255) purple = (127, 0, 127) _all_vision_modules = [(x[0], getattr(shm.vision_modules, x[0])) for x in shm.vision_modules._fields] _all_vision_modules_dict = {name: (name, shm) for (name, shm) in _all_vision_modules} _all_vision_modules_lower_dict = {name.lower(): (name, shm) for (name, shm) in _all_vision_modules} def all_vision_modules(): return _all_vision_modules[:] def module_by_name(module_name, case_sensitive=False): if case_sensitive: return _all_vision_modules_dict[module_name] return _all_vision_modules_lower_dict[module_name.lower()] def fork(target, args=None, kwargs=None): pid = os.fork() if pid == 0: os.setsid() os.umask(0) pid = os.fork() if pid != 0: os._exit(0) dirname = os.path.dirname(os.path.realpath(__file__)) open('{}/pids/{}.pid'.format(dirname, os.getpid()), 'w').close() if args: if kwargs: target(*args, **kwargs) else: target(*args) else: if kwargs: target(**kwargs) else: target() cleanup_pid() sys.exit(0) def cleanup_pid(*args, **kwargs): print('{} exiting'.format(os.getpid())) dirname = os.path.dirname(os.path.realpath(__file__)) try: os.remove('{}/pids/{}.pid'.format(dirname, os.getpid())) except OSError: pass class NTee(io.IOBase): def __init__(self, *dest): super(NTee, self).__init__(os.devnull, 'w') self._dest = dest def write(self, str): for dest in self._dest: dest.write(str) def writelines(self, sequence_of_strings): for string in sequence_of_strings: self.write('{}\n'.format(string)) def flatten(l): for el in l: if isinstance(el, collections.Iterable) and not isinstance(el, str): for sub in flatten(el): yield sub else: yield el def extract_features(image): import cv2 import numpy as np feature_vector = [] _, contours, hierarchy = cv2.findContours(image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) if hierarchy is None: return [] hierarchy = hierarchy[0] contour_info = hierarchy[0] outer_index = 0 while contour_info[3] >= 0: outer_index, contour_info = contour_info[3], hierarchy[contour_info[3]] while contour_info[1] >= 0: outer_index, contour_info = contour_info[1], hierarchy[contour_info[1]] outer_contours = [(outer_index, contours[outer_index])] while contour_info[0] >= 0: outer_index, contour_info = contour_info[0], hierarchy[contour_info[0]] outer_contours.append((outer_index, contours[outer_index])) outer_contours = [(index, contour, cv2.contourArea(contour), cv2.boundingRect(contour)) for (index, contour) in outer_contours] outer_contours.sort(key=lambda x: x[2], reverse=True) outer_contours = outer_contours[:2] outer_contours.sort(key=lambda x: x[3][0]) for index, contour, area, bounding_rect in outer_contours[:2]: moments = cv2.moments(contour) hu_moments = cv2.HuMoments(moments) feature_vector.append(hu_moments) feature_vector.append(area) feature_vector.append(bounding_rect[2:]) inner_contour_area = 0 outer_index = hierarchy[outer_index][2] while outer_index >= 0: inner_contour_area += cv2.contourArea(contours[outer_index]) outer_index, contour_info = contour_info[0], hierarchy[contour_info[0]] feature_vector.append(inner_contour_area) return np.array(list(flatten(feature_vector))) def resize_keep_ratio(image, desired_size): if len(image.shape) == 2: height, width = image.shape else: height, width, _ = image.shape div_ratio = 1 if height > desired_size: div_ratio = 
height / desired_size if width > desired_size: div_ratio = max(div_ratio, width / desired_size) if abs(1 - div_ratio) < 0.01: return image des_height = int(height // div_ratio) des_width = int(width // div_ratio) return cv2.resize(image, (des_width, des_height)) def get_angle_from_rotated_rect(rotrect): """ Computes the relative angle to the sub needed to align to the rectangle along its long axis. """ # True if taller than wide if rotrect[1][0] < rotrect[1][1]: return rotrect[2] return rotrect[2] + 90 def get_angle_from_ellipse(ellipse): """ Computes the relative angle to the sub needed to align to the ellipse along its long axis. """ return (get_angle_from_rotated_rect(ellipse) + 90) % 180 - 90 def draw_angled_arrow(image, center, angle): """ Draws a double sided arrow on image centered at center at an angle of angle degrees. """ sin, cos = np.sin(radians(angle)), np.cos(radians(angle)) rotated_dir = np.array(((cos, -sin), (sin, cos))).dot( np.array((0, -1))) line_length = min(image.shape[0], image.shape[1]) * 0.17 line_start = np.array((center)) + rotated_dir * line_length line_end = np.array((center)) - rotated_dir * line_length def get_tup(vec): return int(vec[0]), int(vec[1]) cv2.arrowedLine(image, get_tup(line_start), get_tup(line_end), (255, 255, 0), 2) cv2.arrowedLine(image, get_tup(line_end), get_tup(line_start), (255, 255, 0), 2) def zero_vision_group(group): group.center_x = 0 group.center_y = 0 group.probability = 0 def post_colorspace(module, original, colorspace): cspace_map = { cv2.COLOR_BGR2HSV : "hsv", cv2.COLOR_BGR2LAB : "lab", cv2.COLOR_BGR2YUV : "yuv", cv2.COLOR_BGR2YCrCb: "ycrcb", cv2.COLOR_BGR2LUV : "luv", cv2.COLOR_BGR2XYZ : "xyz" } conv = cv2.cvtColor(original, colorspace) split = cv2.split(conv) pre = cspace_map[colorspace] module.post(pre + " " + pre[0], split[0]) module.post(pre + " " + pre[int(len(pre) / 2)], split[1]) module.post(pre + " " + pre[-1], split[2]) class Hierarchy: def __init__(self, hierarchy): self.hierarchy = hierarchy[0] def next(self, i): return self.hierarchy[i][0] if self._in_range(i) else -1 def prev(self, i): return self.hierarchy[i][1] if self._in_range(i) else -1 def first_child(self, i): return self.hierarchy[i][2] if self._in_range(i) else -1 def children(self, i): return self.siblings(self.first_child(i)) def parent(self, i): return self.hierarchy[i][3] if self._in_range(i) else -1 def siblings(self, i): if i == -1: return while self.prev(i) != -1: i = self.prev(i) while i != -1: yield i i = self.next(i) def num_sibs(self, i): num = 0 for i in self.siblings(i): num += 1 return num def outermost(self): if len(self.hierarchy) == 0: return [] i = 0 while self.parent(i) != -1: i = self.parent(i) return self.siblings(i) def _in_range(self, i): return -1 < i < len(self.hierarchy) def is_clipping(mat, contour): """ Returns if any points on the contour are close to an edge of the camera view """ cam_height, cam_width = mat.shape[:2] distance = 5 x, y, w, h = cv2.boundingRect(contour) return x <= distance or y <= distance or \ cam_width - w - x <= distance or \ cam_height - h - y <= distance def fill_ratio(mat, contour, threshed): fill_mask = np.zeros(mat.shape[:2], dtype=np.uint8) cv2.drawContours(fill_mask, [contour], -1, 255, thickness=-1) fill_masked = cv2.bitwise_and(threshed, threshed, mask=fill_mask) hull_area = cv2.contourArea(cv2.convexHull(contour)) fill = np.sum(fill_masked) / 255 / hull_area return fill def contour_center(contour): moments = cv2.moments(contour) return (moments['m10'] / moments['m00'], moments['m01'] / 
moments['m00'])
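
# Hedged usage sketch for the Hierarchy helper and contour_center defined above. It
# assumes a single-channel binary image `mask` and the OpenCV 4.x findContours
# signature returning (contours, hierarchy); OpenCV 3.x returns three values, so the
# unpacking would need adapting there. `cv2` is already imported at the top of this
# module.
def _demo_outer_contour_centers(mask):
    contours, hierarchy = cv2.findContours(
        mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    if hierarchy is None:
        return []
    tree = Hierarchy(hierarchy)
    centers = []
    for i in tree.outermost():
        # contour_center divides by m00, so skip degenerate (zero-area) contours
        if cv2.moments(contours[i])['m00'] > 0:
            centers.append(contour_center(contours[i]))
    return centers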
''' brozzler/browser.py - manages the browsers for brozzler Copyright (C) 2014-2020 Internet Archive Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ''' import logging import time import brozzler import itertools import json import websocket import time import threading import brozzler from requests.structures import CaseInsensitiveDict import datetime import base64 from ipaddress import AddressValueError from brozzler.chrome import Chrome import socket import urlcanon class BrowsingException(Exception): pass class NoBrowsersAvailable(Exception): pass class BrowsingTimeout(BrowsingException): pass class BrowserPool: ''' Manages pool of browsers. Automatically chooses available port for the debugging protocol. ''' logger = logging.getLogger(__module__ + '.' + __qualname__) def __init__(self, size=3, **kwargs): ''' Initializes the pool. Args: size: size of pool (default 3) **kwargs: arguments for Browser(...) ''' self.size = size self.kwargs = kwargs self._in_use = set() self._lock = threading.Lock() def _fresh_browser(self): # choose available port sock = socket.socket() sock.bind(('0.0.0.0', 0)) port = sock.getsockname()[1] sock.close() browser = Browser(port=port, **self.kwargs) return browser def acquire_multi(self, n=1): ''' Returns a list of up to `n` browsers. Raises: NoBrowsersAvailable if none available ''' browsers = [] with self._lock: if len(self._in_use) >= self.size: raise NoBrowsersAvailable while len(self._in_use) < self.size and len(browsers) < n: browser = self._fresh_browser() browsers.append(browser) self._in_use.add(browser) return browsers def acquire(self): ''' Returns an available instance. Returns: browser from pool, if available Raises: NoBrowsersAvailable if none available ''' with self._lock: if len(self._in_use) >= self.size: raise NoBrowsersAvailable browser = self._fresh_browser() self._in_use.add(browser) return browser def release(self, browser): browser.stop() # make sure with self._lock: self._in_use.remove(browser) def release_all(self, browsers): for browser in browsers: browser.stop() # make sure with self._lock: for browser in browsers: self._in_use.remove(browser) def shutdown_now(self): self.logger.info( 'shutting down browser pool (%s browsers in use)', len(self._in_use)) with self._lock: for browser in self._in_use: browser.stop() def num_available(self): return self.size - len(self._in_use) def num_in_use(self): return len(self._in_use) class WebsockReceiverThread(threading.Thread): logger = logging.getLogger(__module__ + '.' 
+ __qualname__) def __init__(self, websock, name=None, daemon=True): super().__init__(name=name, daemon=daemon) self.websock = websock self.calling_thread = threading.current_thread() self.websock.on_open = self._on_open self.websock.on_message = self._on_message self.websock.on_error = self._on_error self.websock.on_close = self._on_close self.is_open = False self.got_page_load_event = None self.page_status = None # Loaded page HTTP status code self.reached_limit = None self.on_request = None self.on_response = None self.on_service_worker_version_updated = None self._result_messages = {} def expect_result(self, msg_id): self._result_messages[msg_id] = None def received_result(self, msg_id): return bool(self._result_messages.get(msg_id)) def pop_result(self, msg_id): return self._result_messages.pop(msg_id) def _on_close(self, websock): pass # self.logger.info('GOODBYE GOODBYE WEBSOCKET') def _on_open(self, websock): self.is_open = True def _on_error(self, websock, e): ''' Raises BrowsingException in the thread that created this instance. ''' if isinstance(e, ( websocket.WebSocketConnectionClosedException, ConnectionResetError)): self.logger.error('websocket closed, did chrome die?') else: self.logger.error( 'exception from websocket receiver thread', exc_info=1) brozzler.thread_raise(self.calling_thread, BrowsingException) def run(self): # ping_timeout is used as the timeout for the call to select.select() # in addition to its documented purpose, and must have a value to avoid # hangs in certain situations self.websock.run_forever(sockopt=((socket.IPPROTO_TCP, socket.TCP_NODELAY, 1),), ping_timeout=0.5) def _on_message(self, websock, message): try: self._handle_message(websock, message) except: self.logger.error( 'uncaught exception in _handle_message message=%s', message, exc_info=True) def _network_response_received(self, message): status = message['params']['response'].get('status') if (status == 420 and 'Warcprox-Meta' in CaseInsensitiveDict( message['params']['response']['headers'])): if not self.reached_limit: warcprox_meta = json.loads(CaseInsensitiveDict( message['params']['response']['headers'])['Warcprox-Meta']) self.reached_limit = brozzler.ReachedLimit( warcprox_meta=warcprox_meta) self.logger.info('reached limit %s', self.reached_limit) brozzler.thread_raise( self.calling_thread, brozzler.ReachedLimit) else: self.logger.info( 'reached limit but self.reached_limit is already set, ' 'assuming the calling thread is already handling this') if self.on_response: self.on_response(message) if status and self.page_status is None: self.page_status = status def _javascript_dialog_opening(self, message): self.logger.info('javascript dialog opened: %s', message) if message['params']['type'] == 'alert': accept = True else: accept = False self.websock.send( json.dumps(dict( id=0, method='Page.handleJavaScriptDialog', params={'accept': accept}), separators=',:')) def _handle_message(self, websock, json_message): message = json.loads(json_message) if 'method' in message: if message['method'] == 'Page.loadEventFired': self.got_page_load_event = datetime.datetime.utcnow() elif message['method'] == 'Network.responseReceived': self._network_response_received(message) elif message['method'] == 'Network.requestWillBeSent': if self.on_request: self.on_request(message) elif message['method'] == 'Page.interstitialShown': # AITFIVE-1529: handle http auth # we should kill the browser when we receive Page.interstitialShown and # consider the page finished, until this is fixed: # 
https://bugs.chromium.org/p/chromium/issues/detail?id=764505 self.logger.info('Page.interstialShown (likely unsupported http auth request)') brozzler.thread_raise(self.calling_thread, brozzler.PageInterstitialShown) elif message['method'] == 'Inspector.targetCrashed': self.logger.error( '''chrome tab went "aw snap" or "he's dead jim"!''') brozzler.thread_raise(self.calling_thread, BrowsingException) elif message['method'] == 'Console.messageAdded': self.logger.debug( 'console.%s %s', message['params']['message']['level'], message['params']['message']['text']) elif message['method'] == 'Runtime.exceptionThrown': self.logger.debug('uncaught exception: %s', message) elif message['method'] == 'Page.javascriptDialogOpening': self._javascript_dialog_opening(message) elif (message['method'] == 'Network.loadingFailed' and 'params' in message and 'errorText' in message['params'] and message['params']['errorText'] == 'net::ERR_PROXY_CONNECTION_FAILED'): brozzler.thread_raise(self.calling_thread, brozzler.ProxyError) elif message['method'] == 'ServiceWorker.workerVersionUpdated': if self.on_service_worker_version_updated: self.on_service_worker_version_updated(message) # else: # self.logger.debug("%s %s", message["method"], json_message) elif 'result' in message: if message['id'] in self._result_messages: self._result_messages[message['id']] = message # else: # self.logger.debug("%s", json_message) # else: # self.logger.debug("%s", json_message) class Browser: ''' Manages an instance of Chrome for browsing pages. ''' logger = logging.getLogger(__module__ + '.' + __qualname__) def __init__(self, **kwargs): ''' Initializes the Browser. Args: **kwargs: arguments for Chrome(...) ''' self.chrome = Chrome(**kwargs) self.websock_url = None self.websock = None self.websock_thread = None self.is_browsing = False self._command_id = Counter() self._wait_interval = 0.5 def __enter__(self): self.start() return self def __exit__(self, *args): self.stop() def _wait_for(self, callback, timeout=None): ''' Spins until callback() returns truthy. ''' start = time.time() while True: if callback(): return elapsed = time.time() - start if timeout and elapsed > timeout: raise BrowsingTimeout( 'timed out after %.1fs waiting for: %s' % ( elapsed, callback)) brozzler.sleep(self._wait_interval) def send_to_chrome(self, suppress_logging=False, **kwargs): msg_id = next(self._command_id) kwargs['id'] = msg_id msg = json.dumps(kwargs, separators=',:') logging.log( logging.TRACE if suppress_logging else logging.DEBUG, 'sending message to %s: %s', self.websock, msg) self.websock.send(msg) return msg_id def start(self, **kwargs): ''' Starts chrome if it's not running. Args: **kwargs: arguments for self.chrome.start(...) ''' if not self.is_running(): self.websock_url = self.chrome.start(**kwargs) self.websock = websocket.WebSocketApp(self.websock_url) self.websock_thread = WebsockReceiverThread( self.websock, name='WebsockThread:%s' % self.chrome.port) self.websock_thread.start() self._wait_for(lambda: self.websock_thread.is_open, timeout=30) # tell browser to send us messages we're interested in self.send_to_chrome(method='Network.enable') self.send_to_chrome(method='Page.enable') # Enable Console & Runtime output only when debugging. # After all, we just print these events with debug(), we don't use # them in Brozzler logic. 
if self.logger.isEnabledFor(logging.DEBUG): self.send_to_chrome(method='Console.enable') self.send_to_chrome(method='Runtime.enable') self.send_to_chrome(method='ServiceWorker.enable') self.send_to_chrome(method='ServiceWorker.setForceUpdateOnPageLoad') # disable google analytics and amp analytics self.send_to_chrome( method='Network.setBlockedURLs', params={'urls': ['*google-analytics.com/analytics.js*', '*google-analytics.com/ga.js*', '*google-analytics.com/ga_exp.js*', '*google-analytics.com/urchin.js*', '*google-analytics.com/collect*', '*google-analytics.com/r/collect*', '*google-analytics.com/__utm.gif*', '*google-analytics.com/gtm/js?*', '*google-analytics.com/cx/api.js*', '*cdn.ampproject.org/*/amp-analytics*.js']}) def stop(self): ''' Stops chrome if it's running. ''' try: if (self.websock and self.websock.sock and self.websock.sock.connected): self.logger.info('shutting down websocket connection') try: self.websock.close() except BaseException as e: self.logger.error( 'exception closing websocket %s - %s', self.websock, e) self.chrome.stop() if self.websock_thread and ( self.websock_thread != threading.current_thread()): self.websock_thread.join(timeout=30) if self.websock_thread.is_alive(): self.logger.error( '%s still alive 30 seconds after closing %s, will ' 'forcefully nudge it again', self.websock_thread, self.websock) self.websock.keep_running = False self.websock_thread.join(timeout=30) if self.websock_thread.is_alive(): self.logger.critical( '%s still alive 60 seconds after closing %s', self.websock_thread, self.websock) self.websock_url = None except: self.logger.error('problem stopping', exc_info=True) def is_running(self): return self.websock_url is not None def browse_page( self, page_url, extra_headers=None, user_agent=None, behavior_parameters=None, behaviors_dir=None, on_request=None, on_response=None, on_service_worker_version_updated=None, on_screenshot=None, username=None, password=None, hashtags=None, screenshot_full_page=False, skip_extract_outlinks=False, skip_visit_hashtags=False, skip_youtube_dl=False, simpler404=False, page_timeout=300, behavior_timeout=900, extract_outlinks_timeout=60, download_throughput=-1): ''' Browses page in browser. Browser should already be running, i.e. start() should have been called. Opens the page_url in the browser, runs behaviors, takes a screenshot, extracts outlinks. 
Args: page_url: url of the page to browse extra_headers: dict of extra http headers to configure the browser to send with every request (default None) user_agent: user agent string, replaces browser default if supplied (default None) behavior_parameters: dict of parameters for populating the javascript behavior template (default None) behaviors_dir: Directory containing behaviors.yaml and JS templates (default None loads Brozzler default JS behaviors) on_request: callback to invoke on every Network.requestWillBeSent event, takes one argument, the json-decoded message (default None) on_response: callback to invoke on every Network.responseReceived event, takes one argument, the json-decoded message (default None) on_service_worker_version_updated: callback to invoke on every ServiceWorker.workerVersionUpdated event, takes one argument, the json-decoded message (default None) on_screenshot: callback to invoke when screenshot is obtained, takes one argument, the the raw jpeg bytes (default None) # XXX takes two arguments, the url of the page at the time the # screenshot was taken, and the raw jpeg bytes (default None) username: username string to use to try logging in if a login form is found in the page (default None) password: password string to use to try logging in if a login form is found in the page (default None) ... (there are more) Returns: A tuple (final_page_url, outlinks). final_page_url: the url in the location bar at the end of the browse_page cycle, which could be different from the original page url if the page redirects, javascript has changed the url in the location bar, etc outlinks: a list of navigational links extracted from the page Raises: brozzler.ProxyError: in case of proxy connection error BrowsingException: if browsing the page fails in some other way ''' if not self.is_running(): raise BrowsingException('browser has not been started') if self.is_browsing: raise BrowsingException('browser is already busy browsing a page') self.is_browsing = True if on_request: self.websock_thread.on_request = on_request if on_response: self.websock_thread.on_response = on_response if on_service_worker_version_updated: self.websock_thread.on_service_worker_version_updated = \ on_service_worker_version_updated try: with brozzler.thread_accept_exceptions(): self.configure_browser( extra_headers=extra_headers, user_agent=user_agent, download_throughput=download_throughput) self.navigate_to_page(page_url, timeout=page_timeout) if password: self.try_login(username, password, timeout=page_timeout) # if login redirected us, return to page_url if page_url != self.url().split('#')[0]: self.logger.debug( 'login navigated away from %s; returning!', page_url) self.navigate_to_page(page_url, timeout=page_timeout) # If the target page HTTP status is 4xx/5xx, there is no point # in running behaviors, outlink and hashtag extraction as we # didn't get a valid page. Screenshot should run because i # may be useful to have a picture of the error page. # This is only enabled with option `simpler404`. 
run_behaviors = True if simpler404 and (self.websock_thread.page_status is None or self.websock_thread.page_status >= 400): run_behaviors = False if run_behaviors and behavior_timeout > 0: behavior_script = brozzler.behavior_script( page_url, behavior_parameters, behaviors_dir=behaviors_dir) self.run_behavior(behavior_script, timeout=behavior_timeout) final_page_url = self.url() if on_screenshot: self._try_screenshot(on_screenshot, screenshot_full_page) if not run_behaviors or skip_extract_outlinks: outlinks = [] else: outlinks = self.extract_outlinks( timeout=extract_outlinks_timeout ) if run_behaviors and not skip_visit_hashtags: self.visit_hashtags(final_page_url, hashtags, outlinks) return final_page_url, outlinks except brozzler.ReachedLimit: # websock_thread has stashed the ReachedLimit exception with # more information, raise that one raise self.websock_thread.reached_limit except websocket.WebSocketConnectionClosedException as e: self.logger.error('websocket closed, did chrome die?') raise BrowsingException(e) finally: self.is_browsing = False self.websock_thread.on_request = None self.websock_thread.on_response = None def _try_screenshot(self, on_screenshot, full_page=False): """The browser instance must be scrolled to the top of the page before trying to get a screenshot. """ self.send_to_chrome(method='Runtime.evaluate', suppress_logging=True, params={'expression': 'window.scroll(0,0)'}) for i in range(3): try: jpeg_bytes = self.screenshot(full_page) on_screenshot(jpeg_bytes) return except BrowsingTimeout as e: logging.error('attempt %s/3: %s', i+1, e) def visit_hashtags(self, page_url, hashtags, outlinks): _hashtags = set(hashtags or []) for outlink in outlinks: url = urlcanon.whatwg(outlink) hashtag = (url.hash_sign + url.fragment).decode('utf-8') urlcanon.canon.remove_fragment(url) if hashtag and str(url) == page_url: _hashtags.add(hashtag) # could inject a script that listens for HashChangeEvent to figure # out which hashtags were visited already and skip those for hashtag in _hashtags: # navigate_to_hashtag (nothing to wait for so no timeout?) self.logger.debug('navigating to hashtag %s', hashtag) url = urlcanon.whatwg(page_url) url.hash_sign = b'#' url.fragment = hashtag[1:].encode('utf-8') self.send_to_chrome( method='Page.navigate', params={'url': str(url)}) time.sleep(5) # um.. wait for idleness or something? # take another screenshot? # run behavior again with short timeout? # retrieve outlinks again and append to list? 
def configure_browser(self, extra_headers=None, user_agent=None, download_throughput=-1): headers = extra_headers or {} headers['Accept-Encoding'] = 'gzip' # avoid encodings br, sdch self.websock_thread.expect_result(self._command_id.peek()) msg_id = self.send_to_chrome( method='Network.setExtraHTTPHeaders', params={'headers': headers}) self._wait_for( lambda: self.websock_thread.received_result(msg_id), timeout=10) if user_agent: msg_id = self.send_to_chrome( method='Network.setUserAgentOverride', params={'userAgent': user_agent}) if download_throughput > -1: # traffic shaping already used by SPN2 to aid warcprox resilience # parameter value as bytes/second, or -1 to disable (default) msg_id = self.send_to_chrome(method='Network.emulateNetworkConditions', params={'downloadThroughput': download_throughput}) def navigate_to_page(self, page_url, timeout=300): self.logger.info('navigating to page %s', page_url) self.websock_thread.got_page_load_event = None self.websock_thread.page_status = None self.send_to_chrome(method='Page.navigate', params={'url': page_url}) self._wait_for( lambda: self.websock_thread.got_page_load_event, timeout=timeout) def extract_outlinks(self, timeout=60): self.logger.info('extracting outlinks') self.websock_thread.expect_result(self._command_id.peek()) js = brozzler.jinja2_environment().get_template( 'extract-outlinks.js').render() msg_id = self.send_to_chrome( method='Runtime.evaluate', params={'expression': js}) self._wait_for( lambda: self.websock_thread.received_result(msg_id), timeout=timeout) message = self.websock_thread.pop_result(msg_id) if ('result' in message and 'result' in message['result'] and 'value' in message['result']['result']): if message['result']['result']['value']: out = [] for link in message['result']['result']['value'].split('\n'): try: out.append(str(urlcanon.whatwg(link))) except AddressValueError: self.logger.warning('skip invalid outlink: %s', link) return frozenset(out) else: # no links found return frozenset() else: self.logger.error( 'problem extracting outlinks, result message: %s', message) return frozenset() def screenshot(self, full_page=False, timeout=45): """Optionally capture full page screenshot using puppeteer as an inspiration: https://github.com/GoogleChrome/puppeteer/blob/master/lib/Page.js#L898 """ self.logger.info('taking screenshot') if full_page: self.websock_thread.expect_result(self._command_id.peek()) msg_id = self.send_to_chrome(method='Page.getLayoutMetrics') self._wait_for( lambda: self.websock_thread.received_result(msg_id), timeout=timeout) message = self.websock_thread.pop_result(msg_id) width = message['result']['contentSize']['width'] height = message['result']['contentSize']['height'] clip = dict(x=0, y=0, width=width, height=height, scale=1) deviceScaleFactor = 1 screenOrientation = {'angle': 0, 'type': 'portraitPrimary'} self.send_to_chrome( method='Emulation.setDeviceMetricsOverride', params=dict(mobile=False, width=width, height=height, deviceScaleFactor=deviceScaleFactor, screenOrientation=screenOrientation) ) capture_params = {'format': 'jpeg', 'quality': 95, 'clip': clip} else: capture_params = {'format': 'jpeg', 'quality': 95} self.websock_thread.expect_result(self._command_id.peek()) msg_id = self.send_to_chrome(method='Page.captureScreenshot', params=capture_params) self._wait_for( lambda: self.websock_thread.received_result(msg_id), timeout=timeout) message = self.websock_thread.pop_result(msg_id) jpeg_bytes = base64.b64decode(message['result']['data']) return jpeg_bytes def url(self, 
timeout=30): ''' Returns value of document.URL from the browser. ''' self.websock_thread.expect_result(self._command_id.peek()) msg_id = self.send_to_chrome( method='Runtime.evaluate', params={'expression': 'document.URL'}) self._wait_for( lambda: self.websock_thread.received_result(msg_id), timeout=timeout) message = self.websock_thread.pop_result(msg_id) return message['result']['result']['value'] def run_behavior(self, behavior_script, timeout=900): self.send_to_chrome( method='Runtime.evaluate', suppress_logging=True, params={'expression': behavior_script}) check_interval = min(timeout, 7) start = time.time() while True: elapsed = time.time() - start if elapsed > timeout: logging.info( 'behavior reached hard timeout after %.1fs', elapsed) return brozzler.sleep(check_interval) self.websock_thread.expect_result(self._command_id.peek()) msg_id = self.send_to_chrome( method='Runtime.evaluate', suppress_logging=True, params={'expression': 'umbraBehaviorFinished()'}) try: self._wait_for( lambda: self.websock_thread.received_result(msg_id), timeout=5) msg = self.websock_thread.pop_result(msg_id) if (msg and 'result' in msg and not ('exceptionDetails' in msg['result']) and not ('wasThrown' in msg['result'] and msg['result']['wasThrown']) and 'result' in msg['result'] and type(msg['result']['result']['value']) == bool and msg['result']['result']['value']): self.logger.info('behavior decided it has finished') return except BrowsingTimeout: pass def try_login(self, username, password, timeout=300): try_login_js = brozzler.jinja2_environment().get_template( 'try-login.js.j2').render(username=username, password=password) self.websock_thread.got_page_load_event = None self.send_to_chrome( method='Runtime.evaluate', suppress_logging=True, params={'expression': try_login_js}) # wait for tryLogin to finish trying (should be very very quick) start = time.time() while True: self.websock_thread.expect_result(self._command_id.peek()) msg_id = self.send_to_chrome( method='Runtime.evaluate', params={'expression': 'try { __brzl_tryLoginState } catch (e) { "maybe-submitted-form" }'}) try: self._wait_for( lambda: self.websock_thread.received_result(msg_id), timeout=5) msg = self.websock_thread.pop_result(msg_id) if (msg and 'result' in msg and 'result' in msg['result']): result = msg['result']['result']['value'] if result == 'login-form-not-found': # we're done return elif result in ('submitted-form', 'maybe-submitted-form'): # wait for page load event below self.logger.info( 'submitted a login form, waiting for another ' 'page load event') break # else try again to get __brzl_tryLoginState except BrowsingTimeout: pass if time.time() - start > 30: raise BrowsingException( 'timed out trying to check if tryLogin finished') # if we get here, we submitted a form, now we wait for another page # load event self._wait_for( lambda: self.websock_thread.got_page_load_event, timeout=timeout) class Counter: def __init__(self): self.next_value = 0 def __next__(self): try: return self.next_value finally: self.next_value += 1 def peek(self): return self.next_value
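
# Hedged usage sketch for BrowserPool/Browser above: acquire a browser, browse one
# page, and always release it. The chrome_exe keyword and the URL are illustrative
# assumptions (BrowserPool forwards its kwargs through Browser to Chrome), not a
# statement of brozzler's exact constructor signature.
def _demo_browse_one_page(url='https://example.com/'):
    pool = BrowserPool(size=1, chrome_exe='chromium-browser')
    browser = pool.acquire()    # raises NoBrowsersAvailable if the pool is exhausted
    try:
        browser.start()
        # behavior_timeout=0 skips running page behaviors; outlinks are still extracted
        final_url, outlinks = browser.browse_page(url, behavior_timeout=0)
        return final_url, sorted(outlinks)
    finally:
        pool.release(browser)   # release() also stops the underlying chrome process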
#!/usr/bin/python from .py2specials import * from .py3specials import * import binascii import hashlib import re import sys import os import base64 import time import random import hmac from bitcoin.ripemd import * # Elliptic curve parameters (secp256k1) P = 2**256 - 2**32 - 977 N = 115792089237316195423570985008687907852837564279074904382605163141518161494337 A = 0 B = 7 Gx = 55066263022277343669578718895168534326250603453777594175500187360389116729240 Gy = 32670510020758816978083085130507043184471273380659243275938904335757337482424 G = (Gx, Gy) def change_curve(p, n, a, b, gx, gy): global P, N, A, B, Gx, Gy, G P, N, A, B, Gx, Gy = p, n, a, b, gx, gy G = (Gx, Gy) def getG(): return G # Extended Euclidean Algorithm def inv(a, n): if a == 0: return 0 lm, hm = 1, 0 low, high = a % n, n while low > 1: r = high//low nm, new = hm-lm*r, high-low*r lm, low, hm, high = nm, new, lm, low return lm % n # JSON access (for pybtctool convenience) def access(obj, prop): if isinstance(obj, dict): if prop in obj: return obj[prop] elif '.' in prop: return obj[float(prop)] else: return obj[int(prop)] else: return obj[int(prop)] def multiaccess(obj, prop): return [access(o, prop) for o in obj] def slice(obj, start=0, end=2**200): return obj[int(start):int(end)] def count(obj): return len(obj) _sum = sum def sum(obj): return _sum(obj) def isinf(p): return p[0] == 0 and p[1] == 0 def to_jacobian(p): o = (p[0], p[1], 1) return o def jacobian_double(p): if not p[1]: return (0, 0, 0) ysq = (p[1] ** 2) % P S = (4 * p[0] * ysq) % P M = (3 * p[0] ** 2 + A * p[2] ** 4) % P nx = (M**2 - 2 * S) % P ny = (M * (S - nx) - 8 * ysq ** 2) % P nz = (2 * p[1] * p[2]) % P return (nx, ny, nz) def jacobian_add(p, q): if not p[1]: return q if not q[1]: return p U1 = (p[0] * q[2] ** 2) % P U2 = (q[0] * p[2] ** 2) % P S1 = (p[1] * q[2] ** 3) % P S2 = (q[1] * p[2] ** 3) % P if U1 == U2: if S1 != S2: return (0, 0, 1) return jacobian_double(p) H = U2 - U1 R = S2 - S1 H2 = (H * H) % P H3 = (H * H2) % P U1H2 = (U1 * H2) % P nx = (R ** 2 - H3 - 2 * U1H2) % P ny = (R * (U1H2 - nx) - S1 * H3) % P nz = (H * p[2] * q[2]) % P return (nx, ny, nz) def from_jacobian(p): z = inv(p[2], P) return ((p[0] * z**2) % P, (p[1] * z**3) % P) def jacobian_multiply(a, n): if a[1] == 0 or n == 0: return (0, 0, 1) if n == 1: return a if n < 0 or n >= N: return jacobian_multiply(a, n % N) if (n % 2) == 0: return jacobian_double(jacobian_multiply(a, n//2)) if (n % 2) == 1: return jacobian_add(jacobian_double(jacobian_multiply(a, n//2)), a) def fast_multiply(a, n): return from_jacobian(jacobian_multiply(to_jacobian(a), n)) def fast_add(a, b): return from_jacobian(jacobian_add(to_jacobian(a), to_jacobian(b))) # Functions for handling pubkey and privkey formats def get_pubkey_format(pub): if is_python2: two = '\x02' three = '\x03' four = '\x04' else: two = 2 three = 3 four = 4 if isinstance(pub, (tuple, list)): return 'decimal' elif len(pub) == 65 and pub[0] == four: return 'bin' elif len(pub) == 130 and pub[0:2] == '04': return 'hex' elif len(pub) == 33 and pub[0] in [two, three]: return 'bin_compressed' elif len(pub) == 66 and pub[0:2] in ['02', '03']: return 'hex_compressed' elif len(pub) == 64: return 'bin_electrum' elif len(pub) == 128: return 'hex_electrum' else: raise Exception("Pubkey not in recognized format") def encode_pubkey(pub, formt): if not isinstance(pub, (tuple, list)): pub = decode_pubkey(pub) if formt == 'decimal': return pub elif formt == 'bin': return b'\x04' + encode(pub[0], 256, 32) + encode(pub[1], 256, 32) elif formt == 'bin_compressed': 
return from_int_to_byte(2+(pub[1] % 2)) + encode(pub[0], 256, 32) elif formt == 'hex': return '04' + encode(pub[0], 16, 64) + encode(pub[1], 16, 64) elif formt == 'hex_compressed': return '0'+str(2+(pub[1] % 2)) + encode(pub[0], 16, 64) elif formt == 'bin_electrum': return encode(pub[0], 256, 32) + encode(pub[1], 256, 32) elif formt == 'hex_electrum': return encode(pub[0], 16, 64) + encode(pub[1], 16, 64) else: raise Exception("Invalid format!") def decode_pubkey(pub, formt=None): if not formt: formt = get_pubkey_format(pub) if formt == 'decimal': return pub elif formt == 'bin': return (decode(pub[1:33], 256), decode(pub[33:65], 256)) elif formt == 'bin_compressed': x = decode(pub[1:33], 256) beta = pow(int(x*x*x+A*x+B), int((P+1)//4), int(P)) y = (P-beta) if ((beta + from_byte_to_int(pub[0])) % 2) else beta return (x, y) elif formt == 'hex': return (decode(pub[2:66], 16), decode(pub[66:130], 16)) elif formt == 'hex_compressed': return decode_pubkey(safe_from_hex(pub), 'bin_compressed') elif formt == 'bin_electrum': return (decode(pub[:32], 256), decode(pub[32:64], 256)) elif formt == 'hex_electrum': return (decode(pub[:64], 16), decode(pub[64:128], 16)) else: raise Exception("Invalid format!") def get_privkey_format(priv): if isinstance(priv, int_types): return 'decimal' elif len(priv) == 32: return 'bin' elif len(priv) == 33: return 'bin_compressed' elif len(priv) == 64: return 'hex' elif len(priv) == 66: return 'hex_compressed' else: bin_p = b58check_to_bin(priv) if len(bin_p) == 32: return 'wif' elif len(bin_p) == 33: return 'wif_compressed' else: raise Exception("WIF does not represent privkey") def encode_privkey(priv, formt, vbyte=0): if not isinstance(priv, int_types): return encode_privkey(decode_privkey(priv), formt, vbyte) if formt == 'decimal': return priv elif formt == 'bin': return encode(priv, 256, 32) elif formt == 'bin_compressed': return encode(priv, 256, 32)+b'\x01' elif formt == 'hex': return encode(priv, 16, 64) elif formt == 'hex_compressed': return encode(priv, 16, 64)+'01' elif formt == 'wif': return bin_to_b58check(encode(priv, 256, 32), 128+int(vbyte)) elif formt == 'wif_compressed': return bin_to_b58check(encode(priv, 256, 32)+b'\x01', 128+int(vbyte)) else: raise Exception("Invalid format!") def decode_privkey(priv,formt=None): if not formt: formt = get_privkey_format(priv) if formt == 'decimal': return priv elif formt == 'bin': return decode(priv, 256) elif formt == 'bin_compressed': return decode(priv[:32], 256) elif formt == 'hex': return decode(priv, 16) elif formt == 'hex_compressed': return decode(priv[:64], 16) elif formt == 'wif': return decode(b58check_to_bin(priv),256) elif formt == 'wif_compressed': return decode(b58check_to_bin(priv)[:32],256) else: raise Exception("WIF does not represent privkey") def add_pubkeys(p1, p2): f1, f2 = get_pubkey_format(p1), get_pubkey_format(p2) return encode_pubkey(fast_add(decode_pubkey(p1, f1), decode_pubkey(p2, f2)), f1) def add_privkeys(p1, p2): f1, f2 = get_privkey_format(p1), get_privkey_format(p2) return encode_privkey((decode_privkey(p1, f1) + decode_privkey(p2, f2)) % N, f1) def multiply(pubkey, privkey): f1, f2 = get_pubkey_format(pubkey), get_privkey_format(privkey) pubkey, privkey = decode_pubkey(pubkey, f1), decode_privkey(privkey, f2) # http://safecurves.cr.yp.to/twist.html if not isinf(pubkey) and (pubkey[0]**3+B-pubkey[1]*pubkey[1]) % P != 0: raise Exception("Point not on curve") return encode_pubkey(fast_multiply(pubkey, privkey), f1) def divide(pubkey, privkey): factor = inv(decode_privkey(privkey), N) 
return multiply(pubkey, factor) def compress(pubkey): f = get_pubkey_format(pubkey) if 'compressed' in f: return pubkey elif f == 'bin': return encode_pubkey(decode_pubkey(pubkey, f), 'bin_compressed') elif f == 'hex' or f == 'decimal': return encode_pubkey(decode_pubkey(pubkey, f), 'hex_compressed') def decompress(pubkey): f = get_pubkey_format(pubkey) if 'compressed' not in f: return pubkey elif f == 'bin_compressed': return encode_pubkey(decode_pubkey(pubkey, f), 'bin') elif f == 'hex_compressed' or f == 'decimal': return encode_pubkey(decode_pubkey(pubkey, f), 'hex') def privkey_to_pubkey(privkey): f = get_privkey_format(privkey) privkey = decode_privkey(privkey, f) if privkey >= N: raise Exception("Invalid privkey") if f in ['bin', 'bin_compressed', 'hex', 'hex_compressed', 'decimal']: return encode_pubkey(fast_multiply(G, privkey), f) else: return encode_pubkey(fast_multiply(G, privkey), f.replace('wif', 'hex')) privtopub = privkey_to_pubkey def privkey_to_address(priv, magicbyte=0): return pubkey_to_address(privkey_to_pubkey(priv), magicbyte) privtoaddr = privkey_to_address def neg_pubkey(pubkey): f = get_pubkey_format(pubkey) pubkey = decode_pubkey(pubkey, f) return encode_pubkey((pubkey[0], (P-pubkey[1]) % P), f) def neg_privkey(privkey): f = get_privkey_format(privkey) privkey = decode_privkey(privkey, f) return encode_privkey((N - privkey) % N, f) def subtract_pubkeys(p1, p2): f1, f2 = get_pubkey_format(p1), get_pubkey_format(p2) k2 = decode_pubkey(p2, f2) return encode_pubkey(fast_add(decode_pubkey(p1, f1), (k2[0], (P - k2[1]) % P)), f1) def subtract_privkeys(p1, p2): f1, f2 = get_privkey_format(p1), get_privkey_format(p2) k2 = decode_privkey(p2, f2) return encode_privkey((decode_privkey(p1, f1) - k2) % N, f1) # Hashes def bin_hash160(string): intermed = hashlib.sha256(string).digest() digest = '' try: digest = hashlib.new('ripemd160', intermed).digest() except: digest = RIPEMD160(intermed).digest() return digest def hash160(string): return safe_hexlify(bin_hash160(string)) def bin_sha256(string): binary_data = string if isinstance(string, bytes) else bytes(string, 'utf-8') return hashlib.sha256(binary_data).digest() def sha256(string): return bytes_to_hex_string(bin_sha256(string)) def bin_ripemd160(string): try: digest = hashlib.new('ripemd160', string).digest() except: digest = RIPEMD160(string).digest() return digest def ripemd160(string): return safe_hexlify(bin_ripemd160(string)) def bin_dbl_sha256(s): bytes_to_hash = from_string_to_bytes(s) return hashlib.sha256(hashlib.sha256(bytes_to_hash).digest()).digest() def dbl_sha256(string): return safe_hexlify(bin_dbl_sha256(string)) def bin_slowsha(string): string = from_string_to_bytes(string) orig_input = string for i in range(100000): string = hashlib.sha256(string + orig_input).digest() return string def slowsha(string): return safe_hexlify(bin_slowsha(string)) def hash_to_int(x): if len(x) in [40, 64]: return decode(x, 16) return decode(x, 256) def num_to_var_int(x): x = int(x) if x < 253: return from_int_to_byte(x) elif x < 65536: return from_int_to_byte(253)+encode(x, 256, 2)[::-1] elif x < 4294967296: return from_int_to_byte(254) + encode(x, 256, 4)[::-1] else: return from_int_to_byte(255) + encode(x, 256, 8)[::-1] # WTF, Electrum? def electrum_sig_hash(message): padded = b"\x18Bitcoin Signed Message:\n" + num_to_var_int(len(message)) + from_string_to_bytes(message) return bin_dbl_sha256(padded) def random_key(): # Gotta be secure after that java.SecureRandom fiasco... 
entropy = random_string(32) \ + str(random.randrange(2**256)) \ + str(int(time.time() * 1000000)) return sha256(entropy) def random_electrum_seed(): entropy = os.urandom(32) \ + str(random.randrange(2**256)) \ + str(int(time.time() * 1000000)) return sha256(entropy)[:32] # Encodings def b58check_to_bin(inp): leadingzbytes = len(re.match('^1*', inp).group(0)) data = b'\x00' * leadingzbytes + changebase(inp, 58, 256) assert bin_dbl_sha256(data[:-4])[:4] == data[-4:] return data[1:-4] def get_version_byte(inp): leadingzbytes = len(re.match('^1*', inp).group(0)) data = b'\x00' * leadingzbytes + changebase(inp, 58, 256) assert bin_dbl_sha256(data[:-4])[:4] == data[-4:] return ord(data[0]) def hex_to_b58check(inp, magicbyte=0): return bin_to_b58check(binascii.unhexlify(inp), magicbyte) def b58check_to_hex(inp): return safe_hexlify(b58check_to_bin(inp)) def pubkey_to_address(pubkey, magicbyte=0): if isinstance(pubkey, (list, tuple)): pubkey = encode_pubkey(pubkey, 'bin') if len(pubkey) in [66, 130]: return bin_to_b58check( bin_hash160(binascii.unhexlify(pubkey)), magicbyte) return bin_to_b58check(bin_hash160(pubkey), magicbyte) pubtoaddr = pubkey_to_address # EDCSA def encode_sig(v, r, s): vb, rb, sb = from_int_to_byte(v), encode(r, 256), encode(s, 256) result = base64.b64encode(vb+b'\x00'*(32-len(rb))+rb+b'\x00'*(32-len(sb))+sb) return result if is_python2 else str(result, 'utf-8') def decode_sig(sig): bytez = base64.b64decode(sig) return from_byte_to_int(bytez[0]), decode(bytez[1:33], 256), decode(bytez[33:], 256) # https://tools.ietf.org/html/rfc6979#section-3.2 def deterministic_generate_k(msghash, priv): v = b'\x01' * 32 k = b'\x00' * 32 priv = encode_privkey(priv, 'bin') msghash = encode(hash_to_int(msghash), 256, 32) k = hmac.new(k, v+b'\x00'+priv+msghash, hashlib.sha256).digest() v = hmac.new(k, v, hashlib.sha256).digest() k = hmac.new(k, v+b'\x01'+priv+msghash, hashlib.sha256).digest() v = hmac.new(k, v, hashlib.sha256).digest() return decode(hmac.new(k, v, hashlib.sha256).digest(), 256) def ecdsa_raw_sign(msghash, priv): z = hash_to_int(msghash) k = deterministic_generate_k(msghash, priv) r, y = fast_multiply(G, k) s = inv(k, N) * (z + r*decode_privkey(priv)) % N return 27+((y % 2) ^ (0 if s * 2 < N else 1)), r, s if s * 2 < N else N - s def ecdsa_sign(msg, priv): return encode_sig(*ecdsa_raw_sign(electrum_sig_hash(msg), priv)) def ecdsa_raw_verify(msghash, vrs, pub): v, r, s = vrs w = inv(s, N) z = hash_to_int(msghash) u1, u2 = z*w % N, r*w % N x, y = fast_add(fast_multiply(G, u1), fast_multiply(decode_pubkey(pub), u2)) return r == x def ecdsa_verify(msg, sig, pub): return ecdsa_raw_verify(electrum_sig_hash(msg), decode_sig(sig), pub) def ecdsa_raw_recover(msghash, vrs): v, r, s = vrs x = r xcubedaxb = (x*x*x+A*x+B) % P beta = pow(xcubedaxb, (P+1)//4, P) y = beta if v % 2 ^ beta % 2 else (P - beta) # If xcubedaxb is not a quadratic residue, then r cannot be the x coord # for a point on the curve, and so the sig is invalid if (xcubedaxb - y*y) % P != 0: return False z = hash_to_int(msghash) Gz = jacobian_multiply((Gx, Gy, 1), (N - z) % N) XY = jacobian_multiply((x, y, 1), s) Qr = jacobian_add(Gz, XY) Q = jacobian_multiply(Qr, inv(r, N)) Q = from_jacobian(Q) # if ecdsa_raw_verify(msghash, vrs, Q): return Q # return False def ecdsa_recover(msg, sig): return encode_pubkey(ecdsa_raw_recover(electrum_sig_hash(msg), decode_sig(sig)), 'hex')
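
# Hedged roundtrip sketch for the ECDSA helpers above: derive a public key, sign a
# message with the deterministic (RFC 6979 style) nonce, then verify and recover the
# signer. The passphrase-derived key and the message are purely illustrative; never
# use a phrase-derived key for real funds.
def _demo_sign_verify_roundtrip(msg='hello world'):
    priv = sha256('demo passphrase, not a real key')  # 64-char hex private key
    pub = privtopub(priv)                             # uncompressed hex pubkey
    sig = ecdsa_sign(msg, priv)
    assert ecdsa_verify(msg, sig, pub)
    # recovery should return the same uncompressed hex pubkey that signed the message
    assert ecdsa_recover(msg, sig) == pub
    return sig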
# -*- coding: utf-8 -*- """ Django settings for jmauricio project. For more information on this file, see https://docs.djangoproject.com/en/dev/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/dev/ref/settings/ """ # Build paths inside the project like this: os.path.join(BASE_DIR, ...) import os from os.path import join, dirname from configurations import Configuration, values BASE_DIR = dirname(dirname(__file__)) class Common(Configuration): # APP CONFIGURATION DJANGO_APPS = ( # Default Django apps: 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.sites', 'django.contrib.messages', 'django.contrib.staticfiles', # Useful template tags: # 'django.contrib.humanize', # Admin 'django.contrib.admin', ) THIRD_PARTY_APPS = ( 'crispy_forms', # Form layouts 'crispy_forms_foundation', # foundation form layouts 'avatar', # for user avatars 'allauth', # registration 'allauth.account', # registration 'allauth.socialaccount', # registration 'allauth.socialaccount.providers.facebook', 'allauth.socialaccount.providers.google', 'foundation', # foundation framework ) # Apps specific for this project go here. LOCAL_APPS = ( 'apps.users', # custom users app # Your stuff: custom apps go here ) # See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS # END APP CONFIGURATION # MIDDLEWARE CONFIGURATION MIDDLEWARE_CLASSES = ( # Make sure djangosecure.middleware.SecurityMiddleware is listed first 'djangosecure.middleware.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ) # END MIDDLEWARE CONFIGURATION # MIGRATIONS CONFIGURATION MIGRATION_MODULES = { 'sites': 'contrib.sites.migrations' } # END MIGRATIONS CONFIGURATION # DEBUG # See: https://docs.djangoproject.com/en/dev/ref/settings/#debug DEBUG = values.BooleanValue(False) # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug TEMPLATE_DEBUG = DEBUG # END DEBUG # SECRET CONFIGURATION # See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key # Note: This key only used for development and testing. # In production, this is changed to a values.SecretValue() setting SECRET_KEY = 'CHANGEME!!!' # END SECRET CONFIGURATION # FIXTURE CONFIGURATION # See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS FIXTURE_DIRS = ( join(BASE_DIR, 'fixtures'), ) # END FIXTURE CONFIGURATION # EMAIL CONFIGURATION EMAIL_BACKEND = values.Value('django.core.mail.backends.smtp.EmailBackend') # END EMAIL CONFIGURATION # MANAGER CONFIGURATION # See: https://docs.djangoproject.com/en/dev/ref/settings/#admins ADMINS = ( ("""Jose Mauricio Oliveira e Silva""", '[email protected]'), ) # See: https://docs.djangoproject.com/en/dev/ref/settings/#managers MANAGERS = ADMINS # END MANAGER CONFIGURATION # DATABASE CONFIGURATION # See: https://docs.djangoproject.com/en/dev/ref/settings/#databases DATABASES = values.DatabaseURLValue('postgres://postgres:postgres@localhost:5432/jmauricio') # END DATABASE CONFIGURATION # CACHING # Do this here because thanks to django-pylibmc-sasl and pylibmc # memcacheify (used on heroku) is painful to install on windows. 
CACHES = { 'default': { 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', 'LOCATION': '' } } # END CACHING # GENERAL CONFIGURATION # See: https://docs.djangoproject.com/en/dev/ref/settings/#time-zone TIME_ZONE = 'America/Fortaleza' # See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code LANGUAGE_CODE = 'pt-br' # See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id SITE_ID = 1 # See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n USE_I18N = True # See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n USE_L10N = True # See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz USE_TZ = True # END GENERAL CONFIGURATION # TEMPLATE CONFIGURATION # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors TEMPLATE_CONTEXT_PROCESSORS = ( 'django.contrib.auth.context_processors.auth', 'allauth.account.context_processors.account', 'allauth.socialaccount.context_processors.socialaccount', 'django.core.context_processors.debug', 'django.core.context_processors.i18n', 'django.core.context_processors.media', 'django.core.context_processors.static', 'django.core.context_processors.tz', 'django.contrib.messages.context_processors.messages', 'django.core.context_processors.request', # Your stuff: custom template context processers go here ) # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs TEMPLATE_DIRS = ( join(BASE_DIR, 'templates'), ) TEMPLATE_LOADERS = ( 'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', ) # See: http://django-crispy-forms.readthedocs.org/en/latest/install.html#template-packs CRISPY_TEMPLATE_PACK = 'bootstrap3' # Default layout to use with "crispy_forms" #CRISPY_TEMPLATE_PACK = 'foundation-5' # END TEMPLATE CONFIGURATION # STATIC FILE CONFIGURATION # See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root STATIC_ROOT = join(os.path.dirname(BASE_DIR), 'staticfiles') # See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url STATIC_URL = '/static/' # See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS STATICFILES_DIRS = ( join(BASE_DIR, 'static'), ) # See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders STATICFILES_FINDERS = ( 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', ) # END STATIC FILE CONFIGURATION # MEDIA CONFIGURATION # See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root MEDIA_ROOT = join(BASE_DIR, 'media') # See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url MEDIA_URL = '/media/' # END MEDIA CONFIGURATION # URL Configuration ROOT_URLCONF = 'urls' # See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application WSGI_APPLICATION = 'wsgi.application' # End URL Configuration # AUTHENTICATION CONFIGURATION AUTHENTICATION_BACKENDS = ( 'django.contrib.auth.backends.ModelBackend', 'allauth.account.auth_backends.AuthenticationBackend', ) # Some really nice defaults ACCOUNT_AUTHENTICATION_METHOD = 'username' ACCOUNT_EMAIL_REQUIRED = True ACCOUNT_EMAIL_VERIFICATION = 'mandatory' # END AUTHENTICATION CONFIGURATION # Custom user app defaults # Select the correct user model AUTH_USER_MODEL = 'users.User' LOGIN_REDIRECT_URL = 'users:redirect' LOGIN_URL = 'account_login' # END Custom user app defaults # PROVIDERS CONFIGURATION SOCIALACCOUNT_PROVIDERS = \ { 'facebook': {'SCOPE': ['email', 'publish_stream'], 'AUTH_PARAMS': 
{'auth_type': 'reauthenticate'}, 'METHOD': 'oauth2' }, 'google': { 'SCOPE': ['profile', 'email'], 'AUTH_PARAMS': { 'access_type': 'online' } } } # END PROVIDERS CONFIGURATION # SLUGLIFIER AUTOSLUG_SLUGIFY_FUNCTION = 'slugify.slugify' # END SLUGLIFIER # LOGGING CONFIGURATION # See: https://docs.djangoproject.com/en/dev/ref/settings/#logging # A sample logging configuration. The only tangible logging # performed by this configuration is to send an email to # the site admins on every HTTP 500 error when DEBUG=False. # See http://docs.djangoproject.com/en/dev/topics/logging for # more details on how to customize your logging configuration. LOGGING = { 'version': 1, 'disable_existing_loggers': False, 'filters': { 'require_debug_false': { '()': 'django.utils.log.RequireDebugFalse' } }, 'handlers': { 'mail_admins': { 'level': 'ERROR', 'filters': ['require_debug_false'], 'class': 'django.utils.log.AdminEmailHandler' } }, 'loggers': { 'django.request': { 'handlers': ['mail_admins'], 'level': 'ERROR', 'propagate': True, }, } } # END LOGGING CONFIGURATION @classmethod def post_setup(cls): cls.DATABASES['default']['ATOMIC_REQUESTS'] = True # Your common stuff: Below this line define 3rd party library settings
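# --- Illustrative usage sketch (not part of the original settings) ---
# The CACHES block above binds the 'default' alias to LocMemCache.  The helper
# below is a hedged example of how application code could exercise that alias
# through Django's cache framework; the key, value and timeout are made up.
def _cache_roundtrip_example():
    from django.core.cache import caches  # deferred import so settings load first
    cache = caches['default']             # alias defined in CACHES above
    cache.set('greeting', 'ola', timeout=30)
    return cache.get('greeting')          # 'ola' until the 30 s timeout expires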
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime import uuid from keystoneauth1 import _utils from keystoneauth1.fixture import exception class _Service(dict): def add_endpoint(self, public, admin=None, internal=None, tenant_id=None, region=None, id=None): data = {'tenantId': tenant_id or uuid.uuid4().hex, 'publicURL': public, 'adminURL': admin or public, 'internalURL': internal or public, 'region': region, 'id': id or uuid.uuid4().hex} self.setdefault('endpoints', []).append(data) return data class Token(dict): """A V2 Keystone token that can be used for testing. This object is designed to allow clients to generate a correct V2 token for use in there test code. It should prevent clients from having to know the correct token format and allow them to test the portions of token handling that matter to them and not copy and paste sample. """ def __init__(self, token_id=None, expires=None, issued=None, tenant_id=None, tenant_name=None, user_id=None, user_name=None, trust_id=None, trustee_user_id=None, audit_id=None, audit_chain_id=None): super(Token, self).__init__() self.token_id = token_id or uuid.uuid4().hex self.user_id = user_id or uuid.uuid4().hex self.user_name = user_name or uuid.uuid4().hex self.audit_id = audit_id or uuid.uuid4().hex if not issued: issued = _utils.before_utcnow(minutes=2) if not expires: expires = issued + datetime.timedelta(hours=1) try: self.issued = issued except (TypeError, AttributeError): # issued should be able to be passed as a string so ignore self.issued_str = issued try: self.expires = expires except (TypeError, AttributeError): # expires should be able to be passed as a string so ignore self.expires_str = expires if tenant_id or tenant_name: self.set_scope(tenant_id, tenant_name) if trust_id or trustee_user_id: # the trustee_user_id will generally be the same as the user_id as # the token is being issued to the trustee self.set_trust(id=trust_id, trustee_user_id=trustee_user_id or user_id) if audit_chain_id: self.audit_chain_id = audit_chain_id @property def root(self): return self.setdefault('access', {}) @property def _token(self): return self.root.setdefault('token', {}) @property def token_id(self): return self._token['id'] @token_id.setter def token_id(self, value): self._token['id'] = value @property def expires_str(self): return self._token['expires'] @expires_str.setter def expires_str(self, value): self._token['expires'] = value @property def expires(self): return _utils.parse_isotime(self.expires_str) @expires.setter def expires(self, value): self.expires_str = value.isoformat() @property def issued_str(self): return self._token['issued_at'] @issued_str.setter def issued_str(self, value): self._token['issued_at'] = value @property def issued(self): return _utils.parse_isotime(self.issued_str) @issued.setter def issued(self, value): self.issued_str = value.isoformat() @property def _user(self): return self.root.setdefault('user', {}) @property def user_id(self): return self._user['id'] @user_id.setter def user_id(self, value): self._user['id'] = 
value

    @property
    def user_name(self):
        return self._user['name']

    @user_name.setter
    def user_name(self, value):
        self._user['name'] = value

    @property
    def tenant_id(self):
        return self._token.get('tenant', {}).get('id')

    @tenant_id.setter
    def tenant_id(self, value):
        self._token.setdefault('tenant', {})['id'] = value

    @property
    def tenant_name(self):
        return self._token.get('tenant', {}).get('name')

    @tenant_name.setter
    def tenant_name(self, value):
        self._token.setdefault('tenant', {})['name'] = value

    @property
    def _metadata(self):
        return self.root.setdefault('metadata', {})

    @property
    def trust_id(self):
        return self.root.setdefault('trust', {}).get('id')

    @trust_id.setter
    def trust_id(self, value):
        self.root.setdefault('trust', {})['id'] = value

    @property
    def trustee_user_id(self):
        return self.root.setdefault('trust', {}).get('trustee_user_id')

    @trustee_user_id.setter
    def trustee_user_id(self, value):
        self.root.setdefault('trust', {})['trustee_user_id'] = value

    @property
    def audit_id(self):
        try:
            return self._token.get('audit_ids', [])[0]
        except IndexError:
            return None

    @audit_id.setter
    def audit_id(self, value):
        # keep any existing chain id in the second slot of audit_ids
        audit_chain_id = self.audit_chain_id
        lval = [value, audit_chain_id] if audit_chain_id else [value]
        self._token['audit_ids'] = lval

    @property
    def audit_chain_id(self):
        try:
            return self._token.get('audit_ids', [])[1]
        except IndexError:
            return None

    @audit_chain_id.setter
    def audit_chain_id(self, value):
        self._token['audit_ids'] = [self.audit_id, value]

    def validate(self):
        # the class only exposes the internal _token/_user accessors
        scoped = 'tenant' in self._token
        catalog = self.root.get('serviceCatalog')

        if catalog and not scoped:
            msg = 'You cannot have a service catalog on an unscoped token'
            raise exception.FixtureValidationError(msg)

        if scoped and not self._user.get('roles'):
            msg = 'You must have roles on a token to scope it'
            raise exception.FixtureValidationError(msg)

    def add_role(self, name=None, id=None):
        id = id or uuid.uuid4().hex
        name = name or uuid.uuid4().hex
        roles = self._user.setdefault('roles', [])
        roles.append({'name': name})
        self._metadata.setdefault('roles', []).append(id)
        return {'id': id, 'name': name}

    def add_service(self, type, name=None):
        name = name or uuid.uuid4().hex
        service = _Service(name=name, type=type)
        self.root.setdefault('serviceCatalog', []).append(service)
        return service

    def set_scope(self, id=None, name=None):
        self.tenant_id = id or uuid.uuid4().hex
        self.tenant_name = name or uuid.uuid4().hex

    def set_trust(self, id=None, trustee_user_id=None):
        self.trust_id = id or uuid.uuid4().hex
        self.trustee_user_id = trustee_user_id or uuid.uuid4().hex
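# --- Illustrative usage sketch (not part of the fixture module) ---
# A hedged example of how a test might build a scoped V2 token with the
# fixture above; the user, tenant, role and service names are invented.
def _example_scoped_token():
    token = Token(user_name='demo-user', tenant_name='demo-project')
    token.add_role(name='member')                       # scoped tokens need roles
    service = token.add_service('identity', name='keystone')
    service.add_endpoint('http://localhost:5000/v2.0')  # admin/internal URLs default to public
    return token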
""" Copyright (c) 2017 Baidu Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import random import os import copy import warnings from itertools import groupby import itertools from collections import OrderedDict from py_gflags import get_flag from maze2d import spanning_tree_maze_generator from py_util import check_or_get_value """ Entity: id - unique str for this entity type - "agent", "goal", "block" location - (x, y, 0) yaw - in radian scale - (0, 1.0] offset - [0, 1-scale] asset_path - the icon image path name - name of the entity color - color of the entity """ class Entity: def __init__(self, type, id=None, loc=None, name=None, asset_path=None, color=None, yaw=1.5707963, scale=1.0, offset=0.0): if not loc is None: assert isinstance(loc, tuple) and len(loc) == 3 self.type = type self.id = id self.loc = loc self.yaw = yaw self.scale = scale self.offset = offset self.name = name self.asset_path = asset_path self.color = color class XWorldEnv(object): PI_2 = 1.5707963 curriculum_check_period = 100 def __init__(self, item_path, max_height, max_width, start_level, maze_generation): """ item_path: path to the item images max_height/max_width: maximum height of the world; if a smaller world size is specified using set_dims, the extra space will be padded with wall blocks maze_generattion: True: use maze generator for placing the blocks False: randomly assign available locations to blocks """ self.num_games = -1 ## load all items from item_path self.grid_types = ["goal", "block", "agent"] self.item_path = item_path self.current_level = start_level self.max_height = max_height self.max_width = max_width self.maze_generation = maze_generation self.current_usage = {} self.curriculum_check_counter = 0 self.all_icon_paths = [] for dirpath, _, files in os.walk(item_path): for f in files: if f.endswith(".jpg") or f.endswith(".png"): self.all_icon_paths.append(os.path.join(dirpath, f)) self.set_goal_subtrees([]) ## init dimensions self.__clean_env() ## read item colors color_file = os.path.join(item_path, "properties.txt") assert os.path.exists(color_file) with open(color_file, "r") as f: lines = f.read().splitlines() self.color_table = {os.path.join(item_path, l.split()[0]) : l.split()[1]\ for l in lines if not l.startswith("//") and not l == ""} ############################ interface with Python tasks ############################ def reset(self): """ Reset the map. """ self.__clean_env() self._configure() self.__instantiate_entities() def get_current_usage(self): self.curriculum_check_counter += 1 if self.curriculum_check_counter < XWorldEnv.curriculum_check_period \ or not self.current_usage: return 0 usage = min([sum(l) / float(len(l)) for l in self.current_usage.values()]) self.curriculum_check_counter = 0 return usage def get_num_games(self): """ How many sessions the agent has played """ return self.num_games def set_dims(self, h, w): """ Set the dimensions of the map. If h or w is less than self.max_height or self.max_width, then walls will be automatically padded. 
The python user should use coordinates in [0, h) and [0, w). """ assert h >= 1 and w >= 1 assert h <= self.max_height and w <= self.max_width self.height = h self.width = w ## if the actual size is smaller than the max, then we pad walls self.offset_h = (self.max_height - h) / 2 self.offset_w = (self.max_width - w) / 2 self.pad_blocks = self.__padding_walls() existing_entities = [e.loc for e in self.entities] self.available_grids = list(set(self.__generate_all_grids(h, w))-set(existing_entities)) self.changed = True def set_entity(self, type, loc=None, name=None, force_occupy=False): """ Add an entity instance of type to loc which must be currently empty if force_occupy is False. If force_occupy is True, then omit the location based availability check. """ if not loc is None: if not force_occupy: assert loc in self.available_grids, \ "invalid or unavailable location %s; available grids %s" % (loc, self.available_grids) if loc in self.available_grids: self.available_grids.remove(loc) self.entity_nums[type] += 1 self.entities.append(Entity(type=type, loc=loc, name=name)) self.changed = True def set_property(self, entity, property_value_dict={}): """ Reinstantiate the specified properties of an existing entity. Properties and corresponding values are specified by the property_value_dict in the form of {property : value, ...}, e.g. {"name" : "apple", "loc" : (0, 0)}. value could be None (force reinstantiation) or a valid value for that property. 1) If None value is provided for a specified property (e.g. {"name" : None}), entity property will be reinstantiated regardless of its original value. 2) otherwise, the value will be assigned to that property of the entity. For the remaining not in property_value_dict: 3) all unset entity properties will be instantiated. 4) the already set properties will keep the same. Because name and asset_path have a dependency, we require that at most one of them is not None. a. When name is None but asset_path is not, then name will be uniquely determined from the path; b. When asset_path is None but name is not, then path will be randomly selected for the name; c. When both are None, both are randomly selected. 
""" pv_dict = entity.__dict__.copy() ## let the user overwrite the specified pv_dict.update(property_value_dict) ## pre-processing for name and asset_path due to their dependency path_value = pv_dict["asset_path"] name_value = pv_dict["name"] if path_value is not None: assert name_value is None, "With asset_path, you don't have to set name" names = [n for n in self.items[entity.type] \ if path_value in self.items[entity.type][n]] assert len(names) == 1, \ "each asset_path corresponds to only one name: %s" % (path_value) pv_dict["name"] = names[0] # else: do nothing; asset_path will be set later ## set each key in entity.__dict__.keys() if entity.loc is not None: self.available_grids.append(entity.loc) entity.loc = check_or_get_value(pv_dict["loc"], self.available_grids) self.available_grids.remove(entity.loc) ## entity.name = check_or_get_value( pv_dict["name"], self.get_all_possible_names(entity.type)) entity.id = "%s_%d" % (entity.name, self.running_id) self.running_id += 1 ## entity.asset_path = check_or_get_value( pv_dict["asset_path"], self.items[entity.type][entity.name]) # color is coupled with asset_path if entity.asset_path in self.color_table.keys(): entity.color = self.color_table[entity.asset_path] else: entity.color = "na" ## if get_flag("visible_radius"): if entity.type == "agent": yaw_range = range(-1, 3) entity.yaw = check_or_get_value(pv_dict["yaw"], yaw_range) * self.PI_2 if entity.type == "goal": ## if partially observed, perturb the objects yaw_range = [0, self.PI_2 * 4] entity.yaw = check_or_get_value( pv_dict["yaw"], yaw_range, is_continuous=True) ## scale_range = [0.5, 1] entity.scale = check_or_get_value( pv_dict["scale"], scale_range, is_continuous=True) ## offset_range = [0, 1 - entity.scale] entity.offset = check_or_get_value( pv_dict["offset"], offset_range, is_continuous=True) self.changed = True def set_entity_inst(self, e): if not e.loc is None: assert e.loc in self.available_grids self.available_grids.remove(e.loc) self.entity_nums[e.type] += 1 self.entities.append(e) self.changed = True def delete_entity(self, x): """ Delete an entity on the current map either by its location or id """ self.entities.remove(x) self.entity_nums[x.type] -= 1 self.available_grids.append(x.loc) self.changed = True def set_goal_subtrees(self, subtrees): """ Set goal directory substrees so that only goals in the selected subtrees will be sampled when generating the map. The user can use this function to control the number of goal classes. The change of goal subtrees will only be reflected for the next game, after reset() is called. The current game still uses old goal subtrees. 
""" goal_path = os.path.join(self.item_path, "goal") self.icon_paths = copy.deepcopy(self.all_icon_paths) if len(subtrees) > 0: self.icon_paths \ = [p for p in self.icon_paths \ if not p.startswith(goal_path) or p.split("/")[-2] in subtrees] ## get a hierarchy of all possible objects key = lambda p: '_'.join(p.split('_')[:-1]) objects = groupby(sorted(self.icon_paths, key=key), key=key) self.items = {t : {} for t in self.grid_types} for k, g in objects: type = [t for t in k.split("/") if t in self.grid_types][0] assert type in self.items self.items[type][os.path.basename(k)] = list(g) def get_max_dims(self): """ Get the max height and width of the map We return the max height and width because C++ will render the padding walls """ return (self.max_height, self.max_width) def get_dims(self): return (self.height, self.width) def get_n(self, type): """ Get the current number of entities on the map for type """ assert type in self.entity_nums return self.entity_nums[type] def get_all_possible_names(self, type): """ Return all possible names for type 'goal' - all unique object names 'block' - all block names 'agent' - all agent names """ return self.items[type].keys() def get_all_colors(self): """ Return all possible colors in xworld """ return list(set(self.color_table.values())) def get_agent(self): """ Get the agent information: (entity, agent sentence, action success) """ agent = [e for e in self.entities if e.type == "agent"][0] return (agent, self.agent_sent, self.action_successful) def get_goals(self): """ Return all the goals on the current map """ return [e for e in self.entities if e.type == "goal"] def get_blocks(self): """ Return all the blocks on the current map """ return [e for e in self.entities if e.type == "block"] def get_available_grids(self): """ Return all the available grids on the current map """ return self.available_grids def get_entities(self): """ Return all the entities on the current map """ return self.entities def record_environment_usage(self, task_name, x): """ Update the current environment usage The higher the usage is, the better the agent handles the environment (so it might be a good time now to move to more difficult scenarios) This quantity can be used to generate a curriculum of the world """ self.current_usage[task_name] = x ######################## interface with C++ ############################# def dump_curriculum_progress(self): return self.current_level def env_changed(self): """ Whether the environment has been changed by the teacher during the current stage of the task. If yes, then py_task.cpp will notify the simulator to update the game environment. """ ret = self.changed self.changed = False return ret def cpp_get_entities(self): """ C++ code gets entities information. Used by the underlying simulator. C++ entity is in 3D so we need to add an additional 0. A 3D entity is compatible with different games. """ actual_entities = [e.__dict__ for e in self.entities] for e in actual_entities: e["loc"] = (e["loc"][0] + self.offset_w, e["loc"][1] + self.offset_h, 0) pad_entities = [e.__dict__ for e in self.pad_blocks] ## pad with walls return actual_entities + pad_entities def update_entities_from_cpp(self, entities): """ Update the environment from C++. The changes might be due to the environment dynamics or the agent's actions. Entities is a list of python dicts. We only keep the first two dimensions of a C++ entity. 
""" self.entity_nums = {t : 0 for t in self.grid_types} self.entities = [Entity(**i) for i in entities if not self.__is_padding_block(i["loc"])] for e in self.entities: e.loc = (e.loc[0] - self.offset_w, e.loc[1] - self.offset_h, 0) self.entity_nums[e.type] += 1 # update available grids self.available_grids = set(self.__generate_all_grids(self.height, self.width, shuffle=False)) occupied = set([e.loc for e in self.entities]) self.available_grids -= occupied self.available_grids = list(self.available_grids) random.shuffle(self.available_grids) def update_agent_sentence_from_cpp(self, sent): """ Update the agent sentence from the CPP simulator """ self.agent_sent = sent def update_agent_action_success_from_cpp(self, successful): """ Update the agent action success from the CPP simulator """ self.action_successful = successful def update_game_event_from_cpp(self, event): """ Update the game event from CPP simulator """ self.game_event = event ######################## private or protected ######################### def _configure(self): """ The user has to override this function to define how the map will be generated after each session resetting """ raise NotImplementedError() def __instantiate_entities(self): """ For each entity, select an instance from the object class it belongs to, after which its properties are set. The entities should have been set in _configure() """ if self.maze_generation: Y, X = self.get_dims() maze = spanning_tree_maze_generator(X, Y) blocks = [(j, i, 0) for i,m in enumerate(maze) for j,b in enumerate(m) if b == '#'] ## maybe not all blocks of the maze will be used later random.shuffle(blocks) ## first remove all maze blocks from the available set ## do not only remove part of them, because agent/goal might get stuck in a closed room for b in blocks: if b in self.available_grids: self.available_grids.remove(b) ## instantiate properties for each entity for e in self.entities: if e.loc is not None: warnings.warn("Maze generation is on! Overwriting pre-specified location %s!" % (e.loc,)) e.loc = None # remove the pre-set location when maze_generation is on ## skip setting loc for block here and set it later if e.type != "block": ## if non-block, randomize the yaw, scale, and offset self.set_property( e, property_value_dict={"yaw": None, "scale": None, "offset": None}) else: assert blocks, "too many blocks for a valid maze" e.loc = blocks.pop() self.set_property(e) ## still need to set other properties ## add back the unused grids self.available_grids += blocks else: ## instantiate properties for each entity for i, e in enumerate(self.entities): self.set_property(e) def __padding_walls(self): """ Given the max height and width of a map and (offset_w, offset_h), return a list of padding wall blocks. 
The actual space for the agent is (offset_w, offset_h, offset_w + self.width, offset_h + self.height) """ wall_blocks = [] def add_blocks(range1, range2, id): for loc in itertools.product(range1, range2, (0,)): wall_blocks.append(Entity(type="block", loc=loc, id="block_%d" % id, name="brick", color="na", asset_path=self.items["block"]["brick"][0])) id += 1 return id id = add_blocks(range(0, self.offset_w), range(0, self.height + self.offset_h), self.max_height * self.max_width) id = add_blocks(range(self.offset_w, self.max_width), range(0, self.offset_h), id); id = add_blocks(range(self.offset_w + self.width, self.max_width), range(self.offset_h, self.max_height), id) id = add_blocks(range(0, self.offset_w + self.width), range(self.offset_h + self.height, self.max_height), id) return wall_blocks def __is_padding_block(self, loc): """ Given a location, determine whether it's a padding block or not """ x, y = loc[:2] return not (x >= self.offset_w and x < self.offset_w + self.width and \ y >= self.offset_h and y < self.offset_h + self.height) def __generate_all_grids(self, height, width, shuffle=True): """ Given height and width, generate all the grids as the outer product of [0, h) x [0, w)] Randomly shuffle all grids if shuffle is True Return list of all the grids """ assert height >= 1 and width >= 1 all_grids = list(itertools.product(range(width), range(height), (0,))) if shuffle: random.shuffle(all_grids) return all_grids def __clean_env(self): """ Reset members; preparing for the next session """ self.num_games += 1 self.agent_sent = "" self.running_id = 0 self.changed = False self.entities = [] self.entity_nums = {t : 0 for t in self.grid_types} self.available_grids = [] self.set_dims(self.max_height, self.max_width)
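# --- Illustrative subclass sketch (not part of the original module) ---
# _configure() is the abstract hook each task must override to lay out a
# session's map.  The subclass below is a made-up minimal example (assuming
# the environment was constructed with max dimensions of at least 5x5): a
# 5x5 map with one agent, one goal and two blocks, whose locations are later
# filled in by __instantiate_entities().
class _TinyXWorldEnv(XWorldEnv):
    def _configure(self):
        self.set_dims(5, 5)
        self.set_entity("agent")
        self.set_entity("goal")
        for _ in range(2):
            self.set_entity("block")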
import os import json import time from datetime import datetime from django.conf import settings from django import template from django.template.loader import get_template from django.template import TemplateSyntaxError from django.contrib.staticfiles.templatetags.staticfiles import static from django.core.serializers.json import DjangoJSONEncoder from ..gizmo_dependencies import global_dependencies register = template.Library() CSS_OUTPUT_TYPE = 'css' JS_OUTPUT_TYPE = 'js' CSS_EXTENSION = 'css' JS_EXTENSION = 'js' EXTERNAL_INDICATOR = '://' VALID_OUTPUT_TYPES = (CSS_OUTPUT_TYPE, JS_OUTPUT_TYPE) class HighchartsDateEncoder(DjangoJSONEncoder): """ Special Json Encoder for Tethys """ def default(self, obj): # Highcharts date serializer if isinstance(obj, datetime): return time.mktime(obj.timetuple()) * 1000 return super(HighchartsDateEncoder, self).default(obj) @register.filter(is_safe=True) def isstring(value): """ Filter that returns a type """ if value is str: return True else: return False @register.filter def return_item(l, i): try: return l[i] except: return None def json_date_handler(obj): if isinstance(obj, datetime): return time.mktime(obj.timetuple()) * 1000 else: return obj @register.filter def jsonify(data): """ Convert python data structures into a JSON string """ return json.dumps(data, default=json_date_handler) @register.filter def divide(value, divisor): """ Divide value by divisor """ v = float(value) d = float(divisor) return v/d class TethysGizmoIncludeNode(template.Node): """ Custom template include node that returns Tethys gizmos """ def __init__(self, gizmo_name, options, *args, **kwargs): super(TethysGizmoIncludeNode, self).__init__(*args, **kwargs) self.gizmo_name = gizmo_name self.options = template.Variable(options) def render(self, context): try: # Get the name of the gizmo to load gizmo_name = self.gizmo_name gizmo_templates_root = os.path.join('tethys_gizmos', 'gizmos') # Handle case where gizmo_name is a string literal if self.gizmo_name[0] in ('"', "'"): gizmo_name = self.gizmo_name.replace("'", '') gizmo_name = gizmo_name.replace('"', '') # Add gizmo name to 'gizmos_rendered' context variable (used to load static libraries if 'gizmos_rendered' not in context: context.update({'gizmos_rendered': []}) if gizmo_name not in context['gizmos_rendered']: context['gizmos_rendered'].append(gizmo_name) # Determine path to gizmo template gizmo_file_name = '{0}.html'.format(gizmo_name) template_name = os.path.join(gizmo_templates_root, gizmo_file_name) # Retrieve the gizmo template and render t = get_template(template_name) c = context.new(self.options.resolve(context)) return t.render(c) except: if settings.TEMPLATE_DEBUG: raise return '' @register.tag def gizmo(parser, token): """ Similar to the include tag, gizmo loads special templates called gizmos that come with the django-tethys_gizmo app. Gizmos provide tools for developing user interface elements with minimal code. Examples include date pickers, maps, and interactive plots. To insert a gizmo, use the "gizmo" tag and give it the name of a gizmo and a dictionary of configuration parameters. Example:: {% load tethys_gizmos %} {% gizmo example_gizmo options %} {% gizmo "example_gizmo" options %} NOTE: the "options" dictionary must be a template context variable. ALSO NOTE: All supporting css and javascript libraries are loaded using the gizmo_dependency tag (see below). 
""" try: tag_name, gizmo_name, options_literal = token.split_contents() except ValueError: raise template.TemplateSyntaxError('"%s" tag requires exactly two arguments' % token.contents.split()[0]) bits = token.split_contents() if len(bits) < 2: raise TemplateSyntaxError('"{0}" tag takes at least one argument: the name of the ' 'template to be included.'.format(bits[0])) return TethysGizmoIncludeNode(gizmo_name, options_literal) class TethysGizmoDependenciesNode(template.Node): """ Loads gizmo dependencies and renders in "script" or "link" tag appropriately. """ def __init__(self, output_type, *args, **kwargs): super(TethysGizmoDependenciesNode, self).__init__(*args, **kwargs) self.output_type = output_type def render(self, context): # Get the gizmos rendered from the context gizmos_rendered = context['gizmos_rendered'] # Compile list of unique gizmo dependencies dependencies = [] # Add gizmo dependencies for rendered_gizmo in gizmos_rendered: try: # Retrieve the "gizmo_dependencies" module and find the appropriate function dependencies_module = __import__('tethys_gizmos.gizmo_dependencies', fromlist=[rendered_gizmo]) dependencies_function = getattr(dependencies_module, rendered_gizmo) # Retrieve a list of dependencies for the gizmo gizmo_deps = dependencies_function(context) # Only append dependencies if they do not already exist for dependency in gizmo_deps: if EXTERNAL_INDICATOR in dependency: static_url = dependency else: static_url = static(dependency) if static_url not in dependencies: # Lookup the static url given the path dependencies.append(static_url) except AttributeError: # Skip those that do not have dependencies pass # Add the global dependencies last for dependency in global_dependencies(context): if EXTERNAL_INDICATOR in dependency: static_url = dependency else: static_url = static(dependency) if static_url not in dependencies: # Lookup the static url given the path dependencies.append(static_url) # Create markup tags script_tags = [] style_tags = [] for dependency in dependencies: # Only process Script tags if the dependency has a ".js" extension and the output type is JS or not specified if JS_EXTENSION in dependency and (self.output_type == JS_OUTPUT_TYPE or self.output_type is None): script_tags.append('<script src="{0}" type="text/javascript"></script>'.format(dependency)) # Only process Style tags if the dependency has a ".css" extension and the output type is CSS or not specified elif CSS_EXTENSION in dependency and (self.output_type == CSS_OUTPUT_TYPE or self.output_type is None): style_tags.append('<link href="{0}" rel="stylesheet" />'.format(dependency)) # Combine all tags tags = style_tags + script_tags tags_string = '\n'.join(tags) return tags_string @register.tag def gizmo_dependencies(parser, token): """ Load all gizmo dependencies (JavaScript and CSS). 
Example:: {% gizmo_dependencies css %} {% gizmo_dependencies js %} """ output_type = None bits = token.split_contents() if len(bits) > 2: raise TemplateSyntaxError('"{0}" takes at most one argument: the type of dependencies to output ' '(either "js" or "css")'.format(token.split_contents()[0])) elif len(bits) == 2: output_type = bits[1] # Validate output_type if output_type: # Remove quotes if output_type[0] in ('"', "'"): output_type = output_type.replace("'", '') output_type = output_type.replace('"', '') # Lowercase output_type = output_type.lower() # Check for valid values if output_type not in VALID_OUTPUT_TYPES: raise TemplateSyntaxError('Invalid output type specified: only "js" and "css" are ' 'allowed, "{0}" given.'.format(output_type)) return TethysGizmoDependenciesNode(output_type)
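# --- Illustrative usage sketch (not part of the original module) ---
# The simple filters defined above are plain callables, so they can be
# exercised directly (e.g. from a unit test) without rendering a template.
# The sample values are made up for demonstration.
def _filters_example():
    assert divide(10, 4) == 2.5
    assert return_item(['a', 'b', 'c'], 1) == 'b'
    assert return_item(['a'], 5) is None      # out-of-range lookups fall back to None
    return jsonify({'answer': 42})            # JSON string via json.dumps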
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid import ddt from neutronclient.common import exceptions from oslo_serialization import jsonutils from kuryr.lib import constants as lib_const from kuryr.lib import utils as lib_utils from kuryr_libnetwork import app from kuryr_libnetwork.tests.unit import base from kuryr_libnetwork import utils class TestKuryrEndpointFailures(base.TestKuryrFailures): """Base class that has the methods commonly shared among endpoint tests. This class mainly has the methods for mocking API calls against Neutron. """ def _create_subnet_with_exception(self, neutron_network_id, docker_endpoint_id, ex): fake_neutron_subnet_v4_id = str(uuid.uuid4()) fake_neutron_subnet_v6_id = str(uuid.uuid4()) self.mox.StubOutWithMock(app.neutron, 'create_subnet') fake_subnet_request = { 'subnets': [{ 'name': '-'.join([docker_endpoint_id, '192.168.1.0']), 'network_id': neutron_network_id, 'ip_version': 4, "cidr": '192.168.1.0/24', 'enable_dhcp': 'False', 'subnetpool_id': '' }, { 'name': '-'.join([docker_endpoint_id, 'fe80::']), 'network_id': neutron_network_id, 'ip_version': 6, "cidr": 'fe80::/64', 'enable_dhcp': 'False', 'subnetpool_id': '' }] } fake_subnets = self._get_fake_subnets( docker_endpoint_id, neutron_network_id, fake_neutron_subnet_v4_id, fake_neutron_subnet_v6_id) if ex: app.neutron.create_subnet(fake_subnet_request).AndRaise(ex) else: app.neutron.create_subnet( fake_subnet_request).AndReturn(fake_subnets) self.mox.ReplayAll() return (fake_neutron_subnet_v4_id, fake_neutron_subnet_v6_id) def _delete_subnet_with_exception(self, neutron_subnet_id, ex): self.mox.StubOutWithMock(app.neutron, 'delete_subnet') if ex: app.neutron.delete_subnet(neutron_subnet_id).AndRaise(ex) else: app.neutron.delete_subnet(neutron_subnet_id).AndReturn(None) self.mox.ReplayAll() def _delete_subnets_with_exception(self, neutron_subnet_ids, ex): self.mox.StubOutWithMock(app.neutron, 'delete_subnet') for neutron_subnet_id in neutron_subnet_ids: if ex: app.neutron.delete_subnet(neutron_subnet_id).AndRaise(ex) else: app.neutron.delete_subnet(neutron_subnet_id).AndReturn(None) self.mox.ReplayAll() def _create_port_with_exception(self, neutron_network_id, docker_endpoint_id, neutron_subnetv4_id, neutron_subnetv6_id, ex): self.mox.StubOutWithMock(app.neutron, 'create_port') fake_port_request = { 'port': { 'name': utils.get_neutron_port_name(docker_endpoint_id), 'admin_state_up': True, "binding:host_id": lib_utils.get_hostname(), 'device_owner': lib_const.DEVICE_OWNER, 'device_id': docker_endpoint_id, 'fixed_ips': [{ 'subnet_id': neutron_subnetv4_id, 'ip_address': '192.168.1.2' }, { 'subnet_id': neutron_subnetv6_id, 'ip_address': 'fe80::f816:3eff:fe20:57c4' }], 'mac_address': "fa:16:3e:20:57:c3", 'network_id': neutron_network_id } } # The following fake response is retrieved from the Neutron doc: # http://developer.openstack.org/api-ref-networking-v2.html#createPort # noqa fake_port = { "port": { "status": "DOWN", "name": utils.get_neutron_port_name(docker_endpoint_id), "allowed_address_pairs": [], 
"admin_state_up": True, "binding:host_id": lib_utils.get_hostname(), "network_id": neutron_network_id, "tenant_id": "d6700c0c9ffa4f1cb322cd4a1f3906fa", "device_owner": lib_const.DEVICE_OWNER, 'device_id': docker_endpoint_id, "mac_address": "fa:16:3e:20:57:c3", 'fixed_ips': [{ 'subnet_id': neutron_subnetv4_id, 'ip_address': '192.168.1.2' }, { 'subnet_id': neutron_subnetv6_id, 'ip_address': 'fe80::f816:3eff:fe20:57c4' }], "id": "65c0ee9f-d634-4522-8954-51021b570b0d", "security_groups": [], "device_id": "" } } if ex: app.neutron.create_port(fake_port_request).AndRaise(ex) else: app.neutron.create_port(fake_port_request).AndReturn(fake_port) self.mox.ReplayAll() def _delete_port_with_exception(self, neutron_port_id, ex): self.mox.StubOutWithMock(app.neutron, "delete_port") if ex: app.neutron.delete_port(neutron_port_id).AndRaise(ex) else: app.neutron.delete_port(neutron_port_id).AndReturn(None) self.mox.ReplayAll() @ddt.ddt class TestKuryrEndpointCreateFailures(TestKuryrEndpointFailures): """Unit tests for the failures for creating endpoints. This test covers error responses listed in the spec: http://developer.openstack.org/api-ref-networking-v2.html#createSubnet # noqa http://developer.openstack.org/api-ref-networking-v2-ext.html#createPort # noqa """ def _invoke_create_request(self, docker_network_id, docker_endpoint_id): data = { 'NetworkID': docker_network_id, 'EndpointID': docker_endpoint_id, 'Options': {}, 'Interface': { 'Address': '192.168.1.2/24', 'AddressIPv6': 'fe80::f816:3eff:fe20:57c4/64', 'MacAddress': "fa:16:3e:20:57:c3" } } response = self.app.post('/NetworkDriver.CreateEndpoint', content_type='application/json', data=jsonutils.dumps(data)) return response @ddt.data(exceptions.Unauthorized, exceptions.Forbidden, exceptions.NotFound, exceptions.ServiceUnavailable) def test_create_endpoint_port_failures(self, GivenException): fake_docker_network_id = lib_utils.get_hash() fake_docker_endpoint_id = lib_utils.get_hash() fake_neutron_network_id = str(uuid.uuid4()) fake_neutron_subnet_v4_id = str(uuid.uuid4()) fake_neutron_subnet_v6_id = str(uuid.uuid4()) fake_subnets = self._get_fake_subnets( fake_docker_endpoint_id, fake_neutron_network_id, fake_neutron_subnet_v4_id, fake_neutron_subnet_v6_id) fake_fixed_ips = ['subnet_id=%s' % fake_neutron_subnet_v4_id, 'ip_address=192.168.1.2', 'subnet_id=%s' % fake_neutron_subnet_v6_id, 'ip_address=fe80::f816:3eff:fe20:57c4'] fake_port_response = {"ports": []} self.mox.StubOutWithMock(app.neutron, 'list_ports') app.neutron.list_ports(fixed_ips=fake_fixed_ips).AndReturn( fake_port_response) self.mox.StubOutWithMock(app.neutron, 'list_subnets') app.neutron.list_subnets( network_id=fake_neutron_network_id, cidr='192.168.1.0/24').AndReturn(fake_subnets) app.neutron.list_subnets( network_id=fake_neutron_network_id, cidr='fe80::/64').AndReturn({'subnets': []}) self._create_port_with_exception(fake_neutron_network_id, fake_docker_endpoint_id, fake_neutron_subnet_v4_id, fake_neutron_subnet_v6_id, GivenException()) self._mock_out_network(fake_neutron_network_id, fake_docker_network_id) response = self._invoke_create_request( fake_docker_network_id, fake_docker_endpoint_id) self.assertEqual(GivenException.status_code, response.status_code) decoded_json = jsonutils.loads(response.data) self.assertIn('Err', decoded_json) self.assertEqual({'Err': GivenException.message}, decoded_json) def test_create_endpoint_bad_request(self): fake_docker_network_id = lib_utils.get_hash() invalid_docker_endpoint_id = 'id-should-be-hexdigits' response = 
self._invoke_create_request( fake_docker_network_id, invalid_docker_endpoint_id) self.assertEqual(400, response.status_code) decoded_json = jsonutils.loads(response.data) self.assertIn('Err', decoded_json) # TODO(tfukushima): Add the better error message validation. self.assertIn(invalid_docker_endpoint_id, decoded_json['Err']) self.assertIn('EndpointID', decoded_json['Err']) @ddt.ddt class TestKuryrEndpointDeleteFailures(TestKuryrEndpointFailures): """Unit tests for the failures for deleting endpoints.""" def _invoke_delete_request(self, docker_network_id, docker_endpoint_id): data = {'NetworkID': docker_network_id, 'EndpointID': docker_endpoint_id} response = self.app.post('/NetworkDriver.DeleteEndpoint', content_type='application/json', data=jsonutils.dumps(data)) return response def test_delete_endpoint_bad_request(self): fake_docker_network_id = lib_utils.get_hash() invalid_docker_endpoint_id = 'id-should-be-hexdigits' response = self._invoke_delete_request( fake_docker_network_id, invalid_docker_endpoint_id) self.assertEqual(400, response.status_code) decoded_json = jsonutils.loads(response.data) self.assertIn('Err', decoded_json) # TODO(tfukushima): Add the better error message validation. self.assertIn(invalid_docker_endpoint_id, decoded_json['Err']) self.assertIn('EndpointID', decoded_json['Err'])
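# --- Illustrative helper sketch (not part of the original test module) ---
# The create-endpoint tests above pass Neutron's list_ports() a flat list of
# 'subnet_id=...' / 'ip_address=...' filter strings.  This hedged helper just
# shows how such a list is composed; the default addresses mirror the fake
# data used in the tests and the subnet ids are whatever the caller supplies.
def _example_fixed_ip_filters(subnet_v4_id, subnet_v6_id,
                              v4_address='192.168.1.2',
                              v6_address='fe80::f816:3eff:fe20:57c4'):
    return ['subnet_id=%s' % subnet_v4_id,
            'ip_address=%s' % v4_address,
            'subnet_id=%s' % subnet_v6_id,
            'ip_address=%s' % v6_address]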
#!/usr/bin/python2.6 # # Copyright 2010 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """A helper library which provides support for (windows) chrome installation. The library provides methods to check if Chrome is installed and the ability to install/uninstall it. Currently, there is no cleanup of previously downloaded installers and their associated folders. """ import cStringIO import csv import os import subprocess import sys import time import urllib import urllib2 import _winreg import mylogger logger = mylogger.InitLogging('Chrome_SiteCompat', True, True) # Url that contains the list of the latest Chrome builds. OMAHA_URL = 'https://omahaproxy.appspot.com/dl_urls' # Installer flags and executable CHROME_SILIENT_INSTALL_FLAGS = ' --do-not-launch-chrome --system-level' CHROME_SILIENT_UNINSTALL_FLAGS = (' --uninstall --force-uninstall ' '--delete-profile --system-level') INSTALLER_FILENAME = 'installer.exe' # Registry keys CHROME_EXE_KEY = (r'Software\Microsoft\Windows\CurrentVersion\App Paths' r'\chrome.exe') VERSION_KEY_PATH = r'Software\Google' VERSION_KEY = 'bots_installed_version' class ChromeAutomationHelperException(Exception): pass # TODO(user): Refactor this into base class and create factory which will # return platform specific implementation. class ChromeAutomationHelper(object): """Provides methods to support chrome automation.""" def InstallChrome(self, operating_system, channel, download_info=''): """Silent install of Chrome for all users. Args: operating_system: A string representing the desired operating system for the build. Acceptable values are ['win']. channel: A string representing which variant is desired. Acceptable values are ['canary', 'dev', 'beta', 'stable']. download_info: An optional string that represents the info necessary to download the correct Chrome browser version. Raises: ChromeAutomationHelperException: Raised if something went wrong retrieving information or downloading/installing of Chrome. """ logger.info('Downloading latest Chrome version information.') (url, version) = self._GetLatestChromeDownloadUrl( operating_system, channel, download_info=download_info) if self.IsChromeInstalled(): if self._GetInstalledVersion() == version: logger.info('Chrome already installed. Exiting.') return else: logger.info('Uninstalling current version of Chrome because a new ' 'version is available and will be installed.') self.UninstallChrome() logger.info('Installation of Chrome has begun.') local_file = self._DownloadLatestBuild(url, version) command = '"' + local_file + '"' + CHROME_SILIENT_INSTALL_FLAGS logger.info('Installation command: ' + command) self._ExecuteCommand(command) if not self.IsChromeInstalled(): logger.info('Chrome not installed.') self._LogAndRaise('Something went wrong, installation can not verify ' 'installation.') # Set the version of the newly installed chrome. Upon failure uninstall. 
try: self._SetInstalledVersion(version) except ChromeAutomationHelperException, exception: logger.info('Chrome not installed.') self.UninstallChrome() self._LogAndRaise(str(exception)) logger.info('Chrome installed successfully.') def IsChromeInstalled(self): """Check if Chrome is installed. Returns: True if installed False if not installed """ is_chrome_installed = False key = None try: # Check for the regkey value presence. key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, CHROME_EXE_KEY) chrome_exe_path = _winreg.QueryValueEx(key, None)[0] is_chrome_installed = True logger.info('IsInstalled: Chrome is installed at %s' % chrome_exe_path) except WindowsError: logger.info('IsInstalled: Chrome is not installed.') finally: if key: _winreg.CloseKey(key) return is_chrome_installed def UninstallChrome(self): """Silent uninstall of Chrome for all users. Raises: ChromeAutomationHelperException: Raised if something went wrong uninstalling Chrome. """ try: version = self._GetInstalledVersion() except ChromeAutomationHelperException: logger.info('No version found, nothing to uninstall.') return local_file = self._GetOrCreateFilename(version) if not os.path.exists(local_file): self._LogAndRaise('Chrome installed but no installer to use for ' 'uninstall.') logger.info('Uninstalling Chrome.') command = '"' + local_file + '"' + CHROME_SILIENT_UNINSTALL_FLAGS logger.info(command) self._ExecuteCommand(command) if self.IsChromeInstalled(): self._LogAndRaise('Failed to uninstall Chrome.') logger.info('Chrome has been successfully uninstalled.') # TODO(user): Determine if it should go here or before the # the uninstall. What is more important a spare key or a spare installed # browser? self._RemoveVersionKey() def _DownloadLatestBuild(self, url, version): """Downloads the latest build from the given url. Args: url: The url from which to download the installer. version: The version of the installer. Returns: A string specifying where the installer is located. Raises: ChromeAutomationHelperException: Raised if any of the information could not be found. """ local_file = self._GetOrCreateFilename(version) try: urllib.urlretrieve(url, local_file) except urllib.ContentTooShortError, content_exception: self._LogAndRaise('Failed to download installer. The given error is: ' + str(content_exception)) except IOError, url_exception: self._LogAndRaise('Failed to retrieve chrome installer information ' 'from ' + url + '. The given error is: ' + str(url_exception)) finally: urllib.urlcleanup() if not os.path.exists(local_file): self._LogAndRaise('Failed to download installer. File does not exist.') return local_file def _ExecuteCommand(self, command): """Executes a command on the command line. Args: command: A string representing the command to execute. Raises: ChromeAutomationHelperException: Raised if the command fails. """ try: p = subprocess.Popen(command, shell=True, stdin=subprocess.PIPE) p.stdin.close() if p.wait() != 0: self._LogAndRaise('Wait failed while executing command: ' + command) except OSError, os_error: self._LogAndRaise('An operating system error occurred with error:' + os_error) except subprocess.CalledProcessError, called_process_error: self._LogAndRaise('Executed command returned a non-zero value with ' 'error: ' + called_process_error) except ValueError, value_error: self._LogAndRaise('Invalid arguments given to the command with error: ' + value_error) # Sleep for few seconds to ensure all Window registries are updated. 
time.sleep(5) def _GetOrCreateFilename(self, version): """Creates a path to a file using the given version. In addition to generating the path, it also will create any missing folders needed by the path. Args: version: The version of chrome. Returns: A string representing the path to a specific installer file. """ local_path = os.path.join(os.path.dirname(sys.argv[0]), version) if not os.path.exists(local_path): os.mkdir(local_path) local_file = os.path.join(local_path, INSTALLER_FILENAME) return str(local_file) def _GetInstalledVersion(self): """Retrieves the version number of the currently installed Chrome. This function assumes that the installation of Chrome has already been verified. Returns: A string representing the version number. Raises: ChromeAutomationHelperException: Raised if the version could not be retrieved. """ key = None try: # Check for the regkey value presence. key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, VERSION_KEY_PATH) version = _winreg.QueryValueEx(key, VERSION_KEY)[0] return version except WindowsError: logger.error('Version not found.') return None finally: if key: _winreg.CloseKey(key) def _GetLatestChromeDownloadUrl(self, operating_system, channel, download_info=''): """Finds the url of the latest Chrome build. Using an Omaha server, retrieve a list of the current builds and extract the appropriate information. The format of each line in the downloaded file is [os, channel, version, download url]. Args: operating_system: A string representing the desired operating system for the build. Acceptable values are ['win']. channel: A string representing which variant is desired. Acceptable values are ['canary', 'dev', 'beta', 'stable']. download_info: An optional string that represents the info necessary to download the correct Chrome browser version. Returns: Returns a tuple of strings (url, version). Raises: ChromeAutomationHelperException: Raised if any of the information could not be found. """ retries = 10 response = None # Access to the url can be unstable and can potentially require a large # unknown number of retries. if download_info: response = cStringIO.StringIO(download_info) else: for retry in range(retries): try: response = urllib2.urlopen(OMAHA_URL) break except urllib2.URLError, url_exception: logger.info('Retry (' + str(retry) + ') Failed to retrieve chrome ' + 'installer information from ' + OMAHA_URL + '. The given error is: ' + str(url_exception)) if not response: self._LogAndRaise('Failed to download list of latest builds.') reader = csv.DictReader(response) for line in reader: if operating_system == line['os'] and channel == line['channel']: return (line['dl_url'], line['current_version']) self._LogAndRaise('Did not find the specified build in the list of latest ' 'builds.') def _LogAndRaise(self, message): """Logs a message and then raises an exception with the same value. Args: message: A string representing the message to log/raise. Raises: ChromeAutomationHelperException: Raised with the given message. """ logger.info(message) raise ChromeAutomationHelperException(message) def _RemoveVersionKey(self): """Removes the registry key for the version number. Raises: ChromeAutomationHelperException: Raised if the version could not be retrieved. 
""" key = None try: key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, VERSION_KEY_PATH, 0, _winreg.KEY_SET_VALUE) _winreg.DeleteValue(key, VERSION_KEY) except WindowsError: self._LogAndRaise('Version information could not be removed.') finally: if key: _winreg.CloseKey(key) def _SetInstalledVersion(self, version): """Sets the version number of the currently installed Chrome. Args: version: A string representing the version of Chrome installed. Raises: ChromeAutomationHelperException: Raised if the version could not be retrieved. """ key = None try: key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, VERSION_KEY_PATH, 0, _winreg.KEY_SET_VALUE) _winreg.SetValueEx(key, VERSION_KEY, 0, _winreg.REG_SZ, version) except WindowsError: self._LogAndRaise('Version information could not be set.') finally: if key: _winreg.CloseKey(key)
# Copyright 2012 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import calendar import datetime import iso8601 import mock from oslo.utils import timeutils import webob.exc from nova.api.openstack.compute.contrib import services as services_v2 from nova.api.openstack.compute.plugins.v3 import services as services_v21 from nova.api.openstack import extensions from nova import availability_zones from nova.compute import cells_api from nova import context from nova import db from nova import exception from nova.servicegroup.drivers import db as db_driver from nova import test from nova.tests.unit.api.openstack import fakes from nova.tests.unit.objects import test_service fake_services_list = [ dict(test_service.fake_service, binary='nova-scheduler', host='host1', id=1, disabled=True, topic='scheduler', updated_at=datetime.datetime(2012, 10, 29, 13, 42, 2), created_at=datetime.datetime(2012, 9, 18, 2, 46, 27), disabled_reason='test1'), dict(test_service.fake_service, binary='nova-compute', host='host1', id=2, disabled=True, topic='compute', updated_at=datetime.datetime(2012, 10, 29, 13, 42, 5), created_at=datetime.datetime(2012, 9, 18, 2, 46, 27), disabled_reason='test2'), dict(test_service.fake_service, binary='nova-scheduler', host='host2', id=3, disabled=False, topic='scheduler', updated_at=datetime.datetime(2012, 9, 19, 6, 55, 34), created_at=datetime.datetime(2012, 9, 18, 2, 46, 28), disabled_reason=None), dict(test_service.fake_service, binary='nova-compute', host='host2', id=4, disabled=True, topic='compute', updated_at=datetime.datetime(2012, 9, 18, 8, 3, 38), created_at=datetime.datetime(2012, 9, 18, 2, 46, 28), disabled_reason='test4'), ] class FakeRequest(object): environ = {"nova.context": context.get_admin_context()} GET = {} class FakeRequestWithService(object): environ = {"nova.context": context.get_admin_context()} GET = {"binary": "nova-compute"} class FakeRequestWithHost(object): environ = {"nova.context": context.get_admin_context()} GET = {"host": "host1"} class FakeRequestWithHostService(object): environ = {"nova.context": context.get_admin_context()} GET = {"host": "host1", "binary": "nova-compute"} def fake_service_get_all(services): def service_get_all(context, filters=None, set_zones=False): if set_zones or 'availability_zone' in filters: return availability_zones.set_availability_zones(context, services) return services return service_get_all def fake_db_api_service_get_all(context, disabled=None): return fake_services_list def fake_db_service_get_by_host_binary(services): def service_get_by_host_binary(context, host, binary): for service in services: if service['host'] == host and service['binary'] == binary: return service raise exception.HostBinaryNotFound(host=host, binary=binary) return service_get_by_host_binary def fake_service_get_by_host_binary(context, host, binary): fake = fake_db_service_get_by_host_binary(fake_services_list) return fake(context, host, binary) def _service_get_by_id(services, value): for service in services: if service['id'] == value: return 
service return None def fake_db_service_update(services): def service_update(context, service_id, values): service = _service_get_by_id(services, service_id) if service is None: raise exception.ServiceNotFound(service_id=service_id) return service return service_update def fake_service_update(context, service_id, values): fake = fake_db_service_update(fake_services_list) return fake(context, service_id, values) def fake_utcnow(): return datetime.datetime(2012, 10, 29, 13, 42, 11) fake_utcnow.override_time = None def fake_utcnow_ts(): d = fake_utcnow() return calendar.timegm(d.utctimetuple()) class ServicesTestV21(test.TestCase): service_is_up_exc = webob.exc.HTTPInternalServerError bad_request = exception.ValidationError def _set_up_controller(self): self.controller = services_v21.ServiceController() def setUp(self): super(ServicesTestV21, self).setUp() self.ext_mgr = extensions.ExtensionManager() self.ext_mgr.extensions = {} self._set_up_controller() self.stubs.Set(self.controller.host_api, "service_get_all", fake_service_get_all(fake_services_list)) self.stubs.Set(timeutils, "utcnow", fake_utcnow) self.stubs.Set(timeutils, "utcnow_ts", fake_utcnow_ts) self.stubs.Set(db, "service_get_by_args", fake_db_service_get_by_host_binary(fake_services_list)) self.stubs.Set(db, "service_update", fake_db_service_update(fake_services_list)) def _process_output(self, services, has_disabled=False, has_id=False): return services def test_services_list(self): req = FakeRequest() res_dict = self.controller.index(req) response = {'services': [ {'binary': 'nova-scheduler', 'host': 'host1', 'zone': 'internal', 'status': 'disabled', 'id': 1, 'state': 'up', 'disabled_reason': 'test1', 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2)}, {'binary': 'nova-compute', 'host': 'host1', 'zone': 'nova', 'id': 2, 'status': 'disabled', 'disabled_reason': 'test2', 'state': 'up', 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5)}, {'binary': 'nova-scheduler', 'host': 'host2', 'zone': 'internal', 'id': 3, 'status': 'enabled', 'disabled_reason': None, 'state': 'down', 'updated_at': datetime.datetime(2012, 9, 19, 6, 55, 34)}, {'binary': 'nova-compute', 'host': 'host2', 'zone': 'nova', 'id': 4, 'status': 'disabled', 'disabled_reason': 'test4', 'state': 'down', 'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38)}]} self._process_output(response) self.assertEqual(res_dict, response) def test_services_list_with_host(self): req = FakeRequestWithHost() res_dict = self.controller.index(req) response = {'services': [ {'binary': 'nova-scheduler', 'host': 'host1', 'disabled_reason': 'test1', 'id': 1, 'zone': 'internal', 'status': 'disabled', 'state': 'up', 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2)}, {'binary': 'nova-compute', 'host': 'host1', 'zone': 'nova', 'disabled_reason': 'test2', 'id': 2, 'status': 'disabled', 'state': 'up', 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5)}]} self._process_output(response) self.assertEqual(res_dict, response) def test_services_list_with_service(self): req = FakeRequestWithService() res_dict = self.controller.index(req) response = {'services': [ {'binary': 'nova-compute', 'host': 'host1', 'disabled_reason': 'test2', 'id': 2, 'zone': 'nova', 'status': 'disabled', 'state': 'up', 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5)}, {'binary': 'nova-compute', 'host': 'host2', 'zone': 'nova', 'disabled_reason': 'test4', 'id': 4, 'status': 'disabled', 'state': 'down', 'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38)}]} self._process_output(response) 
self.assertEqual(res_dict, response) def test_services_list_with_host_service(self): req = FakeRequestWithHostService() res_dict = self.controller.index(req) response = {'services': [ {'binary': 'nova-compute', 'host': 'host1', 'zone': 'nova', 'disabled_reason': 'test2', 'id': 2, 'status': 'disabled', 'state': 'up', 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5)}]} self._process_output(response) self.assertEqual(res_dict, response) def test_services_detail(self): self.ext_mgr.extensions['os-extended-services'] = True req = FakeRequest() res_dict = self.controller.index(req) response = {'services': [ {'binary': 'nova-scheduler', 'host': 'host1', 'zone': 'internal', 'status': 'disabled', 'id': 1, 'disabled_reason': 'test1', 'state': 'up', 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2), 'disabled_reason': 'test1'}, {'binary': 'nova-compute', 'host': 'host1', 'zone': 'nova', 'status': 'disabled', 'state': 'up', 'id': 2, 'disabled_reason': 'test2', 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5), 'disabled_reason': 'test2'}, {'binary': 'nova-scheduler', 'host': 'host2', 'zone': 'internal', 'status': 'enabled', 'id': 3, 'disabled_reason': 'test3', 'state': 'down', 'updated_at': datetime.datetime(2012, 9, 19, 6, 55, 34), 'disabled_reason': None}, {'binary': 'nova-compute', 'host': 'host2', 'zone': 'nova', 'id': 4, 'disabled_reason': 'test4', 'status': 'disabled', 'state': 'down', 'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38), 'disabled_reason': 'test4'}]} self._process_output(response, has_disabled=True) self.assertEqual(res_dict, response) def test_service_detail_with_host(self): self.ext_mgr.extensions['os-extended-services'] = True req = FakeRequestWithHost() res_dict = self.controller.index(req) response = {'services': [ {'binary': 'nova-scheduler', 'host': 'host1', 'zone': 'internal', 'id': 1, 'status': 'disabled', 'state': 'up', 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2), 'disabled_reason': 'test1'}, {'binary': 'nova-compute', 'host': 'host1', 'zone': 'nova', 'id': 2, 'status': 'disabled', 'state': 'up', 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5), 'disabled_reason': 'test2'}]} self._process_output(response, has_disabled=True) self.assertEqual(res_dict, response) def test_service_detail_with_service(self): self.ext_mgr.extensions['os-extended-services'] = True req = FakeRequestWithService() res_dict = self.controller.index(req) response = {'services': [ {'binary': 'nova-compute', 'host': 'host1', 'zone': 'nova', 'id': 2, 'disabled_reason': 'test2', 'status': 'disabled', 'state': 'up', 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5), 'disabled_reason': 'test2'}, {'binary': 'nova-compute', 'host': 'host2', 'id': 4, 'disabled_reason': 'test4', 'zone': 'nova', 'status': 'disabled', 'state': 'down', 'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38), 'disabled_reason': 'test4'}]} self._process_output(response, has_disabled=True) self.assertEqual(res_dict, response) def test_service_detail_with_host_service(self): self.ext_mgr.extensions['os-extended-services'] = True req = FakeRequestWithHostService() res_dict = self.controller.index(req) response = {'services': [ {'binary': 'nova-compute', 'host': 'host1', 'zone': 'nova', 'status': 'disabled', 'id': 2, 'state': 'up', 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5), 'disabled_reason': 'test2'}]} self._process_output(response, has_disabled=True) self.assertEqual(res_dict, response) def test_services_detail_with_delete_extension(self): 
self.ext_mgr.extensions['os-extended-services-delete'] = True req = FakeRequest() res_dict = self.controller.index(req) response = {'services': [ {'binary': 'nova-scheduler', 'host': 'host1', 'id': 1, 'zone': 'internal', 'disabled_reason': 'test1', 'status': 'disabled', 'state': 'up', 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2)}, {'binary': 'nova-compute', 'host': 'host1', 'id': 2, 'zone': 'nova', 'disabled_reason': 'test2', 'status': 'disabled', 'state': 'up', 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5)}, {'binary': 'nova-scheduler', 'host': 'host2', 'disabled_reason': None, 'id': 3, 'zone': 'internal', 'status': 'enabled', 'state': 'down', 'updated_at': datetime.datetime(2012, 9, 19, 6, 55, 34)}, {'binary': 'nova-compute', 'host': 'host2', 'id': 4, 'disabled_reason': 'test4', 'zone': 'nova', 'status': 'disabled', 'state': 'down', 'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38)}]} self._process_output(response, has_id=True) self.assertEqual(res_dict, response) def test_services_enable(self): def _service_update(context, service_id, values): self.assertIsNone(values['disabled_reason']) return dict(test_service.fake_service, id=service_id, **values) self.stubs.Set(db, "service_update", _service_update) body = {'host': 'host1', 'binary': 'nova-compute'} req = fakes.HTTPRequest.blank('/v2/fake/os-services/enable') res_dict = self.controller.update(req, "enable", body=body) self.assertEqual(res_dict['service']['status'], 'enabled') self.assertNotIn('disabled_reason', res_dict['service']) def test_services_enable_with_invalid_host(self): body = {'host': 'invalid', 'binary': 'nova-compute'} req = fakes.HTTPRequest.blank('/v2/fake/os-services/enable') self.assertRaises(webob.exc.HTTPNotFound, self.controller.update, req, "enable", body=body) def test_services_enable_with_invalid_binary(self): body = {'host': 'host1', 'binary': 'invalid'} req = fakes.HTTPRequest.blank('/v2/fake/os-services/enable') self.assertRaises(webob.exc.HTTPNotFound, self.controller.update, req, "enable", body=body) def test_services_disable(self): req = fakes.HTTPRequest.blank('/v2/fake/os-services/disable') body = {'host': 'host1', 'binary': 'nova-compute'} res_dict = self.controller.update(req, "disable", body=body) self.assertEqual(res_dict['service']['status'], 'disabled') self.assertNotIn('disabled_reason', res_dict['service']) def test_services_disable_with_invalid_host(self): body = {'host': 'invalid', 'binary': 'nova-compute'} req = fakes.HTTPRequest.blank('/v2/fake/os-services/disable') self.assertRaises(webob.exc.HTTPNotFound, self.controller.update, req, "disable", body=body) def test_services_disable_with_invalid_binary(self): body = {'host': 'host1', 'binary': 'invalid'} req = fakes.HTTPRequest.blank('/v2/fake/os-services/disable') self.assertRaises(webob.exc.HTTPNotFound, self.controller.update, req, "disable", body=body) def test_services_disable_log_reason(self): self.ext_mgr.extensions['os-extended-services'] = True req = \ fakes.HTTPRequest.blank('/v2/fake/os-services/disable-log-reason') body = {'host': 'host1', 'binary': 'nova-compute', 'disabled_reason': 'test-reason', } res_dict = self.controller.update(req, "disable-log-reason", body=body) self.assertEqual(res_dict['service']['status'], 'disabled') self.assertEqual(res_dict['service']['disabled_reason'], 'test-reason') def test_mandatory_reason_field(self): self.ext_mgr.extensions['os-extended-services'] = True req = \ fakes.HTTPRequest.blank('/v2/fake/os-services/disable-log-reason') body = {'host': 'host1', 'binary': 
'nova-compute', } self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, req, "disable-log-reason", body=body) def test_invalid_reason_field(self): self.ext_mgr.extensions['os-extended-services'] = True url = '/v2/fake/os-services/disable-log-reason' req = fakes.HTTPRequest.blank(url) reason = 'a' * 256 body = {'host': 'host1', 'binary': 'nova-compute', 'disabled_reason': reason, } self.assertRaises(self.bad_request, self.controller.update, req, "disable-log-reason", body=body) def test_services_delete(self): self.ext_mgr.extensions['os-extended-services-delete'] = True request = fakes.HTTPRequest.blank('/v2/fakes/os-services/1', use_admin_context=True) request.method = 'DELETE' with mock.patch.object(self.controller.host_api, 'service_delete') as service_delete: self.controller.delete(request, '1') service_delete.assert_called_once_with( request.environ['nova.context'], '1') self.assertEqual(self.controller.delete.wsgi_code, 204) def test_services_delete_not_found(self): self.ext_mgr.extensions['os-extended-services-delete'] = True request = fakes.HTTPRequest.blank('/v2/fakes/os-services/abc', use_admin_context=True) request.method = 'DELETE' self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete, request, 'abc') # This test is just to verify that the servicegroup API gets used when # calling the API def test_services_with_exception(self): def dummy_is_up(self, dummy): raise KeyError() self.stubs.Set(db_driver.DbDriver, 'is_up', dummy_is_up) req = FakeRequestWithHostService() self.assertRaises(self.service_is_up_exc, self.controller.index, req) class ServicesTestV20(ServicesTestV21): service_is_up_exc = KeyError bad_request = webob.exc.HTTPBadRequest def _set_up_controller(self): self.controller = services_v2.ServiceController(self.ext_mgr) def test_services_delete_not_enabled(self): request = fakes.HTTPRequest.blank('v2/fakes/os-services/300', use_admin_context=True) request.method = 'DELETE' self.assertRaises(webob.exc.HTTPMethodNotAllowed, self.controller.delete, request, '300') def _process_output(self, services, has_disabled=False, has_id=False): for service in services['services']: if not has_disabled: service.pop('disabled_reason') if not has_id: service.pop('id') return services class ServicesCellsTestV21(test.TestCase): def setUp(self): super(ServicesCellsTestV21, self).setUp() host_api = cells_api.HostAPI() self.ext_mgr = extensions.ExtensionManager() self.ext_mgr.extensions = {} self._set_up_controller() self.controller.host_api = host_api self.stubs.Set(timeutils, "utcnow", fake_utcnow) self.stubs.Set(timeutils, "utcnow_ts", fake_utcnow_ts) services_list = [] for service in fake_services_list: service = service.copy() service['id'] = 'cell1@%d' % service['id'] services_list.append(service) self.stubs.Set(host_api.cells_rpcapi, "service_get_all", fake_service_get_all(services_list)) def _set_up_controller(self): self.controller = services_v21.ServiceController() def _process_out(self, res_dict): for res in res_dict['services']: res.pop('disabled_reason') def test_services_detail(self): self.ext_mgr.extensions['os-extended-services-delete'] = True req = FakeRequest() res_dict = self.controller.index(req) utc = iso8601.iso8601.Utc() response = {'services': [ {'id': 'cell1@1', 'binary': 'nova-scheduler', 'host': 'host1', 'zone': 'internal', 'status': 'disabled', 'state': 'up', 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2, tzinfo=utc)}, {'id': 'cell1@2', 'binary': 'nova-compute', 'host': 'host1', 'zone': 'nova', 'status': 'disabled', 'state': 'up', 
'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5, tzinfo=utc)}, {'id': 'cell1@3', 'binary': 'nova-scheduler', 'host': 'host2', 'zone': 'internal', 'status': 'enabled', 'state': 'down', 'updated_at': datetime.datetime(2012, 9, 19, 6, 55, 34, tzinfo=utc)}, {'id': 'cell1@4', 'binary': 'nova-compute', 'host': 'host2', 'zone': 'nova', 'status': 'disabled', 'state': 'down', 'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38, tzinfo=utc)}]} self._process_out(res_dict) self.assertEqual(res_dict, response) class ServicesCellsTestV20(ServicesCellsTestV21): def _set_up_controller(self): self.controller = services_v2.ServiceController(self.ext_mgr) def _process_out(self, res_dict): pass
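# --- Illustrative sketch (not part of the test cases above) ---
# The 'state' values expected by these tests follow from comparing each
# service's updated_at heartbeat against the stubbed fake_utcnow()
# (2012-10-29 13:42:11).  A minimal sketch of that check, assuming the
# default 60-second service_down_time used by nova's DB servicegroup
# driver:
def _expected_service_state(updated_at, now, down_time=60):
    """Return 'up' when the last heartbeat is recent enough, else 'down'."""
    return 'up' if (now - updated_at).total_seconds() <= down_time else 'down'

# host1 services reported seconds before fake_utcnow() -> 'up';
# host2 services last reported in September 2012 -> 'down', e.g.:
# _expected_service_state(datetime.datetime(2012, 10, 29, 13, 42, 5),
#                         fake_utcnow()) == 'up'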
import numpy as np from nose.tools import assert_equal from sklearn.cross_decomposition import pls_, CCA from sklearn.datasets import load_linnerud from sklearn.utils.testing import (assert_array_almost_equal, assert_array_equal, assert_true, assert_raise_message) def test_pls(): d = load_linnerud() X = d.data Y = d.target # 1) Canonical (symmetric) PLS (PLS 2 blocks canonical mode A) # =========================================================== # Compare 2 algo.: nipals vs. svd # ------------------------------ pls_bynipals = pls_.PLSCanonical(n_components=X.shape[1]) pls_bynipals.fit(X, Y) pls_bysvd = pls_.PLSCanonical(algorithm="svd", n_components=X.shape[1]) pls_bysvd.fit(X, Y) # check equalities of loading (up to the sign of the second column) assert_array_almost_equal( pls_bynipals.x_loadings_, pls_bysvd.x_loadings_, decimal=5, err_msg="nipals and svd implementations lead to different x loadings") assert_array_almost_equal( pls_bynipals.y_loadings_, pls_bysvd.y_loadings_, decimal=5, err_msg="nipals and svd implementations lead to different y loadings") # Check PLS properties (with n_components=X.shape[1]) # --------------------------------------------------- plsca = pls_.PLSCanonical(n_components=X.shape[1]) plsca.fit(X, Y) T = plsca.x_scores_ P = plsca.x_loadings_ Wx = plsca.x_weights_ U = plsca.y_scores_ Q = plsca.y_loadings_ Wy = plsca.y_weights_ def check_ortho(M, err_msg): K = np.dot(M.T, M) assert_array_almost_equal(K, np.diag(np.diag(K)), err_msg=err_msg) # Orthogonality of weights # ~~~~~~~~~~~~~~~~~~~~~~~~ check_ortho(Wx, "x weights are not orthogonal") check_ortho(Wy, "y weights are not orthogonal") # Orthogonality of latent scores # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ check_ortho(T, "x scores are not orthogonal") check_ortho(U, "y scores are not orthogonal") # Check X = TP' and Y = UQ' (with (p == q) components) # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # center scale X, Y Xc, Yc, x_mean, y_mean, x_std, y_std = \ pls_._center_scale_xy(X.copy(), Y.copy(), scale=True) assert_array_almost_equal(Xc, np.dot(T, P.T), err_msg="X != TP'") assert_array_almost_equal(Yc, np.dot(U, Q.T), err_msg="Y != UQ'") # Check that rotations on training data lead to scores # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Xr = plsca.transform(X) assert_array_almost_equal(Xr, plsca.x_scores_, err_msg="rotation on X failed") Xr, Yr = plsca.transform(X, Y) assert_array_almost_equal(Xr, plsca.x_scores_, err_msg="rotation on X failed") assert_array_almost_equal(Yr, plsca.y_scores_, err_msg="rotation on Y failed") # "Non regression test" on canonical PLS # -------------------------------------- # The results were checked against the R-package plspm pls_ca = pls_.PLSCanonical(n_components=X.shape[1]) pls_ca.fit(X, Y) x_weights = np.array( [[-0.61330704, 0.25616119, -0.74715187], [-0.74697144, 0.11930791, 0.65406368], [-0.25668686, -0.95924297, -0.11817271]]) # x_weights_sign_flip holds columns of 1 or -1, depending on sign flip # between R and python x_weights_sign_flip = pls_ca.x_weights_ / x_weights x_rotations = np.array( [[-0.61330704, 0.41591889, -0.62297525], [-0.74697144, 0.31388326, 0.77368233], [-0.25668686, -0.89237972, -0.24121788]]) x_rotations_sign_flip = pls_ca.x_rotations_ / x_rotations y_weights = np.array( [[+0.58989127, 0.7890047, 0.1717553], [+0.77134053, -0.61351791, 0.16920272], [-0.23887670, -0.03267062, 0.97050016]]) y_weights_sign_flip = pls_ca.y_weights_ / y_weights y_rotations = np.array( [[+0.58989127, 0.7168115, 0.30665872], [+0.77134053, -0.70791757, 0.19786539], 
[-0.23887670, -0.00343595, 0.94162826]]) y_rotations_sign_flip = pls_ca.y_rotations_ / y_rotations # x_weights = X.dot(x_rotation) # Hence R/python sign flip should be the same in x_weight and x_rotation assert_array_almost_equal(x_rotations_sign_flip, x_weights_sign_flip) # This test that R / python give the same result up to column # sign indeterminacy assert_array_almost_equal(np.abs(x_rotations_sign_flip), 1, 4) assert_array_almost_equal(np.abs(x_weights_sign_flip), 1, 4) assert_array_almost_equal(y_rotations_sign_flip, y_weights_sign_flip) assert_array_almost_equal(np.abs(y_rotations_sign_flip), 1, 4) assert_array_almost_equal(np.abs(y_weights_sign_flip), 1, 4) # 2) Regression PLS (PLS2): "Non regression test" # =============================================== # The results were checked against the R-packages plspm, misOmics and pls pls_2 = pls_.PLSRegression(n_components=X.shape[1]) pls_2.fit(X, Y) x_weights = np.array( [[-0.61330704, -0.00443647, 0.78983213], [-0.74697144, -0.32172099, -0.58183269], [-0.25668686, 0.94682413, -0.19399983]]) x_weights_sign_flip = pls_2.x_weights_ / x_weights x_loadings = np.array( [[-0.61470416, -0.24574278, 0.78983213], [-0.65625755, -0.14396183, -0.58183269], [-0.51733059, 1.00609417, -0.19399983]]) x_loadings_sign_flip = pls_2.x_loadings_ / x_loadings y_weights = np.array( [[+0.32456184, 0.29892183, 0.20316322], [+0.42439636, 0.61970543, 0.19320542], [-0.13143144, -0.26348971, -0.17092916]]) y_weights_sign_flip = pls_2.y_weights_ / y_weights y_loadings = np.array( [[+0.32456184, 0.29892183, 0.20316322], [+0.42439636, 0.61970543, 0.19320542], [-0.13143144, -0.26348971, -0.17092916]]) y_loadings_sign_flip = pls_2.y_loadings_ / y_loadings # x_loadings[:, i] = Xi.dot(x_weights[:, i]) \forall i assert_array_almost_equal(x_loadings_sign_flip, x_weights_sign_flip, 4) assert_array_almost_equal(np.abs(x_loadings_sign_flip), 1, 4) assert_array_almost_equal(np.abs(x_weights_sign_flip), 1, 4) assert_array_almost_equal(y_loadings_sign_flip, y_weights_sign_flip, 4) assert_array_almost_equal(np.abs(y_loadings_sign_flip), 1, 4) assert_array_almost_equal(np.abs(y_weights_sign_flip), 1, 4) # 3) Another non-regression test of Canonical PLS on random dataset # ================================================================= # The results were checked against the R-package plspm n = 500 p_noise = 10 q_noise = 5 # 2 latents vars: np.random.seed(11) l1 = np.random.normal(size=n) l2 = np.random.normal(size=n) latents = np.array([l1, l1, l2, l2]).T X = latents + np.random.normal(size=4 * n).reshape((n, 4)) Y = latents + np.random.normal(size=4 * n).reshape((n, 4)) X = np.concatenate( (X, np.random.normal(size=p_noise * n).reshape(n, p_noise)), axis=1) Y = np.concatenate( (Y, np.random.normal(size=q_noise * n).reshape(n, q_noise)), axis=1) np.random.seed(None) pls_ca = pls_.PLSCanonical(n_components=3) pls_ca.fit(X, Y) x_weights = np.array( [[0.65803719, 0.19197924, 0.21769083], [0.7009113, 0.13303969, -0.15376699], [0.13528197, -0.68636408, 0.13856546], [0.16854574, -0.66788088, -0.12485304], [-0.03232333, -0.04189855, 0.40690153], [0.1148816, -0.09643158, 0.1613305], [0.04792138, -0.02384992, 0.17175319], [-0.06781, -0.01666137, -0.18556747], [-0.00266945, -0.00160224, 0.11893098], [-0.00849528, -0.07706095, 0.1570547], [-0.00949471, -0.02964127, 0.34657036], [-0.03572177, 0.0945091, 0.3414855], [0.05584937, -0.02028961, -0.57682568], [0.05744254, -0.01482333, -0.17431274]]) x_weights_sign_flip = pls_ca.x_weights_ / x_weights x_loadings = np.array( [[0.65649254, 
0.1847647, 0.15270699], [0.67554234, 0.15237508, -0.09182247], [0.19219925, -0.67750975, 0.08673128], [0.2133631, -0.67034809, -0.08835483], [-0.03178912, -0.06668336, 0.43395268], [0.15684588, -0.13350241, 0.20578984], [0.03337736, -0.03807306, 0.09871553], [-0.06199844, 0.01559854, -0.1881785], [0.00406146, -0.00587025, 0.16413253], [-0.00374239, -0.05848466, 0.19140336], [0.00139214, -0.01033161, 0.32239136], [-0.05292828, 0.0953533, 0.31916881], [0.04031924, -0.01961045, -0.65174036], [0.06172484, -0.06597366, -0.1244497]]) x_loadings_sign_flip = pls_ca.x_loadings_ / x_loadings y_weights = np.array( [[0.66101097, 0.18672553, 0.22826092], [0.69347861, 0.18463471, -0.23995597], [0.14462724, -0.66504085, 0.17082434], [0.22247955, -0.6932605, -0.09832993], [0.07035859, 0.00714283, 0.67810124], [0.07765351, -0.0105204, -0.44108074], [-0.00917056, 0.04322147, 0.10062478], [-0.01909512, 0.06182718, 0.28830475], [0.01756709, 0.04797666, 0.32225745]]) y_weights_sign_flip = pls_ca.y_weights_ / y_weights y_loadings = np.array( [[0.68568625, 0.1674376, 0.0969508], [0.68782064, 0.20375837, -0.1164448], [0.11712173, -0.68046903, 0.12001505], [0.17860457, -0.6798319, -0.05089681], [0.06265739, -0.0277703, 0.74729584], [0.0914178, 0.00403751, -0.5135078], [-0.02196918, -0.01377169, 0.09564505], [-0.03288952, 0.09039729, 0.31858973], [0.04287624, 0.05254676, 0.27836841]]) y_loadings_sign_flip = pls_ca.y_loadings_ / y_loadings assert_array_almost_equal(x_loadings_sign_flip, x_weights_sign_flip, 4) assert_array_almost_equal(np.abs(x_weights_sign_flip), 1, 4) assert_array_almost_equal(np.abs(x_loadings_sign_flip), 1, 4) assert_array_almost_equal(y_loadings_sign_flip, y_weights_sign_flip, 4) assert_array_almost_equal(np.abs(y_weights_sign_flip), 1, 4) assert_array_almost_equal(np.abs(y_loadings_sign_flip), 1, 4) # Orthogonality of weights # ~~~~~~~~~~~~~~~~~~~~~~~~ check_ortho(pls_ca.x_weights_, "x weights are not orthogonal") check_ortho(pls_ca.y_weights_, "y weights are not orthogonal") # Orthogonality of latent scores # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ check_ortho(pls_ca.x_scores_, "x scores are not orthogonal") check_ortho(pls_ca.y_scores_, "y scores are not orthogonal") def test_PLSSVD(): # Let's check the PLSSVD doesn't return all possible component but just # the specified number d = load_linnerud() X = d.data Y = d.target n_components = 2 for clf in [pls_.PLSSVD, pls_.PLSRegression, pls_.PLSCanonical]: pls = clf(n_components=n_components) pls.fit(X, Y) assert_equal(n_components, pls.y_scores_.shape[1]) def test_univariate_pls_regression(): # Ensure 1d Y is correctly interpreted d = load_linnerud() X = d.data Y = d.target clf = pls_.PLSRegression() # Compare 1d to column vector model1 = clf.fit(X, Y[:, 0]).coef_ model2 = clf.fit(X, Y[:, :1]).coef_ assert_array_almost_equal(model1, model2) def test_predict_transform_copy(): # check that the "copy" keyword works d = load_linnerud() X = d.data Y = d.target clf = pls_.PLSCanonical() X_copy = X.copy() Y_copy = Y.copy() clf.fit(X, Y) # check that results are identical with copy assert_array_almost_equal(clf.predict(X), clf.predict(X.copy(), copy=False)) assert_array_almost_equal(clf.transform(X), clf.transform(X.copy(), copy=False)) # check also if passing Y assert_array_almost_equal(clf.transform(X, Y), clf.transform(X.copy(), Y.copy(), copy=False)) # check that copy doesn't destroy # we do want to check exact equality here assert_array_equal(X_copy, X) assert_array_equal(Y_copy, Y) # also check that mean wasn't zero before (to make sure we didn't touch 
it) assert_true(np.all(X.mean(axis=0) != 0)) def test_scale_and_stability(): # We test scale=True parameter # This allows to check numerical stability over platforms as well d = load_linnerud() X1 = d.data Y1 = d.target # causes X[:, -1].std() to be zero X1[:, -1] = 1.0 # From bug #2821 # Test with X2, T2 s.t. clf.x_score[:, 1] == 0, clf.y_score[:, 1] == 0 # This test robustness of algorithm when dealing with value close to 0 X2 = np.array([[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [3., 5., 4.]]) Y2 = np.array([[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]) for (X, Y) in [(X1, Y1), (X2, Y2)]: X_std = X.std(axis=0, ddof=1) X_std[X_std == 0] = 1 Y_std = Y.std(axis=0, ddof=1) Y_std[Y_std == 0] = 1 X_s = (X - X.mean(axis=0)) / X_std Y_s = (Y - Y.mean(axis=0)) / Y_std for clf in [CCA(), pls_.PLSCanonical(), pls_.PLSRegression(), pls_.PLSSVD()]: clf.set_params(scale=True) X_score, Y_score = clf.fit_transform(X, Y) clf.set_params(scale=False) X_s_score, Y_s_score = clf.fit_transform(X_s, Y_s) assert_array_almost_equal(X_s_score, X_score) assert_array_almost_equal(Y_s_score, Y_score) # Scaling should be idempotent clf.set_params(scale=True) X_score, Y_score = clf.fit_transform(X_s, Y_s) assert_array_almost_equal(X_s_score, X_score) assert_array_almost_equal(Y_s_score, Y_score) def test_pls_errors(): d = load_linnerud() X = d.data Y = d.target for clf in [pls_.PLSCanonical(), pls_.PLSRegression(), pls_.PLSSVD()]: clf.n_components = 4 assert_raise_message(ValueError, "Invalid number of components", clf.fit, X, Y)
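# --- Illustrative sketch of the column-sign check used above ---
# PLS weights and rotations are only defined up to the sign of each
# component, so the non-regression tests divide the fitted matrices by the
# reference values from R and assert the ratio is uniformly +1 or -1 per
# column.  A minimal, self-contained version of that idea (the example
# matrix is made up for illustration, not taken from the R package):
def check_same_up_to_column_sign(A, B, decimal=4):
    ratio = A / B
    # every entry of the ratio should be close to +1 or -1 ...
    assert_array_almost_equal(np.abs(ratio), 1, decimal)
    # ... and the sign should be constant within each column
    assert_array_almost_equal(ratio,
                              np.sign(ratio[:1]) * np.ones_like(ratio),
                              decimal)

# e.g. a copy of a matrix with one column flipped still passes:
# M = np.arange(1.0, 7.0).reshape(3, 2)
# check_same_up_to_column_sign(M * np.array([1.0, -1.0]), M)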
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2012 Grid Dynamics # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import contextlib import os from oslo.config import cfg from nova.openstack.common import excutils from nova.openstack.common import fileutils from nova.openstack.common import lockutils from nova.openstack.common import log as logging from nova import utils from nova.virt.disk import api as disk from nova.virt import images from nova.virt.libvirt import config as vconfig from nova.virt.libvirt import utils as libvirt_utils __imagebackend_opts = [ cfg.StrOpt('libvirt_images_type', default='default', help='VM Images format. Acceptable values are: raw, qcow2, lvm,' ' default. If default is specified,' ' then use_cow_images flag is used instead of this one.'), cfg.StrOpt('libvirt_images_volume_group', default=None, help='LVM Volume Group that is used for VM images, when you' ' specify libvirt_images_type=lvm.'), cfg.BoolOpt('libvirt_sparse_logical_volumes', default=False, help='Create sparse logical volumes (with virtualsize)' ' if this flag is set to True.'), cfg.IntOpt('libvirt_lvm_snapshot_size', default=1000, help='The amount of storage (in megabytes) to allocate for LVM' ' snapshot copy-on-write blocks.'), ] CONF = cfg.CONF CONF.register_opts(__imagebackend_opts) CONF.import_opt('base_dir_name', 'nova.virt.libvirt.imagecache') CONF.import_opt('preallocate_images', 'nova.virt.driver') LOG = logging.getLogger(__name__) class Image(object): __metaclass__ = abc.ABCMeta def __init__(self, source_type, driver_format, is_block_dev=False): """Image initialization. :source_type: block or file :driver_format: raw or qcow2 :is_block_dev: """ self.source_type = source_type self.driver_format = driver_format self.is_block_dev = is_block_dev self.preallocate = False # NOTE(mikal): We need a lock directory which is shared along with # instance files, to cover the scenario where multiple compute nodes # are trying to create a base file at the same time self.lock_path = os.path.join(CONF.instances_path, 'locks') @abc.abstractmethod def create_image(self, prepare_template, base, size, *args, **kwargs): """Create image from template. Contains specific behavior for each image type. :prepare_template: function, that creates template. Should accept `target` argument. :base: Template name :size: Size of created image in bytes """ pass def libvirt_info(self, disk_bus, disk_dev, device_type, cache_mode, extra_specs): """Get `LibvirtConfigGuestDisk` filled for this image. :disk_dev: Disk bus device name :disk_bus: Disk bus type :device_type: Device type for this image. :cache_mode: Caching mode for this image :extra_specs: Instance type extra specs dict. 
""" info = vconfig.LibvirtConfigGuestDisk() info.source_type = self.source_type info.source_device = device_type info.target_bus = disk_bus info.target_dev = disk_dev info.driver_cache = cache_mode info.driver_format = self.driver_format driver_name = libvirt_utils.pick_disk_driver_name(self.is_block_dev) info.driver_name = driver_name info.source_path = self.path tune_items = ['disk_read_bytes_sec', 'disk_read_iops_sec', 'disk_write_bytes_sec', 'disk_write_iops_sec', 'disk_total_bytes_sec', 'disk_total_iops_sec'] # Note(yaguang): Currently, the only tuning available is Block I/O # throttling for qemu. if self.source_type in ['file', 'block']: for key, value in extra_specs.iteritems(): if key in tune_items: setattr(info, key, value) return info def cache(self, fetch_func, filename, size=None, *args, **kwargs): """Creates image from template. Ensures that template and image not already exists. Ensures that base directory exists. Synchronizes on template fetching. :fetch_func: Function that creates the base image Should accept `target` argument. :filename: Name of the file in the image directory :size: Size of created image in bytes (optional) """ @lockutils.synchronized(filename, 'nova-', external=True, lock_path=self.lock_path) def call_if_not_exists(target, *args, **kwargs): if not os.path.exists(target): fetch_func(target=target, *args, **kwargs) base_dir = os.path.join(CONF.instances_path, CONF.base_dir_name) if not os.path.exists(base_dir): fileutils.ensure_tree(base_dir) base = os.path.join(base_dir, filename) if not os.path.exists(self.path) or not os.path.exists(base): self.create_image(call_if_not_exists, base, size, *args, **kwargs) if size and self.preallocate and self._can_fallocate(): utils.execute('fallocate', '-n', '-l', size, self.path) def _can_fallocate(self): """Check once per class, whether fallocate(1) is available, and that the instances directory supports fallocate(2). 
""" can_fallocate = getattr(self.__class__, 'can_fallocate', None) if can_fallocate is None: _out, err = utils.trycmd('fallocate', '-n', '-l', '1', self.path + '.fallocate_test') utils.delete_if_exists(self.path + '.fallocate_test') can_fallocate = not err self.__class__.can_fallocate = can_fallocate if not can_fallocate: LOG.error('Unable to preallocate_images=%s at path: %s' % (CONF.preallocate_images, self.path)) return can_fallocate def snapshot_create(self): raise NotImplementedError def snapshot_extract(self, target, out_format): raise NotImplementedError def snapshot_delete(self): raise NotImplementedError class Raw(Image): def __init__(self, instance=None, disk_name=None, path=None, snapshot_name=None): super(Raw, self).__init__("file", "raw", is_block_dev=False) self.path = (path or os.path.join(libvirt_utils.get_instance_path(instance), disk_name)) self.snapshot_name = snapshot_name self.preallocate = CONF.preallocate_images != 'none' def create_image(self, prepare_template, base, size, *args, **kwargs): @lockutils.synchronized(base, 'nova-', external=True, lock_path=self.lock_path) def copy_raw_image(base, target, size): libvirt_utils.copy_image(base, target) if size: disk.extend(target, size) generating = 'image_id' not in kwargs if generating: #Generating image in place prepare_template(target=self.path, *args, **kwargs) else: prepare_template(target=base, *args, **kwargs) if not os.path.exists(self.path): with utils.remove_path_on_error(self.path): copy_raw_image(base, self.path, size) def snapshot_create(self): pass def snapshot_extract(self, target, out_format): images.convert_image(self.path, target, out_format) def snapshot_delete(self): pass class Qcow2(Image): def __init__(self, instance=None, disk_name=None, path=None, snapshot_name=None): super(Qcow2, self).__init__("file", "qcow2", is_block_dev=False) self.path = (path or os.path.join(libvirt_utils.get_instance_path(instance), disk_name)) self.snapshot_name = snapshot_name self.preallocate = CONF.preallocate_images != 'none' def create_image(self, prepare_template, base, size, *args, **kwargs): @lockutils.synchronized(base, 'nova-', external=True, lock_path=self.lock_path) def copy_qcow2_image(base, target, size): # TODO(pbrady): Consider copying the cow image here # with preallocation=metadata set for performance reasons. # This would be keyed on a 'preallocate_images' setting. 
libvirt_utils.create_cow_image(base, target) if size: disk.extend(target, size) if not os.path.exists(base): prepare_template(target=base, *args, **kwargs) if not os.path.exists(self.path): with utils.remove_path_on_error(self.path): copy_qcow2_image(base, self.path, size) def snapshot_create(self): libvirt_utils.create_snapshot(self.path, self.snapshot_name) def snapshot_extract(self, target, out_format): libvirt_utils.extract_snapshot(self.path, 'qcow2', self.snapshot_name, target, out_format) def snapshot_delete(self): libvirt_utils.delete_snapshot(self.path, self.snapshot_name) class Lvm(Image): @staticmethod def escape(filename): return filename.replace('_', '__') def __init__(self, instance=None, disk_name=None, path=None, snapshot_name=None): super(Lvm, self).__init__("block", "raw", is_block_dev=True) if path: info = libvirt_utils.logical_volume_info(path) self.vg = info['VG'] self.lv = info['LV'] self.path = path else: if not CONF.libvirt_images_volume_group: raise RuntimeError(_('You should specify' ' libvirt_images_volume_group' ' flag to use LVM images.')) self.vg = CONF.libvirt_images_volume_group self.lv = '%s_%s' % (self.escape(instance['name']), self.escape(disk_name)) self.path = os.path.join('/dev', self.vg, self.lv) # TODO(pbrady): possibly deprecate libvirt_sparse_logical_volumes # for the more general preallocate_images self.sparse = CONF.libvirt_sparse_logical_volumes self.preallocate = not self.sparse if snapshot_name: self.snapshot_name = snapshot_name self.snapshot_path = os.path.join('/dev', self.vg, self.snapshot_name) def _can_fallocate(self): return False def create_image(self, prepare_template, base, size, *args, **kwargs): @lockutils.synchronized(base, 'nova-', external=True, lock_path=self.lock_path) def create_lvm_image(base, size): base_size = disk.get_disk_size(base) resize = size > base_size size = size if resize else base_size libvirt_utils.create_lvm_image(self.vg, self.lv, size, sparse=self.sparse) images.convert_image(base, self.path, 'raw', run_as_root=True) if resize: disk.resize2fs(self.path, run_as_root=True) generated = 'ephemeral_size' in kwargs #Generate images with specified size right on volume if generated and size: libvirt_utils.create_lvm_image(self.vg, self.lv, size, sparse=self.sparse) with self.remove_volume_on_error(self.path): prepare_template(target=self.path, *args, **kwargs) else: prepare_template(target=base, *args, **kwargs) with self.remove_volume_on_error(self.path): create_lvm_image(base, size) @contextlib.contextmanager def remove_volume_on_error(self, path): try: yield except Exception: with excutils.save_and_reraise_exception(): libvirt_utils.remove_logical_volumes(path) def snapshot_create(self): size = CONF.libvirt_lvm_snapshot_size cmd = ('lvcreate', '-L', size, '-s', '--name', self.snapshot_name, self.path) libvirt_utils.execute(*cmd, run_as_root=True, attempts=3) def snapshot_extract(self, target, out_format): images.convert_image(self.snapshot_path, target, out_format, run_as_root=True) def snapshot_delete(self): # NOTE (rmk): Snapshot volumes are automatically zeroed by LVM cmd = ('lvremove', '-f', self.snapshot_path) libvirt_utils.execute(*cmd, run_as_root=True, attempts=3) class Backend(object): def __init__(self, use_cow): self.BACKEND = { 'raw': Raw, 'qcow2': Qcow2, 'lvm': Lvm, 'default': Qcow2 if use_cow else Raw } def backend(self, image_type=None): if not image_type: image_type = CONF.libvirt_images_type image = self.BACKEND.get(image_type) if not image: raise RuntimeError(_('Unknown image_type=%s') % 
                               image_type)
        return image

    def image(self, instance, disk_name, image_type=None):
        """Constructs an image object for the selected backend.

        :instance: Instance the disk image belongs to.
        :disk_name: Name of the disk image.
        :image_type: Image type.
                     Optional, is CONF.libvirt_images_type by default.
        """
        backend = self.backend(image_type)
        return backend(instance=instance, disk_name=disk_name)

    def snapshot(self, disk_path, snapshot_name, image_type=None):
        """Returns a snapshot object for the given image path.

        :disk_path: path to the image
        :snapshot_name: snapshot name
        :image_type: type of image
        """
        backend = self.backend(image_type)
        return backend(path=disk_path, snapshot_name=snapshot_name)
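# --- Illustrative sketch (not part of the driver) ---
# Backend maps the configured libvirt_images_type string onto one of the
# Image subclasses above; 'default' resolves to Qcow2 or Raw depending on
# the use_cow_images flag.  A minimal sketch of how a caller picks a
# backend class (the assertions only exercise the mapping defined above):
def _example_backend_selection(use_cow=True):
    backend = Backend(use_cow=use_cow)
    assert backend.backend('qcow2') is Qcow2
    assert backend.backend('lvm') is Lvm
    # 'default' follows the use_cow flag
    assert backend.backend('default') is (Qcow2 if use_cow else Raw)
    # backend.image(instance, 'disk.local') would then instantiate the
    # selected class, and .cache(fetch_func, filename, size) materialises
    # the per-instance disk from the shared base template.
    return backend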
from importlib import import_module from itertools import islice from time import sleep from selenium.common.exceptions import StaleElementReferenceException import nerodia from nerodia.exception import LocatorException from nerodia.js_snippet import JSSnippet from .locators.class_helpers import ClassHelpers class ElementCollection(ClassHelpers, JSSnippet): _selector_builder = None _element_matcher = None _locator = None def __init__(self, query_scope, selector): self.query_scope = query_scope self.selector = selector self.generator = () self._els = [] if 'element' not in self.selector: self.build() def __iter__(self): """ Yields each element in collection :rtype: iter :Example: divs = browser.divs(class='kls') for div in divs: print(div.text) """ from .elements.html_elements import HTMLElement from .elements.input import Input dic = {} for idx, (el, tag_name) in enumerate(self._elements_with_tags): selector = self.selector.copy() if idx != 0: selector['index'] = idx selector = dict(self.selector, index=idx) element = self._element_class(self.query_scope, selector) if element.__class__ in [HTMLElement, Input]: element = self._construct_subtype(element, dic, tag_name) element.cache = el yield element def __len__(self): """ Returns the number of elements in the collection :rtype: int """ self._els = self._els or [_ for _ in self] return len(self._els) def __getitem__(self, idx): """ Get the element at the given index or slice Any call to an ElementCollection that includes an adjacent selector can not be lazy loaded because it must store correct type Slices can only be lazy loaded if the indices are positive :param idx: index of wanted element, 0-indexed :type idx: int :return: instance of Element subclass :rtype: nerodia.elements.element.Element """ if isinstance(idx, slice): if idx.start and idx.start < 0 or idx.stop and idx.stop < 0: return list(self)[idx.start:idx.stop] else: return list(islice(self, idx.start, idx.stop, idx.step)) elif 'adjacent' in self.selector: try: return list(islice(self, idx + 1))[idx] except IndexError: return self._element_class(self.query_scope, {'invalid_locator': True}) elif len(self._els) > 0: try: return self._els[idx] except IndexError: pass return self._element_class(self.query_scope, dict(self.selector, index=idx)) @property def is_empty(self): """ Returns True if no elements are found :Example: browser.select_list(name='new_user_languages').options(class_name='not_here').is_empty :Example: browser.select_list(name='new_user_languages').options(id='danish').is_empty :return: True if no elements are found :rtype: bool """ return len(self) == 0 def build(self): self.selector_builder.build(self.selector.copy()) @property def to_list(self): """ This collection as a list :rtype: list[nerodia.elements.element.Element] """ nerodia.logger.deprecate('ElementCollection.to_list', 'list(self)') return list(self) def locate(self): """ Locate all elements and return self :rtype: ElementCollection """ self.els = list(self) return self @property def browser(self): """ Returns the browser of the current query_scope :rtype: nerodia.browser.Browser """ return self.query_scope.browser def __eq__(self, other): """ Returns true if two element collections are equal. 
:param other: other collection :rtype: bool :Example: browser.select_list(name='new_user_languages').options == \ browser.select_list(id='new_user_languages').options #=> True browser.select_list(name=;new_user_role').options == \ browser.select_list(id='new_user_languages').options #=> false """ return list(self) == list(other) eql = __eq__ # private @property def _elements(self): self._ensure_context() if 'scope' in self.selector_builder.built: return self.query_scope._element_call(lambda: self._locate_all()) else: return self._locate_all() @property def _elements_with_tags(self): els = self._elements if 'tag_name' in self.selector: return [(e, self.selector['tag_name']) for e in els] else: retries = 0 while retries <= 2: try: return zip(els, self._execute_js('getElementTags', els)) except StaleElementReferenceException: retries += 1 sleep(0.5) pass raise LocatorException('Unable to locate element collection from {} due to changing ' 'page'.format(self.selector)) def _ensure_context(self): from nerodia.elements.i_frame import IFrame from nerodia.browser import Browser if isinstance(self.query_scope, Browser) or \ (self.query_scope._located and self.query_scope.stale): self.query_scope.locate() if isinstance(self.query_scope, IFrame): self.query_scope.switch_to() def _locate_all(self): return self.locator.locate_all(self.selector_builder.built) @property def _element_class(self): from .elements.svg_elements import SVGElementCollection from .elements.html_elements import HTMLElementCollection from .module_mapping import map_module name = self.__class__.__name__.replace('Collection', '') element_module = map_module(name) try: module = import_module('nerodia.elements.{}'.format(element_module)) except ImportError: if isinstance(self, HTMLElementCollection): module = import_module('nerodia.elements.html_elements') elif isinstance(self, SVGElementCollection): module = import_module('nerodia.elements.svg_elements') else: raise TypeError( 'element class for {} could not be determined'.format(name)) return getattr(module, name) def _construct_subtype(self, element, dic, tag_name): selector = element.selector dic[tag_name] = dic.get(tag_name, 0) dic[tag_name] += 1 kls = nerodia.element_class_for(tag_name) selector.update({'index': dic[tag_name] - 1, 'tag_name': tag_name}) return kls(self.query_scope, selector)
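# --- Illustrative usage sketch (assumes a running browser session; the
# URL and the 'kls' class name below are hypothetical) ---
#
#   from nerodia.browser import Browser
#   browser = Browser(browser='chrome')
#   browser.goto('http://example.com')
#
#   divs = browser.divs(class_name='kls')   # an ElementCollection subclass
#   len(divs)                               # locates every matching element
#   first = divs[0]                         # lazy: element built with index=0
#   some = divs[1:3]                        # positive slices stay lazy too
#   for div in divs:                        # iterates located elements,
#       print(div.text)                     # constructing tag-specific types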
# Copyright 2015 Cisco Systems # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections import re from oslo_log import log as logging from tempest.lib.common.utils import data_utils from tempest import config from tempest.scenario import manager from tempest import test from tempest.lib import exceptions CONF = config.CONF LOG = logging.getLogger(__name__) Floating_IP_tuple = collections.namedtuple('Floating_IP_tuple', ['floating_ip', 'server']) ICMP_HEADER_LEN = 8 class TestNetworkMultiNode(manager.NetworkScenarioTest): """ The Neutron ML2 driver will create a VLAN, when configured for VLAN, on the underlying network element when the first virtual machine (VM) is attached to a network/compute host. Conversely, the ML2 driver will delete the VLAN associated with a network/compute host when the last VM is removed from that network/compute host. This test is designed for a multi-node OpenStack deployment with the goal of creating the maximum number of network create and delete events given the available resources. The test does the following: * Creates Aggregates/Zones with a one to one mapping of compute host to zone. This allows the test to place a VM on a particular compute host. * Create networks based on the number of VMs and compute hosts. * Create VMs and distribute them on networks. * Selects one of the VMs as a ping source to send ping packets to each of the other (east/west) VMs using both the floating IP and the fixed IP address. The number of ping packets and the size of the ping packets are controlled by the following tempest configuration variables: - test_packet_count: The number of packets to send for each packet size - test_packet_sizes: A list of packet sizes used during testing The packet and byte counts are verified for each ping test sequence. * The VMs are then deleted, not as part of test cleanup but to allow the network delete events from the ML2 driver to be captured and verified. """ credentials = ['primary', 'admin'] @classmethod def resource_setup(cls): # Create no network resources for these tests. cls.set_network_resources() super(TestNetworkMultiNode, cls).resource_setup() @classmethod def skip_checks(cls): super(TestNetworkMultiNode, cls).skip_checks() if not (CONF.network.project_networks_reachable or CONF.network.public_network_id): msg = ('Either project_networks_reachable must be "true", or ' 'public_network_id must be defined.') cls.enabled = False raise exceptions.InvalidConfiguration(msg) for ext in ['router', 'security-group']: if not test.is_extension_enabled(ext, 'network'): msg = "%s extension not enabled." % ext raise exceptions.InvalidConfiguration(msg) @classmethod def setup_credentials(cls): # Create no network resources for these tests. 
cls.set_network_resources() super(TestNetworkMultiNode, cls).setup_credentials() # Use admin client by default cls.manager = cls.admin_manager def _delete_aggregate(self, aggregate): self.aggregates_client.delete_aggregate(aggregate_id=aggregate['id']) def _add_host(self, aggregate_id, host): host_args = { 'host': host } aggregate_resp = self.aggregates_client.add_host( aggregate_id=aggregate_id, **host_args) aggregate = aggregate_resp['aggregate'] self.addCleanup(self._remove_host, aggregate['id'], host) self.assertIn(host, aggregate['hosts']) def _remove_host(self, aggregate_id, host): host_args = { 'host': host } aggregate_resp = self.aggregates_client.remove_host( aggregate_id=aggregate_id, **host_args) aggregate = aggregate_resp['aggregate'] self.assertNotIn(host, aggregate['hosts']) def _create_server(self, name, network, zone=None, image=None): create_kwargs = self.srv_kwargs create_kwargs['networks'] = [{'uuid': network.id}] if zone is not None: create_kwargs['availability_zone'] = zone server = self.create_server(name=name, wait_until='ACTIVE', image=image, **create_kwargs) return dict(server=server, keypair=self.keypair) def setup_aggregates(self): """ Setup Aggregates/Zones - one compute host per zone so that the test can control which compute host the VMs land on. """ self.aggregates_client = self.manager.aggregates_client self.hypervisor_client = self.manager.hypervisor_client hypervisors_resp = self.hypervisor_client.list_hypervisors() self.hypervisors_list = hypervisors_resp['hypervisors'] # Verify the hypervisors are operational and make a list # of them for later use self.hypervisors = [] self.aggregates = [] i = 0 for hypervisor in self.hypervisors_list: if hypervisor['status'] == 'enabled': if hypervisor['state'] == 'up': self.hypervisors.append(hypervisor) # Create an aggregate/zone per hypervisor host name = data_utils.rand_name('Agg') aggregate_kwargs = { 'name': '{0}'.format(name), 'availability_zone': '{0}-Zone{1}'.format(name, i) } i += 1 aggregate_resp = self.aggregates_client.create_aggregate( **aggregate_kwargs) aggregate = aggregate_resp['aggregate'] self.addCleanup(self._delete_aggregate, aggregate) self.aggregates.append(aggregate) self._add_host(aggregate['id'], hypervisor['hypervisor_hostname']) def setUp(self): super(TestNetworkMultiNode, self).setUp() self.keypair = self.create_keypair() self.floating_ip_tuples = [] self.linux_client = None self.private_key = None self.servers = {} self.srv_kwargs = {'key_name': self.keypair['name']} self.tenant_id = self.manager.identity_client.tenant_id self.total_expected_pkts = 0 self.total_expected_bytes = 0 self.segmentation_ids = [] self.number_instances_per_compute = 1 self.number_routers_per_tenant = 1 self.network_vms = {} self.routers = [] # Classes that inherit this class can redefine packet size/count # based on their own needs or accept the default in the CONF if not hasattr(self, 'test_packet_sizes'): self.test_packet_sizes = map(int, CONF.scenario.test_packet_sizes) if not hasattr(self, 'test_packet_count'): self.test_packet_count = CONF.scenario.test_packet_count if not hasattr(self, 'max_instances_per_tenant'): self.max_instances_per_tenant = ( CONF.scenario.max_instances_per_tenant) # Allows the ability to place VMs on specific compute nodes self.setup_aggregates() self.num_networks = int(self.max_instances_per_tenant / len(self.hypervisors)) # If user specified max_instances_per_tenant less than # number of hypervisors availabe then result is zero # give at least one. 
if self.num_networks == 0: self.num_networks = 1 LOG.debug("Max instances per tenant = {0}". format(self.max_instances_per_tenant)) LOG.debug("Number of instances per Network/compute = {0}". format(self.number_instances_per_compute)) LOG.debug("Number of Networks = {0}".format(self.num_networks)) self.security_group = self._create_security_group( tenant_id=self.tenant_id) my_security_groups = [{'name': self.security_group['name']}] self.srv_kwargs['security_groups'] = my_security_groups try: self._create_loginable_secgroup_rule(secgroup=self.security_group) except Exception as e: LOG.debug("Login sec group already exists: {0}".format(e)) self.setup_networks() self.setup_vms() def add_network(self, client=None, tenant_id=None, router=None, vlan_transparent=False): if CONF.baremetal.driver_enabled: network = self._get_network_by_name( CONF.compute.fixed_network_name) router = None subnet = None else: if CONF.network_feature_enabled.vlan_transparent: network = self._create_network(client=client, tenant_id=tenant_id, vlan_transparent=True) else: network = self._create_network(client=client, tenant_id=tenant_id) if router is None: router = self._get_router(client=client, tenant_id=tenant_id) subnet = self._create_subnet(network=network, client=client) subnet.add_to_router(router.id) return network, subnet, router def setup_networks(self): self.networks = [] router = None for i in range(0, self.num_networks): if i % (self.num_networks / self.number_routers_per_tenant) is 0: if router is not None: self.routers.append(router) router = None self.network, self.subnet, router = self.add_network( tenant_id=self.tenant_id, router=router) if len(self.routers) == 0: self.routers.append(router) self.networks.append(self.network) segmentation_id = self.network['provider:segmentation_id'] self.segmentation_ids.append(segmentation_id) def setup_vms(self, image=None): # Create a VM on a each hypervisor per network for network in self.networks: for aggregate in self.aggregates: name = data_utils.rand_name('server') for i in range(0, 2): try: if CONF.scenario.use_host_aggregates and \ CONF.scenario.use_host_aggregates is True: server_dict = \ self._create_server(name, network, zone=aggregate['availability_zone'], image=image) else: server_dict = self._create_server(name, network, image=image) except Exception as e: LOG.debug("Exception {0}".format(e)) LOG.debug("Failed to bring up server") LOG.debug("Retrying") continue break id = server_dict['server']['id'] self.assertIsNotNone(server_dict) self.servers[id] = server_dict['keypair'] if network.id in self.network_vms: self.network_vms[network.id].append(id) else: self.network_vms[network.id] = [id] # Safety net for max_instances_per_tenant if len(self.servers) == self.max_instances_per_tenant: return def delete_vms(self): """ This method is not designed for clean up at the end of the test. Some tests will need to verify that network delete events occur when the VMs are deleted. 
:return: """ for server in self.servers.keys(): LOG.debug("Deleting server {0}".format(server)) self.servers_client.delete_server(server) del self.servers[server] def verify_network_create_events(self): """ Implement in network element specific test class """ pass def verify_network_delete_events(self): """ Implement in network element specific test class """ pass def verify_network_element_ready(self): """ Implement in network element specific test class """ pass def verify_network_element_traffic_flows(self): """ Implement in network element specific test class """ pass def _ping_east_west(self, linux_client, target_ip, count=CONF.validation.ping_count, size=CONF.validation.ping_size): """ From a remote linux host ping an IP address and return a data structure containing the results. :param linux_client: A remote_client object :param target_ip: The IP Address to ping from the remote client :param count: How many pings :param size: The packet size for each ping :return: A dictionary with received pkts/byts, summary, round-trip data """ ping_data = {} bytes_rx = 0 pkts_rx = 0 # RegEx for data mining the ping results. pings = re.compile(r""" ^(\d+)\sbytes\sfrom\s # Store num bytes ([\d\.]+):\s # Store the IP address (icmp_)?seq=(\d+)\s # Account for Cirros diff and # store seq num ttl=(\d+)\s # Store ttl time=([\d\.]+)\sms # Store time """, re.VERBOSE | re.IGNORECASE) pings_summary = re.compile(r""" ^(\d+) # Store num transmitted \spackets\stransmitted,\s # Common to all (\d+)\s # Store num received (packets[ ])?received,\s # Cirros is different (\d+)[%]\spacket\sloss # Store pkt loss ([, ]+time[ ](\d+)ms)? # Cirros is different """, re.VERBOSE | re.IGNORECASE) round_trip = re.compile(r""" ^(rtt|round-trip)\s min/avg/max(/mdev)?\s=\s ([\d\.]+)[/] # Store min time ([\d\.]+)[/] # Store avg time ([\d\.]+) # Store max time .*""", re.VERBOSE | re.IGNORECASE) ping_result = None for x in range(0, 3): try: if CONF.scenario.advanced_vm_capabilities: ping_result = linux_client.ping_host( target_ip, count=count, size=size, interval=.2).splitlines() else: ping_result = linux_client.ping_host( target_ip, count=count, size=size).splitlines() break except exceptions.SSHExecCommandFailed: LOG.debug("SSHExecCommandFailed - retrying") except Exception: LOG.debug("Catch all - retrying") self.assertIsNotNone(ping_result, "SSHExecCommandFailed - ping failed") if ping_result is not None and len(ping_result) >= count: for line in ping_result: m = pings.match(line) if m is not None: bytes_rx += int(m.group(1)) pkts_rx += 1 continue m = pings_summary.match(line) if m is not None: ping_data['summary'] = {'pkts_tx': int(m.group(1)), 'pkts_rx': int(m.group(2)), 'loss': int(m.group(4))} continue m = round_trip.match(line) if m is not None: ping_data['round-trip'] = {'min': float(m.group(3)), 'ave': float(m.group(4)), 'max': float(m.group(5))} continue ping_data['data-received'] = {'packets': pkts_rx, 'bytes': bytes_rx} return ping_data def setup_linux_client(self): fip_tuple = self.floating_ip_tuples[0] self.linux_client_ip, server = fip_tuple self.private_key = self.servers[server['id']]['private_key'] self.linux_client = self.get_remote_client( ip_address=self.linux_client_ip. 
floating_ip_address, private_key=self.private_key) super(TestNetworkMultiNode, self).check_vm_connectivity( self.linux_client_ip.floating_ip_address, username=CONF.validation.image_ssh_user, private_key=self.private_key, should_connect=True) def ping_target_ip(self, linux_client, source_ip, target_ip, pkt_size=CONF.validation.ping_size): LOG.debug("Ping from {0} to {1}".format(source_ip, target_ip)) LOG.debug("Testing with packet size {0}".format(pkt_size)) ping_result = self._ping_east_west(linux_client, target_ip, count=self.test_packet_count, size=pkt_size) self.assertIsNotNone(ping_result, "Ping from {0} to {1} failed". format(source_ip, target_ip)) msg = "Ping result indicates packet loss from {0} to {1}".format( source_ip, target_ip) self.assertEqual(0, ping_result['summary']['loss'], msg) # Calculate expected pkts/bytes self.total_expected_pkts += self.test_packet_count self.total_expected_bytes += self.test_packet_count * (pkt_size + ICMP_HEADER_LEN) # Store actual pkts/bytes used later for test self.total_actual_pkts += int(ping_result['data-received']['packets']) self.total_actual_bytes += int(ping_result['data-received']['bytes']) def verify_vm_to_vm_connectivity(self): """ Selects one of the VMs created and uses it as a ping source to ping all other VMs. :return: """ self.assertTrue(len(self.servers) >= 2, "Not enough servers to check VM to VM connectivity") self.total_actual_pkts = 0 self.total_actual_bytes = 0 self.total_expected_pkts = 0 self.total_expected_bytes = 0 if self.linux_client is None: self.setup_linux_client() # Cycle through the VMs pinging each one from the testing VM # First use floating IPs and fixed IPs if self.floating_ip_tuples is not None: for i in range(1, len(self.floating_ip_tuples)): fip_tuple = self.floating_ip_tuples[i] target_ip, server = fip_tuple for pkt_size in self.test_packet_sizes: self.ping_target_ip(self.linux_client, self.linux_client_ip. floating_ip_address, target_ip.floating_ip_address, pkt_size) self.ping_target_ip(self.linux_client, self.linux_client_ip. floating_ip_address, target_ip.fixed_ip_address, pkt_size) LOG.debug("Received {0} Packets " "containing {1} bytes".format(self.total_actual_pkts, self.total_actual_bytes)) LOG.debug("Expected {0} Packets " "containing {1} bytes".format(self.total_expected_pkts, self.total_expected_bytes)) self.assertEqual(self.total_expected_pkts, self.total_actual_pkts, "Total packets received failed") self.assertEqual(self.total_expected_bytes, self.total_actual_bytes, "Total bytes received failed") def create_floating_ips(self): for server_id in self.servers.keys(): server = {'id': server_id, 'tenant_id': self.tenant_id} floating_ip = self.create_floating_ip(server) self.floating_ip_tuple = Floating_IP_tuple(floating_ip, server) self.floating_ip_tuples.append(self.floating_ip_tuple) def delete_floating_ips(self): if self.floating_ip_tuples is not None: for i in range(0, len(self.floating_ip_tuples)): fip_tuple = self.floating_ip_tuples.pop() floating_ip, server = fip_tuple self._disassociate_floating_ip(floating_ip) def verify_vm_connectivity(self): if self.floating_ip_tuples is not None: for i in range(1, len(self.floating_ip_tuples)): fip_tuple = self.floating_ip_tuples[i] target_ip, server = fip_tuple msg = "Timeout waiting for %s" % target_ip.floating_ip_address self.assertTrue(self. 
ping_ip_address(target_ip.floating_ip_address, should_succeed=True), msg=msg) @test.idempotent_id('094f246d-9800-4c79-b249-361dab5d5a0f') @test.services('compute', 'network') def test_network_multi_node(self): self.verify_network_create_events() self.create_floating_ips() self.verify_vm_connectivity() self.verify_network_element_ready() self.verify_vm_to_vm_connectivity() self.verify_network_element_traffic_flows() self.delete_vms() self.verify_network_delete_events()
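# --- Illustrative sketch (not part of the test class above) -----------------
# A minimal, self-contained example of how the ping summary line is mined in
# _ping_east_west(): the same regular expression, applied to one sample line.
# The sample output is an assumption (typical iputils format); the optional
# "packets received" group accounts for the Cirros variant noted above.
import re

_PINGS_SUMMARY = re.compile(r"""
    ^(\d+)                      # Store num transmitted
    \spackets\stransmitted,\s   # Common to all
    (\d+)\s                     # Store num received
    (packets[ ])?received,\s    # Cirros is different
    (\d+)[%]\spacket\sloss      # Store pkt loss
    ([, ]+time[ ](\d+)ms)?      # Cirros is different
    """, re.VERBOSE | re.IGNORECASE)


def parse_ping_summary(line):
    """Return {'pkts_tx', 'pkts_rx', 'loss'} for a ping summary line, else None."""
    m = _PINGS_SUMMARY.match(line)
    if m is None:
        return None
    return {'pkts_tx': int(m.group(1)),
            'pkts_rx': int(m.group(2)),
            'loss': int(m.group(4))}

# parse_ping_summary("5 packets transmitted, 5 received, 0% packet loss, time 4005ms")
#   -> {'pkts_tx': 5, 'pkts_rx': 5, 'loss': 0}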
# coding=utf-8 # Copyright 2022 The Tensor2Tensor Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # pylint: disable=line-too-long r"""Launch a script in parallel on GCP. For each instance (`--num_instances`), the script will copy the code in `--code_dir` to the instance, run `--setup_command` and then run `--command_prefix` joined with the task's id or a line in `--per_instance_suffix_file`. Note that the machines will attempt to down themselves on completion or failure. If they do not, you can delete them manually or use delete_instances.sh to delete many at once. Example usage: ``` BUCKET=gs://my-bucket python parallel_launch.py \ --num_instances=1000 \ --cpu=4 --mem=4 \ --name=wikisum-refs-web \ --code_dir=./ \ --log_dir=$BUCKET/refs_logs \ --setup_command="pip3 install aiohttp cchardet aiodns bs4 -q --user" \ --command_prefix="python3 wikisum/get_references_web.py --out_dir=$BUCKET/wiki_references --shard_id" ``` """ # pylint: enable=line-too-long from __future__ import absolute_import from __future__ import division from __future__ import print_function import contextlib import multiprocessing as mp import os import socket import subprocess as sp import time from tensor2tensor.utils import cloud_mlengine as cloud import tensorflow.compat.v1 as tf flags = tf.flags FLAGS = flags.FLAGS flags.DEFINE_integer("num_instances", None, "Number of instances to launch.") flags.DEFINE_string("name", None, "Instance name prefix.") flags.DEFINE_string("log_dir", None, "GCS bucket to copy logs out to.") flags.DEFINE_string("code_dir", None, "Directory to copy.") flags.DEFINE_string("setup_command", None, "Setup command to run.") flags.DEFINE_string("command_prefix", None, "Command to run, prefix.") flags.DEFINE_string("per_instance_suffix_file", None, "Command to run, suffix per instance. If None, suffix will " "be instance id.") flags.DEFINE_integer("cpu", 1, "Number of CPUs per instance.") flags.DEFINE_integer("mem", 4, "Memory in GB per instance.") flags.DEFINE_integer("num_threads", 48, "Number of threads to use to spin up jobs.") flags.DEFINE_bool("debug_keep_up", False, "If True, will keep the machine up. num_instances must be 1.") flags.DEFINE_string("instance_ids", None, "Comma-separated list of integer instance ids to launch. 
" "Useful if some failed on a previous run and you only want " "to rerun specific tasks.") DELETE = "gcloud compute instances delete {name}" DELETE_SELF = ("gcloud compute instances delete $(hostname) --quiet " "--zone={zone}") CREATE_INSTANCE = ("gcloud compute instances create {instance_name} " "--custom-cpu {cpu} --custom-memory {mem} " "--custom-extensions " "--image-project=ml-images --image-family=tf-1-7 " "--scopes=cloud-platform") COPY_CODE = "gcloud compute scp --recurse {local_dir} {instance_name}:~/" SSH = "gcloud compute ssh {instance_name} --command" SCREEN = "screen -dmS test bash -c \"{command}\"" DEFAULT_ZONE = "gcloud config get-value compute/zone" LOGS = "> ~/logs-{task_id}.txt 2>&1; gsutil cp ~/logs-{task_id}.txt {bucket}" def remote_run(cmd, instance_name, detach=False, retries=1): """Run command on GCS instance, optionally detached.""" if detach: cmd = SCREEN.format(command=cmd) args = SSH.format(instance_name=instance_name).split() args.append(cmd) for i in range(retries + 1): try: if i > 0: tf.logging.info("Retry %d for %s", i, args) return sp.check_call(args) except sp.CalledProcessError as e: if i == retries: raise e def default_zone(): return cloud.shell_output(DEFAULT_ZONE).strip() @contextlib.contextmanager def safe_socket(timeout=2): s = socket.socket() s.settimeout(timeout) try: yield s finally: s.close() def wait_for_ssh(ip): """Wait for SSH to be available at given IP address.""" for _ in range(12): with safe_socket() as s: try: s.connect((ip, 22)) return True except socket.timeout: pass time.sleep(10) return False def create_instance(instance_name, cpu=1, mem=4): tf.logging.info("Creating instance %s", instance_name) out = cloud.shell_output(CREATE_INSTANCE, instance_name=instance_name, cpu=cpu, mem=mem) return out.split("\n")[1:-1][0].split()[8] def list_vm_names_and_ips(): list_out = cloud.shell_output(cloud.LIST_VM) lines = [l.split() for l in list_out.split("\n")[1:-1]] names_and_ips = [(l[0].strip(), l[-2].strip()) for l in lines] return names_and_ips def shell_run_with_retry(cmd, retries=1, **kwargs): for i in range(retries + 1): try: if i > 0: tf.logging.info("Retry %d for %s", i, cmd) cloud.shell_run(cmd, **kwargs) return except sp.CalledProcessError as e: if i == retries: raise e def delete_instance(instance_name): cloud.shell_run(DELETE, name=instance_name) def launch_instance(instance_name, command, existing_ip=None, cpu=1, mem=4, code_dir=None, setup_command=None): """Launch a GCE instance.""" # Create instance ip = existing_ip or create_instance(instance_name, cpu=cpu, mem=mem) tf.logging.info("Waiting for SSH %s", instance_name) ready = wait_for_ssh(ip) if not ready: raise ValueError("Instance %s never ready for SSH" % instance_name) # Copy code if code_dir: shell_run_with_retry(COPY_CODE, retries=2, local_dir=code_dir, instance_name=instance_name) # Run setup if setup_command: tf.logging.info("Running setup on %s", instance_name) remote_run(setup_command, instance_name) # Run command tf.logging.info("Running command on %s", instance_name) remote_run(command, instance_name, detach=True) def main(_): assert FLAGS.num_instances assert FLAGS.name zone = default_zone() assert zone code_dir = None if FLAGS.code_dir: code_dir = os.path.abspath(os.path.expanduser(FLAGS.code_dir)) # Suffixes per instance if FLAGS.per_instance_suffix_file: with tf.gfile.Open(FLAGS.per_instance_suffix_file) as f: suffixes = [l.strip() for l in f.readlines()] else: suffixes = list(range(FLAGS.num_instances)) assert len(suffixes) == FLAGS.num_instances vm_info = 
list_vm_names_and_ips() vm_names = list(zip(*vm_info))[0] if vm_info else [] pool = mp.Pool(FLAGS.num_threads) async_results = [] assert FLAGS.log_dir log_dir = os.path.join(FLAGS.log_dir, FLAGS.name) tf.gfile.MakeDirs(log_dir) assert log_dir.startswith("gs://") if not log_dir.endswith("/"): log_dir += "/" # Write a test file to make sure gcloud GCS APIs are enabled test_filename = os.path.join(log_dir, "check_write") with tf.gfile.Open(test_filename, "w") as f: f.write("testing GCS write") tf.gfile.Remove(test_filename) instance_ids = list(range(FLAGS.num_instances)) if FLAGS.instance_ids: instance_ids = [int(i) for i in FLAGS.instance_ids.split(",")] tf.logging.info("Launching %d instances", len(instance_ids)) for i in instance_ids: instance_name = "%s-%d" % (FLAGS.name, i) existing_ip = (vm_info[vm_names.index(instance_name)][1] if instance_name in vm_names else None) logging = LOGS.format(task_id=i, bucket=log_dir) if log_dir else "" delete = DELETE_SELF.format(zone=zone) if FLAGS.debug_keep_up: assert len(instance_ids) == 1 delete = "" command = "{prefix} {suffix} {logging}; {delete}".format( prefix=FLAGS.command_prefix, suffix=suffixes[i], delete=delete, logging=logging) args = (instance_name, command, existing_ip, FLAGS.cpu, FLAGS.mem, code_dir, FLAGS.setup_command) res = pool.apply_async(launch_instance, args) async_results.append((res, instance_name, i)) failed = [] for res, instance_name, i in async_results: try: res.get() except Exception as e: # pylint: disable=broad-except failed.append((instance_name, i)) tf.logging.error("Failed to launch task %s due to exception %s", instance_name, str(e)) results = [] if failed: ids_for_flag = ",".join([str(i) for i in list(zip(*failed))[1]]) tf.logging.error("Failed to launch %d jobs. Tasks: %s. " "Attempting delete in case they are still up. Rerun with " "--instance_ids='%s' to attempt relaunch.", len(failed), str(failed), ids_for_flag) for instance_name, _ in failed: res = pool.apply_async(delete_instance, (instance_name,)) results.append(res) for res in results: try: res.get() except: # pylint: disable=bare-except pass tf.logging.info("Launching complete.") if __name__ == "__main__": tf.logging.set_verbosity(tf.logging.INFO) tf.app.run()
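# --- Illustrative sketch (not part of the launcher) --------------------------
# How main() assembles the per-instance command: the user-supplied
# --command_prefix, the task's suffix (its id, or a line from
# --per_instance_suffix_file), the log-copy redirect (LOGS) and the
# self-delete (DELETE_SELF). The prefix, bucket and zone below are
# assumptions chosen for the example only.
def example_command(task_id,
                    prefix="python3 wikisum/get_references_web.py --shard_id",
                    bucket="gs://my-bucket/refs_logs/wikisum-refs-web/",
                    zone="us-central1-b"):
    logging = LOGS.format(task_id=task_id, bucket=bucket)
    delete = DELETE_SELF.format(zone=zone)
    return "{prefix} {suffix} {logging}; {delete}".format(
        prefix=prefix, suffix=task_id, logging=logging, delete=delete)

# example_command(7) ->
#   "python3 wikisum/get_references_web.py --shard_id 7 > ~/logs-7.txt 2>&1; "
#   "gsutil cp ~/logs-7.txt gs://my-bucket/refs_logs/wikisum-refs-web/; "
#   "gcloud compute instances delete $(hostname) --quiet --zone=us-central1-b"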
from model.contact import Contact import re class ContactHelper: contact_cache = None def __init__(self, app): self.app = app def change_field_value(self, field_name, text): wd = self.app.wd if text is not None: wd.find_element_by_name(field_name).click() wd.find_element_by_name(field_name).clear() wd.find_element_by_name(field_name).send_keys(text) def change_avatar(self, avatar): wd = self.app.wd if avatar is not None: wd.find_element_by_name("photo").send_keys(avatar) def fill_contact_form(self, Contact): wd = self.app.wd self.change_field_value("firstname", Contact.first_name) self.change_field_value("middlename", Contact.middle_name) self.change_field_value("lastname", Contact.last_name) self.change_field_value("nickname", Contact.nickname) self.change_field_value("title", Contact.title) self.change_avatar(Contact.avatar) self.change_field_value("company", Contact.company) self.change_field_value("address", Contact.address) self.change_field_value("home", Contact.home_phone) self.change_field_value("mobile", Contact.mobile_phone) self.change_field_value("work", Contact.work_phone) self.change_field_value("fax", Contact.fax) self.change_field_value("email", Contact.email_1) self.change_field_value("email2", Contact.email_2) self.change_field_value("email3", Contact.email_3) self.change_field_value("homepage", Contact.homepage) self.change_field_value("address2", Contact.address_2) self.change_field_value("phone2", Contact.phone_2) self.change_field_value("notes", Contact.notes) if Contact.b_day is not None: wd.find_element_by_name("bday").send_keys(Contact.b_day) if Contact.b_month is not None: wd.find_element_by_name("bmonth").send_keys(Contact.b_month) if Contact.b_year is not None: wd.find_element_by_name("byear").send_keys(Contact.b_year) if Contact.a_day is not None: wd.find_element_by_name("aday").send_keys(Contact.a_day) if Contact.a_month is not None: wd.find_element_by_name("amonth").send_keys(Contact.a_month) if Contact.a_year is not None: wd.find_element_by_name("ayear").send_keys(Contact.a_year) def new_contact_form(self, Contact): wd = self.app.wd self.fill_contact_form(Contact) self.contact_cache = None def edit_contact_form(self, Contact): wd = self.app.wd self.fill_contact_form(Contact) self.contact_cache = None def create(self, Contact): wd = self.app.wd wd.find_element_by_link_text("add new").click() self.new_contact_form(Contact) wd.find_element_by_xpath("//div[@id='content']/form/input[21]").click() self.return_to_home_page() self.contact_cache = None def add_contact_to_group(self, Contact, Group): wd = self.app.wd self.app.open_home_page(wd) self.select_contact_by_id(Contact.id) wd.find_element_by_name("to_group").send_keys(Group.name) wd.find_element_by_name("add").click() def delete_contact_from_group(self, Contact, Group): wd = self.app.wd self.app.open_home_page(wd) wd.find_element_by_name("group").send_keys(Group.name) wd.find_element_by_css_selector("body").click() self.select_contact_by_id(Contact.id) wd.find_element_by_name("remove").click() self.return_to_home_page() wd.find_element_by_name("group").send_keys("[all]") wd.find_element_by_css_selector("body").click() def edit_first_contact(self, Contact): self.edit_contact_by_index(Contact, 0) def edit_contact_by_index(self, Contact, index): wd = self.app.wd self.open_contact_to_edit_by_index(index) self.edit_contact_form(Contact) wd.find_element_by_xpath("//div[@id='content']/form[1]/input[22]").click() self.return_to_home_page() self.contact_cache = None def edit_contact_by_id(self, Contact, id): wd = 
self.app.wd self.open_contact_to_edit_by_id(id) self.edit_contact_form(Contact) wd.find_element_by_xpath("//div[@id='content']/form[1]/input[22]").click() self.return_to_home_page() self.contact_cache = None def delete_first_contact(self): self.delete_contact_by_index(0) def delete_contact_by_index(self, index): wd = self.app.wd wd.find_elements_by_name("selected[]")[index].click() wd.find_element_by_xpath("//div[@id='content']/form[2]/div[2]/input").click() wd.switch_to_alert().accept() self.return_to_home_page() self.contact_cache = None def select_contact_by_id(self, id): wd = self.app.wd wd.find_element_by_css_selector("input[value='%s']" % id).click() def delete_contact_by_id(self, id): wd = self.app.wd self.select_contact_by_id(id) wd.find_element_by_xpath("//div[@id='content']/form[2]/div[2]/input").click() wd.switch_to_alert().accept() self.return_to_home_page() self.contact_cache = None def count(self): wd = self.app.wd self.return_to_home_page() return len(wd.find_elements_by_name("selected[]")) def return_to_home_page(self): wd = self.app.wd wd.find_element_by_link_text("home").click() def get_contact_list(self): if self.contact_cache is None: wd = self.app.wd self.app.open_home_page(wd) self.contact_cache = [] for row in wd.find_elements_by_name("entry"): cells = row.find_elements_by_tag_name("td") address = cells[3].text first_name = cells[2].text last_name = cells[1].text id = cells[0].find_element_by_tag_name("input").get_attribute("value") all_phones = cells[5].text all_emails = cells[4].text self.contact_cache.append(Contact(first_name=first_name, last_name=last_name, id=id, address=address, all_phones_from_homepage=all_phones, all_emails_from_homepage=all_emails)) return list(self.contact_cache) def open_contact_to_edit_by_index(self, index): wd = self.app.wd self.app.open_home_page(wd) row = wd.find_elements_by_name("entry")[index] cell = row.find_elements_by_tag_name("td")[7] cell.find_element_by_tag_name("a").click() def open_contact_to_edit_by_id(self, id): wd = self.app.wd self.app.open_home_page(wd) wd.find_element_by_xpath("//input[@value='%s']/../../td[8]/a" % id).click() def open_contact_to_view_by_index(self, index): wd = self.app.wd self.app.open_home_page(wd) row = wd.find_elements_by_name("entry")[index] cell = row.find_elements_by_tag_name("td")[6] cell.find_element_by_tag_name("a").click() def open_contact_to_view_by_id(self, id): wd = self.app.wd self.app.open_home_page(wd) wd.find_element_by_xpath("//input[@value='%s']/../../td[7]/a" % id).click() def get_contact_info_from_edit_page(self, index): wd = self.app.wd self.open_contact_to_edit_by_index(index) first_name = wd.find_element_by_name("firstname").get_attribute("value") last_name = wd.find_element_by_name("lastname").get_attribute("value") id = wd.find_element_by_name("id").get_attribute("value") home_phone = wd.find_element_by_name("home").get_attribute("value") mobile_phone = wd.find_element_by_name("mobile").get_attribute("value") work_phone = wd.find_element_by_name("work").get_attribute("value") phone_2 = wd.find_element_by_name("phone2").get_attribute("value") email_1 = wd.find_element_by_name("email").get_attribute("value") email_2 = wd.find_element_by_name("email2").get_attribute("value") email_3 = wd.find_element_by_name("email3").get_attribute("value") address = wd.find_element_by_name("address").get_attribute("value") return Contact(first_name=first_name, last_name=last_name, id=id, home_phone=home_phone, mobile_phone=mobile_phone, work_phone=work_phone, phone_2=phone_2, address=address, 
email_1=email_1, email_2=email_2, email_3=email_3) def get_contact_from_view_page(self, index): wd = self.app.wd self.open_contact_to_view_by_index(index) text = wd.find_element_by_id("content").text try: home_phone = re.search("H: (.*)", text).group(1) except AttributeError: home_phone = "" try: mobile_phone = re.search("M: (.*)", text).group(1) except AttributeError: mobile_phone = "" try: work_phone = re.search("W: (.*)", text).group(1) except AttributeError: work_phone = "" try: phone_2 = re.search("P: (.*)", text).group(1) except AttributeError: phone_2 = "" return Contact(home_phone=home_phone, mobile_phone=mobile_phone, work_phone=work_phone, phone_2=phone_2) def clean(self, contact): return Contact(id=contact.id, first_name=contact.first_name.strip(), last_name=contact.last_name.strip(), address=contact.address.strip(), mobile_phone=contact.mobile_phone, work_phone=contact.work_phone, phone_2=contact.phone_2, email_1=contact.email_1, email_2=contact.email_2, email_3=contact.email_3)
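# --- Illustrative sketch (not part of the helper) ----------------------------
# Typical use from a test, assuming an application fixture `app` exposes this
# helper as `app.contact` and that the Contact model defaults unspecified
# fields to None (only the field names used in fill_contact_form() above are
# taken from the source).
def example_create_contact(app):
    before = app.contact.count()
    app.contact.create(Contact(first_name="Anna", last_name="Smith",
                               email_1="anna@example.com"))
    assert app.contact.count() == before + 1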
# Copyright 2012 NEC Corporation # Copyright 2015 Cisco Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from django.core.urlresolvers import reverse from django import http from mox3.mox import IsA # noqa from horizon.workflows import views from openstack_dashboard import api from openstack_dashboard.dashboards.project.networks import tests from openstack_dashboard.test import helpers as test DETAIL_URL = 'horizon:admin:networks:subnets:detail' NETWORKS_INDEX_URL = reverse('horizon:admin:networks:index') NETWORKS_DETAIL_URL = 'horizon:admin:networks:detail' class NetworkSubnetTests(test.BaseAdminViewTests): @test.create_stubs({api.neutron: ('network_get', 'subnet_get',)}) def test_subnet_detail(self): network = self.networks.first() subnet = self.subnets.first() api.neutron.network_get(IsA(http.HttpRequest), network.id)\ .MultipleTimes().AndReturn(network) api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\ .AndReturn(subnet) self.mox.ReplayAll() url = reverse(DETAIL_URL, args=[subnet.id]) res = self.client.get(url) self.assertTemplateUsed(res, 'horizon/common/_detail.html') self.assertEqual(res.context['subnet'].id, subnet.id) @test.create_stubs({api.neutron: ('subnet_get',)}) def test_subnet_detail_exception(self): subnet = self.subnets.first() api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\ .AndRaise(self.exceptions.neutron) self.mox.ReplayAll() url = reverse(DETAIL_URL, args=[subnet.id]) res = self.client.get(url) redir_url = NETWORKS_INDEX_URL self.assertRedirectsNoFollow(res, redir_url) @test.create_stubs({api.neutron: ('network_get', 'is_extension_supported', 'subnetpool_list',)}) def test_subnet_create_get(self): network = self.networks.first() api.neutron.network_get(IsA(http.HttpRequest), network.id)\ .AndReturn(self.networks.first()) api.neutron.is_extension_supported(IsA(http.HttpRequest), 'subnet_allocation')\ .AndReturn(True) api.neutron.subnetpool_list(IsA(http.HttpRequest))\ .AndReturn(self.subnets) self.mox.ReplayAll() url = reverse('horizon:admin:networks:addsubnet', args=[network.id]) res = self.client.get(url) self.assertTemplateUsed(res, views.WorkflowView.template_name) @test.create_stubs({api.neutron: ('network_get', 'is_extension_supported', 'subnetpool_list', 'subnet_create',)}) def test_subnet_create_post(self): network = self.networks.first() subnet = self.subnets.first() api.neutron.network_get(IsA(http.HttpRequest), network.id)\ .MultipleTimes().AndReturn(self.networks.first()) api.neutron.is_extension_supported(IsA(http.HttpRequest), 'subnet_allocation')\ .MultipleTimes().AndReturn(True) api.neutron.subnetpool_list(IsA(http.HttpRequest))\ .AndReturn(self.subnets) api.neutron.subnet_create(IsA(http.HttpRequest), network_id=network.id, name=subnet.name, cidr=subnet.cidr, ip_version=subnet.ip_version, gateway_ip=subnet.gateway_ip, enable_dhcp=subnet.enable_dhcp, allocation_pools=subnet.allocation_pools, tenant_id=subnet.tenant_id)\ .AndReturn(subnet) self.mox.ReplayAll() form_data = tests.form_data_subnet(subnet) url = 
reverse('horizon:admin:networks:addsubnet', args=[subnet.network_id]) res = self.client.post(url, form_data) self.assertNoFormErrors(res) redir_url = reverse(NETWORKS_DETAIL_URL, args=[subnet.network_id]) self.assertRedirectsNoFollow(res, redir_url) @test.create_stubs({api.neutron: ('network_get', 'subnet_create',)}) def test_subnet_create_post_network_exception(self): network = self.networks.first() subnet = self.subnets.first() api.neutron.network_get(IsA(http.HttpRequest), network.id)\ .AndRaise(self.exceptions.neutron) self.mox.ReplayAll() form_data = tests.form_data_subnet(subnet, allocation_pools=[]) url = reverse('horizon:admin:networks:addsubnet', args=[subnet.network_id]) res = self.client.post(url, form_data) self.assertNoFormErrors(res) # admin DetailView is shared with userpanel one, so # redirection URL on error is userpanel index. redir_url = reverse('horizon:project:networks:index') self.assertRedirectsNoFollow(res, redir_url) @test.create_stubs({api.neutron: ('network_get', 'is_extension_supported', 'subnetpool_list', 'subnet_create',)}) def test_subnet_create_post_subnet_exception(self): network = self.networks.first() subnet = self.subnets.first() api.neutron.network_get(IsA(http.HttpRequest), network.id)\ .MultipleTimes().AndReturn(self.networks.first()) api.neutron.is_extension_supported(IsA(http.HttpRequest), 'subnet_allocation')\ .AndReturn(True) api.neutron.subnetpool_list(IsA(http.HttpRequest))\ .AndReturn(self.subnets) api.neutron.subnet_create(IsA(http.HttpRequest), network_id=network.id, name=subnet.name, cidr=subnet.cidr, ip_version=subnet.ip_version, gateway_ip=subnet.gateway_ip, enable_dhcp=subnet.enable_dhcp, tenant_id=subnet.tenant_id)\ .AndRaise(self.exceptions.neutron) self.mox.ReplayAll() form_data = tests.form_data_subnet(subnet, allocation_pools=[]) url = reverse('horizon:admin:networks:addsubnet', args=[subnet.network_id]) res = self.client.post(url, form_data) redir_url = reverse(NETWORKS_DETAIL_URL, args=[subnet.network_id]) self.assertRedirectsNoFollow(res, redir_url) @test.create_stubs({api.neutron: ('network_get', 'is_extension_supported', 'subnetpool_list',)}) def test_subnet_create_post_cidr_inconsistent(self): network = self.networks.first() subnet = self.subnets.first() api.neutron.network_get(IsA(http.HttpRequest), network.id)\ .AndReturn(self.networks.first()) api.neutron.is_extension_supported(IsA(http.HttpRequest), 'subnet_allocation')\ .AndReturn(True) api.neutron.subnetpool_list(IsA(http.HttpRequest))\ .AndReturn(self.subnets) self.mox.ReplayAll() # dummy IPv6 address cidr = '2001:0DB8:0:CD30:123:4567:89AB:CDEF/60' form_data = tests.form_data_subnet( subnet, cidr=cidr, allocation_pools=[]) url = reverse('horizon:admin:networks:addsubnet', args=[subnet.network_id]) res = self.client.post(url, form_data) expected_msg = 'Network Address and IP version are inconsistent.' 
self.assertContains(res, expected_msg) @test.create_stubs({api.neutron: ('network_get', 'is_extension_supported', 'subnetpool_list',)}) def test_subnet_create_post_gw_inconsistent(self): network = self.networks.first() subnet = self.subnets.first() api.neutron.network_get(IsA(http.HttpRequest), network.id)\ .AndReturn(self.networks.first()) api.neutron.is_extension_supported(IsA(http.HttpRequest), 'subnet_allocation')\ .AndReturn(True) api.neutron.subnetpool_list(IsA(http.HttpRequest))\ .AndReturn(self.subnets) self.mox.ReplayAll() # dummy IPv6 address gateway_ip = '2001:0DB8:0:CD30:123:4567:89AB:CDEF' form_data = tests.form_data_subnet(subnet, gateway_ip=gateway_ip, allocation_pools=[]) url = reverse('horizon:admin:networks:addsubnet', args=[subnet.network_id]) res = self.client.post(url, form_data) self.assertContains(res, 'Gateway IP and IP version are inconsistent.') @test.create_stubs({api.neutron: ('subnet_update', 'subnet_get', 'is_extension_supported', 'subnetpool_list')}) def test_subnet_update_post(self): subnet = self.subnets.first() api.neutron.is_extension_supported(IsA(http.HttpRequest), 'subnet_allocation')\ .AndReturn(True) api.neutron.subnetpool_list(IsA(http.HttpRequest))\ .AndReturn(self.subnetpools.list()) api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\ .AndReturn(subnet) api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\ .AndReturn(subnet) api.neutron.subnet_update(IsA(http.HttpRequest), subnet.id, name=subnet.name, enable_dhcp=subnet.enable_dhcp, dns_nameservers=[], host_routes=[])\ .AndReturn(subnet) self.mox.ReplayAll() form_data = tests.form_data_subnet(subnet, allocation_pools=[]) url = reverse('horizon:admin:networks:editsubnet', args=[subnet.network_id, subnet.id]) res = self.client.post(url, form_data) redir_url = reverse(NETWORKS_DETAIL_URL, args=[subnet.network_id]) self.assertRedirectsNoFollow(res, redir_url) @test.create_stubs({api.neutron: ('subnet_update', 'subnet_get', 'is_extension_supported', 'subnetpool_list')}) def test_subnet_update_post_gw_inconsistent(self): subnet = self.subnets.first() api.neutron.is_extension_supported(IsA(http.HttpRequest), 'subnet_allocation')\ .AndReturn(True) api.neutron.subnetpool_list(IsA(http.HttpRequest))\ .AndReturn(self.subnetpools.list()) api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)\ .AndReturn(subnet) self.mox.ReplayAll() # dummy IPv6 address gateway_ip = '2001:0DB8:0:CD30:123:4567:89AB:CDEF' form_data = tests.form_data_subnet(subnet, gateway_ip=gateway_ip, allocation_pools=[]) url = reverse('horizon:admin:networks:editsubnet', args=[subnet.network_id, subnet.id]) res = self.client.post(url, form_data) self.assertContains(res, 'Gateway IP and IP version are inconsistent.') @test.create_stubs({api.neutron: ('subnet_delete', 'subnet_list', 'port_list', 'is_extension_supported', 'list_dhcp_agent_hosting_networks',)}) def test_subnet_delete(self): self._test_subnet_delete() @test.create_stubs({api.neutron: ('subnet_delete', 'subnet_list', 'port_list', 'is_extension_supported', 'list_dhcp_agent_hosting_networks',)}) def test_subnet_delete_with_mac_learning(self): self._test_subnet_delete(mac_learning=True) def _test_subnet_delete(self, mac_learning=False): subnet = self.subnets.first() network_id = subnet.network_id api.neutron.list_dhcp_agent_hosting_networks(IsA(http.HttpRequest), network_id).\ AndReturn(self.agents.list()) api.neutron.subnet_delete(IsA(http.HttpRequest), subnet.id) api.neutron.subnet_list(IsA(http.HttpRequest), network_id=network_id)\ .AndReturn([self.subnets.first()]) 
api.neutron.port_list(IsA(http.HttpRequest), network_id=network_id)\ .AndReturn([self.ports.first()]) api.neutron.is_extension_supported(IsA(http.HttpRequest), 'mac-learning')\ .AndReturn(mac_learning) self.mox.ReplayAll() form_data = {'action': 'subnets__delete__%s' % subnet.id} url = reverse(NETWORKS_DETAIL_URL, args=[network_id]) res = self.client.post(url, form_data) self.assertRedirectsNoFollow(res, url) @test.create_stubs({api.neutron: ('subnet_delete', 'subnet_list', 'port_list', 'is_extension_supported', 'list_dhcp_agent_hosting_networks',)}) def test_subnet_delete_exception(self): self._test_subnet_delete_exception() @test.create_stubs({api.neutron: ('subnet_delete', 'subnet_list', 'port_list', 'is_extension_supported', 'list_dhcp_agent_hosting_networks',)}) def test_subnet_delete_exception_with_mac_learning(self): self._test_subnet_delete_exception(mac_learning=True) def _test_subnet_delete_exception(self, mac_learning=False): subnet = self.subnets.first() network_id = subnet.network_id api.neutron.list_dhcp_agent_hosting_networks(IsA(http.HttpRequest), network_id).\ AndReturn(self.agents.list()) api.neutron.subnet_delete(IsA(http.HttpRequest), subnet.id)\ .AndRaise(self.exceptions.neutron) api.neutron.subnet_list(IsA(http.HttpRequest), network_id=network_id)\ .AndReturn([self.subnets.first()]) api.neutron.port_list(IsA(http.HttpRequest), network_id=network_id)\ .AndReturn([self.ports.first()]) api.neutron.is_extension_supported(IsA(http.HttpRequest), 'mac-learning')\ .AndReturn(mac_learning) self.mox.ReplayAll() form_data = {'action': 'subnets__delete__%s' % subnet.id} url = reverse(NETWORKS_DETAIL_URL, args=[network_id]) res = self.client.post(url, form_data) self.assertRedirectsNoFollow(res, url)
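# --- Illustrative sketch (not part of the test module) -----------------------
# What the "... and IP version are inconsistent" errors exercised above boil
# down to: the form rejects a CIDR or gateway whose IP version differs from
# the subnet's ip_version field. Horizon's actual validation logic may differ;
# this standalone check (Python 3 stdlib ipaddress) only illustrates the case
# the tests construct with a dummy IPv6 value against an IPv4 subnet.
import ipaddress


def cidr_matches_ip_version(cidr, ip_version):
    return ipaddress.ip_network(cidr, strict=False).version == ip_version

# cidr_matches_ip_version('2001:0DB8:0:CD30:123:4567:89AB:CDEF/60', 4) -> False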
""" Models the visit attribute object. """ import logging from cutlass.iHMPSession import iHMPSession from cutlass.DiseaseMeta import DiseaseMeta from cutlass.Base import Base from cutlass.Util import enforce_bool, enforce_dict, enforce_float, \ enforce_int, enforce_list, enforce_string # pylint: disable=W0703, W0201, W0212, C1801, R0912 # Create a module logger named after the module module_logger = logging.getLogger(__name__) # Add a NullHandler for the case if no logging is configured by the application module_logger.addHandler(logging.NullHandler()) class VisitAttribute(Base): """ The class encapsulating the data for an iHMP visit attribute. This class contains all the fields required to save a visit attribute in OSDF. Attributes: namespace (str): The namespace this class will use in OSDF. """ namespace = "ihmp" __dict = { 'comment': [str, None], 'mother_child': [str, None], 'study': [str, None], 'survey_id': [str, None], 'subproject': [str, None], 'time_during_pregnancy': [str, None], # These are the disease metadata fields 'disease_comment': ["DiseaseMeta.comment", None], 'disease_name': ["DiseaseMeta.name", None], 'disease_description': ["DiseaseMeta.description", None], 'disease_ontology_id': ["DiseaseMeta.disease_ontology_id", None], 'disease_mesh_id': ["DiseaseMeta.mesh_id", None], 'disease_nci_id': ["DiseaseMeta.nci_id", None], 'disease_umls_concept_id': ["DiseaseMeta.umls_concept_id", None], 'disease_study_status': ["DiseaseMeta.study_disease_status", None], # pylint: disable=C0326 # These are the clinical patient fields 'age' : [int , "clinical_patient"], 'height' : [float , "clinical_patient"], 'weight' : [float , "clinical_patient"], 'weight_diff' : [str , "clinical_patient"], 'bmi' : [float , "clinical_patient"], 'hbi' : [bool , "clinical_patient"], 'hbi_total' : [float , "clinical_patient"], 'sccai' : [bool , "clinical_patient"], 'sccai_total' : [float , "clinical_patient"], 'fast_gluc' : [int , "clinical_patient"], 'thirtym_gluc' : [int , "clinical_patient"], 'sixtym_gluc' : [int , "clinical_patient"], # These are the hrt fields 'prior' : [bool , "hrt"], 'current' : [bool , "hrt"], 'duration' : [str , "hrt"], # These are the health assessment fields 'self_assess' : [bool , "health_assessment"], 'self_condition' : [str , "health_assessment"], 'abdominal_pain' : [bool , "health_assessment"], 'acute_dis' : [str , "health_assessment"], 'arthralgia' : [bool , "health_assessment"], 'bowel_day' : [int , "health_assessment"], 'bowel_night' : [int , "health_assessment"], 'cancer' : [str , "health_assessment"], 'cancer_mtc' : [bool , "health_assessment"], 'chest_pain' : [bool , "health_assessment"], 'claudication' : [bool , "health_assessment"], 'chronic_dis' : [str , "health_assessment"], 'diarrhea' : [bool , "health_assessment"], 'dyspnea' : [bool , "health_assessment"], 'ery_nodosum' : [bool , "health_assessment"], 'fever' : [str , "health_assessment"], 'leg_edema' : [bool , "health_assessment"], 'neurologic' : [bool , "health_assessment"], 'pregnant' : [bool , "health_assessment"], 'preg_plans' : [bool , "health_assessment"], 'pyo_gangrenosum' : [bool , "health_assessment"], 'rash' : [bool , "health_assessment"], 'stool_blood' : [bool , "health_assessment"], 'stool_soft' : [int , "health_assessment"], 'surgery' : [str , "health_assessment"], 'urgency_def' : [str , "health_assessment"], 'uveitis' : [bool , "health_assessment"], 'weight_change' : [str , "health_assessment"], 'diag_other' : [str , "health_assessment"], 'hosp' : [bool , "health_assessment"], 'work_missed' : [int , 
"health_assessment"], # These are the medications fields 'new_meds' : [bool, "medications"], 'stopped_meds' : [bool, "medications"], 'abx' : [bool, "medications"], 'chemo' : [bool, "medications"], 'immunosupp' : [bool, "medications"], # These are the tests fields 'colonoscopy': [bool, "tests"], 'oral_contrast': [bool, "tests"], # These are the psych fields 'psychiatric' : [bool, "psych"], 'upset' : [int , "psych"], 'control' : [int , "psych"], 'stress' : [int , "psych"], 'stress_def' : [str , "psych"], 'confident' : [int , "psych"], 'going_your_way' : [int , "psych"], 'coping' : [int , "psych"], 'irritation' : [int , "psych"], 'on_top' : [int , "psych"], 'anger' : [int , "psych"], 'difficulties' : [int , "psych"], # These are the exercise fields 'vig_activity_days' : [int, "exercise"], 'vig_activity_hours' : [int, "exercise"], 'vig_activity_minutes' : [int, "exercise"], 'mod_activity_days' : [int, "exercise"], 'mod_activity_hours' : [int, "exercise"], 'mod_activity_minutes' : [int, "exercise"], 'walking_days' : [int, "exercise"], 'walking_hours' : [int, "exercise"], 'walking_minutes' : [int, "exercise"], 'activity_30d' : [str, "exercise"], 'activity_3m' : [str, "exercise"], 'activity_change_30d' : [str, "exercise"], 'activity_change_3m' : [str, "exercise"], # These are the dietary log fields 'alcohol' : [bool , "dietary_log"], 'beans' : [bool , "dietary_log"], 'biscuit' : [bool , "dietary_log"], 'bread' : [str , "dietary_log"], 'bread_spread' : [str , "dietary_log"], 'breadrolls' : [bool , "dietary_log"], 'cheese' : [bool , "dietary_log"], 'cereal' : [bool , "dietary_log"], 'cereal_type' : [str , "dietary_log"], 'chips_crisps' : [bool , "dietary_log"], 'dairy' : [bool , "dietary_log"], 'diet_drinks' : [bool , "dietary_log"], 'eggs' : [bool , "dietary_log"], 'fish' : [bool , "dietary_log"], 'fish_white' : [bool , "dietary_log"], 'fish_oil' : [bool , "dietary_log"], 'fish_count' : [int , "dietary_log"], 'fruit' : [bool , "dietary_log"], 'fruit_count' : [int , "dietary_log"], 'grains' : [bool , "dietary_log"], 'ice_cream' : [bool , "dietary_log"], 'juice' : [bool , "dietary_log"], 'meat' : [bool , "dietary_log"], 'meat_red' : [bool , "dietary_log"], 'meat_white' : [bool , "dietary_log"], 'meat_product' : [bool , "dietary_log"], 'milk' : [str , "dietary_log"], 'pastry' : [bool , "dietary_log"], 'poultry' : [bool , "dietary_log"], 'probiotic' : [bool , "dietary_log"], 'salt' : [str , "dietary_log"], 'shellfish' : [bool , "dietary_log"], 'soda' : [bool , "dietary_log"], 'starch' : [bool , "dietary_log"], 'starch_type' : [bool , "dietary_log"], 'sugar' : [str , "dietary_log"], 'sugar_drinks' : [bool , "dietary_log"], 'sweets' : [bool , "dietary_log"], 'sweets_count' : [int , "dietary_log"], 'veg' : [bool , "dietary_log"], 'veg_green' : [bool , "dietary_log"], 'veg_root' : [bool , "dietary_log"], 'veg_raw' : [bool , "dietary_log"], 'water' : [bool , "dietary_log"], 'yogurt' : [bool , "dietary_log"], # These are the dietary log "today" fields 'breakfast_tod': [str, "dietary_log_today"], 'breakfast_food': [str, "dietary_log_today"], 'breakfast_amt': [str, "dietary_log_today"], 'lunch_tod': [str, "dietary_log_today"], 'lunch_food': [str, "dietary_log_today"], 'lunch_amt': [str, "dietary_log_today"], 'dinner_tod': [str, "dietary_log_today"], 'dinner_food': [str, "dietary_log_today"], 'dinner_amt': [str, "dietary_log_today"], 'other_food_intake': [str, "dietary_log_today"] } @staticmethod # pylint: disable=W0211,W0613 def _getx(self, propname, *args): if propname in self.__dict: propType = 
self.__dict[propname][0] if type(propType) == str and propType.startswith("DiseaseMeta."): dm_name = propType.replace("DiseaseMeta.", "", 1) value = getattr(self._disease_meta, dm_name) elif propname in self._d: value = self._d[propname] else: value = None else: raise AttributeError("Unknown attribute %s" % propname) return value @staticmethod # pylint: disable=W0211 def _setx(self, value, n): self._d[n] = value @staticmethod def _bindRead(name): # pylint: disable=C0111 def getXXXX(self, *args): return VisitAttribute._getx(self, name, *args) getXXXX.__name__ = name return getXXXX @staticmethod def _bindWrite(name, t): # pylint: disable=C0111 def setXXXX(self, val): func = VisitAttribute._setx if t == str: func = enforce_string(func) elif t == int: func = enforce_int(func) elif t == float: func = enforce_float(func) elif t == list: func = enforce_list(func) elif t == bool: func = enforce_bool(func) elif t == dict: func = enforce_dict(func) func(self, val, name) setXXXX.__name__ = name return setXXXX def __init__(self, *args, **kwargs): """ Constructor for the VisitAttribute class. This initializes the fields specific to the VisitAttribute class, and inherits from the Base class. Args: None """ self.logger = logging.getLogger(self.__module__ + '.' + self.__class__.__name__) self.logger.addHandler(logging.NullHandler()) # An instance of the DieseaseMeta class (composition). self._disease_meta = DiseaseMeta() # A flag to mark whether DiseaseMeta is dirty or not self._dm_dirty = False self._id = None self._tags = [] self._links = {} self._version = None self._d = { "study": None, "tags": [] } for propname, spec in VisitAttribute.__dict.iteritems(): t = spec[0] x = property(VisitAttribute._bindRead(propname), VisitAttribute._bindWrite(propname, t)) setattr(self.__class__, propname, x) super(VisitAttribute, self).__init__(*args, **kwargs) def __setattr__(self, name, value): if name == "_d": self.__dict__[name] = value return if name not in VisitAttribute.__dict: super(VisitAttribute, self).__setattr__(name, value) else: nameType = self.__dict[name][0] if type(nameType) == str and nameType.startswith("DiseaseMeta."): dm_name = nameType.replace("DiseaseMeta.", "", 1) self.logger.debug("Setting %s %s property.", __name__, dm_name) setattr(self._disease_meta, dm_name, value) self.logger.debug("Setting flag that DiseaseMeta is dirty.") self._dm_dirty = True else: func = getattr(self.__class__, name) func.__set__(self, value) @staticmethod def required_fields(): """ A static method. The required fields for the class. Args: None Returns: Tuple of strings of required properties. """ module_logger.debug("In required_fields.") return ("comment", "study", "tags") @staticmethod def load_visit_attr(attrib_data): """ Takes the provided JSON string and converts it to a VisitAttribute object. Args: attrib_data (str): The JSON string to convert Returns: Returns a VisitAttribute instance. 
""" module_logger.info("Creating a template %s.", __name__) attrib = VisitAttribute() module_logger.debug("Filling in %s details.", __name__) attrib._set_id(attrib_data['id']) attrib.links = attrib_data['linkage'] attrib.version = attrib_data['ver'] # Required fields attrib.comment = attrib_data['meta']['comment'] attrib.study = attrib_data['meta']['study'] attrib.survey_id = attrib_data['meta']['survey_id'] attrib.tags = attrib_data['meta']['tags'] # Handle optional fields attrib_metadata = attrib_data['meta'] for (propname, spec) in VisitAttribute.__dict.iteritems(): _cls = spec[0] section = spec[1] ## We need to handle any DiseaseMeta props separately here if not section: continue module_logger.debug("In section %s", section) # Handle any special cases that we need too. if (section == "excercise" or (propname.startswith('breakfast') or propname.startswith('lunch') or propname.startswith('dinner'))): (propbase, propkey) = propname.split('_', 1) propval = attrib_metadata.get(section, {}).get(propbase, {}).get(propkey) elif propname == "sixtym_gluc": propval = attrib_metadata.get(section, {}).get('60m_gluc') elif propname == "thirtym_gluc": propval = attrib_metadata.get(section, {}).get('30m_gluc') else: propval = attrib_metadata.get(section, {}).get(propname) if propval: module_logger.debug("Setting prop %s to %s", propname, propval) setattr(attrib, propname, _cls(propval)) # If any of the DiseaseMeta props exist we can handle them now if attrib_data['meta'].get('disease'): if attrib_data['meta']['disease'].get('study_disease_status'): attrib.disease_study_status = \ attrib_data['meta']['disease'].get('study_disease_status') disease_props = dict(('disease_%s' % key, value) for key, value in attrib_data['meta']['disease']['study_disease'].iteritems()) # This will have a double "disease" on it so we need to correct it. disease_props['disease_ontology_id'] = \ disease_props.pop('disease_disease_ontology_id') map(lambda key: setattr(attrib, key, disease_props.get(key)), disease_props.keys()) module_logger.debug("Returning loaded %s.", __name__) return attrib @staticmethod def load(attrib_id): """ Loads the data for the node from OSDF to this object. If the provided ID does not exist, then an error message is generated. Args: attrib_id (str): The OSDF ID for the document to load. Returns: A VisitAttribute object with all the available OSDF data loaded into it. """ module_logger.debug("In load. Specified ID: %s", attrib_id) session = iHMPSession.get_session() module_logger.info("Got iHMP session.") data = session.get_osdf().get_node(attrib_id) attrib = VisitAttribute.load_visit_attr(data) return attrib def validate(self): """ Validates the current object's data against the schema in the OSDF instance. Args: None Returns: A list of strings, where each string is a validation error that the OSDF instance identified. """ self.logger.debug("In validate.") document = self._get_raw_doc() session = iHMPSession.get_session() self.logger.info("Got iHMP session.") (valid, error_message) = session.get_osdf().validate_node(document) problems = [] if not valid: self.logger.info("Validation did not succeed for %s.", __name__) problems.append(error_message) if 'associated_with' not in self._links.keys(): problems.append("Must add an 'associated_with' link to a visit.") self.logger.debug("Number of validation problems: %s.", len(problems)) return problems def is_valid(self): """ Validates the current object's data/JSON against the current schema in the OSDF instance for the specific object. 
However, unlike validate(), this method does not provide exact error messages, it states if the validation was successful or not. Args: None Returns: True if the data validates, False if the current state of fields in the instance does not validate with OSDF. """ self.logger.debug("In is_valid.") document = self._get_raw_doc() session = iHMPSession.get_session() self.logger.info("Got iHMP session.") (valid, _error_message) = session.get_osdf().validate_node(document) if 'associated_with' not in self._links.keys(): self.logger.error("Must have an 'associated_with' linkage.") valid = False self.logger.debug("Valid? %s", str(valid)) return valid def _get_raw_doc(self): self.logger.debug("In _get_raw_doc.") doc = { 'acl': { 'read': ['all'], 'write': [VisitAttribute.namespace] }, 'linkage': self.links, 'ns': VisitAttribute.namespace, 'node_type': 'visit_attr', 'meta': { 'tags': self.tags, 'comment': self.comment, 'survey_id': self.survey_id, 'study': self.study, 'subtype': self.study } } # Go through each of the properties, and add it to the document # if it contains data for propname, spec in VisitAttribute.__dict.iteritems(): # Don't encode 'special' properties that are delegated, such # as the DiseaseMeta fields... if spec[1] is None: continue value = getattr(self, propname) if value is not None: self.logger.debug("Value found for %s property.", propname) section = spec[1] # Set the section to a dictionary if it doesn't exist yet if section not in doc['meta']: doc['meta'][section] = {} # Handle special cases if propname == "sixtym_gluc": propname = "60m_gluc" elif propname == "thirtym_gluc": propname = "30m_gluc" if propname == "vig_activity_days": if "vig_activity" not in doc['meta']['exercise']: doc['meta']['exercise']['vig_activity'] = {} doc['meta']['exercise']['vig_activity']['days'] = value elif propname == "vig_activity_hours": if "vig_activity" not in doc['meta']['exercise']: doc['meta']['exercise']['vig_activity'] = {} doc['meta']['exercise']['vig_activity']['hours'] = value elif propname == "vig_activity_minutes": if "vig_activity" not in doc['meta']['exercise']: doc['meta']['exercise']['vig_activity'] = {} doc['meta']['exercise']['vig_activity']['minutes'] = value elif propname == "mod_activity_days": if "mod_activity" not in doc['meta']['exercise']: doc['meta']['exercise']['mod_activity'] = {} doc['meta']['exercise']['mod_activity']['days'] = value elif propname == "mod_activity_hours": if "mod_activity" not in doc['meta']['exercise']: doc['meta']['exercise']['mod_activity'] = {} doc['meta']['exercise']['mod_activity']['hours'] = value elif propname == "mod_activity_minutes": if "mod_activity" not in doc['meta']['exercise']: doc['meta']['exercise']['mod_activity'] = {} doc['meta']['exercise']['mod_activity']['minutes'] = value elif propname == "walking_days": if "walking" not in doc['meta']['exercise']: doc['meta']['exercise']['walking'] = {} doc['meta']['exercise']['walking']['days'] = value elif propname == "walking_hours": if "walking" not in doc['meta']['exercise']: doc['meta']['exercise']['walking'] = {} doc['meta']['exercise']['walking']['hours'] = value elif propname == "walking_minutes": if "walking" not in doc['meta']['exercise']: doc['meta']['exercise']['walking'] = {} doc['meta']['exercise']['walking']['minutes'] = value # dietary log "today" elif propname == "breakfast_tod": if "breakfast" not in doc['meta']['dietary_log_today']: doc['meta']['dietary_log_today']['breakfast'] = {} doc['meta']['dietary_log_today']['breakfast']['tod'] = value elif propname == 
"breakfast_food": if "breakfast" not in doc['meta']['dietary_log_today']: doc['meta']['dietary_log_today']['breakfast'] = {} doc['meta']['dietary_log_today']['breakfast']['food'] = value elif propname == "breakfast_amt": if "breakfast" not in doc['meta']['dietary_log_today']: doc['meta']['dietary_log_today']['breakfast'] = {} doc['meta']['dietary_log_today']['breakfast']['amt'] = value elif propname == "lunch_tod": if "lunch" not in doc['meta']['dietary_log_today']: doc['meta']['dietary_log_today']['lunch'] = {} doc['meta']['dietary_log_today']['lunch']['tod'] = value elif propname == "lunch_food": if "lunch" not in doc['meta']['dietary_log_today']: doc['meta']['dietary_log_today']['lunch'] = {} doc['meta']['dietary_log_today']['lunch']['food'] = value elif propname == "lunch_amt": if "lunch" not in doc['meta']['dietary_log_today']: doc['meta']['dietary_log_today']['lunch'] = {} doc['meta']['dietary_log_today']['lunch']['amt'] = value elif propname == "dinner_tod": if "dinner" not in doc['meta']['dietary_log_today']: doc['meta']['dietary_log_today']['dinner'] = {} doc['meta']['dietary_log_today']['dinner']['tod'] = value elif propname == "dinner_food": if "dinner" not in doc['meta']['dietary_log_today']: doc['meta']['dietary_log_today']['dinner'] = {} doc['meta']['dietary_log_today']['dinner']['food'] = value elif propname == "dinner_amt": if "dinner" not in doc['meta']['dietary_log_today']: doc['meta']['dietary_log_today']['dinner'] = {} doc['meta']['dietary_log_today']['dinner']['amt'] = value else: doc['meta'][section][propname] = value # If we've configured fields in the DiseaseMeta class, fill the disease # portion of the document, which is delegated to the DiseaseMeta class. if self._dm_dirty: doc['meta']['disease'] = self._disease_meta._get_raw_doc() if self._id is not None: self.logger.debug("%s object has the OSDF id set.", __name__) doc['id'] = self._id if self._version is not None: self.logger.debug("%s object has the OSDF version set.", __name__) doc['ver'] = self._version return doc @staticmethod def search(query="\"visit_attr\"[node_type]"): """ Searches OSDF for VisitAttribute nodes. Any criteria the user wishes to add is provided by the user in the query language specifications provided in the OSDF documentation. A general format is (including the quotes and brackets): "search criteria"[field to search] If there are any results, they are returned as SampleAttribute instances, otherwise an empty list will be returned. Args: query (str): The query for the OSDF framework. Defaults to the SampleAttribute node type. Returns: Returns an array of VisitAttribute objects. It returns an empty list if there are no results. """ module_logger.debug("In search.") session = iHMPSession.get_session() module_logger.info("Got iHMP session.") if query != '"visit_attr"[node_type]': query = '({}) && "visit_attr"[node_type]'.format(query) module_logger.debug("Submitting OQL query: %s", query) attrib_data = session.get_osdf().oql_query(VisitAttribute.namespace, query) all_results = attrib_data['results'] result_list = list() if len(all_results) > 0: for result in all_results: attrib_result = VisitAttribute.load_visit_attr(result) result_list.append(attrib_result) return result_list def save(self): """ Saves the data to OSDF. The JSON form of the object is not valid, then the data is not saved. If the instance was saved previously, then the node ID is assigned the alphanumeric assigned by the OSDF instance. 
If not saved previously, then the node ID is 'None', and upon a successful save, will be defined as the alphanumeric ID from OSDF. In addition, the document's version is updated when a successful save operation is completed. Args: None Returns; True if successful, False otherwise. """ self.logger.debug("In save.") if not self.is_valid(): self.logger.error("Cannot save, data is invalid.") return False session = iHMPSession.get_session() self.logger.info("Got iHMP session.") success = False if self._id is None: # The document has not yet been saved data = self._get_raw_doc() self.logger.info("Got the raw JSON document.") try: self.logger.info("Attempting to save a new %snode.", __name__) node_id = session.get_osdf().insert_node(data) self.logger.info("Save for %s %s successful.", __name__, node_id) self.logger.info("Setting ID for %s %s.", __name__, node_id) self._set_id(node_id) self._version = 1 success = True except Exception as save_exception: self.logger.error("An error occurred while saving %s." + \ "Reason: %s", __name__, save_exception) else: data = self._get_raw_doc() try: self.logger.info("Attempting to update ID: %s.", self.id) session.get_osdf().edit_node(data) self.logger.info("Update for %s successful.", self.id) success = True except Exception as edit_exception: msg = "An error occurred while updating %s %s. Reason: %s" \ % (__name__, self.id, edit_exception) self.logger.error(msg) return success
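# --- Illustrative sketch (not part of the class) ------------------------------
# A typical create-and-save flow, assuming a configured iHMPSession and an
# existing visit node to link against ("associated_with" is the linkage that
# validate() insists on). The field values and the visit ID are placeholders;
# actual values must satisfy the OSDF schema for visit_attr nodes.
def example_save_visit_attribute(visit_id):
    attrib = VisitAttribute()
    # Required fields (see required_fields()).
    attrib.comment = "week 4 follow-up"
    attrib.study = "prediabetes"
    attrib.survey_id = "S-0001"
    attrib.tags = ["example"]
    # Optional, section-scoped fields are set as plain attributes.
    attrib.age = 42
    attrib.bmi = 23.5
    attrib.links = {"associated_with": [visit_id]}
    if not attrib.save():
        raise RuntimeError("save failed: %s" % attrib.validate())
    return attrib.id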
from __future__ import unicode_literals import codecs import os import re from django.conf import settings from django.core.management.base import CommandError from django.db import models from django.db.models import get_models from django.utils._os import upath def sql_create(app, style, connection): "Returns a list of the CREATE TABLE SQL statements for the given app." if connection.settings_dict['ENGINE'] == 'django.db.backends.dummy': # This must be the "dummy" database backend, which means the user # hasn't set ENGINE for the database. raise CommandError("Django doesn't know which syntax to use for your SQL statements,\n" + "because you haven't properly specified the ENGINE setting for the database.\n" + "see: https://docs.djangoproject.com/en/dev/ref/settings/#databases") # Get installed models, so we generate REFERENCES right. # We trim models from the current app so that the sqlreset command does not # generate invalid SQL (leaving models out of known_models is harmless, so # we can be conservative). app_models = models.get_models(app, include_auto_created=True) final_output = [] tables = connection.introspection.table_names() known_models = set([model for model in connection.introspection.installed_models(tables) if model not in app_models]) pending_references = {} for model in app_models: output, references = connection.creation.sql_create_model(model, style, known_models) final_output.extend(output) for refto, refs in references.items(): pending_references.setdefault(refto, []).extend(refs) if refto in known_models: final_output.extend(connection.creation.sql_for_pending_references(refto, style, pending_references)) final_output.extend(connection.creation.sql_for_pending_references(model, style, pending_references)) # Keep track of the fact that we've created the table for this model. known_models.add(model) # Handle references to tables that are from other apps # but don't exist physically. not_installed_models = set(pending_references.keys()) if not_installed_models: alter_sql = [] for model in not_installed_models: alter_sql.extend(['-- ' + sql for sql in connection.creation.sql_for_pending_references(model, style, pending_references)]) if alter_sql: final_output.append('-- The following references should be added but depend on non-existent tables:') final_output.extend(alter_sql) return final_output def sql_delete(app, style, connection): "Returns a list of the DROP TABLE SQL statements for the given app." # This should work even if a connection isn't available try: cursor = connection.cursor() except: cursor = None # Figure out which tables already exist if cursor: table_names = connection.introspection.table_names(cursor) else: table_names = [] output = [] # Output DROP TABLE statements for standard application tables. 
to_delete = set() references_to_delete = {} app_models = models.get_models(app, include_auto_created=True) for model in app_models: if cursor and connection.introspection.table_name_converter(model._meta.db_table) in table_names: # The table exists, so it needs to be dropped opts = model._meta for f in opts.local_fields: if f.rel and f.rel.to not in to_delete: references_to_delete.setdefault(f.rel.to, []).append((model, f)) to_delete.add(model) for model in app_models: if connection.introspection.table_name_converter(model._meta.db_table) in table_names: output.extend(connection.creation.sql_destroy_model(model, references_to_delete, style)) # Close database connection explicitly, in case this output is being piped # directly into a database client, to avoid locking issues. if cursor: cursor.close() connection.close() return output[::-1] # Reverse it, to deal with table dependencies. def sql_flush(style, connection, only_django=False, reset_sequences=True, allow_cascade=False): """ Returns a list of the SQL statements used to flush the database. If only_django is True, then only table names that have associated Django models and are in INSTALLED_APPS will be included. """ if only_django: tables = connection.introspection.django_table_names(only_existing=True) else: tables = connection.introspection.table_names() seqs = connection.introspection.sequence_list() if reset_sequences else () statements = connection.ops.sql_flush(style, tables, seqs, allow_cascade) return statements def sql_custom(app, style, connection): "Returns a list of the custom table modifying SQL statements for the given app." output = [] app_models = get_models(app) for model in app_models: output.extend(custom_sql_for_model(model, style, connection)) return output def sql_indexes(app, style, connection): "Returns a list of the CREATE INDEX SQL statements for all models in the given app." output = [] for model in models.get_models(app, include_auto_created=True): output.extend(connection.creation.sql_indexes_for_model(model, style)) return output def sql_destroy_indexes(app, style, connection): "Returns a list of the DROP INDEX SQL statements for all models in the given app." output = [] for model in models.get_models(app, include_auto_created=True): output.extend(connection.creation.sql_destroy_indexes_for_model(model, style)) return output def sql_all(app, style, connection): "Returns a list of CREATE TABLE SQL, initial-data inserts, and CREATE INDEX SQL for the given module." return sql_create(app, style, connection) + sql_custom(app, style, connection) + sql_indexes(app, style, connection) def _split_statements(content): comment_re = re.compile(r"^((?:'[^']*'|[^'])*?)--.*$") statements = [] statement = [] for line in content.split("\n"): cleaned_line = comment_re.sub(r"\1", line).strip() if not cleaned_line: continue statement.append(cleaned_line) if cleaned_line.endswith(";"): statements.append(" ".join(statement)) statement = [] return statements def custom_sql_for_model(model, style, connection): opts = model._meta app_dir = os.path.normpath(os.path.join(os.path.dirname(upath(models.get_app(model._meta.app_label).__file__)), 'sql')) output = [] # Post-creation SQL should come before any initial SQL data is loaded. # However, this should not be done for models that are unmanaged or # for fields that are part of a parent model (via model inheritance). 
    if opts.managed:
        post_sql_fields = [f for f in opts.local_fields if hasattr(f, 'post_create_sql')]
        for f in post_sql_fields:
            output.extend(f.post_create_sql(style, model._meta.db_table))

    # Find custom SQL, if it's available.
    backend_name = connection.settings_dict['ENGINE'].split('.')[-1]
    sql_files = [os.path.join(app_dir, "%s.%s.sql" % (opts.model_name, backend_name)),
                 os.path.join(app_dir, "%s.sql" % opts.model_name)]
    for sql_file in sql_files:
        if os.path.exists(sql_file):
            with codecs.open(sql_file, 'U', encoding=settings.FILE_CHARSET) as fp:
                # Some backends can't execute more than one SQL statement at a time,
                # so split into separate statements.
                output.extend(_split_statements(fp.read()))
    return output


def emit_pre_sync_signal(create_models, verbosity, interactive, db):
    # Emit the pre_sync signal for every application.
    for app in models.get_apps():
        app_name = app.__name__.split('.')[-2]
        if verbosity >= 2:
            print("Running pre-sync handlers for application %s" % app_name)
        models.signals.pre_syncdb.send(sender=app, app=app,
                                       create_models=create_models,
                                       verbosity=verbosity,
                                       interactive=interactive,
                                       db=db)


def emit_post_sync_signal(created_models, verbosity, interactive, db):
    # Emit the post_sync signal for every application.
    for app in models.get_apps():
        app_name = app.__name__.split('.')[-2]
        if verbosity >= 2:
            print("Running post-sync handlers for application %s" % app_name)
        models.signals.post_syncdb.send(sender=app, app=app,
                                        created_models=created_models,
                                        verbosity=verbosity,
                                        interactive=interactive,
                                        db=db)
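# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): exercising the pure
# helper _split_statements defined above. The SQL text is hypothetical, and the
# import path assumes this module is importable as django.core.management.sql,
# as it is in the Django release this file comes from.
# ---------------------------------------------------------------------------
from django.core.management.sql import _split_statements

sample_sql = """
-- seed data for a hypothetical demo app
INSERT INTO demo_item (name) VALUES ('first');
INSERT INTO demo_item (name) VALUES ('second');  -- trailing comment is stripped
"""

for statement in _split_statements(sample_sql):
    print(statement)
# Prints the two INSERT statements on separate lines, with '--' comments removed.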
#!/usr/bin/env python3 # # Copyright (c) 2016, The OpenThread Authors. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # 3. Neither the name of the copyright holder nor the # names of its contributors may be used to endorse or promote products # derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. # import io import random import string import struct import unittest from ipaddress import ip_address from ipv6 import ( ICMPv6Header, UDPHeader, IPv6Header, IPv6PacketFactory, UDPDatagram, UDPDatagramFactory, ICMPv6Factory, HopByHopFactory, MPLOptionFactory, ICMPv6, HopByHopOptionHeader, HopByHopOption, HopByHop, MPLOption, IPv6Packet, ICMPv6EchoBody, BytesPayload, ICMPv6EchoBodyFactory, UpperLayerProtocol, UDPHeaderFactory, HopByHopOptionsFactory, ICMPv6DestinationUnreachableFactory, BytesPayloadFactory, ICMPv6DestinationUnreachable, UdpBasedOnSrcDstPortsPayloadFactory, FragmentHeader, ) import common class HopByHopOptionBytesValue: """ Test helper class """ _value = "value" def __init__(self, _bytes): self.bytes = _bytes def to_bytes(self): return self.bytes def to_dict(self, d=None): d = d if d is not None else dict() d[self._value] = self.bytes return d def __len__(self): return len(self.bytes) class ICMPv6BytesBody: """ Test helper class """ _icmp_body = "icmp_body" def __init__(self, _bytes): self.bytes = _bytes def to_bytes(self): return self.bytes def to_dict(self, d=None): d[self._icmp_body] = self.bytes return d def __len__(self): return len(self.bytes) class ICMPv6BytesBodyFactory: """ Test helper class """ def parse(self, data, context): return ICMPv6BytesBody(data.read()) class DummyHeader: def __init__(self): self.checksum = 0 class DummyUpperLayerProtocol(UpperLayerProtocol): def __init__(self, header, data, _type): super(DummyUpperLayerProtocol, self).__init__(header) self._data = data self._type = _type @property def type(self): return self._type def to_bytes(self): return self._data def __len__(self): return len(self._data) def any_uint(bits): return random.randint(0, (1 << bits) - 1) def any_type(): return any_uint(8) def any_code(): return any_uint(8) def any_checksum(): return any_uint(16) def any_fragment_offset(): return any_uint(13) def any_bool(): return (any_uint(1) == 1) def any_fragment_identification(): return any_uint(32) 
def any_icmp_payload(_type, code, checksum, body): return bytearray([_type, code, (checksum >> 8) & 0xff, checksum & 0xff]) + body def any_udp_payload(src_port, dst_port, payload, checksum): payload_len = len(payload) + 8 return bytearray([(src_port >> 8) & 0xff, src_port & 0xff, (dst_port >> 8) & 0xff, dst_port & 0xff, (payload_len >> 8) & 0xff, payload_len & 0xff, (checksum >> 8) & 0xff, checksum & 0xff]) + payload def any_hop_by_hop_payload(next_header, hdr_ext_len, payload): return bytearray([next_header, hdr_ext_len]) + payload def any_body(): length = any_uint(8) body = "".join([ random.choice(string.ascii_letters + string.digits + string.hexdigits) for _ in range(length) ]) return bytearray(body.encode("utf-8")) def any_payload(): length = any_uint(8) payload = "".join([random.choice(string.printable) for _ in range(length)]) return bytearray(payload.encode("utf-8")) def any_ip_address(): return bytearray([0xfe, 0x80]) + bytearray([0x00] * 6) + bytearray( [random.getrandbits(8)] * 8) def any_port(): return any_uint(16) def any_mpl_opt_type(): return any_uint(8) def any_mpl_opt_data_len(): return any_uint(8) def any_mpl_S(): return any_uint(2) def any_mpl_M(): return any_uint(1) def any_mpl_V(): return any_uint(1) def any_mpl_sequence(): return any_uint(8) def any_mpl_seed_id(S): length = MPLOption._seed_id_length[S] seed_id = "".join([ random.choice(string.ascii_letters + string.digits + string.hexdigits) for _ in range(length) ]) return bytearray(seed_id.encode("utf-8")) def any_next_header(): return any_uint(8) def any_traffic_class(): return any_uint(8) def any_flow_label(): return any_uint(20) def any_hop_limit(): return any_uint(8) def any_payload_length(): return any_uint(16) def any_hdr_ext_len(): return any_uint(3) def any_length(): return any_uint(4) def any_str(length=8): s = "".join(random.choice(string.printable) for _ in range(length)) return s.encode("utf-8") def any_bytes(length=4): return bytearray(any_str(length)) def any_dict(keys_count=4): keys = [any_str() for _ in range(keys_count)] d = {} for key in keys: d[key] = any_bytes() return d def any_mpl_option(): S = any_mpl_S() M = any_mpl_M() V = any_mpl_V() sequence = any_mpl_sequence() seed_id = any_mpl_seed_id(S) return MPLOption(S, M, V, sequence, seed_id) def any_hop_by_hop_bytes_option_header(length=4): _type = any_type() # 0 or 1 means padding, so type have to be higher than 1 while _type <= 1: _type = any_type() return HopByHopOptionHeader(_type, length) def any_hop_by_hop_bytes_value(length=2): return HopByHopOptionBytesValue(any_bytes(length)) def any_hop_by_hop_bytes_option(): length = any_length() return HopByHopOption(any_hop_by_hop_bytes_option_header(length), any_hop_by_hop_bytes_value(length)) def any_hop_by_hop_mpl_option(): mpl_option = any_mpl_option() return HopByHopOption(any_hop_by_hop_bytes_option_header(len(mpl_option)), mpl_option) def any_identifier(): return any_uint(16) def any_sequence_number(): return any_uint(16) def any_data(): return any_bytes(random.randint(0, 32)) def any_upper_layer_payload(data, _type): return DummyUpperLayerProtocol(DummyHeader(), data, _type) def any_extension_headers(): return [] def any_message_info(): return common.MessageInfo() class TestIPv6Header(unittest.TestCase): def test_should_convert_IPv6_header_to_bytes_when_to_bytes_method_is_called( self): # GIVEN traffic_class = any_traffic_class() flow_label = any_flow_label() payload_length = any_payload_length() next_header = any_next_header() hop_limit = any_hop_limit() source_address = any_ip_address() 
destination_address = any_ip_address() ipv6_header = IPv6Header(source_address, destination_address, traffic_class, flow_label, hop_limit, payload_length, next_header) # WHEN data = ipv6_header.to_bytes() # THEN self.assertEqual(6, data[0] >> 4) self.assertEqual(traffic_class, ((data[0] << 8 | data[1]) >> 4) & 0xff) self.assertEqual(flow_label, ((data[1] & 0x0F) << 16) | (data[2] << 8) | data[3]) self.assertEqual(payload_length, struct.unpack("!H", data[4:6])[0]) self.assertEqual(next_header, data[6]) self.assertEqual(hop_limit, data[7]) self.assertEqual(source_address, data[8:24]) self.assertEqual(destination_address, data[24:40]) def test_should_create_IPv6Header_when_from_bytes_classmethod_is_called( self): # GIVEN traffic_class = any_traffic_class() flow_label = any_flow_label() payload_length = any_payload_length() next_header = any_next_header() hop_limit = any_hop_limit() source_address = any_ip_address() destination_address = any_ip_address() data = bytearray([(6 << 4) | (traffic_class >> 4), (traffic_class & 0xF) << 4 | (flow_label >> 16) & 0xF, (flow_label >> 8) & 0xff, flow_label & 0xff, payload_length >> 8, payload_length & 0xff, next_header, hop_limit]) data += ip_address(bytes(source_address)).packed + ip_address( bytes(destination_address)).packed # WHEN ipv6_header = IPv6Header.from_bytes(io.BytesIO(data)) # THEN self.assertEqual(6, ipv6_header.version) self.assertEqual(traffic_class, ipv6_header.traffic_class) self.assertEqual(flow_label, ipv6_header.flow_label) self.assertEqual(payload_length, ipv6_header.payload_length) self.assertEqual(next_header, ipv6_header.next_header) self.assertEqual(hop_limit, ipv6_header.hop_limit) self.assertEqual(source_address, ipv6_header.source_address.packed) self.assertEqual(destination_address, ipv6_header.destination_address.packed) def test_should_return_proper_header_length_when_IPv6Packet_object_is_called_in_len( self): # GIVEN ipv6_header = IPv6Header(any_traffic_class(), any_flow_label(), any_payload_length(), any_next_header(), any_hop_limit(), any_ip_address(), any_ip_address()) # WHEN ipv6_header_length = len(ipv6_header) # THEN self.assertEqual(40, ipv6_header_length) class TestUDPHeader(unittest.TestCase): def test_should_convert_UDP_header_to_bytes_when_to_bytes_method_is_called( self): # GIVEN src_port = any_port() dst_port = any_port() payload_length = any_payload_length() checksum = any_checksum() udp_header = UDPHeader(src_port, dst_port, payload_length, checksum) # WHEN data = udp_header.to_bytes() # THEN self.assertEqual(src_port, struct.unpack("!H", data[0:2])[0]) self.assertEqual(dst_port, struct.unpack("!H", data[2:4])[0]) self.assertEqual(payload_length, struct.unpack("!H", data[4:6])[0]) self.assertEqual(checksum, struct.unpack("!H", data[6:])[0]) def test_should_create_UDPHeader_when_from_bytes_classmethod_is_called( self): # GIVEN src_port = any_port() dst_port = any_port() payload_length = any_payload_length() checksum = any_checksum() data = struct.pack("!H", src_port) + struct.pack("!H", dst_port) + \ struct.pack("!H", payload_length) + struct.pack("!H", checksum) # WHEN udp_header = UDPHeader.from_bytes(io.BytesIO(data)) # THEN self.assertEqual(src_port, udp_header.src_port) self.assertEqual(dst_port, udp_header.dst_port) self.assertEqual(payload_length, udp_header.payload_length) self.assertEqual(checksum, udp_header.checksum) def test_should_return_proper_header_length_when_UDPHeader_object_is_called_in_len( self): # GIVEN udp_header = UDPHeader(any_port(), any_port(), any_payload_length(), any_checksum()) 
# WHEN udp_header_length = len(udp_header) # THEN self.assertEqual(8, udp_header_length) def test_should_return_17_when_type_property_is_called(self): # GIVEN udp_header = UDPHeader(any_port(), any_port(), any_payload_length(), any_checksum()) # THEN self.assertEqual(17, udp_header.type) class TestICMPv6Header(unittest.TestCase): def test_should_convert_icmp_message_header_to_bytes_when_to_bytes_method_is_called( self): # GIVEN _type = any_type() code = any_code() checksum = any_checksum() icmpv6_header = ICMPv6Header(_type, code, checksum) # WHEN data = icmpv6_header.to_bytes() # THEN self.assertEqual(_type, data[0]) self.assertEqual(code, data[1]) self.assertEqual(checksum, struct.unpack("!H", data[2:])[0]) def test_should_create_ICMPv6Header_when_to_bytes_classmethod_is_called( self): # GIVEN _type = any_type() code = any_code() checksum = any_checksum() data = bytearray([_type, code]) + struct.pack("!H", checksum) # WHEN icmpv6_header = ICMPv6Header.from_bytes(io.BytesIO(data)) # THEN self.assertEqual(_type, icmpv6_header.type) self.assertEqual(code, icmpv6_header.code) self.assertEqual(checksum, icmpv6_header.checksum) def test_should_return_proper_header_length_when_ICMPv6Header_object_is_called_in_len( self): # GIVEN icmpv6_header = ICMPv6Header(any_type(), any_code(), any_checksum()) # WHEN icmpv6_header_length = len(icmpv6_header) # THEN self.assertEqual(4, icmpv6_header_length) class TestIPv6Packet(unittest.TestCase): def test_should_build_IPv6Packet_with_ICMP_payload_from_well_know_values_when_to_bytes_method_is_called( self): # GIVEN ipv6_packet = IPv6Packet( IPv6Header(source_address="fd00:1234:4555::ff:fe00:1800", destination_address="ff03::1"), ICMPv6( ICMPv6Header(128, 0), ICMPv6EchoBody( 0, 2, bytearray([ 0x80, 0x00, 0xc7, 0xbf, 0x00, 0x00, 0x00, 0x01, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41 ]))), [ HopByHop(options=[ HopByHopOption( HopByHopOptionHeader(_type=0x6d), MPLOption(S=1, M=0, V=0, sequence=2, seed_id=bytearray([0x00, 0x18]))) ]) ]) # WHEN ipv6_packet_bytes = ipv6_packet.to_bytes() # THEN expected_ipv6_packet_bytes = bytearray([ 0x60, 0x00, 0x00, 0x00, 0x00, 0x22, 0x00, 0x40, 0xfd, 0x00, 0x12, 0x34, 0x45, 0x55, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xfe, 0x00, 0x18, 0x00, 0xff, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x3a, 0x00, 0x6d, 0x04, 0x40, 0x02, 0x00, 0x18, 0x80, 0x00, 0x87, 0x12, 0x00, 0x00, 0x00, 0x02, 0x80, 0x00, 0xc7, 0xbf, 0x00, 0x00, 0x00, 0x01, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41 ]) self.assertEqual(expected_ipv6_packet_bytes, ipv6_packet_bytes) def test_should_build_IPv6Packet_with_UDP_payload_from_well_know_values_when_to_bytes_method_is_called( self): # GIVEN ipv6_header = IPv6Header(source_address="fe80::1", destination_address="ff02::2", hop_limit=255) udp_dgram = UDPDatagram( UDPHeader(src_port=19788, dst_port=19788), BytesPayload( bytearray([ 0x00, 0x15, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x09, 0x01, 0x01, 0x0b, 0x03, 0x04, 0xc6, 0x69, 0x73, 0x51, 0x0e, 0x01, 0x80, 0x12, 0x02, 0x00, 0x01, 0xde, 0xad, 0xbe, 0xef ]))) ipv6_packet = IPv6Packet(ipv6_header, udp_dgram) # WHEN ipv6_packet_bytes = ipv6_packet.to_bytes() # THEN expected_ipv6_packet_bytes = bytearray([ 0x60, 0x00, 0x00, 0x00, 0x00, 0x28, 0x11, 0xff, 0xfe, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xff, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x4d, 0x4c, 0x4d, 0x4c, 0x00, 0x28, 
0xe9, 0xf4, 0x00, 0x15, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x09, 0x01, 0x01, 0x0b, 0x03, 0x04, 0xc6, 0x69, 0x73, 0x51, 0x0e, 0x01, 0x80, 0x12, 0x02, 0x00, 0x01, 0xde, 0xad, 0xbe, 0xef ]) self.assertEqual(expected_ipv6_packet_bytes, ipv6_packet_bytes) class TestIPv6PacketFactory(unittest.TestCase): def test_should_create_IPv6Packet_with_MPL_and_ICMP_when_to_bytes_method_is_called( self): # GIVEN ipv6_packet_bytes = bytearray([ 0x60, 0x00, 0x00, 0x00, 0x00, 0x22, 0x00, 0x40, 0xfd, 0x00, 0x12, 0x34, 0x45, 0x55, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xfe, 0x00, 0x18, 0x00, 0xff, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x3a, 0x00, 0x6d, 0x04, 0x40, 0x02, 0x00, 0x18, 0x80, 0x00, 0x87, 0x12, 0x00, 0x00, 0x00, 0x02, 0x80, 0x00, 0xc7, 0xbf, 0x00, 0x00, 0x00, 0x01, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41 ]) ipv6_factory = IPv6PacketFactory( ehf={ 0: HopByHopFactory( hop_by_hop_options_factory=HopByHopOptionsFactory( options_factories={109: MPLOptionFactory()})) }, ulpf={ 58: ICMPv6Factory(body_factories={128: ICMPv6EchoBodyFactory()}) }) # WHEN ipv6_packet = ipv6_factory.parse(io.BytesIO(ipv6_packet_bytes), any_message_info()) # THEN self.assertEqual('fd00:1234:4555::ff:fe00:1800', ipv6_packet.ipv6_header.source_address.compressed) self.assertEqual('ff03::1', ipv6_packet.ipv6_header.destination_address.compressed) self.assertEqual(64, ipv6_packet.ipv6_header.hop_limit) self.assertEqual(0, ipv6_packet.ipv6_header.next_header) self.assertEqual(34, ipv6_packet.ipv6_header.payload_length) self.assertEqual(0, ipv6_packet.ipv6_header.flow_label) self.assertEqual(0, ipv6_packet.ipv6_header.traffic_class) self.assertEqual(6, ipv6_packet.ipv6_header.version) self.assertEqual(1, ipv6_packet.extension_headers[0].options[0].value.S) self.assertEqual(0, ipv6_packet.extension_headers[0].options[0].value.M) self.assertEqual(0, ipv6_packet.extension_headers[0].options[0].value.V) self.assertEqual( 2, ipv6_packet.extension_headers[0].options[0].value.sequence) self.assertEqual( bytearray([0x00, 0x18]), ipv6_packet.extension_headers[0].options[0].value.seed_id) self.assertEqual(34578, ipv6_packet.upper_layer_protocol.header.checksum) self.assertEqual(128, ipv6_packet.upper_layer_protocol.header.type) self.assertEqual(0, ipv6_packet.upper_layer_protocol.header.code) self.assertEqual(0, ipv6_packet.upper_layer_protocol.body.identifier) self.assertEqual(2, ipv6_packet.upper_layer_protocol.body.sequence_number) self.assertEqual(b'\x80\x00\xc7\xbf\x00\x00\x00\x01AAAAAAAAAA', ipv6_packet.upper_layer_protocol.body.data) def test_should_create_IPv6Packet_without_any_extension_header_with_ICMP_when_to_bytes_method_is_called( self): # GIVEN ipv6_packet_bytes = bytearray([ 0x60, 0x00, 0x00, 0x00, 0x00, 0x1A, 0x3A, 0x40, 0xfd, 0x00, 0x12, 0x34, 0x45, 0x55, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xfe, 0x00, 0x18, 0x00, 0xff, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x80, 0x00, 0x87, 0x12, 0x00, 0x00, 0x00, 0x02, 0x80, 0x00, 0xc7, 0xbf, 0x00, 0x00, 0x00, 0x01, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41 ]) ipv6_factory = IPv6PacketFactory(ulpf={ 58: ICMPv6Factory(body_factories={128: ICMPv6EchoBodyFactory()}) }) # WHEN ipv6_packet = ipv6_factory.parse(io.BytesIO(ipv6_packet_bytes), any_message_info()) ipv6_packet._validate_checksum() # THEN self.assertEqual('fd00:1234:4555::ff:fe00:1800', ipv6_packet.ipv6_header.source_address.compressed) self.assertEqual('ff03::1', 
ipv6_packet.ipv6_header.destination_address.compressed) self.assertEqual(64, ipv6_packet.ipv6_header.hop_limit) self.assertEqual(58, ipv6_packet.ipv6_header.next_header) self.assertEqual(26, ipv6_packet.ipv6_header.payload_length) self.assertEqual(0, ipv6_packet.ipv6_header.flow_label) self.assertEqual(0, ipv6_packet.ipv6_header.traffic_class) self.assertEqual(6, ipv6_packet.ipv6_header.version) self.assertEqual(34578, ipv6_packet.upper_layer_protocol.header.checksum) self.assertEqual(128, ipv6_packet.upper_layer_protocol.header.type) self.assertEqual(0, ipv6_packet.upper_layer_protocol.header.code) self.assertEqual(0, ipv6_packet.upper_layer_protocol.body.identifier) self.assertEqual(2, ipv6_packet.upper_layer_protocol.body.sequence_number) self.assertEqual(b'\x80\x00\xc7\xbf\x00\x00\x00\x01AAAAAAAAAA', ipv6_packet.upper_layer_protocol.body.data) def test_should_set_message_info_field_when_to_bytes_method_is_called(self): # GIVEN ipv6_packet_data = bytearray([ 0x60, 0x00, 0x00, 0x00, 0x00, 0x1A, 0x3A, 0x40, 0xfd, 0x00, 0x12, 0x34, 0x45, 0x55, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xfe, 0x00, 0x18, 0x00, 0xff, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x80, 0x00, 0x87, 0x12, 0x00, 0x00, 0x00, 0x02, 0x80, 0x00, 0xc7, 0xbf, 0x00, 0x00, 0x00, 0x01, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41 ]) message_info = any_message_info() message_info.source_ipv6 = "ff::" message_info.destination_address = "ff::" factory = IPv6PacketFactory(ulpf={ 58: ICMPv6Factory(body_factories={128: ICMPv6EchoBodyFactory()}) }) # WHEN factory.parse(io.BytesIO(ipv6_packet_data), message_info) # THEN self.assertEqual("fd00:1234:4555::ff:fe00:1800", message_info.source_ipv6.compressed) self.assertEqual("ff03::1", message_info.destination_ipv6.compressed) class TestUDPDatagram(unittest.TestCase): def test_should_creates_bytes_from_UDPHeader_and_payload_when_to_bytes_method_is_called( self): # GIVEN src_port = any_port() dst_port = any_port() checksum = any_checksum() payload = any_payload() payload_length = len( payload ) + 8 # UDP length consists of UDP header length and payload length udp_header = UDPHeader(src_port, dst_port, payload_length, checksum) udp_payload = BytesPayload(payload) udp_dgram = UDPDatagram(udp_header, udp_payload) # WHEN udp_dgram_bytes = udp_dgram.to_bytes() # THEN expected_udp_dgram_bytes = struct.pack("!H", src_port) + struct.pack("!H", dst_port) + \ struct.pack("!H", payload_length) + struct.pack("!H", checksum) + payload self.assertEqual(expected_udp_dgram_bytes, udp_dgram_bytes) class TestIPv6FragmentHeader(unittest.TestCase): def test_shold_convert_IPv6_fragment_header_to_bytes_when_to_bytes_method_is_called( self): # GIVEN type = any_type() offset = any_fragment_offset() more_flag = any_bool() identification = any_fragment_identification() ipv6_fragment_header = FragmentHeader(type, offset, more_flag, identification) # WHEN actual = ipv6_fragment_header.to_bytes() # THEN expected = bytearray([type, 0x00, offset >> 5, ((offset << 3) & 0xff) | more_flag])\ + struct.pack("!I", identification) self.assertEqual(expected, actual) def test_should_create_FragmentHeader_when_from_bytes_classmethod_is_called( self): # GIVEN type = any_type() offset = any_fragment_offset() more_flag = any_bool() identification = any_fragment_identification() data = bytearray([type, 0x00, offset >> 5, ((offset << 3) & 0xff) | more_flag])\ + struct.pack("!I", identification) # WHEN ipv6_fragment_header = FragmentHeader.from_bytes(io.BytesIO(data)) # THEN 
self.assertEqual(type, ipv6_fragment_header.next_header) self.assertEqual(offset, ipv6_fragment_header.offset) self.assertEqual(more_flag, ipv6_fragment_header.more_flag) self.assertEqual(identification, ipv6_fragment_header.identification) class TestICMPv6(unittest.TestCase): def test_should_creates_bytes_from_ICMPv6Header_and_body_when_to_bytes_method_is_called( self): # GIVEN _type = any_type() code = any_code() checksum = any_checksum() body = any_body() icmpv6_header = ICMPv6Header(_type, code, checksum) icmpv6_body = ICMPv6BytesBody(body) icmpv6_msg = ICMPv6(icmpv6_header, icmpv6_body) # WHEN actual = icmpv6_msg.to_bytes() # THEN expected = bytearray([_type, code]) + struct.pack("!H", checksum) + body self.assertEqual(expected, actual) class TestHopByHop(unittest.TestCase): def _calculate_hdr_ext_len(self, payload_len): count = payload_len // 8 rest = payload_len % 8 if rest != 0: count += 1 if count == 0 and rest == 0: return count return count - 1 def _calculate_required_padding(self, content_length): excess_bytes = content_length & 0x7 if excess_bytes > 0: return 8 - excess_bytes return 0 def create_padding(self, padding_length): if padding_length == 1: return bytearray([0x00]) elif padding_length > 1: padding_length -= 2 return bytearray([0x01, padding_length]) + bytearray( [0x00 for _ in range(padding_length)]) else: return bytearray() def test_should_create_bytes_from_HopByHop_when_to_bytes_method_is_called( self): # GIVEN next_header = any_next_header() hop_by_hop_option = any_hop_by_hop_bytes_option() hdr_ext_len = self._calculate_hdr_ext_len(2 + len(hop_by_hop_option)) hop_by_hop = HopByHop(next_header, [hop_by_hop_option]) # WHEN data = hop_by_hop.to_bytes() # THEN expected_data = bytearray([next_header, hdr_ext_len ]) + hop_by_hop_option.to_bytes() padding_length = self._calculate_required_padding(len(expected_data)) expected_data += self.create_padding(padding_length) self.assertEqual(expected_data, data) class TestMPLOption(unittest.TestCase): def test_should_convert_MPLOption_to_bytes_when_to_bytes_method_is_called( self): # GIVEN S = any_mpl_S() M = any_mpl_M() V = any_mpl_V() sequence = any_mpl_sequence() seed_id = any_mpl_seed_id(S) mpl_option = MPLOption(S, M, V, sequence, seed_id) # WHEN data = mpl_option.to_bytes() # THEN expected_data = bytearray([(S << 6) | (M << 5) | (V << 4), sequence]) + seed_id self.assertEqual(expected_data, data) def test_should_create_MPLOption_when_to_bytes_method_is_called_with_data( self): # GIVEN S = any_mpl_S() M = any_mpl_M() V = any_mpl_V() sequence = any_mpl_sequence() seed_id = any_mpl_seed_id(S) data = bytearray([(S << 6) | (M << 5) | (V << 4), sequence]) + seed_id # WHEN mpl_option = MPLOption.from_bytes(io.BytesIO(data)) # THEN self.assertEqual(S, mpl_option.S) self.assertEqual(M, mpl_option.M) self.assertEqual(V, mpl_option.V) self.assertEqual(sequence, mpl_option.sequence) self.assertEqual(seed_id, mpl_option.seed_id) def test_check_if_mpl_seed_id_length_values_was_not_changed(self): self.assertEqual(0, MPLOption._seed_id_length[0]) self.assertEqual(2, MPLOption._seed_id_length[1]) self.assertEqual(8, MPLOption._seed_id_length[2]) self.assertEqual(16, MPLOption._seed_id_length[3]) def test_should_return_proper_length_when_len_is_called_with_mpl_option_object( self): # GIVEN S = any_mpl_S() M = any_mpl_M() V = any_mpl_V() sequence = any_mpl_sequence() seed_id = any_mpl_seed_id(S) mpl_option = MPLOption(S, M, V, sequence, seed_id) # WHEN mpl_option_length = len(mpl_option) # THEN SMV_and_sequence_length = 2 
self.assertEqual(SMV_and_sequence_length + len(seed_id), mpl_option_length) class TestclassHopByHopOption(unittest.TestCase): def test_should_convert_HopByHopOption_to_bytes_when_to_bytes_method_is_called( self): # GIVEN length = any_length() header = any_hop_by_hop_bytes_option_header(length) value = any_hop_by_hop_bytes_value(length) hop_by_hop_option = HopByHopOption(header, value) # WHEN data = hop_by_hop_option.to_bytes() # THEN expected_data = header.to_bytes() + value.to_bytes() self.assertEqual(expected_data, data) def test_should_return_length_of_HopByHopOption_when_len_is_called_with_hop_by_hop_option_object( self): # GIVEN length = any_length() header = any_hop_by_hop_bytes_option_header(length) value = any_hop_by_hop_bytes_value(length) hop_by_hop_option = HopByHopOption(header, value) # WHEN hop_by_hop_option_length = len(hop_by_hop_option) # THEN header_length = 2 expected_hop_by_hop_option_length = header_length + length self.assertEqual(expected_hop_by_hop_option_length, hop_by_hop_option_length) class TestHopByHopOptionHeader(unittest.TestCase): def test_should_convert_HopByHopOptionHeader_to_bytes_when_to_bytes_method_is_called( self): # GIVEN _type = any_type() length = any_length() hop_by_hop_option_header = HopByHopOptionHeader(_type, length) # WHEN data = hop_by_hop_option_header.to_bytes() # THEN expected_data = bytearray([_type, length]) self.assertEqual(expected_data, data) def test_should_create_HopByHopOptionHeader_when_to_bytes_method_is_called_with_data( self): # GIVEN _type = any_type() length = any_length() data = bytearray([_type, length]) # WHEN option_header = HopByHopOptionHeader.from_bytes(io.BytesIO(data)) # THEN self.assertEqual(_type, option_header.type) self.assertEqual(length, option_header.length) def test_should_return_proper_length_when_len_is_called_with_HopByHopOptionHeader_object( self): # GIVEN _type = any_type() length = any_length() option_header = HopByHopOptionHeader(_type, length) # WHEN option_header_length = len(option_header) # THEN expected_option_header_length = 2 self.assertEqual(expected_option_header_length, option_header_length) class TestHopByHopFactory(unittest.TestCase): def _calculate_hdr_ext_len(self, payload_length): count = payload_length >> 3 if (payload_length & 0x7) == 0 and count > 0: return count - 1 return count def padding(self, content_length): excess_bytes = content_length & 0x7 if excess_bytes > 0: padding_length = 8 - excess_bytes if padding_length == 1: return bytearray([0x00]) elif padding_length > 1: padding_length -= 2 return bytearray([0x01, padding_length]) + bytearray( [0x00 for _ in range(padding_length)]) return bytearray() def test_should_create_HopByHop_object_instance_when_to_bytes_method_is_called_with_data( self): # GIVEN hop_by_hop_option = any_hop_by_hop_mpl_option() hop_by_hop_option_type = hop_by_hop_option.header.type next_header = any_next_header() hdr_ext_len = self._calculate_hdr_ext_len(2 + len(hop_by_hop_option)) hop_by_hop_factory = HopByHopFactory( hop_by_hop_options_factory=HopByHopOptionsFactory( options_factories={hop_by_hop_option_type: MPLOptionFactory()})) data = bytearray([next_header, hdr_ext_len ]) + hop_by_hop_option.to_bytes() data += self.padding(len(data)) # WHEN hop_by_hop = hop_by_hop_factory.parse(io.BytesIO(data), any_message_info()) # THEN self.assertEqual(hop_by_hop_option.value.S, hop_by_hop.options[0].value.S) self.assertEqual(hop_by_hop_option.value.V, hop_by_hop.options[0].value.V) self.assertEqual(hop_by_hop_option.value.M, hop_by_hop.options[0].value.M) 
self.assertEqual(hop_by_hop_option.value.sequence, hop_by_hop.options[0].value.sequence) self.assertEqual(hop_by_hop_option.value.seed_id, hop_by_hop.options[0].value.seed_id) def test_should_raise_RuntimeError_when_no_option_factory_is_set_and_parse_method_is_called( self): # GIVEN hop_by_hop_option = any_hop_by_hop_mpl_option() next_header = any_next_header() hdr_ext_len = self._calculate_hdr_ext_len(2 + len(hop_by_hop_option)) hop_by_hop_factory = HopByHopFactory( hop_by_hop_options_factory=HopByHopOptionsFactory()) data = bytes([next_header, hdr_ext_len]) + hop_by_hop_option.to_bytes() data += self.padding(len(data)) # THEN self.assertRaises(RuntimeError, hop_by_hop_factory.parse, io.BytesIO(data), any_message_info()) class TestMPLOptionFactory(unittest.TestCase): def test_should_produce_MPLOption_from_bytes_when_to_bytes_method_is_called_with_data( self): # GIVEN S = any_mpl_S() M = any_mpl_M() V = any_mpl_V() sequence = any_mpl_sequence() seed_id = any_mpl_seed_id(S) SMV = (S << 6) | (M << 5) | (V << 4) data = bytearray([SMV, sequence]) + seed_id factory = MPLOptionFactory() # WHEN mpl_opt = factory.parse(io.BytesIO(data), any_message_info()) # THEN self.assertEqual(mpl_opt.S, S) self.assertEqual(mpl_opt.M, M) self.assertEqual(mpl_opt.V, V) self.assertEqual(mpl_opt.sequence, sequence) self.assertEqual(mpl_opt.seed_id, seed_id) class TestUdpBasedOnSrcDstPortsPayloadFactory(unittest.TestCase): def test_should_create_payload_from_data_when_src_port_factory_is_defined_and_parse_method_is_called( self): # GIVEN data = any_data() message_info = common.MessageInfo() message_info.src_port = any_port() message_info.dst_port = any_port() factory = UdpBasedOnSrcDstPortsPayloadFactory( src_dst_port_based_payload_factories={ message_info.src_port: BytesPayloadFactory() }) # WHEN actual_data = factory.parse(io.BytesIO(data), message_info) # THEN self.assertEqual(data, actual_data.data) def test_should_create_payload_from_data_when_dst_port_factory_is_defined_and_parse_method_is_called( self): # GIVEN data = any_data() message_info = common.MessageInfo() message_info.src_port = any_port() message_info.dst_port = any_port() factory = UdpBasedOnSrcDstPortsPayloadFactory( src_dst_port_based_payload_factories={ message_info.dst_port: BytesPayloadFactory() }) # WHEN actual_data = factory.parse(io.BytesIO(data), message_info) # THEN self.assertEqual(data, actual_data.data) def test_should_raise_RuntimeError_when_parse_method_is_called_but_required_factory_is_not_defined( self): # GIVEN data = any_data() message_info = common.MessageInfo() message_info.src_port = any_port() message_info.dst_port = any_port() factory = UdpBasedOnSrcDstPortsPayloadFactory( src_dst_port_based_payload_factories={}) # THEN self.assertRaises(RuntimeError, factory.parse, io.BytesIO(data), message_info) class TestUDPDatagramFactory(unittest.TestCase): def test_should_produce_UDPDatagram_from_bytes_when_to_bytes_method_is_called_with_data( self): # GIVEN src_port = any_port() dst_port = any_port() checksum = any_checksum() payload = any_payload() payload_length = len(payload) + len(UDPHeader(0, 0)) data = bytearray([(src_port >> 8), (src_port & 0xff), (dst_port >> 8), (dst_port & 0xff), (payload_length >> 8), (payload_length & 0xff), (checksum >> 8), (checksum & 0xff)]) + payload factory = UDPDatagramFactory(UDPHeaderFactory(), BytesPayloadFactory()) # WHEN udp_dgram = factory.parse(io.BytesIO(data), any_message_info()) # THEN self.assertEqual(udp_dgram.header.src_port, src_port) self.assertEqual(udp_dgram.header.dst_port, 
dst_port) self.assertEqual(udp_dgram.header.payload_length, payload_length) self.assertEqual(udp_dgram.header.checksum, checksum) self.assertEqual(udp_dgram.payload.data, payload) def test_should_set_src_and_dst_port_in_message_info_when_parse_method_is_called( self): # GIVEN message_info = any_message_info() src_port = any_port() dst_port = any_port() checksum = any_checksum() payload = any_payload() payload_length = len(payload) + len(UDPHeader(0, 0)) data = (bytearray([ (src_port >> 8), (src_port & 0xff), (dst_port >> 8), (dst_port & 0xff), (payload_length >> 8), (payload_length & 0xff), (checksum >> 8), (checksum & 0xff), ]) + payload) factory = UDPDatagramFactory(UDPHeaderFactory(), BytesPayloadFactory()) # WHEN factory.parse(io.BytesIO(data), message_info) # THEN self.assertEqual(src_port, message_info.src_port) self.assertEqual(dst_port, message_info.dst_port) class TestICMPv6Factory(unittest.TestCase): def test_should_produce_ICMPv6_from_bytes_when_to_bytes_method_is_called_with_data( self): # GIVEN _type = any_type() code = any_code() checksum = any_checksum() body = any_body() data = bytearray([_type, code, (checksum >> 8), (checksum & 0xff)]) + body factory = ICMPv6Factory( body_factories={_type: ICMPv6BytesBodyFactory()}) # WHEN icmpv6_msg = factory.parse(io.BytesIO(data), any_message_info()) # THEN self.assertEqual(icmpv6_msg.header.type, _type) self.assertEqual(icmpv6_msg.header.code, code) self.assertEqual(icmpv6_msg.header.checksum, checksum) self.assertEqual(icmpv6_msg.body.bytes, body) def test_should_raise_RuntimeError_when_method_parse_is_called_but_body_factory_is_not_present( self): # GIVEN _type = any_type() code = any_code() checksum = any_checksum() body = any_body() data = bytes([_type, code, (checksum >> 8), (checksum & 0xff)]) + body factory = ICMPv6Factory() # WHEN self.assertRaises(RuntimeError, factory.parse, io.BytesIO(data), any_message_info()) class TestBytesPayload(unittest.TestCase): def test_should_create_BytesPayload_when_from_bytes_class_method_is_called( self): # GIVEN data = any_data() # WHEN actual = BytesPayload.from_bytes(data) # THEN self.assertEqual(data, actual.data) def test_should_return_exactly_the_same_data_as_passed_to_constructor_when_to_bytes_method_is_called( self): # GIVEN data = any_data() payload = BytesPayload(data) # WHEN actual = payload.to_bytes() # THEN self.assertEqual(data, actual) def test_should_return_the_same_len_as_data_passed_to_constructor_when_len_is_called_on_BytesPayload_object( self): # GIVEN data = any_data() payload = BytesPayload(data) # WHEN actual = len(payload) # THEN self.assertEqual(len(data), actual) class TestICMPv6EchoBody(unittest.TestCase): def test_convert_ICMPv6_echo_body_to_data_when_to_bytes_method_is_called( self): # GIVEN identifier = any_identifier() sequence_number = any_sequence_number() data = any_data() body = ICMPv6EchoBody(identifier, sequence_number, data) # WHEN actual = body.to_bytes() # THEN expected = bytearray([ identifier >> 8, identifier & 0xff, sequence_number >> 8, sequence_number & 0xff ]) + data self.assertEqual(expected, actual) def test_should_create_ICMPv6EchoBody_from_data_when_from_bytes_classmethod_is_called( self): # GIVEN identifier = any_identifier() sequence_number = any_sequence_number() body_data = any_data() data = bytearray([(identifier >> 8), (identifier & 0xff), (sequence_number >> 8), (sequence_number & 0xff)]) data += body_data # WHEN actual = ICMPv6EchoBody.from_bytes(io.BytesIO(data)) # THEN self.assertEqual(identifier, actual.identifier) 
self.assertEqual(sequence_number, actual.sequence_number) self.assertEqual(body_data, actual.data) def test_should_build_ICMPv6EchoBody_from_well_know_values_when_to_bytes_method_is_called( self): # GIVEN body = ICMPv6EchoBody( 0, 2, bytearray([ 0x80, 0x00, 0xc7, 0xbf, 0x00, 0x00, 0x00, 0x01, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41 ])) # WHEN actual = body.to_bytes() # THEN expected = bytearray([ 0x00, 0x00, 0x00, 0x02, 0x80, 0x00, 0xc7, 0xbf, 0x00, 0x00, 0x00, 0x01, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41 ]) self.assertEqual(expected, actual) class TestICMPv6EchoBodyFactory(unittest.TestCase): def test_should_build_ICMPv6EchoBody_when_to_bytes_method_is_called(self): # GIVEN identifier = any_identifier() sequence_number = any_sequence_number() body_data = any_data() data = bytearray([(identifier >> 8) & 0xff, identifier & 0xff, (sequence_number >> 8) & 0xff, sequence_number & 0xff ]) + body_data factory = ICMPv6EchoBodyFactory() # WHEN actual = factory.parse(io.BytesIO(data), any_message_info()) # THEN self.assertTrue(isinstance(actual, ICMPv6EchoBody)) self.assertEqual(identifier, actual.identifier) self.assertEqual(sequence_number, actual.sequence_number) self.assertEqual(body_data, actual.data) class TestICMPv6DestinationUnreachable(unittest.TestCase): def test_should_convert_ICMPv6DestinationUnreachable_to_bytearray_when_to_bytes_method_is_called( self): # GIVEN data = any_data() icmpv6_dest_unreachable = ICMPv6DestinationUnreachable(data) # WHEN actual_data = icmpv6_dest_unreachable.to_bytes() # THEN self.assertEqual( bytearray([0x00, 0x00, 0x00, 0x00]) + data, actual_data) def test_should_convert_bytearray_to_ICMPv6DestinationUnreachable_when_from_bytes_method_is_called( self): # GIVEN data = any_data() # WHEN icmpv6_dest_unreachable = ICMPv6DestinationUnreachable.from_bytes( io.BytesIO(bytearray([0x00, 0x00, 0x00, 0x00]) + data)) # THEN self.assertEqual(data, icmpv6_dest_unreachable.data) def test_should_raise_RuntimeError_when_from_bytes_method_is_called(self): # GIVEN data = any_data() unused = random.randint(1, 1 << 32) # WHEN self.assertRaises( RuntimeError, ICMPv6DestinationUnreachable.from_bytes, io.BytesIO(bytearray(struct.pack(">I", unused)) + data)) class TestICMPv6DestinationUnreachableFactory(unittest.TestCase): def test_should_create_ICMPv6DestinationUnreachable_when_parse_method_is_called( self): # GIVEN icmp_data = any_data() factory = ICMPv6DestinationUnreachableFactory() data = bytearray([0x00, 0x00, 0x00, 0x00]) + icmp_data # WHEN icmpv6_dest_unreachable = factory.parse(io.BytesIO(data), any_message_info()) # THEN self.assertEqual(icmp_data, icmpv6_dest_unreachable.data) class TestUDPHeaderFactory(unittest.TestCase): def test_should_create_UDPHeader_when_to_bytes_method_is_called(self): # GIVEN factory = UDPHeaderFactory() src_port = any_port() dst_port = any_port() payload_length = any_payload_length() checksum = any_checksum() data = struct.pack("!H", src_port) + struct.pack("!H", dst_port) + \ struct.pack("!H", payload_length) + struct.pack("!H", checksum) # WHEN udp_header = factory.parse(io.BytesIO(data), any_message_info()) # THEN self.assertEqual(src_port, udp_header.src_port) self.assertEqual(dst_port, udp_header.dst_port) self.assertEqual(payload_length, udp_header.payload_length) self.assertEqual(checksum, udp_header.checksum) class TestHopByHopOptionsFactory(unittest.TestCase): def test_should_create_option_from_bytearray_when_to_bytes_method_is_called( self): # GIVEN class DummyOptionFactory: def parse(self, data, 
                      message_info):
                return data.read()

        factory = HopByHopOptionsFactory(
            options_factories={2: DummyOptionFactory()})

        data = bytearray([0x02, 0x03, 0x11, 0x22, 0x33, 0x01, 0x00])

        # WHEN
        actual_options = factory.parse(io.BytesIO(data), any_message_info())

        # THEN
        self.assertEqual(1, len(actual_options))
        self.assertEqual(2, actual_options[0].header.type)
        self.assertEqual(3, actual_options[0].header.length)


if __name__ == "__main__":
    unittest.main()
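# ---------------------------------------------------------------------------
# Hedged sketch (not part of the test suite): the factories exercised above
# compose into a complete packet parser. This mirrors the setup used in
# TestIPv6PacketFactory and parses the same well-known ICMPv6 echo packet;
# it assumes the ipv6 and common modules from this suite are importable.
# ---------------------------------------------------------------------------
import io

import common
from ipv6 import (HopByHopFactory, HopByHopOptionsFactory, ICMPv6EchoBodyFactory,
                  ICMPv6Factory, IPv6PacketFactory, MPLOptionFactory)

parser = IPv6PacketFactory(
    ehf={0: HopByHopFactory(
        hop_by_hop_options_factory=HopByHopOptionsFactory(
            options_factories={109: MPLOptionFactory()}))},
    ulpf={58: ICMPv6Factory(body_factories={128: ICMPv6EchoBodyFactory()})})

packet_bytes = bytearray([
    0x60, 0x00, 0x00, 0x00, 0x00, 0x22, 0x00, 0x40,
    0xfd, 0x00, 0x12, 0x34, 0x45, 0x55, 0x00, 0x00,
    0x00, 0x00, 0x00, 0xff, 0xfe, 0x00, 0x18, 0x00,
    0xff, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
    0x3a, 0x00, 0x6d, 0x04, 0x40, 0x02, 0x00, 0x18,
    0x80, 0x00, 0x87, 0x12, 0x00, 0x00, 0x00, 0x02,
    0x80, 0x00, 0xc7, 0xbf, 0x00, 0x00, 0x00, 0x01,
    0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41,
    0x41, 0x41])

packet = parser.parse(io.BytesIO(packet_bytes), common.MessageInfo())
print(packet.ipv6_header.source_address.compressed)      # fd00:1234:4555::ff:fe00:1800
print(packet.upper_layer_protocol.body.sequence_number)  # 2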
import hashlib
import hmac
import json
import re

from django.conf import settings
from django.contrib.auth import logout
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from django.core.urlresolvers import reverse
from django.db.models import Count
from django.http import HttpResponse, Http404
from django.shortcuts import redirect, render, get_object_or_404
from django.utils.decorators import method_decorator
from django.views import generic
from django.views.decorators.csrf import csrf_exempt
from github import UnknownObjectException, BadCredentialsException
from social.apps.django_app.default.models import UserSocialAuth

from documents.models import Document
from documents.search import Search
from interface.models import Repo
from interface.utils import get_github
from interface.path_processor import PathProcessor


class RepoDetailView(generic.DetailView, generic.UpdateView):
    model = Repo
    slug_field = 'full_name'
    slug_url_kwarg = 'full_name'
    template_name = 'interface/repo_detail.html'
    fields = ['wiki_branch']

    def get(self, request, *args, **kwargs):
        self.object = self.get_object()
        context = self.get_context_data(object=self.object)

        is_collab = self.object.user_is_collaborator(request.user)
        context['is_owner'] = is_collab

        if self.object.is_private and not is_collab:
            raise Http404('You are not allowed to view this Repo')

        repo_name = self.object.full_name

        branches = []
        if is_collab:
            g = get_github(self.object.user)
            grepo = g.get_repo(repo_name)
            branches = [i.name for i in grepo.get_branches()]
        context['branches'] = branches

        path = kwargs.get('path')
        path = path or '/'
        path_processor = PathProcessor(repo_name, path)
        is_directory = False

        try:
            # Viewing a single file
            filename = path_processor.filename
            trunc_path = path_processor.directory
            document = Document.objects.get(repo=self.object, path=trunc_path,
                                            filename=filename)
            documents = []
        except Document.DoesNotExist:
            path_processor = PathProcessor(repo_name, path, is_directory=True)
            trunc_path = path_processor.directory
            is_directory = True

            try:
                # Viewing a folder with a README
                document = Document.objects.get(
                    repo=self.object, path=trunc_path,
                    filename__istartswith='README')
            except Document.DoesNotExist:
                # Viewing a folder without a README
                document = None

            documents = Document.objects.filter(repo=self.object,
                                                path__startswith=trunc_path)

        context['document'] = document
        context['path'] = path_processor.path_in_repo
        context['files'] = self.object.get_folder_contents(trunc_path, documents)
        context['directory'] = is_directory

        if is_directory and re.match('.+[^/]$', request.path):
            return redirect(request.path + '/')

        if len(context['files']) == 0 and 'document' not in context:
            raise Http404

        context['base_url'] = request.build_absolute_uri(self.object.get_absolute_url())

        b_tuples = []
        if path != '/':
            path = path[1:]
            breadcrumbs = path.split('/')
            for b in breadcrumbs:
                if not b_tuples:
                    url = '{0}/{1}/'.format(context['base_url'], b)
                else:
                    url = '{0}{1}/'.format(b_tuples[-1][0], b)
                b_tuples.append((url, b))
        context['breadcrumbs'] = b_tuples

        return self.render_to_response(context)

    def form_invalid(self, form):
        # TODO: Submit form via ajax, show error message if invalid
        # I have no idea how someone would submit an invalid form
        return render(self.request, 'interface/500.html')


class RepoListView(LoginRequiredMixin, generic.ListView):
    template_name = 'interface/repo_list.html'

    def get(self, request, *args, **kwargs):
        g = get_github(self.request.user)

        try:
            repos = [r for r in
                     g.get_user().get_repos()]
        except BadCredentialsException:
            UserSocialAuth.objects.filter(user=request.user).delete()
            # Append the "next" query string to the URL before redirecting,
            # rather than to the response object.
            return redirect(reverse('social:begin', args=['github']) + '?next=' + request.path)

        self.object_list = Repo.objects.filter(
            full_name__in=[i.full_name for i in repos]
        ).annotate(doc_count=Count('documents'))

        names = [x.full_name for x in self.object_list]
        filtered = []
        for repo in repos:
            if repo.full_name not in names:
                filtered.append(repo)

        context = self.get_context_data()
        context['repos'] = filtered
        context['welcome'] = request.GET.get('welcome', False)

        return self.render_to_response(context)


class RepoDeleteView(generic.DetailView):
    model = Repo
    slug_field = 'full_name'
    slug_url_kwarg = 'full_name'

    @method_decorator(csrf_exempt)
    def dispatch(self, request, *args, **kwargs):
        return super(RepoDeleteView, self).dispatch(request, *args, **kwargs)

    def check_and_delete(self, request):
        obj = self.get_object()
        if not obj.user_is_collaborator(request.user):
            raise Http404('You are not allowed to delete this repo')
        obj.delete()

    def get(self, request, *args, **kwargs):
        self.check_and_delete(request)
        return redirect(reverse('repo_list'))

    def delete(self, request, **kwargs):
        self.check_and_delete(request)
        return HttpResponse(status=204)


@login_required
def ProcessRepo(request, full_name):
    user = request.user
    g = get_github(request.user)
    grepo = g.get_repo(full_name)

    if not grepo.full_name:
        raise Http404('Repo not found')

    guser = g.get_user(user.username)
    is_collab = grepo.has_in_collaborators(guser)

    if not is_collab and grepo.private:
        raise Http404('You are not a collaborator of this repo')

    try:
        repo = Repo.objects.get(full_name=grepo.full_name)
        repo.is_private = grepo.private
        repo.save()
    except Repo.DoesNotExist:
        repo = Repo.objects.create(
            full_name=grepo.full_name,
            user=user,
            wiki_branch=grepo.default_branch,
            is_private=grepo.private
        )

    if not repo.webhook_id:
        try:
            repo.add_webhook(request)
        except UnknownObjectException:
            raise Http404('Github failed to create a hook')

    repo.enqueue()

    url = reverse('repo_detail', kwargs={'full_name': repo.full_name})
    return redirect(url)


def search_view(request):
    query_text = request.GET.get('q', None)
    if not query_text:
        raise Http404

    search = Search(request.user, query_text)
    docs = search.perform()

    context = {
        'query': query_text,
        'results': docs
    }

    return render(request, 'interface/search.html', context)


def LogoutView(request):
    next = request.GET.get('next', '/')
    logout(request)
    return redirect(next)


def handler404(request):
    response = render(request, 'interface/404.html')
    response.status_code = 404
    return response


def handler500(request):
    response = render(request, 'interface/500.html')
    response.status_code = 500
    return response
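# ---------------------------------------------------------------------------
# Purely hypothetical URLconf sketch (not part of this codebase), showing one
# way these views could be routed. The 'repo_list' and 'repo_detail' names
# mirror the reverse() calls above; the patterns themselves are assumptions.
# ---------------------------------------------------------------------------
from django.conf.urls import url

from interface import views

urlpatterns = [
    url(r'^$', views.RepoListView.as_view(), name='repo_list'),
    url(r'^logout/$', views.LogoutView, name='logout'),
    url(r'^search/$', views.search_view, name='search'),
    url(r'^process/(?P<full_name>[^/]+/[^/]+)/$', views.ProcessRepo, name='process_repo'),
    url(r'^(?P<full_name>[^/]+/[^/]+)/delete/$', views.RepoDeleteView.as_view(), name='repo_delete'),
    url(r'^(?P<full_name>[^/]+/[^/]+)(?P<path>/.*)?$', views.RepoDetailView.as_view(), name='repo_detail'),
]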
import os import pytest import bcrypt _test_vectors = [ ( b"Kk4DQuMMfZL9o", b"$2b$04$cVWp4XaNU8a4v1uMRum2SO", b"$2b$04$cVWp4XaNU8a4v1uMRum2SO026BWLIoQMD/TXg5uZV.0P.uO8m3YEm", ), ( b"9IeRXmnGxMYbs", b"$2b$04$pQ7gRO7e6wx/936oXhNjrO", b"$2b$04$pQ7gRO7e6wx/936oXhNjrOUNOHL1D0h1N2IDbJZYs.1ppzSof6SPy", ), ( b"xVQVbwa1S0M8r", b"$2b$04$SQe9knOzepOVKoYXo9xTte", b"$2b$04$SQe9knOzepOVKoYXo9xTteNYr6MBwVz4tpriJVe3PNgYufGIsgKcW", ), ( b"Zfgr26LWd22Za", b"$2b$04$eH8zX.q5Q.j2hO1NkVYJQO", b"$2b$04$eH8zX.q5Q.j2hO1NkVYJQOM6KxntS/ow3.YzVmFrE4t//CoF4fvne", ), ( b"Tg4daC27epFBE", b"$2b$04$ahiTdwRXpUG2JLRcIznxc.", b"$2b$04$ahiTdwRXpUG2JLRcIznxc.s1.ydaPGD372bsGs8NqyYjLY1inG5n2", ), ( b"xhQPMmwh5ALzW", b"$2b$04$nQn78dV0hGHf5wUBe0zOFu", b"$2b$04$nQn78dV0hGHf5wUBe0zOFu8n07ZbWWOKoGasZKRspZxtt.vBRNMIy", ), ( b"59je8h5Gj71tg", b"$2b$04$cvXudZ5ugTg95W.rOjMITu", b"$2b$04$cvXudZ5ugTg95W.rOjMITuM1jC0piCl3zF5cmGhzCibHZrNHkmckG", ), ( b"wT4fHJa2N9WSW", b"$2b$04$YYjtiq4Uh88yUsExO0RNTu", b"$2b$04$YYjtiq4Uh88yUsExO0RNTuEJ.tZlsONac16A8OcLHleWFjVawfGvO", ), ( b"uSgFRnQdOgm4S", b"$2b$04$WLTjgY/pZSyqX/fbMbJzf.", b"$2b$04$WLTjgY/pZSyqX/fbMbJzf.qxCeTMQOzgL.CimRjMHtMxd/VGKojMu", ), ( b"tEPtJZXur16Vg", b"$2b$04$2moPs/x/wnCfeQ5pCheMcu", b"$2b$04$2moPs/x/wnCfeQ5pCheMcuSJQ/KYjOZG780UjA/SiR.KsYWNrC7SG", ), ( b"vvho8C6nlVf9K", b"$2b$04$HrEYC/AQ2HS77G78cQDZQ.", b"$2b$04$HrEYC/AQ2HS77G78cQDZQ.r44WGcruKw03KHlnp71yVQEwpsi3xl2", ), ( b"5auCCY9by0Ruf", b"$2b$04$vVYgSTfB8KVbmhbZE/k3R.", b"$2b$04$vVYgSTfB8KVbmhbZE/k3R.ux9A0lJUM4CZwCkHI9fifke2.rTF7MG", ), ( b"GtTkR6qn2QOZW", b"$2b$04$JfoNrR8.doieoI8..F.C1O", b"$2b$04$JfoNrR8.doieoI8..F.C1OQgwE3uTeuardy6lw0AjALUzOARoyf2m", ), ( b"zKo8vdFSnjX0f", b"$2b$04$HP3I0PUs7KBEzMBNFw7o3O", b"$2b$04$HP3I0PUs7KBEzMBNFw7o3O7f/uxaZU7aaDot1quHMgB2yrwBXsgyy", ), ( b"I9VfYlacJiwiK", b"$2b$04$xnFVhJsTzsFBTeP3PpgbMe", b"$2b$04$xnFVhJsTzsFBTeP3PpgbMeMREb6rdKV9faW54Sx.yg9plf4jY8qT6", ), ( b"VFPO7YXnHQbQO", b"$2b$04$WQp9.igoLqVr6Qk70mz6xu", b"$2b$04$WQp9.igoLqVr6Qk70mz6xuRxE0RttVXXdukpR9N54x17ecad34ZF6", ), ( b"VDx5BdxfxstYk", b"$2b$04$xgZtlonpAHSU/njOCdKztO", b"$2b$04$xgZtlonpAHSU/njOCdKztOPuPFzCNVpB4LGicO4/OGgHv.uKHkwsS", ), ( b"dEe6XfVGrrfSH", b"$2b$04$2Siw3Nv3Q/gTOIPetAyPr.", b"$2b$04$2Siw3Nv3Q/gTOIPetAyPr.GNj3aO0lb1E5E9UumYGKjP9BYqlNWJe", ), ( b"cTT0EAFdwJiLn", b"$2b$04$7/Qj7Kd8BcSahPO4khB8me", b"$2b$04$7/Qj7Kd8BcSahPO4khB8me4ssDJCW3r4OGYqPF87jxtrSyPj5cS5m", ), ( b"J8eHUDuxBB520", b"$2b$04$VvlCUKbTMjaxaYJ.k5juoe", b"$2b$04$VvlCUKbTMjaxaYJ.k5juoecpG/7IzcH1AkmqKi.lIZMVIOLClWAk.", ), ( b"U*U", b"$2a$05$CCCCCCCCCCCCCCCCCCCCC.", b"$2a$05$CCCCCCCCCCCCCCCCCCCCC.E5YPO9kmyuRGyh0XouQYb4YMJKvyOeW", ), ( b"U*U*", b"$2a$05$CCCCCCCCCCCCCCCCCCCCC.", b"$2a$05$CCCCCCCCCCCCCCCCCCCCC.VGOzA784oUp/Z0DY336zx7pLYAy0lwK", ), ( b"U*U*U", b"$2a$05$XXXXXXXXXXXXXXXXXXXXXO", b"$2a$05$XXXXXXXXXXXXXXXXXXXXXOAcXxm9kjPGEMsLznoKqmqw7tc8WCx4a", ), ( b"0123456789abcdefghijklmnopqrstuvwxyz" b"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789" b"chars after 72 are ignored", b"$2a$05$abcdefghijklmnopqrstuu", b"$2a$05$abcdefghijklmnopqrstuu5s2v8.iXieOjg/.AySBTTZIIVFJeBui", ), ( b"\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" b"\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" b"\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" b"\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" b"\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" b"\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" b"chars after 72 are ignored as usual", b"$2a$05$/OK.fbVrR/bpIqNJ5ianF.", b"$2a$05$/OK.fbVrR/bpIqNJ5ianF.swQOIzjOiJ9GHEPuhEkvqrUyvWhEMx6", ), ( b"\xa3", b"$2a$05$/OK.fbVrR/bpIqNJ5ianF.", 
b"$2a$05$/OK.fbVrR/bpIqNJ5ianF.Sa7shbm4.OzKpvFnX1pQLmQW96oUlCq", ), ] _2y_test_vectors = [ ( b"\xa3", b"$2y$05$/OK.fbVrR/bpIqNJ5ianF.Sa7shbm4.OzKpvFnX1pQLmQW96oUlCq", b"$2y$05$/OK.fbVrR/bpIqNJ5ianF.Sa7shbm4.OzKpvFnX1pQLmQW96oUlCq", ), ( b"\xff\xff\xa3", b"$2y$05$/OK.fbVrR/bpIqNJ5ianF.CE5elHaaO4EbggVDjb8P19RukzXSM3e", b"$2y$05$/OK.fbVrR/bpIqNJ5ianF.CE5elHaaO4EbggVDjb8P19RukzXSM3e", ), ] def test_gensalt_basic(monkeypatch): monkeypatch.setattr(os, "urandom", lambda n: b"0000000000000000") assert bcrypt.gensalt() == b"$2b$12$KB.uKB.uKB.uKB.uKB.uK." @pytest.mark.parametrize( ("rounds", "expected"), [ (4, b"$2b$04$KB.uKB.uKB.uKB.uKB.uK."), (5, b"$2b$05$KB.uKB.uKB.uKB.uKB.uK."), (6, b"$2b$06$KB.uKB.uKB.uKB.uKB.uK."), (7, b"$2b$07$KB.uKB.uKB.uKB.uKB.uK."), (8, b"$2b$08$KB.uKB.uKB.uKB.uKB.uK."), (9, b"$2b$09$KB.uKB.uKB.uKB.uKB.uK."), (10, b"$2b$10$KB.uKB.uKB.uKB.uKB.uK."), (11, b"$2b$11$KB.uKB.uKB.uKB.uKB.uK."), (12, b"$2b$12$KB.uKB.uKB.uKB.uKB.uK."), (13, b"$2b$13$KB.uKB.uKB.uKB.uKB.uK."), (14, b"$2b$14$KB.uKB.uKB.uKB.uKB.uK."), (15, b"$2b$15$KB.uKB.uKB.uKB.uKB.uK."), (16, b"$2b$16$KB.uKB.uKB.uKB.uKB.uK."), (17, b"$2b$17$KB.uKB.uKB.uKB.uKB.uK."), (18, b"$2b$18$KB.uKB.uKB.uKB.uKB.uK."), (19, b"$2b$19$KB.uKB.uKB.uKB.uKB.uK."), (20, b"$2b$20$KB.uKB.uKB.uKB.uKB.uK."), (21, b"$2b$21$KB.uKB.uKB.uKB.uKB.uK."), (22, b"$2b$22$KB.uKB.uKB.uKB.uKB.uK."), (23, b"$2b$23$KB.uKB.uKB.uKB.uKB.uK."), (24, b"$2b$24$KB.uKB.uKB.uKB.uKB.uK."), ], ) def test_gensalt_rounds_valid(rounds, expected, monkeypatch): monkeypatch.setattr(os, "urandom", lambda n: b"0000000000000000") assert bcrypt.gensalt(rounds) == expected @pytest.mark.parametrize("rounds", list(range(1, 4))) def test_gensalt_rounds_invalid(rounds): with pytest.raises(ValueError): bcrypt.gensalt(rounds) def test_gensalt_bad_prefix(): with pytest.raises(ValueError): bcrypt.gensalt(prefix="bad") def test_gensalt_2a_prefix(monkeypatch): monkeypatch.setattr(os, "urandom", lambda n: b"0000000000000000") assert bcrypt.gensalt(prefix=b"2a") == b"$2a$12$KB.uKB.uKB.uKB.uKB.uK." 
@pytest.mark.parametrize(("password", "salt", "hashed"), _test_vectors) def test_hashpw_new(password, salt, hashed): assert bcrypt.hashpw(password, salt) == hashed @pytest.mark.parametrize(("password", "salt", "hashed"), _test_vectors) def test_checkpw(password, salt, hashed): assert bcrypt.checkpw(password, hashed) is True @pytest.mark.parametrize(("password", "salt", "hashed"), _test_vectors) def test_hashpw_existing(password, salt, hashed): assert bcrypt.hashpw(password, hashed) == hashed @pytest.mark.parametrize(("password", "hashed", "expected"), _2y_test_vectors) def test_hashpw_2y_prefix(password, hashed, expected): assert bcrypt.hashpw(password, hashed) == expected @pytest.mark.parametrize(("password", "hashed", "expected"), _2y_test_vectors) def test_checkpw_2y_prefix(password, hashed, expected): assert bcrypt.checkpw(password, hashed) is True def test_hashpw_invalid(): with pytest.raises(ValueError): bcrypt.hashpw(b"password", b"$2z$04$cVWp4XaNU8a4v1uMRum2SO") def test_checkpw_wrong_password(): assert ( bcrypt.checkpw( b"badpass", b"$2b$04$2Siw3Nv3Q/gTOIPetAyPr.GNj3aO0lb1E5E9UumYGKjP9BYqlNWJe", ) is False ) def test_checkpw_bad_salt(): with pytest.raises(ValueError): bcrypt.checkpw( b"badpass", b"$2b$04$?Siw3Nv3Q/gTOIPetAyPr.GNj3aO0lb1E5E9UumYGKjP9BYqlNWJe", ) def test_checkpw_str_password(): with pytest.raises(TypeError): bcrypt.checkpw("password", b"$2b$04$cVWp4XaNU8a4v1uMRum2SO") def test_checkpw_str_salt(): with pytest.raises(TypeError): bcrypt.checkpw(b"password", "$2b$04$cVWp4XaNU8a4v1uMRum2SO") def test_hashpw_str_password(): with pytest.raises(TypeError): bcrypt.hashpw("password", b"$2b$04$cVWp4XaNU8a4v1uMRum2SO") def test_hashpw_str_salt(): with pytest.raises(TypeError): bcrypt.hashpw(b"password", "$2b$04$cVWp4XaNU8a4v1uMRum2SO") def test_checkpw_nul_byte(): with pytest.raises(ValueError): bcrypt.checkpw( b"abc\0def", b"$2b$04$2Siw3Nv3Q/gTOIPetAyPr.GNj3aO0lb1E5E9UumYGKjP9BYqlNWJe", ) with pytest.raises(ValueError): bcrypt.checkpw( b"abcdef", b"$2b$04$2S\0w3Nv3Q/gTOIPetAyPr.GNj3aO0lb1E5E9UumYGKjP9BYqlNWJe", ) def test_hashpw_nul_byte(): salt = bcrypt.gensalt(4) with pytest.raises(ValueError): bcrypt.hashpw(b"abc\0def", salt) def test_checkpw_extra_data(): salt = bcrypt.gensalt(4) hashed = bcrypt.hashpw(b"abc", salt) assert bcrypt.checkpw(b"abc", hashed) assert bcrypt.checkpw(b"abc", hashed + b"extra") is False assert bcrypt.checkpw(b"abc", hashed[:-10]) is False @pytest.mark.parametrize( ("rounds", "password", "salt", "expected"), [ [ 4, b"password", b"salt", b"\x5b\xbf\x0c\xc2\x93\x58\x7f\x1c\x36\x35\x55\x5c\x27\x79\x65\x98" b"\xd4\x7e\x57\x90\x71\xbf\x42\x7e\x9d\x8f\xbe\x84\x2a\xba\x34\xd9", ], [ 4, b"password", b"\x00", b"\xc1\x2b\x56\x62\x35\xee\xe0\x4c\x21\x25\x98\x97\x0a\x57\x9a\x67", ], [ 4, b"\x00", b"salt", b"\x60\x51\xbe\x18\xc2\xf4\xf8\x2c\xbf\x0e\xfe\xe5\x47\x1b\x4b\xb9", ], [ # nul bytes in password and string 4, b"password\x00", b"salt\x00", b"\x74\x10\xe4\x4c\xf4\xfa\x07\xbf\xaa\xc8\xa9\x28\xb1\x72\x7f\xac" b"\x00\x13\x75\xe7\xbf\x73\x84\x37\x0f\x48\xef\xd1\x21\x74\x30\x50", ], [ 4, b"pass\x00wor", b"sa\0l", b"\xc2\xbf\xfd\x9d\xb3\x8f\x65\x69\xef\xef\x43\x72\xf4\xde\x83\xc0", ], [ 4, b"pass\x00word", b"sa\0lt", b"\x4b\xa4\xac\x39\x25\xc0\xe8\xd7\xf0\xcd\xb6\xbb\x16\x84\xa5\x6f", ], [ # bigger key 8, b"password", b"salt", b"\xe1\x36\x7e\xc5\x15\x1a\x33\xfa\xac\x4c\xc1\xc1\x44\xcd\x23\xfa" b"\x15\xd5\x54\x84\x93\xec\xc9\x9b\x9b\x5d\x9c\x0d\x3b\x27\xbe\xc7" b"\x62\x27\xea\x66\x08\x8b\x84\x9b\x20\xab\x7a\xa4\x78\x01\x02\x46" 
b"\xe7\x4b\xba\x51\x72\x3f\xef\xa9\xf9\x47\x4d\x65\x08\x84\x5e\x8d", ], [ # more rounds 42, b"password", b"salt", b"\x83\x3c\xf0\xdc\xf5\x6d\xb6\x56\x08\xe8\xf0\xdc\x0c\xe8\x82\xbd", ], [ # longer password 8, b"Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do " b"eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut " b"enim ad minim veniam, quis nostrud exercitation ullamco laboris " b"nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor " b"in reprehenderit in voluptate velit esse cillum dolore eu fugiat " b"nulla pariatur. Excepteur sint occaecat cupidatat non proident, " b"sunt in culpa qui officia deserunt mollit anim id est laborum.", b"salis\x00", b"\x10\x97\x8b\x07\x25\x3d\xf5\x7f\x71\xa1\x62\xeb\x0e\x8a\xd3\x0a", ], [ # "unicode" 8, b"\x0d\xb3\xac\x94\xb3\xee\x53\x28\x4f\x4a\x22\x89\x3b\x3c\x24\xae", b"\x3a\x62\xf0\xf0\xdb\xce\xf8\x23\xcf\xcc\x85\x48\x56\xea\x10\x28", b"\x20\x44\x38\x17\x5e\xee\x7c\xe1\x36\xc9\x1b\x49\xa6\x79\x23\xff", ], [ # very large key 8, b"\x0d\xb3\xac\x94\xb3\xee\x53\x28\x4f\x4a\x22\x89\x3b\x3c\x24\xae", b"\x3a\x62\xf0\xf0\xdb\xce\xf8\x23\xcf\xcc\x85\x48\x56\xea\x10\x28", b"\x20\x54\xb9\xff\xf3\x4e\x37\x21\x44\x03\x34\x74\x68\x28\xe9\xed" b"\x38\xde\x4b\x72\xe0\xa6\x9a\xdc\x17\x0a\x13\xb5\xe8\xd6\x46\x38" b"\x5e\xa4\x03\x4a\xe6\xd2\x66\x00\xee\x23\x32\xc5\xed\x40\xad\x55" b"\x7c\x86\xe3\x40\x3f\xbb\x30\xe4\xe1\xdc\x1a\xe0\x6b\x99\xa0\x71" b"\x36\x8f\x51\x8d\x2c\x42\x66\x51\xc9\xe7\xe4\x37\xfd\x6c\x91\x5b" b"\x1b\xbf\xc3\xa4\xce\xa7\x14\x91\x49\x0e\xa7\xaf\xb7\xdd\x02\x90" b"\xa6\x78\xa4\xf4\x41\x12\x8d\xb1\x79\x2e\xab\x27\x76\xb2\x1e\xb4" b"\x23\x8e\x07\x15\xad\xd4\x12\x7d\xff\x44\xe4\xb3\xe4\xcc\x4c\x4f" b"\x99\x70\x08\x3f\x3f\x74\xbd\x69\x88\x73\xfd\xf6\x48\x84\x4f\x75" b"\xc9\xbf\x7f\x9e\x0c\x4d\x9e\x5d\x89\xa7\x78\x39\x97\x49\x29\x66" b"\x61\x67\x07\x61\x1c\xb9\x01\xde\x31\xa1\x97\x26\xb6\xe0\x8c\x3a" b"\x80\x01\x66\x1f\x2d\x5c\x9d\xcc\x33\xb4\xaa\x07\x2f\x90\xdd\x0b" b"\x3f\x54\x8d\x5e\xeb\xa4\x21\x13\x97\xe2\xfb\x06\x2e\x52\x6e\x1d" b"\x68\xf4\x6a\x4c\xe2\x56\x18\x5b\x4b\xad\xc2\x68\x5f\xbe\x78\xe1" b"\xc7\x65\x7b\x59\xf8\x3a\xb9\xab\x80\xcf\x93\x18\xd6\xad\xd1\xf5" b"\x93\x3f\x12\xd6\xf3\x61\x82\xc8\xe8\x11\x5f\x68\x03\x0a\x12\x44", ], [ # UTF-8 Greek characters "odysseus" / "telemachos" 8, b"\xe1\xbd\x88\xce\xb4\xcf\x85\xcf\x83\xcf\x83\xce\xb5\xcf\x8d\xcf" b"\x82", b"\xce\xa4\xce\xb7\xce\xbb\xce\xad\xce\xbc\xce\xb1\xcf\x87\xce\xbf" b"\xcf\x82", b"\x43\x66\x6c\x9b\x09\xef\x33\xed\x8c\x27\xe8\xe8\xf3\xe2\xd8\xe6", ], ], ) def test_kdf(rounds, password, salt, expected): derived = bcrypt.kdf( password, salt, len(expected), rounds, ignore_few_rounds=True ) assert derived == expected def test_kdf_str_password(): with pytest.raises(TypeError): bcrypt.kdf("password", b"$2b$04$cVWp4XaNU8a4v1uMRum2SO", 10, 10) def test_kdf_str_salt(): with pytest.raises(TypeError): bcrypt.kdf(b"password", "salt", 10, 10) def test_kdf_no_warn_rounds(): bcrypt.kdf(b"password", b"salt", 10, 10, True) def test_kdf_warn_rounds(): with pytest.warns(UserWarning): bcrypt.kdf(b"password", b"salt", 10, 10) @pytest.mark.parametrize( ("password", "salt", "desired_key_bytes", "rounds", "error"), [ ("pass", b"$2b$04$cVWp4XaNU8a4v1uMRum2SO", 10, 10, TypeError), (b"password", "salt", 10, 10, TypeError), (b"", b"$2b$04$cVWp4XaNU8a4v1uMRum2SO", 10, 10, ValueError), (b"password", b"", 10, 10, ValueError), (b"password", b"$2b$04$cVWp4XaNU8a4v1uMRum2SO", 0, 10, ValueError), (b"password", b"$2b$04$cVWp4XaNU8a4v1uMRum2SO", -3, 10, ValueError), (b"password", 
b"$2b$04$cVWp4XaNU8a4v1uMRum2SO", 513, 10, ValueError), (b"password", b"$2b$04$cVWp4XaNU8a4v1uMRum2SO", 20, 0, ValueError), ], ) def test_invalid_params(password, salt, desired_key_bytes, rounds, error): with pytest.raises(error): bcrypt.kdf(password, salt, desired_key_bytes, rounds) def test_bcrypt_assert(): with pytest.raises(SystemError): bcrypt._bcrypt_assert(False) def test_2a_wraparound_bug(): assert ( bcrypt.hashpw( (b"0123456789" * 26)[:255], b"$2a$04$R1lJ2gkNaoPGdafE.H.16." ) == b"$2a$04$R1lJ2gkNaoPGdafE.H.16.1MKHPvmKwryeulRe225LKProWYwt9Oi" )
"""============== Motif pipeline ============== The motif pipeline runs a set of motif discovery and enrichment analysis on a set of intervals. * Motif discovery using MEME * Motif detection using MAST * requires a set of known motifs Usage ===== See :ref:`PipelineSettingUp` and :ref:`PipelineRunning` on general information how to use CGAT pipelines. Configuration ------------- The pipeline requires a configured :file:`pipeline.ini` file. The pipeline looks for a configuration file in several places: 1. The default configuration in the :term:`code directory`. 2. A shared configuration file :file:`../pipeline.ini`. 3. A local configuration :file:`pipeline.ini`. The order is as above. Thus, a local configuration setting will override a shared configuration setting and a default configuration setting. Configuration files follow the ini format (see the python `ConfigParser <http://docs.python.org/library/configparser.html>` documentation). The configuration file is organized by section and the variables are documented within the file. In order to get a local configuration file in the current directory, type:: python <codedir>/pipeline_motifs.py config The following sections and parameters probably should be changed from the default values: .. todo:: describe important parameters The sphinxreport report requires a :file:`conf.py` and :file:`sphinxreport.ini` file (see :ref:`PipelineReporting`). To start with, use the files supplied with the Example_ data. Input ----- Intervals +++++++++ Input are :term:`bed`-formatted files of intervals. Intervals should be at least bed4 formatted, i.e., each interval should be labelled (uniquely). Reference motifs ++++++++++++++++ Reference motifs are described by fasta-formatted multiple alignments, see for example Jaspar download. The motifs are build by running MEME on the file. Reference motifs should end in the suffix ".motif.fasta", for example, :file:`rxrvdr.motif.fasta`. Requirements ------------ The pipeline requires the information from the following pipelines: :doc:`pipeline_annotations` set the configuration variable :py:data:`annotations_database` and :py:data:`annotations_dir`. On top of the default CGAT setup, the pipeline requires the following software to be in the path: +--------------------+-------------------+------------------------------------------------+ |*Program* |*Version* |*Purpose* | +--------------------+-------------------+------------------------------------------------+ |bowtie_ |>=0.12.7 |read mapping | +--------------------+-------------------+------------------------------------------------+ Pipline Output ============== The results of the computation are all stored in an sqlite relational database :file:`csvdb`. Example ======= Example data is available at http://www.cgat.org/~andreas/sample_data/pipeline_chipseq.tgz. To run the example, simply unpack and untar:: wget http://www.cgat.org/~andreas/sample_data/pipeline_chipseq.tgz tar -xvzf pipeline_chipseq.tgz cd pipeline_chipseq python <srcdir>/pipeline_chipseq.py make full .. note:: For the pipeline to run, install the :doc:`pipeline_annotations` as well. Glossary ======== .. glossary:: bowtie bowtie_ - a read mapper .. 
_bowtie: http://bowtie-bio.sourceforge.net/index.shtml Code ==== """ import sys import shutil import itertools import re import glob import os from ruffus import * import sqlite3 import xml.etree.ElementTree import CGAT.Experiment as E import CGAT.IOTools as IOTools import CGATPipelines.PipelineMotifs as PipelineMotifs import CGATPipelines.PipelineTracks as PipelineTracks ################################################### ################################################### ################################################### # Pipeline configuration ################################################### import CGATPipelines.Pipeline as P P.getParameters( ["%s/pipeline.ini" % os.path.splitext(__file__)[0], "../pipeline.ini", "pipeline.ini"], defaults={ 'annotations_dir': ""}) PARAMS = P.PARAMS PARAMS_ANNOTATIONS = P.peekParameters( PARAMS["annotations_dir"], "pipeline_genesets.py") ################################################################### ################################################################### ################################################################### # Helper functions mapping tracks to conditions, etc ################################################################### # load all tracks - exclude input/control tracks Sample = PipelineTracks.Sample TRACKS = PipelineTracks.Tracks(Sample).loadFromDirectory(glob.glob("*.bed.gz"), "(\S+).bed.gz") TRACKS_BEDFILES = ["%s.bed.gz" % x for x in TRACKS] def getAssociatedBAMFiles(track): '''return a list of BAM files associated with a track. By default, this method searches for ``track.bam`` file in the current directory and returns an offset of 0. Associations can be defined in the .ini file in the section [bams]. For example, the following snippet associates track track1 with the bamfiles :file:`track1.bam` and :file:`track2.bam`:: [bams] track1=track1.bam,track2.bam Glob expressions are permitted. Offsets are used to shift tags in ChIP experiments. Offsets need to be defined in the [offsets] sections. If no offsets are defined, the method returns a list of 0 offsets. Offsets need to be defined in the same order as the bam files:: [offsets] track1=120,200 returns a list of BAM files and offsets. Default tracks and offsets can be specified using a placeholder ``%``. The following will associate all tracks with the same bam file:: [bams] %=all.bam ''' fn = track.asFile() bamfiles = glob.glob("%s.bam" % fn) if bamfiles == []: if "bams_%s" % fn.lower() in PARAMS: for ff in P.asList(PARAMS["bams_%s" % fn.lower()]): bamfiles.extend(glob.glob(ff)) else: for pattern, value in P.CONFIG.items("bams"): if "%" in pattern: p = re.sub("%", "\S+", pattern) if re.search(p, fn, re.IGNORECASE): bamfiles.extend(glob.glob(value)) offsets = [] if "offsets_%s" % fn.lower() in PARAMS: offsets = list(map(int, P.asList(PARAMS["offsets_%s" % fn.lower()]))) else: for pattern, value in P.CONFIG.items("offsets"): if "%" in pattern: p = re.sub("%", "\S+", pattern) if re.search(p, fn, re.IGNORECASE): offsets.extend(list(map(int, value.split(",")))) if offsets == []: offsets = [0] * len(bamfiles) if len(bamfiles) != len(offsets): raise ValueError( "number of BAM files %s is not the " "same as number of offsets: %s" % (str(bamfiles), str(offsets))) return bamfiles, offsets ################################################################### ################################################################### ################################################################### def connect(): '''connect to database. 
This method also attaches to helper databases. ''' dbh = sqlite3.connect(PARAMS["database_name"]) statement = '''ATTACH DATABASE '%s' as annotations''' % ( PARAMS["annotations_database"]) cc = dbh.cursor() cc.execute(statement) cc.close() return dbh ################################################################### ################################################################### ################################################################### # General preparation tasks ################################################################### ############################################################ ############################################################ ############################################################ @transform(TRACKS_BEDFILES, suffix(".bed.gz"), "_intervals.load") def loadIntervals(infile, outfile): '''load intervals from :term:`bed` formatted files into database. ''' bedfile = infile track = Sample(filename=P.snip(infile, ".bed.gz")) bamfiles, offsets = getAssociatedBAMFiles(track) control = "" if bamfiles: E.info("%s: associated bamfiles = %s" % (track, bamfiles)) else: E.info("%s: no bamfiles associated" % (track)) assert (len(bamfiles) == 1) bamfile = bamfiles[0] offset = offsets[0] tablename = P.toTable(outfile) statement = '''zcat %(bedfile)s | awk '{printf("%%s\\t%%i\\t%%i\\t%%i\\n", $1,$2,$3,++a)}' | cgat bed2table --counter=peaks --bam-file=%(bamfile)s --offset=%(offset)i --bed-header=contig,start,end,interval_id %(control)s --output-all-fields --log=%(outfile)s | cgat csv2db %(csv2db_options)s --add-index=contig,start --add-index=interval_id --table=%(tablename)s --allow-empty-file > %(outfile)s''' P.run() @follows(mkdir(os.path.join(PARAMS["exportdir"], "bed"))) @transform(TRACKS_BEDFILES, regex(r"(.*).bed.gz"), os.path.join(PARAMS["exportdir"], "bed", r"\1.bed.gz")) def indexIntervals(infile, outfile): '''index intervals. ''' statement = '''zcat %(infile)s | sort -k1,1 -k2,2n | bgzip > %(outfile)s; tabix -p bed %(outfile)s''' P.run() @follows(mkdir(os.path.join(PARAMS["exportdir"], "peaks"))) @transform(loadIntervals, regex(r"(.*)_intervals.load"), os.path.join(PARAMS["exportdir"], "peaks", r"\1.peak.bed.gz")) def exportPeakLocations(infile, outfile): '''export peak locations ''' dbh = connect() outf = IOTools.openFile(outfile, "w") cc = dbh.cursor() table = P.toTable(infile) for x in cc.execute("""SELECT contig, peakcenter, peakcenter+1, interval_id, peakval FROM %(table)s """ % locals()): outf.write("\t".join(map(str, x)) + "\n") outf.close() ############################################################ ############################################################ ############################################################ @transform(loadIntervals, suffix("_intervals.load"), ".discovery.fasta") def exportMotifDiscoverySequences(infile, outfile): '''export sequences for motif discovery. This method requires the _interval tables. For motif discovery, only the sequences with the highest S/N ratio are supplied. 1. The top *motifs_proportion* intervals sorted by peakval 2. Only a region +/- *motifs_halfwidth* around the peak 3. At least *motifs_min_sequences*. If there are not enough sequences to start with, all will be used. 4. At most *motifs_max_size* sequences will be output. 
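    An illustrative :file:`pipeline.ini` fragment controlling this selection
    (assuming the usual ``section_option`` naming used by :data:`PARAMS`; the
    values shown are placeholders only)::

       [motifs]
       proportion=0.10
       halfwidth=100
       min_sequences=100
       max_size=5000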
''' track = P.snip(infile, "_intervals.load") dbhandle = connect() p = P.substituteParameters(**locals()) nseq = PipelineMotifs.writeSequencesForIntervals( track, outfile, dbhandle, full=False, masker=P.asList( p['motifs_masker']), halfwidth=int( p["motifs_halfwidth"]), maxsize=int( p["motifs_max_size"]), proportion=p[ "motifs_proportion"], min_sequences=p[ "motifs_min_sequences"], num_sequences=p[ "motifs_num_sequences"], order=p['motifs_score']) if nseq == 0: E.warn("%s: no sequences - meme skipped" % outfile) P.touch(outfile) @follows(mkdir("motifs")) @transform(TRACKS_BEDFILES, regex("(.*).bed.gz"), r"motifs/\1.foreground.fasta") def exportMotifDetectionSequences(infile, outfile): '''export sequences for motif discovery. This method requires the _interval tables. ''' PipelineMotifs.exportSequencesFromBedFile(infile, outfile, masker=PARAMS['motifs_masker']) @follows(mkdir("motifs")) @transform(TRACKS_BEDFILES, regex("(.*).bed.gz"), r"motifs/\1.control.fasta") def exportMotifControlSequences(infile, outfile): '''for each interval, export the left and right sequence segment of the same size. ''' PipelineMotifs.exportSequencesFromBedFile(infile, outfile, masker=PARAMS['motifs_masker'], mode="leftright") @follows(mkdir("meme.dir")) @transform(exportMotifDiscoverySequences, regex("(.*).discovery.fasta"), r"meme.dir/\1.meme") def runMeme(infile, outfile): '''run MEME to find motifs. In order to increase the signal/noise ratio, MEME is not run on all intervals but only the top 10% of intervals (peakval) are used. Also, only the segment of 200 bp around the peak is used and not the complete interval. * Softmasked sequence is converted to hardmasked sequence to avoid the detection of spurious motifs. * Sequence is run through dustmasker ''' track = P.snip(infile, ".discovery.fasta") PipelineMotifs.runMEMEOnSequences(infile, outfile) ############################################################ ############################################################ ############################################################ @merge(runMeme, "meme_summary.load") def loadMemeSummary(infiles, outfile): '''load information about motifs into database.''' outf = P.getTempFile(".") outf.write("track\n") for infile in infiles: if IOTools.isEmpty(infile): continue motif = P.snip(infile, ".meme") outf.write("%s\n" % motif) outf.close() P.load(outf.name, outfile) os.unlink(outf.name) def suggestMotifDiscoveryForeground(): '''output bed files for motif discovery. ''' npeaks = [x.strip() for x in str(PARAMS["memechip_npeaks"]).split(",")] widths = [x.strip() for x in str(PARAMS["memechip_widths"]).split(",")] maskers = [x.strip() for x in str(PARAMS["memechip_maskers"]).split(",")] for infile in TRACKS_BEDFILES: track = P.snip(os.path.basename(infile), ".bed.gz") for n, w, masker in itertools.product(npeaks, widths, maskers): foreground = os.path.join( "discovery.dir", ".".join( [track, n, w, masker, "foreground", "fasta"])) background = os.path.join( "discovery.dir", ".".join( [track, n, w, masker, "background", "fasta"])) yield (track + "_intervals.load", foreground, int(n), int(w), masker) def suggestMotifDiscoveryBackground(): '''output bed files for motif discovery. 
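    Yields tuples of ``(interval table target, background fasta filename,
    npeaks, width, masker)`` that are consumed by
    :func:`buildBackgroundSequences` via its ``@files`` decorator.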
    '''
    npeaks = [x.strip() for x in str(PARAMS["memechip_npeaks"]).split(",")]
    widths = [x.strip() for x in str(PARAMS["memechip_widths"]).split(",")]
    maskers = [x.strip() for x in str(PARAMS["memechip_maskers"]).split(",")]

    for infile in TRACKS_BEDFILES:
        track = P.snip(os.path.basename(infile), ".bed.gz")
        for n, w, masker in itertools.product(npeaks, widths, maskers):
            background = os.path.join(
                "discovery.dir",
                ".".join([track, n, w, masker, "background", "fasta"]))
            yield (track + "_intervals.load", background,
                   int(n), int(w), masker)


@follows(loadIntervals, mkdir("discovery.dir"))
@files(suggestMotifDiscoveryForeground)
def buildDiscoverySequences(infile, outfile, npeaks, width, masker):
    '''get the peak sequences, masked or not as specified in the ini file.
    '''
    track = P.snip(infile, "_intervals.load")
    dbhandle = connect()

    nseq = PipelineMotifs.writeSequencesForIntervals(
        track, outfile, dbhandle,
        full=False,
        masker=[masker],
        halfwidth=width,
        maxsize=int(PARAMS["motifs_max_size"]),
        proportion=None,
        num_sequences=npeaks,
        order='peakval')

    if nseq == 0:
        E.warn("%s: no sequences in foreground" % outfile)
        P.touch(outfile)


@follows(loadIntervals, mkdir("discovery.dir"))
@files(suggestMotifDiscoveryBackground)
def buildBackgroundSequences(infile, outfile, npeaks, width, masker):
    '''get the peak sequences, masked or not as specified in the ini file.
    '''
    track = P.snip(infile, "_intervals.load")
    dbhandle = connect()

    nseq = PipelineMotifs.writeSequencesForIntervals(
        track, outfile, dbhandle,
        full=False,
        masker=[masker],
        halfwidth=width,
        maxsize=int(PARAMS["motifs_max_size"]),
        proportion=None,
        num_sequences=npeaks,
        order='peakval',
        shift="leftright")

    if nseq == 0:
        E.warn("%s: no sequences in background" % outfile)
        # P.touch( outfile )


@transform(buildBackgroundSequences, suffix(".fasta"), ".markov")
def buildMemeBackgroundFiles(infile, outfile):
    '''prepare the meme background model'''
    statement = '''fasta-get-markov -m 2 %(infile)s > %(outfile)s''' % locals()
    P.run()


@follows(mkdir("memechip.dir"))
@merge(PARAMS["memechip_transfac_matrices"],
       "memechip.dir/transfac.filtered.dat")
def filterTransfac(infile, outfile):
    '''filter the transfac matrices, here for vertebrates'''
    statement = '''cat %(infile)s
    | cgat transfac2transfac
    --method=filter
    --filter-method=V
    --log=%(outfile)s.log
    > %(outfile)s
    '''
    P.run()


@transform(filterTransfac, suffix(".dat"), ".meme")
def makeMemeMotifs(infile, outfile):
    '''convert transfac motifs to meme format'''
    statement = '''transfac2meme -use_acc -logodds %(infile)s
    > %(outfile)s
    '''
    P.run()


@follows(mkdir("memechip.dir"))
@collate((buildDiscoverySequences, buildMemeBackgroundFiles),
         regex("discovery.dir/(.*).(foreground.fasta|background.markov)"),
         r"memechip.dir/\1.memechip")
def runMemeChip(infiles, outfile):

    background_markov, foreground_fasta = infiles
    assert foreground_fasta.endswith(".fasta")

    transfacMotifs = PARAMS["memechip_transfac_meme"]

    nmotifs = PARAMS["memechip_nmotifs"]
    ncpu = PARAMS["memechip_ncpu"]
    # job_options = "-pe mpi %(ncpu)i " % locals()
    # job_queue = "mpi.q"
    # '-meme-p %(ncpu)i'

    outdir = os.path.join(os.path.abspath(PARAMS["exportdir"]),
                          "memechip", os.path.basename(outfile))

    # remove any existing output directory as otherwise meme-chip will fail
    try:
        shutil.rmtree(outdir)
    except OSError:
        pass

    statement = '''meme-chip -o %(outdir)s
    -db %(transfacMotifs)s
    -bfile %(background_markov)s
    -ccut 0
    -meme-mod zoops
    -meme-minw %(memechip_minw)s
    -meme-maxw %(memechip_maxw)s
    -meme-nmotifs %(memechip_nmotifs)s
    -meme-maxsize %(memechip_max_size)i
%(foreground_fasta)s %(memechip_options)s > %(outfile)s ''' P.run() @merge(runMemeChip, "memechip_summary.load") def loadMemeChipSummary(infiles, outfile): '''load information about motifs into database.''' outf = P.getTempFile(".") outf.write("track\tnpeaks\twidth\tmasking\tpath\n") for infile in infiles: if IOTools.isEmpty(infile): continue fn = P.snip(os.path.basename(infile), ".memechip") track, npeaks, width, masking = fn.split(".") outf.write( "\t".join(map(str, (track, npeaks, width, masking, fn))) + "\n") outf.close() P.load(outf.name, outfile) os.unlink(outf.name) @transform(exportMotifDiscoverySequences, suffix(".fasta"), ".motifseq_stats.load") def loadMotifSequenceComposition(infile, outfile): '''compute sequence composition of sequences used for ab-initio search.''' tablename = P.toTable(outfile) statement = ''' cgat fasta2table --section=na --log=%(outfile)s < %(infile)s | cgat csv2db %(csv2db_options)s --table=%(tablename)s > %(outfile)s''' P.run() @merge("*.motif", "motif_info.load") def loadMotifInformation(infiles, outfile): '''load information about motifs into database.''' outf = P.getTempFile(".") outf.write("motif\n") for infile in infiles: if IOTools.isEmpty(infile): continue motif = P.snip(infile, ".motif") outf.write("%s\n" % motif) outf.close() P.load(outf.name, outfile, "--allow-empty-file") os.unlink(outf.name) @transform(runMeme, suffix(".meme"), ".tomtom") def runTomTom(infile, outfile): '''compare ab-initio motifs against tomtom.''' PipelineMotifs.runTomTom(infile, outfile) @transform(runTomTom, suffix(".tomtom"), "_tomtom.load") def loadTomTom(infile, outfile): '''load tomtom results''' tablename = P.toTable(outfile) resultsdir = os.path.join( os.path.abspath(PARAMS["exportdir"]), "tomtom", infile) xml_file = os.path.join(resultsdir, "tomtom.xml") if not os.path.exists(xml_file): E.warn("no tomtom output - skipped loading ") P.touch(outfile) return # get the motif name from the xml file tree = xml.etree.ElementTree.ElementTree() tree.parse(xml_file) motifs = tree.find("targets") name2alt = {} for motif in motifs.getiterator("motif"): name = motif.get("name") alt = motif.get("alt") name2alt[name] = alt tmpfile = P.getTempFile(".") # parse the text file for line in IOTools.openFile(infile): if line.startswith("#Query"): tmpfile.write( "target_name\tquery_id\ttarget_id\toptimal_offset\tpvalue\tevalue\tqvalue\tOverlap\tquery_consensus\ttarget_consensus\torientation\n") continue data = line[:-1].split("\t") target_name = name2alt[data[1]] tmpfile.write("%s\t%s" % (target_name, line)) tmpfile.close() P.load(tmpfile.name, outfile) os.unlink(tmpfile.name) @files_re((exportMotifDetectionSequences, exportMotifControlSequences), "(\S+).control.fasta", [r"\1.control.fasta", r"\1.foreground.fasta", glob.glob("*.motif")], r"\1.mast.gz") def runMast(infiles, outfile): '''run mast on all intervals and motifs. Collect all results for an E-value up to 10000 so that all sequences are output and MAST curves can be computed. 10000 is a heuristic. ''' PipelineMotifs.runMAST(infiles, outfile) @jobs_limit(PARAMS.get("jobs_limit_db", 1), "db") @transform(runMast, suffix(".mast.gz"), "_mast.load") def loadMast(infile, outfile): '''parse mast file and load into database. Parse several motif runs and add them to the same table. Add columns for the control data as well. 
''' PipelineMotifs.loadMAST(infile, outfile) @follows(loadMotifInformation, mkdir(os.path.join(PARAMS["exportdir"], "motifs"))) @merge(loadMast, "motifs.export") def exportMotifLocations(infiles, outfile): '''export motif locations. There will be a bed-file per motif. Overlapping motif matches in different tracks will be merged. ''' dbh = connect() cc = dbh.cursor() motifs = [x[0] for x in cc.execute("SELECT motif FROM motif_info").fetchall()] for motif in motifs: tmpf = P.getTempFile(".") for infile in infiles: table = P.toTable(infile) track = P.snip(table, "_mast") for x in cc.execute( """SELECT contig, start, end, '%(track)s', evalue FROM %(table)s WHERE motif = '%(motif)s' AND start IS NOT NULL""" % locals()): tmpf.write("\t".join(map(str, x)) + "\n") tmpf.close() outfile = os.path.join( PARAMS["exportdir"], "motifs", "%s.bed.gz" % motif) tmpfname = tmpf.name statement = '''mergeBed -i %(tmpfname)s -nms | gzip > %(outfile)s''' P.run() os.unlink(tmpf.name) @follows(loadMemeChipSummary, loadIntervals) def full(): '''run the full pipeline.''' @follows(mkdir("report")) def build_report(): '''build report from scratch.''' E.info("starting documentation build process from scratch") P.run_report(clean=True) @follows(mkdir("report")) def update_report(): '''update report.''' E.info("updating documentation") P.run_report(clean=False) @follows(mkdir("%s/bedfiles" % PARAMS["web_dir"]), update_report, ) def publish(): '''publish files.''' # publish web pages P.publish_report() # publish additional data web_dir = PARAMS["web_dir"] project_id = P.getProjectId() # directory, files exportfiles = { "intervals": glob.glob(os.path.join( PARAMS["exportdir"], "bed", "*.bed.gz")) + glob.glob(os.path.join(PARAMS["exportdir"], "bed", "*.bed.gz.tbi")), } bams = [] for targetdir, filenames in exportfiles.items(): if len(filenames) == 0: E.warn("no files for target '%s'" % targetdir) for src in filenames: dest = "%s/%s/%s" % (web_dir, targetdir, os.path.basename(src)) if dest.endswith(".bam"): bams.append(dest) dest = os.path.abspath(dest) destdir = os.path.dirname(dest) if not os.path.exists(destdir): os.makedirs(destdir) if not os.path.exists(dest): E.debug("creating symlink from %s to %s" % (src, dest)) os.symlink(os.path.abspath(src), dest) def main(argv=None): if argv is None: argv = sys.argv P.main(argv) if __name__ == "__main__": sys.exit(P.main(sys.argv))
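
# Illustrative command-line usage (a sketch only; the exact invocation is
# described in the module docstring above and paths are placeholders):
#
#   python pipeline_motifs.py config          # write a local pipeline.ini
#   python pipeline_motifs.py make full       # run the whole pipeline
#   python pipeline_motifs.py make build_report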
#------------------------------------------------------------------------------- # Copyright 2017 Cognizant Technology Solutions # # Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations under # the License. #------------------------------------------------------------------------------- ''' Created on Jul 15, 2019 @author: 302683 ''' from datetime import datetime as dateTime2 import datetime import copy import json from dateutil import parser from ....core.BaseAgent3 import BaseAgent class AzureBoardAgent(BaseAgent): @BaseAgent.timed def process(self): self.userid = self.getCredential("userid") self.passwd = self.getCredential("passwd") baseUrl = self.config.get("baseUrl", '') wiqlUrl = self.config.get("wiqlUrl", '') projectName = self.config.get("projectName", '') startFrom = self.config.get("startFrom", '') lastUpdated = self.tracking.get("lastupdated", startFrom) responseTemplate = self.getResponseTemplate() WIQL_URL = wiqlUrl newWorkItemQuery = "{\"query\":\"Select [System.Id] From WorkItems Where [System.TeamProject] =='" + \ projectName + "' AND [System.ChangedDate] > '" + \ lastUpdated + "' order by [System.ChangedDate] asc\"}" changeLog = self.config.get('dynamicTemplate', {}).get('changeLog', None) headers = {'Content-Type': 'application/json'} if changeLog: changeLogFields = changeLog['fields'] changeLogMetadata = changeLog['metadata'] changeLogResponseTemplate = changeLog['responseTemplate'] startFromDate = parser.parse(startFrom) updatetimestamp = None sprintField = self.config.get("sprintField", None) workLogData = [] wiqlResponse = self.getResponse(WIQL_URL, 'POST', self.userid, self.passwd, newWorkItemQuery, None, headers) for workItemIterator in range(0, len(wiqlResponse["workItems"])): workItem = wiqlResponse["workItems"][workItemIterator] data = [] newWorkItemData = {} workItemUrl = baseUrl + "_apis/wit/workItems/" + \ str(workItem["id"]) + "?$expand=all" workItemData = self.getResponse(workItemUrl, 'GET', self.userid, self.passwd, None) injectData = {} Parent = [] Child = [] Commit = [] if workItemData.get("relations", None) is not None: for relation in workItemData["relations"]: if relation["attributes"].get("name", None) == 'Parent': Parent.append(relation["url"].split('/')[8]) if relation["attributes"].get("name", None) == 'Child': Child.append(relation["url"].split('/')[8]) if relation["attributes"].get("name", None) == 'Fixed in Commit': Commit.append(relation["url"].lower().split('%2f')[2]) injectData["Parent"] = Parent injectData["Child"] = Child injectData["Commit"] = Commit injectData["isNodeUpdated"] = True updatetimestamp = workItemData["fields"]["System.ChangedDate"] parsedIssue = self.parseResponse(responseTemplate, workItemData, injectData) data += parsedIssue if changeLog: workLogData = self.processChangeLog(baseUrl, workItem["id"], changeLogFields, changeLogResponseTemplate, startFromDate) dt = parser.parse(updatetimestamp) fromDateTime = dt + datetime.timedelta(minutes=0o1) fromDateTime = fromDateTime.strftime('%Y-%m-%d %H:%M') self.tracking["lastupdated"] = fromDateTime jiraKeyMetadata = { 
"dataUpdateSupported": True, "uniqueKey": ["key"]} self.publishToolsData(data, jiraKeyMetadata) if len(workLogData) > 0: insighstTimeXFieldMapping = self.config.get('dynamicTemplate', {}).get('changeLog', {}).get('insightsTimeXFieldMapping',None) timeStampField=insighstTimeXFieldMapping.get('timefield',None) timeStampFormat=insighstTimeXFieldMapping.get('timeformat',None) isEpoch=insighstTimeXFieldMapping.get('isEpoch',None); timeFieldMapping=self.config.get('dynamicTemplate', {}).get('changeLog', {}).get('timeFieldMapping',None) self.publishToolsData(workLogData, changeLogMetadata,timeStampField,timeStampFormat,isEpoch,True) self.updateTrackingJson(self.tracking) def processChangeLog(self, baseUrl, issue, workLogFields, responseTemplate, startFromDate): workItemChangeUrl = baseUrl + \ "_apis/wit/workItems/" + str(issue) + "/updates" workItemDataUpdate = self.getResponse(workItemChangeUrl, 'GET', self.userid, self.passwd, None) workLogData = [] injectData = {'issueKey': str(issue)} if workItemDataUpdate: histories = workItemDataUpdate["value"] for change in histories: data = copy.deepcopy(self.parseResponse(responseTemplate, change, injectData)[0]) changeDate = parser.parse(data['changeDate'][:19]) if (str(changeDate) == "9999-01-01 00:00:00+00:00") or (changeDate > startFromDate): items = change.get("fields", None) if items: for item in items: if item in workLogFields: dataCopy = copy.deepcopy(data) dataCopy['changedfield'] = item dataCopy['from'] = items[item].get('oldValue', None) dataCopy['to'] = items[item].get('newValue', None) workLogData.append(dataCopy) relations = change.get("relations",None) if relations: relationshipData = dict() if relations.get("added",None): addedRelations = relations.get("added",None) for relation in addedRelations: relation["attributes"]["name"] = relation["attributes"]["name"].replace(" ","") if (relation["attributes"]["name"] == "Parent") or (relation["attributes"]["name"] == "Child") or (relation["attributes"]["name"] == "FixedinCommit"): if "added"+relation["attributes"]["name"] in relationshipData: if relation["attributes"]["name"] == "FixedinCommit": relationshipData["added"+relation["attributes"]["name"]].append(relation["url"].lower().split("%2f")[2]) else: relationshipData["added"+relation["attributes"]["name"]].append(relation["url"].split("/")[8]) else: relationshipData["added"+relation["attributes"]["name"]] = [] if relation["attributes"]["name"] == "FixedinCommit": relationshipData["added"+relation["attributes"]["name"]].append(relation["url"].lower().split("%2f")[2]) else: relationshipData["added"+relation["attributes"]["name"]].append(relation["url"].split("/")[8]) if relations.get("removed",None): removedRelations = relations.get("removed",None) for relation in removedRelations: relation["attributes"]["name"] = relation["attributes"]["name"].replace(" ","") if (relation["attributes"]["name"] == "Parent") or (relation["attributes"]["name"] == "Child") or (relation["attributes"]["name"] == "FixedinCommit"): if "removed"+relation["attributes"]["name"] in relationshipData: if relation["attributes"]["name"] == "FixedinCommit": relationshipData["removed"+relation["attributes"]["name"]].append(relation["url"].lower().split("%2f")[2]) else: relationshipData["removed"+relation["attributes"]["name"]].append(relation["url"].split("/")[8]) else: relationshipData["removed"+relation["attributes"]["name"]] = [] if relation["attributes"]["name"] == "FixedinCommit": 
relationshipData["removed"+relation["attributes"]["name"]].append(relation["url"].lower().split("%2f")[2]) else: relationshipData["removed"+relation["attributes"]["name"]].append(relation["url"].split("/")[8]) if relationshipData: dataCopy = copy.deepcopy(data) dataCopy['changedfield'] = "relationship" for relationship in relationshipData: dataCopy[relationship] = relationshipData[relationship] workLogData.append(dataCopy) return workLogData def scheduleExtensions(self): extensions = self.config.get( 'dynamicTemplate', {}).get('extensions', None) if extensions: sprints = extensions.get('sprints', None) if sprints: self.registerExtension( 'sprints', self.retrieveSprintDetails, sprints.get('runSchedule')) def retrieveSprintDetails(self): sprintDetails = self.config.get('dynamicTemplate', {}).get( 'extensions', {}).get('sprints', None) insighstTimeXFieldMapping = sprintDetails.get('insightsTimeXFieldMapping',None) timeStampField=insighstTimeXFieldMapping.get('timefield',None) timeStampFormat=insighstTimeXFieldMapping.get('timeformat',None) isEpoch=insighstTimeXFieldMapping.get('isEpoch',None); teamApiUrl = sprintDetails.get('teamApiUrl') responseTemplate = sprintDetails.get('sprintResponseTemplate', None) sprintMetadata = sprintDetails.get('sprintMetadata') userid = self.config.get('userid', None) passwd = self.config.get('passwd', None) teams = self.getResponse(teamApiUrl, 'GET', userid, passwd, None)["value"] for team in teams: sprintApiUrl = sprintDetails.get("sprintApiUrl", None) if sprintApiUrl: sprintApiUrl = sprintApiUrl.replace("<<team>>", team["name"].replace(" ", "%20")) injectData = {"teamName": team["name"] } sprints = self.getResponse(sprintApiUrl, 'GET', userid, passwd, None)["value"] for sprint in sprints: self.publishToolsData(self.parseResponse(responseTemplate, sprint, injectData), sprintMetadata,timeStampField,timeStampFormat,isEpoch,True) if __name__ == "__main__": AzureBoardAgent()
import piquant.parameters as parameters import pytest import schema import tempfile def _get_test_parameter( name="name", title="The Name", option_name="--option-name", option_validator=int, is_numeric=False, value_namer=None, file_namer=None): param = parameters._Parameter( name, title, option_name, option_validator, is_numeric, value_namer, file_namer) parameters._PARAMETERS.remove(param) parameters._RUN_PARAMETERS.remove(param) return param def _get_ignore_params(): return [ parameters.READ_DEPTH, parameters.PAIRED_END, parameters.ERRORS, parameters.BIAS, parameters.TRANSCRIPT_GTF, parameters.GENOME_FASTA_DIR, parameters.NUM_MOLECULES ] def test_get_run_parameters_returns_parameters_instances(): params = parameters.get_run_parameters() assert all([isinstance(p, parameters._Parameter) for p in params]) def test_parameter_name_is_correct(): name = "parameter name" p = _get_test_parameter(name=name) assert p.name == name def test_parameter_title_is_correct(): title = "A Title" p = _get_test_parameter(title=title) assert p.title == title def test_parameter_option_name_is_correct(): option_name = "--polyA" p = _get_test_parameter(option_name=option_name) assert p.option_name == option_name def test_parameter_option_validator_is_correct(): option_validator = lambda x: x p = _get_test_parameter(option_validator=option_validator) assert p.option_validator == option_validator def test_parameter_is_numeric_is_correct(): is_numeric = True p = _get_test_parameter(is_numeric=is_numeric) assert p.is_numeric == is_numeric def test_get_value_name_returns_correct_value_when_no_value_namer_supplied(): value = 30 p = _get_test_parameter() assert p.get_value_name(value) == value def test_get_value_name_returns_correct_value_when_value_namer_supplied(): value = 30 p = _get_test_parameter(value_namer=lambda x: "VAL{v}".format(v=x)) assert p.get_value_name(value) == "VAL" + str(value) def test_get_file_name_part_returns_correct_value_when_no_value_or_file_namer_supplied(): value = 30 p = _get_test_parameter() assert p.get_file_name_part(value) == value def test_get_file_name_part_returns_correct_value_when_no_file_namer_supplied(): value = 30 p = _get_test_parameter(value_namer=lambda x: "VAL{v}".format(v=x)) assert p.get_file_name_part(value) == "VAL" + str(value) def test_get_file_name_part_returns_correct_value_when_file_namer_supplied(): value = 30 p = _get_test_parameter(file_namer=lambda x: "{v}bp".format(v=x)) assert p.get_file_name_part(value) == str(value) + "bp" def test_validate_command_line_parameter_sets_returns_correct_number_of_param_sets(): options = { "--quant-method": "Cufflinks", "--read-length": "10,20", } ignore_params = _get_ignore_params() param_vals = parameters.validate_command_line_parameter_sets( None, options, ignore_params) assert len(param_vals) == 2 def test_validate_command_line_parameter_sets_returns_correct_number_of_transformed_values(): num_values = 5 options = { "--read-length": ",".join([str(i) for i in range(0, num_values)]) } ignore_params = _get_ignore_params() ignore_params.append(parameters.QUANT_METHOD) param_vals = parameters.validate_command_line_parameter_sets( None, options, ignore_params) assert len(param_vals[parameters.READ_LENGTH.name]) == num_values def test_validate_command_line_parameter_sets_returns_correct_transformed_values(): len1 = 10 len2 = 20 options = { "--read-length": str(len1) + "," + str(len2) } ignore_params = _get_ignore_params() ignore_params.append(parameters.QUANT_METHOD) param_vals = parameters.validate_command_line_parameter_sets( None, 
options, ignore_params) assert len1 in param_vals[parameters.READ_LENGTH.name] assert len2 in param_vals[parameters.READ_LENGTH.name] def test_validate_command_line_parameter_sets_raises_exception_for_invalid_param_values(): options = { "--read-length": "abc" } with pytest.raises(schema.SchemaError): parameters.validate_command_line_parameter_sets(None, options) def test_validate_command_line_parameter_sets_raises_exception_if_param_values_not_supplied(): options = { "--read-length": "10,20", } ignore_params = _get_ignore_params() with pytest.raises(schema.SchemaError): parameters.validate_command_line_parameter_sets( None, options, ignore_params) def test_validate_command_line_parameter_sets_reads_parameters_from_file(): len1 = 10 len2 = 20 with tempfile.NamedTemporaryFile() as f: f.write("--quant-method Cufflinks\n") f.write("--read-length " + str(len1) + "," + str(len2) + "\n") f.flush() ignore_params = _get_ignore_params() param_vals = parameters.validate_command_line_parameter_sets( f.name, {}, ignore_params) assert len1 in param_vals[parameters.READ_LENGTH.name] assert len2 in param_vals[parameters.READ_LENGTH.name] def test_validate_command_line_parameter_sets_overrides_file_parameters_with_cl_options(): len1 = 10 len2 = 20 len3 = 30 with tempfile.NamedTemporaryFile() as f: f.write("--read-length " + str(len1) + "," + str(len2)) f.flush() options = { "--read-length": str(len3) } ignore_params = _get_ignore_params() ignore_params.append(parameters.QUANT_METHOD) param_vals = parameters.validate_command_line_parameter_sets( f.name, options, ignore_params) assert len1 not in param_vals[parameters.READ_LENGTH.name] assert len2 not in param_vals[parameters.READ_LENGTH.name] assert len3 in param_vals[parameters.READ_LENGTH.name] def test_get_file_name_returns_correct_name(): assert parameters.get_file_name( read_depth=30, read_length=50, paired_end=True, bias=False) == "30x_50b_pe_no_bias" def test_execute_for_param_sets_executes_for_correct_number_of_parameter_sets(): len1 = 3 len2 = 5 len3 = 7 params_values = { "read_length": [1]*len1, "read_depth": [2]*len2, "errors": [True]*len3 } execute_counter = [] def count_incrementer(logger, options, **params): execute_counter.append(1) parameters.execute_for_param_sets( [count_incrementer], None, None, **params_values) assert len(execute_counter) == len1 * len2 * len3 def test_execute_for_param_sets_executes_all_callables(): execute_record = [] def callable1(logger, options, **params): execute_record.append(1) def callable2(logger, options, **params): execute_record.append(2) parameters.execute_for_param_sets( [callable1, callable2], None, None, param=["a"]) assert 1 in execute_record assert 2 in execute_record assert len(execute_record) == 2 def test_execute_for_param_sets_executes_for_correct_sets_of_parameters(): params1 = [75, 100] params2 = [30, 50] params_values = { "read_length": params1, "read_depth": params2 } execute_record = [] def callable1(logger, options, **params): execute_record.append([v for v in params.values()]) parameters.execute_for_param_sets([callable1], None, None, **params_values) execute_record = [set(params) for params in execute_record] assert set([params1[0], params2[0]]) in execute_record assert set([params1[0], params2[1]]) in execute_record assert set([params1[1], params2[0]]) in execute_record assert set([params1[1], params2[1]]) in execute_record
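

def test_execute_for_param_sets_passes_single_values_to_callables():
    # Illustrative sketch in the style of the tests above: with a single
    # one-element parameter list each callable runs exactly once and receives
    # the lone value among its keyword arguments.
    received = []

    def recorder(logger, options, **params):
        received.append(list(params.values()))

    parameters.execute_for_param_sets([recorder], None, None, read_length=[75])

    assert received == [[75]]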
""" dj-stripe Webhook Tests. """ import json import warnings from collections import defaultdict from copy import deepcopy from unittest.mock import Mock, PropertyMock, call, patch from uuid import UUID import pytest from django.test import TestCase, override_settings from django.test.client import Client from django.urls import reverse from djstripe import webhooks from djstripe.models import Event, Transfer, WebhookEventTrigger from djstripe.models.webhooks import WebhookEndpoint, get_remote_ip from djstripe.settings import djstripe_settings from djstripe.webhooks import TEST_EVENT_ID, call_handlers, handler, handler_all from . import ( FAKE_CUSTOM_ACCOUNT, FAKE_EVENT_TEST_CHARGE_SUCCEEDED, FAKE_EVENT_TRANSFER_CREATED, FAKE_STANDARD_ACCOUNT, FAKE_TRANSFER, FAKE_WEBHOOK_ENDPOINT_1, IS_STATICMETHOD_AUTOSPEC_SUPPORTED, ) pytestmark = pytest.mark.django_db def mock_webhook_handler(webhook_event_trigger): webhook_event_trigger.process() class TestWebhookEventTrigger(TestCase): """Test class to test WebhookEventTrigger model and its methods""" def _send_event(self, event_data): return Client().post( reverse("djstripe:webhook"), json.dumps(event_data), content_type="application/json", HTTP_STRIPE_SIGNATURE="PLACEHOLDER", ) def test_webhook_test_event(self): self.assertEqual(WebhookEventTrigger.objects.count(), 0) resp = self._send_event(FAKE_EVENT_TEST_CHARGE_SUCCEEDED) self.assertEqual(resp.status_code, 200) self.assertFalse(Event.objects.filter(id=TEST_EVENT_ID).exists()) self.assertEqual(WebhookEventTrigger.objects.count(), 1) event_trigger = WebhookEventTrigger.objects.first() self.assertTrue(event_trigger.is_test_event) def test___str__(self): self.assertEqual(WebhookEventTrigger.objects.count(), 0) resp = self._send_event(FAKE_EVENT_TEST_CHARGE_SUCCEEDED) self.assertEqual(resp.status_code, 200) self.assertEqual(WebhookEventTrigger.objects.count(), 1) webhookeventtrigger = WebhookEventTrigger.objects.first() self.assertEqual( f"id={webhookeventtrigger.id}, valid={webhookeventtrigger.valid}, processed={webhookeventtrigger.processed}", str(webhookeventtrigger), ) @override_settings(DJSTRIPE_WEBHOOK_VALIDATION="retrieve_event") @patch.object(Transfer, "_attach_objects_post_save_hook") @patch( "stripe.Transfer.retrieve", return_value=deepcopy(FAKE_TRANSFER), autospec=True ) @patch( "stripe.Event.retrieve", return_value=deepcopy(FAKE_EVENT_TRANSFER_CREATED), autospec=True, ) def test_webhook_retrieve_event_fail( self, event_retrieve_mock, transfer_retrieve_mock, transfer__attach_object_post_save_hook_mock, ): invalid_event = deepcopy(FAKE_EVENT_TRANSFER_CREATED) invalid_event["id"] = "evt_invalid" invalid_event["data"]["valid"] = "not really" resp = self._send_event(invalid_event) self.assertEqual(resp.status_code, 400) self.assertFalse(Event.objects.filter(id="evt_invalid").exists()) @patch.object( WebhookEventTrigger.validate, "__defaults__", (None, "whsec_XXXXX", 300, "retrieve_event"), ) @patch.object(Transfer, "_attach_objects_post_save_hook") @patch( "stripe.Account.retrieve", return_value=deepcopy(FAKE_STANDARD_ACCOUNT), autospec=IS_STATICMETHOD_AUTOSPEC_SUPPORTED, ) @patch( "stripe.Transfer.retrieve", return_value=deepcopy(FAKE_TRANSFER), autospec=True ) @patch( "stripe.Event.retrieve", return_value=deepcopy(FAKE_EVENT_TRANSFER_CREATED), autospec=True, ) def test_webhook_retrieve_event_pass( self, event_retrieve_mock, transfer_retrieve_mock, account_retrieve_mock, transfer__attach_object_post_save_hook_mock, ): resp = self._send_event(FAKE_EVENT_TRANSFER_CREATED) 
self.assertEqual(resp.status_code, 200) event_retrieve_mock.assert_called_once_with( api_key=djstripe_settings.STRIPE_SECRET_KEY, id=FAKE_EVENT_TRANSFER_CREATED["id"], ) @override_settings( DJSTRIPE_WEBHOOK_VALIDATION="verify_signature", DJSTRIPE_WEBHOOK_SECRET="whsec_XXXXX", ) @patch.object(Transfer, "_attach_objects_post_save_hook") @patch( "stripe.Transfer.retrieve", return_value=deepcopy(FAKE_TRANSFER), autospec=True ) @patch( "stripe.Event.retrieve", return_value=deepcopy(FAKE_EVENT_TRANSFER_CREATED), autospec=True, ) def test_webhook_invalid_verify_signature_fail( self, event_retrieve_mock, transfer_retrieve_mock, transfer__attach_object_post_save_hook_mock, ): invalid_event = deepcopy(FAKE_EVENT_TRANSFER_CREATED) invalid_event["id"] = "evt_invalid" invalid_event["data"]["valid"] = "not really" resp = self._send_event(invalid_event) self.assertEqual(resp.status_code, 400) self.assertFalse(Event.objects.filter(id="evt_invalid").exists()) @override_settings( DJSTRIPE_WEBHOOK_VALIDATION="verify_signature", DJSTRIPE_WEBHOOK_SECRET="whsec_XXXXX", ) @patch.object(Transfer, "_attach_objects_post_save_hook") @patch( "stripe.WebhookSignature.verify_header", return_value=True, autospec=IS_STATICMETHOD_AUTOSPEC_SUPPORTED, ) @patch( "stripe.Account.retrieve", return_value=deepcopy(FAKE_STANDARD_ACCOUNT), autospec=IS_STATICMETHOD_AUTOSPEC_SUPPORTED, ) @patch( "stripe.Transfer.retrieve", return_value=deepcopy(FAKE_TRANSFER), autospec=True ) @patch( "stripe.Event.retrieve", return_value=deepcopy(FAKE_EVENT_TRANSFER_CREATED), autospec=True, ) def test_webhook_verify_signature_pass( self, event_retrieve_mock, transfer_retrieve_mock, account_retrieve_mock, verify_header_mock, transfer__attach_object_post_save_hook_mock, ): resp = self._send_event(FAKE_EVENT_TRANSFER_CREATED) self.assertEqual(resp.status_code, 200) self.assertFalse(Event.objects.filter(id="evt_invalid").exists()) verify_header_mock.assert_called_once_with( json.dumps(FAKE_EVENT_TRANSFER_CREATED), "PLACEHOLDER", djstripe_settings.WEBHOOK_SECRET, djstripe_settings.WEBHOOK_TOLERANCE, ) event_retrieve_mock.assert_not_called() @patch.object( WebhookEventTrigger.validate, "__defaults__", (None, "whsec_XXXXX", 300, None) ) @patch.object(Transfer, "_attach_objects_post_save_hook") @patch("stripe.WebhookSignature.verify_header", autospec=True) @patch( "stripe.Account.retrieve", return_value=deepcopy(FAKE_STANDARD_ACCOUNT), autospec=IS_STATICMETHOD_AUTOSPEC_SUPPORTED, ) @patch( "stripe.Transfer.retrieve", return_value=deepcopy(FAKE_TRANSFER), autospec=True ) @patch( "stripe.Event.retrieve", return_value=deepcopy(FAKE_EVENT_TRANSFER_CREATED), autospec=True, ) def test_webhook_no_validation_pass( self, event_retrieve_mock, transfer_retrieve_mock, account_retrieve_mock, verify_header_mock, transfer__attach_object_post_save_hook_mock, ): invalid_event = deepcopy(FAKE_EVENT_TRANSFER_CREATED) invalid_event["id"] = "evt_invalid" invalid_event["data"]["valid"] = "not really" # ensure warning is raised with pytest.warns(None, match=r"WEBHOOK VALIDATION is disabled."): resp = self._send_event(invalid_event) self.assertEqual(resp.status_code, 200) self.assertTrue(Event.objects.filter(id="evt_invalid").exists()) event_retrieve_mock.assert_not_called() verify_header_mock.assert_not_called() def test_webhook_no_signature(self): self.assertEqual(WebhookEventTrigger.objects.count(), 0) resp = Client().post( reverse("djstripe:webhook"), "{}", content_type="application/json" ) self.assertEqual(resp.status_code, 400) 
self.assertEqual(WebhookEventTrigger.objects.count(), 0) def test_webhook_remote_addr_is_none(self): self.assertEqual(WebhookEventTrigger.objects.count(), 0) with warnings.catch_warnings(): warnings.simplefilter("ignore") Client().post( reverse("djstripe:webhook"), "{}", content_type="application/json", HTTP_STRIPE_SIGNATURE="PLACEHOLDER", REMOTE_ADDR=None, ) self.assertEqual(WebhookEventTrigger.objects.count(), 1) event_trigger = WebhookEventTrigger.objects.first() self.assertEqual(event_trigger.remote_ip, "0.0.0.0") def test_webhook_remote_addr_is_empty_string(self): self.assertEqual(WebhookEventTrigger.objects.count(), 0) with warnings.catch_warnings(): warnings.simplefilter("ignore") Client().post( reverse("djstripe:webhook"), "{}", content_type="application/json", HTTP_STRIPE_SIGNATURE="PLACEHOLDER", REMOTE_ADDR="", ) self.assertEqual(WebhookEventTrigger.objects.count(), 1) event_trigger = WebhookEventTrigger.objects.first() self.assertEqual(event_trigger.remote_ip, "0.0.0.0") @patch.object(Transfer, "_attach_objects_post_save_hook") @patch( "djstripe.models.WebhookEventTrigger.validate", return_value=True, autospec=True ) @patch("djstripe.models.WebhookEventTrigger.process", autospec=True) def test_webhook_reraise_exception( self, webhook_event_process_mock, webhook_event_validate_mock, transfer__attach_object_post_save_hook_mock, ): class ProcessException(Exception): pass exception_message = "process fail" webhook_event_process_mock.side_effect = ProcessException(exception_message) self.assertEqual(WebhookEventTrigger.objects.count(), 0) fake_event = deepcopy(FAKE_EVENT_TRANSFER_CREATED) with self.assertRaisesMessage(ProcessException, exception_message): self._send_event(fake_event) self.assertEqual(WebhookEventTrigger.objects.count(), 1) event_trigger = WebhookEventTrigger.objects.first() self.assertEqual(event_trigger.exception, exception_message) @patch.object( WebhookEventTrigger.validate, "__defaults__", (None, "whsec_XXXXX", 300, None) ) @patch.object(Transfer, "_attach_objects_post_save_hook") @patch.object( djstripe_settings, "WEBHOOK_EVENT_CALLBACK", return_value=mock_webhook_handler ) @patch( "stripe.Account.retrieve", return_value=deepcopy(FAKE_STANDARD_ACCOUNT), autospec=IS_STATICMETHOD_AUTOSPEC_SUPPORTED, ) @patch( "stripe.Transfer.retrieve", return_value=deepcopy(FAKE_TRANSFER), autospec=True ) @patch("stripe.Event.retrieve", autospec=True) def test_webhook_with_custom_callback( self, event_retrieve_mock, transfer_retrieve_mock, account_retrieve_mock, webhook_event_callback_mock, transfer__attach_object_post_save_hook_mock, ): fake_event = deepcopy(FAKE_EVENT_TRANSFER_CREATED) event_retrieve_mock.return_value = fake_event with pytest.warns(None): resp = self._send_event(fake_event) self.assertEqual(resp.status_code, 200) webhook_event_trigger = WebhookEventTrigger.objects.get() webhook_event_callback_mock.called_once_with(webhook_event_trigger) @patch.object( WebhookEventTrigger.validate, "__defaults__", (None, "whsec_XXXXX", 300, None) ) @patch.object(Transfer, "_attach_objects_post_save_hook") @patch( "stripe.Account.retrieve", return_value=deepcopy(FAKE_STANDARD_ACCOUNT), autospec=IS_STATICMETHOD_AUTOSPEC_SUPPORTED, ) @patch( "stripe.Transfer.retrieve", return_value=deepcopy(FAKE_TRANSFER), autospec=True ) @patch("stripe.Event.retrieve", autospec=True) def test_webhook_with_transfer_event_duplicate( self, event_retrieve_mock, transfer_retrieve_mock, account_retrieve_mock, transfer__attach_object_post_save_hook_mock, ): fake_event = 
deepcopy(FAKE_EVENT_TRANSFER_CREATED) event_retrieve_mock.return_value = fake_event with pytest.warns(None): resp = self._send_event(fake_event) self.assertEqual(resp.status_code, 200) self.assertTrue(Event.objects.filter(type="transfer.created").exists()) self.assertEqual(1, Event.objects.filter(type="transfer.created").count()) # Duplication with pytest.warns(None): resp = self._send_event(fake_event) self.assertEqual(resp.status_code, 200) self.assertEqual(1, Event.objects.filter(type="transfer.created").count()) @patch.object( WebhookEventTrigger.validate, "__defaults__", (None, "whsec_XXXXX", 300, None) ) @patch.object(Transfer, "_attach_objects_post_save_hook") @patch( "stripe.Account.retrieve", return_value=deepcopy(FAKE_STANDARD_ACCOUNT), autospec=IS_STATICMETHOD_AUTOSPEC_SUPPORTED, ) @patch( "stripe.Transfer.retrieve", return_value=deepcopy(FAKE_TRANSFER), autospec=True ) @patch("stripe.Event.retrieve", autospec=True) def test_webhook_good_platform_account( self, event_retrieve_mock, transfer_retrieve_mock, account_retrieve_mock, transfer__attach_object_post_save_hook_mock, ): fake_event = deepcopy(FAKE_EVENT_TRANSFER_CREATED) event_retrieve_mock.return_value = fake_event with pytest.warns(None): resp = self._send_event(fake_event) self.assertEqual(resp.status_code, 200) self.assertEqual(Event.objects.count(), 1) self.assertEqual(WebhookEventTrigger.objects.count(), 1) event_trigger = WebhookEventTrigger.objects.first() self.assertEqual(event_trigger.is_test_event, False) self.assertEqual( event_trigger.stripe_trigger_account.id, FAKE_STANDARD_ACCOUNT["id"] ) @patch.object( WebhookEventTrigger.validate, "__defaults__", (None, "whsec_XXXXX", 300, None) ) @patch.object(Transfer, "_attach_objects_post_save_hook") @patch( "stripe.Account.retrieve", return_value=deepcopy(FAKE_CUSTOM_ACCOUNT), autospec=IS_STATICMETHOD_AUTOSPEC_SUPPORTED, ) @patch( "stripe.Transfer.retrieve", return_value=deepcopy(FAKE_TRANSFER), autospec=True ) @patch("stripe.Event.retrieve", autospec=True) def test_webhook_good_connect_account( self, event_retrieve_mock, transfer_retrieve_mock, account_retrieve_mock, transfer__attach_object_post_save_hook_mock, ): fake_event = deepcopy(FAKE_EVENT_TRANSFER_CREATED) fake_event["account"] = FAKE_CUSTOM_ACCOUNT["id"] event_retrieve_mock.return_value = fake_event with pytest.warns(None): resp = self._send_event(fake_event) self.assertEqual(resp.status_code, 200) self.assertEqual(Event.objects.count(), 1) self.assertEqual(WebhookEventTrigger.objects.count(), 1) event_trigger = WebhookEventTrigger.objects.first() self.assertEqual(event_trigger.is_test_event, False) self.assertEqual( event_trigger.stripe_trigger_account.id, FAKE_CUSTOM_ACCOUNT["id"] ) @patch.object( WebhookEventTrigger.validate, "__defaults__", (None, "whsec_XXXXX", 300, None) ) @patch.object(target=Event, attribute="invoke_webhook_handlers", autospec=True) @patch( "stripe.Transfer.retrieve", return_value=deepcopy(FAKE_TRANSFER), autospec=True ) @patch("stripe.Event.retrieve", autospec=True) def test_webhook_error( self, event_retrieve_mock, transfer_retrieve_mock, mock_invoke_webhook_handlers, ): """Test the case where webhook processing fails to ensure we rollback and do not commit the Event object to the database. 
""" mock_invoke_webhook_handlers.side_effect = KeyError("Test error") fake_event = deepcopy(FAKE_EVENT_TRANSFER_CREATED) event_retrieve_mock.return_value = fake_event with self.assertRaises(KeyError): with pytest.warns(None): self._send_event(fake_event) self.assertEqual(Event.objects.count(), 0) self.assertEqual(WebhookEventTrigger.objects.count(), 1) event_trigger = WebhookEventTrigger.objects.first() self.assertEqual(event_trigger.is_test_event, False) self.assertEqual(event_trigger.exception, "'Test error'") class TestWebhookHandlers(TestCase): def setUp(self): # Reset state of registrations per test patcher = patch.object( webhooks, "registrations", new_callable=(lambda: defaultdict(list)) ) self.addCleanup(patcher.stop) self.registrations = patcher.start() patcher = patch.object(webhooks, "registrations_global", new_callable=list) self.addCleanup(patcher.stop) self.registrations_global = patcher.start() def test_global_handler_registration(self): func_mock = Mock() handler_all()(func_mock) event = self._call_handlers("wib.ble", {"data": "foo"}) # handled self.assertEqual(1, func_mock.call_count) func_mock.assert_called_with(event=event) def test_event_handler_registration(self): global_func_mock = Mock() handler_all()(global_func_mock) func_mock = Mock() handler("foo")(func_mock) event = self._call_handlers("foo.bar", {"data": "foo"}) # handled self._call_handlers("bar.foo", {"data": "foo"}) # not handled self.assertEqual(2, global_func_mock.call_count) # called each time self.assertEqual(1, func_mock.call_count) func_mock.assert_called_with(event=event) def test_event_subtype_handler_registration(self): global_func_mock = Mock() handler_all()(global_func_mock) func_mock = Mock() handler("foo.bar")(func_mock) event1 = self._call_handlers("foo.bar", {"data": "foo"}) # handled event2 = self._call_handlers("foo.bar.wib", {"data": "foo"}) # handled self._call_handlers("foo.baz", {"data": "foo"}) # not handled self.assertEqual(3, global_func_mock.call_count) # called each time self.assertEqual(2, func_mock.call_count) func_mock.assert_has_calls([call(event=event1), call(event=event2)]) def test_global_handler_registration_with_function(self): func_mock = Mock() handler_all(func_mock) event = self._call_handlers("wib.ble", {"data": "foo"}) # handled self.assertEqual(1, func_mock.call_count) func_mock.assert_called_with(event=event) def test_event_handle_registation_with_string(self): func_mock = Mock() handler("foo")(func_mock) event = self._call_handlers("foo.bar", {"data": "foo"}) # handled self.assertEqual(1, func_mock.call_count) func_mock.assert_called_with(event=event) def test_event_handle_registation_with_list_of_strings(self): func_mock = Mock() handler("foo", "bar")(func_mock) event1 = self._call_handlers("foo.bar", {"data": "foo"}) # handled event2 = self._call_handlers("bar.foo", {"data": "bar"}) # handled self.assertEqual(2, func_mock.call_count) func_mock.assert_has_calls([call(event=event1), call(event=event2)]) def test_webhook_event_trigger_invalid_body(self): trigger = WebhookEventTrigger(remote_ip="127.0.0.1", body="invalid json") assert not trigger.json_body # # Helpers # @staticmethod def _call_handlers(event_spec, data): event = Mock(spec=Event) parts = event_spec.split(".") category = parts[0] verb = ".".join(parts[1:]) type(event).parts = PropertyMock(return_value=parts) type(event).category = PropertyMock(return_value=category) type(event).verb = PropertyMock(return_value=verb) call_handlers(event=event) return event class TestGetRemoteIp: class RequestClass: def 
__init__(self, data): self.data = data @property def META(self): return self.data @pytest.mark.parametrize( "data", [ {"HTTP_X_FORWARDED_FOR": "127.0.0.1,345.5.5.3,451.1.1.2"}, { "REMOTE_ADDR": "422.0.0.1", "HTTP_X_FORWARDED_FOR": "127.0.0.1,345.5.5.3,451.1.1.2", }, { "REMOTE_ADDR": "127.0.0.1", }, ], ) def test_get_remote_ip(self, data): request = self.RequestClass(data) assert get_remote_ip(request) == "127.0.0.1" @pytest.mark.parametrize( "data", [ { "REMOTE_ADDR": "", }, { "pqwwe": "127.0.0.1", }, ], ) def test_get_remote_ip_remote_addr_is_none(self, data): request = self.RequestClass(data) # ensure warning is raised with pytest.warns( None, match=r"Could not determine remote IP \(missing REMOTE_ADDR\)\." ): assert get_remote_ip(request) == "0.0.0.0" class TestWebhookEndpoint: """Test Class to test WebhookEndpoint and its methods""" def test_sync_from_stripe_data_non_existent_webhook_endpoint(self): fake_webhook = deepcopy(FAKE_WEBHOOK_ENDPOINT_1) webhook_endpoint = WebhookEndpoint.sync_from_stripe_data(fake_webhook) assert webhook_endpoint.id == fake_webhook["id"] assert isinstance(webhook_endpoint.djstripe_uuid, UUID) # assert WebHookEndpoint's secret does not exist for a new sync assert not webhook_endpoint.secret def test_sync_from_stripe_data_existent_webhook_endpoint(self): fake_webhook_1 = deepcopy(FAKE_WEBHOOK_ENDPOINT_1) webhook_endpoint = WebhookEndpoint.sync_from_stripe_data(fake_webhook_1) assert webhook_endpoint.id == fake_webhook_1["id"] djstripe_uuid = webhook_endpoint.djstripe_uuid assert isinstance(djstripe_uuid, UUID) # assert WebHookEndpoint's secret does not exist for a new sync assert not webhook_endpoint.secret # add a secret to the webhook_endpoint fake_webhook_2 = deepcopy(FAKE_WEBHOOK_ENDPOINT_1) fake_webhook_2["secret"] = "whsec_rguCE5LMINfRKjmIkxDJM1lOPXkAOQp3" webhook_endpoint.secret = fake_webhook_2["secret"] webhook_endpoint.save() # re-sync the WebhookEndpoint instance WebhookEndpoint.sync_from_stripe_data(fake_webhook_2) webhook_endpoint.refresh_from_db() assert webhook_endpoint.id == fake_webhook_2["id"] # assert secret got updated assert webhook_endpoint.secret == fake_webhook_2["secret"] # assert UUID didn't get regenerated assert webhook_endpoint.djstripe_uuid == djstripe_uuid def test___str__(self): fake_webhook = deepcopy(FAKE_WEBHOOK_ENDPOINT_1) webhook_endpoint = WebhookEndpoint.sync_from_stripe_data(fake_webhook) assert str(webhook_endpoint) == webhook_endpoint.url assert ( str(webhook_endpoint) == "https://dev.example.com/stripe/webhook/f6f9aa0e-cb6c-4e0f-b5ee-5e2b9e0716d8" )
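
# Illustrative usage sketch (not part of the suite above): downstream projects
# register handlers through the same decorators exercised by
# TestWebhookHandlers. The event type string below is only an example.
#
#   from djstripe import webhooks
#
#   @webhooks.handler("customer.updated")
#   def my_handler(event, **kwargs):
#       ...  # inspect the dj-stripe Event instance here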
import hail as hl from .java import Env, info from .misc import new_temp_file, local_path_uri, new_local_temp_dir import os import zipfile from urllib.request import urlretrieve from hailtop.utils import sync_retry_transient_errors __all__ = [ 'get_1kg', 'get_hgdp', 'get_movie_lens' ] resources = { '1kg_annotations': 'https://storage.googleapis.com/hail-tutorial/1kg_annotations.txt', '1kg_matrix_table': 'https://storage.googleapis.com/hail-tutorial/1kg.vcf.bgz', '1kg_ensembl_gene_annotations': 'https://storage.googleapis.com/hail-tutorial/ensembl_gene_annotations.txt', 'HGDP_annotations': 'https://storage.googleapis.com/hail-tutorial/hgdp/hgdp_pop_and_sex_annotations.tsv', 'HGDP_matrix_table': 'https://storage.googleapis.com/hail-tutorial/hgdp/hgdp_subset.vcf.bgz', 'HGDP_ensembl_gene_annotations': 'https://storage.googleapis.com/hail-tutorial/hgdp/hgdp_gene_annotations.tsv', 'movie_lens_100k': 'http://files.grouplens.org/datasets/movielens/ml-100k.zip', } tmp_dir: str = None def init_temp_dir(): global tmp_dir if tmp_dir is None: tmp_dir = new_local_temp_dir() def _dir_exists(fs, path): return fs.exists(path) and fs.is_dir(path) def _file_exists(fs, path): return fs.exists(path) and fs.is_file(path) def _copy_to_tmp(fs, src, extension=None): dst = new_temp_file(extension=extension) fs.copy(src, dst) return dst def get_1kg(output_dir, overwrite: bool = False): """Download subset of the `1000 Genomes <http://www.internationalgenome.org/>`__ dataset and sample annotations. Notes ----- The download is about 15M. Parameters ---------- output_dir Directory in which to write data. overwrite If ``True``, overwrite any existing files/directories at `output_dir`. """ fs = Env.fs() if not _dir_exists(fs, output_dir): fs.mkdir(output_dir) matrix_table_path = os.path.join(output_dir, '1kg.mt') vcf_path = os.path.join(output_dir, '1kg.vcf.bgz') sample_annotations_path = os.path.join(output_dir, '1kg_annotations.txt') gene_annotations_path = os.path.join(output_dir, 'ensembl_gene_annotations.txt') if (overwrite or not _dir_exists(fs, matrix_table_path) or not _file_exists(fs, sample_annotations_path) or not _file_exists(fs, vcf_path) or not _file_exists(fs, gene_annotations_path)): init_temp_dir() tmp_vcf = os.path.join(tmp_dir, '1kg.vcf.bgz') source = resources['1kg_matrix_table'] info(f'downloading 1KG VCF ...\n' f' Source: {source}') sync_retry_transient_errors(urlretrieve, resources['1kg_matrix_table'], tmp_vcf) cluster_readable_vcf = _copy_to_tmp(fs, local_path_uri(tmp_vcf), extension='vcf.bgz') info('importing VCF and writing to matrix table...') hl.import_vcf(cluster_readable_vcf, min_partitions=16).write(matrix_table_path, overwrite=True) tmp_sample_annot = os.path.join(tmp_dir, '1kg_annotations.txt') source = resources['1kg_annotations'] info(f'downloading 1KG annotations ...\n' f' Source: {source}') sync_retry_transient_errors(urlretrieve, source, tmp_sample_annot) tmp_gene_annot = os.path.join(tmp_dir, 'ensembl_gene_annotations.txt') source = resources['1kg_ensembl_gene_annotations'] info(f'downloading Ensembl gene annotations ...\n' f' Source: {source}') sync_retry_transient_errors(urlretrieve, source, tmp_gene_annot) hl.hadoop_copy(local_path_uri(tmp_sample_annot), sample_annotations_path) hl.hadoop_copy(local_path_uri(tmp_gene_annot), gene_annotations_path) hl.hadoop_copy(local_path_uri(tmp_vcf), vcf_path) info('Done!') else: info('1KG files found') def get_hgdp(output_dir, overwrite: bool = False): """Download subset of the `Human Genome Diversity Panel 
<https://www.internationalgenome.org/data-portal/data-collection/hgdp/>`__ dataset and sample annotations. Notes ----- The download is about 30MB. Parameters ---------- output_dir Directory in which to write data. overwrite If ``True``, overwrite any existing files/directories at `output_dir`. """ fs = Env.fs() if not _dir_exists(fs, output_dir): fs.mkdir(output_dir) matrix_table_path = os.path.join(output_dir, 'HGDP.mt') vcf_path = os.path.join(output_dir, 'HGDP.vcf.bgz') sample_annotations_path = os.path.join(output_dir, 'HGDP_annotations.txt') gene_annotations_path = os.path.join(output_dir, 'ensembl_gene_annotations.txt') if (overwrite or not _dir_exists(fs, matrix_table_path) or not _file_exists(fs, sample_annotations_path) or not _file_exists(fs, vcf_path) or not _file_exists(fs, gene_annotations_path)): init_temp_dir() tmp_vcf = os.path.join(tmp_dir, 'HGDP.vcf.bgz') source = resources['HGDP_matrix_table'] info(f'downloading HGDP VCF ...\n' f' Source: {source}') sync_retry_transient_errors(urlretrieve, resources['HGDP_matrix_table'], tmp_vcf) cluster_readable_vcf = _copy_to_tmp(fs, local_path_uri(tmp_vcf), extension='vcf.bgz') info('importing VCF and writing to matrix table...') hl.import_vcf(cluster_readable_vcf, min_partitions=16, reference_genome='GRCh38').write(matrix_table_path, overwrite=True) tmp_sample_annot = os.path.join(tmp_dir, 'HGDP_annotations.txt') source = resources['HGDP_annotations'] info(f'downloading HGDP annotations ...\n' f' Source: {source}') sync_retry_transient_errors(urlretrieve, source, tmp_sample_annot) tmp_gene_annot = os.path.join(tmp_dir, 'ensembl_gene_annotations.txt') source = resources['HGDP_ensembl_gene_annotations'] info(f'downloading Ensembl gene annotations ...\n' f' Source: {source}') sync_retry_transient_errors(urlretrieve, source, tmp_gene_annot) hl.hadoop_copy(local_path_uri(tmp_sample_annot), sample_annotations_path) hl.hadoop_copy(local_path_uri(tmp_gene_annot), gene_annotations_path) hl.hadoop_copy(local_path_uri(tmp_vcf), vcf_path) info('Done!') else: info('HGDP files found') def get_movie_lens(output_dir, overwrite: bool = False): """Download public Movie Lens dataset. Notes ----- The download is about 6M. See the `MovieLens website <https://grouplens.org/datasets/movielens/100k/>`__ for more information about this dataset. Parameters ---------- output_dir Directory in which to write data. overwrite If ``True``, overwrite existing files/directories at those locations. 
""" fs = Env.fs() if not _dir_exists(fs, output_dir): fs.mkdir(output_dir) paths = [os.path.join(output_dir, x) for x in ['movies.ht', 'ratings.ht', 'users.ht']] if overwrite or any(not _dir_exists(fs, f) for f in paths): init_temp_dir() source = resources['movie_lens_100k'] tmp_path = os.path.join(tmp_dir, 'ml-100k.zip') info(f'downloading MovieLens-100k data ...\n' f' Source: {source}') sync_retry_transient_errors(urlretrieve, source, tmp_path) with zipfile.ZipFile(tmp_path, 'r') as z: z.extractall(tmp_dir) user_table_path = os.path.join(tmp_dir, 'ml-100k', 'u.user') movie_table_path = os.path.join(tmp_dir, 'ml-100k', 'u.item') ratings_table_path = os.path.join(tmp_dir, 'ml-100k', 'u.data') assert (os.path.exists(user_table_path)) assert (os.path.exists(movie_table_path)) assert (os.path.exists(ratings_table_path)) user_cluster_readable = _copy_to_tmp(fs, local_path_uri(user_table_path), extension='txt') movie_cluster_readable = _copy_to_tmp(fs, local_path_uri(movie_table_path), 'txt') ratings_cluster_readable = _copy_to_tmp(fs, local_path_uri(ratings_table_path), 'txt') [movies_path, ratings_path, users_path] = paths genres = ['Action', 'Adventure', 'Animation', "Children's", 'Comedy', 'Crime', 'Documentary', 'Drama', 'Fantasy', 'Film-Noir', 'Horror', 'Musical', 'Mystery', 'Romance', 'Sci-Fi', 'Thriller', 'War', 'Western'] # utility functions for importing movies def field_to_array(ds, field): return hl.if_else(ds[field] != 0, hl.array([field]), hl.empty_array(hl.tstr)) def fields_to_array(ds, fields): return hl.flatten(hl.array([field_to_array(ds, f) for f in fields])) def rename_columns(ht, new_names): return ht.rename({k: v for k, v in zip(ht.row, new_names)}) info(f'importing users table and writing to {users_path} ...') users = rename_columns( hl.import_table(user_cluster_readable, key=['f0'], no_header=True, impute=True, delimiter='|'), ['id', 'age', 'sex', 'occupation', 'zipcode']) users.write(users_path, overwrite=True) info(f'importing movies table and writing to {movies_path} ...') movies = hl.import_table(movie_cluster_readable, key=['f0'], no_header=True, impute=True, delimiter='|') movies = rename_columns(movies, ['id', 'title', 'release date', 'video release date', 'IMDb URL', 'unknown'] + genres) movies = movies.drop('release date', 'video release date', 'unknown', 'IMDb URL') movies = movies.transmute(genres=fields_to_array(movies, genres)) movies.write(movies_path, overwrite=True) info(f'importing ratings table and writing to {ratings_path} ...') ratings = hl.import_table(ratings_cluster_readable, no_header=True, impute=True) ratings = rename_columns(ratings, ['user_id', 'movie_id', 'rating', 'timestamp']) ratings = ratings.drop('timestamp') ratings.write(ratings_path, overwrite=True) else: info('Movie Lens files found!')
# -*- coding: utf-8 -*- """ Bindings for SQLite https://docs.python.org/2/library/sqlite3.html Notes, certain SQLite versions might have a problem with long integers http://jakegoulding.com/blog/2011/02/06/sqlite-64-bit-integers/ Looking at the docs, it says it will set an integer value to 1-4, 6, or 8 bytes depending on the size, but I couldn't get it to accept anything over the 32-bit signed integer value of around 2billion savepoints and transactions are similar to Postgres https://www.sqlite.org/lang_savepoint.html http://sqlite.org/lang_transaction.html alter table is similar to Postgres https://www.sqlite.org/lang_altertable.html other links that were helpful http://www.numericalexpert.com/blog/sqlite_blob_time/ """ from __future__ import unicode_literals, division, print_function, absolute_import import os import decimal import datetime from distutils import dir_util import re import sqlite3 try: import thread except ImportError: thread = None from datatypes import Datetime # first party from ..exception import UniqueError from ..compat import * from .base import SQLInterface, SQLConnection class SQLiteRowDict(sqlite3.Row): def __getitem__(self, k): if is_py2: return super(SQLiteRowDict, self).__getitem__(b"{}".format(k)) else: return super(SQLiteRowDict, self).__getitem__(k) def get(self, k, default_val=None): r = default_val r = self[k] return r class SQLiteConnection(SQLConnection, sqlite3.Connection): """ Thin wrapper around the default connection to make sure it has a similar interface to Postgres' connection instance so the common code can all be the same in the parent class """ def __init__(self, *args, **kwargs): super(SQLiteConnection, self).__init__(*args, **kwargs) self.closed = 0 def close(self, *args, **kwargs): r = super(SQLiteConnection, self).close(*args, **kwargs) self.closed = 1 return r class TimestampType(object): """External sqlite3 databases can store the TIMESTAMP type as unix timestamps, this caused parsing problems when pulling the values out of the db because the default adapter expected TIMESTAMP to be in the form of YYYY-MM-DD HH:MM:SS.SSSSSS and so it would fail to convert the DDDDDD.DDD values, this handles that conversion https://www.sqlite.org/lang_datefunc.html the "unixepoch" modifier only works for dates between 0000-01-01 00:00:00 and 5352-11-01 10:52:47 (unix times of -62167219200 through 106751991167) """ @staticmethod def adapt(val): return val.isoformat(b" ") if is_py2 else val.isoformat(" ") @staticmethod def convert(val): val = StringType.adapt(val) if re.match(r"^\-?\d+\.\d+$", val): # account for unix timestamps with microseconds val = datetime.datetime.fromtimestamp(float(val)) elif re.match(r"^\-?\d+$", val): # account for unix timestamps without microseconds val = int(val) try: val = datetime.datetime.fromtimestamp(val) except ValueError: # we're hosed with this unix timestamp, but rather than error # out let's go ahead and return the closest approximation we # can get to the correct timestamp if val > 0: val = datetime.datetime.max else: val = datetime.datetime.min else: # ISO 8601 is not very strict with the date format and this tries to # capture most of that leniency, with the one exception that the # date must be in UTC # https://en.wikipedia.org/wiki/ISO_8601 m = re.match( r"^(\d{4}).?(\d{2}).?(\d{2}).(\d{2}):?(\d{2}):?(\d{2})(?:\.(\d+))?Z?$", val ) if m: parsed_dateparts = m.groups() dateparts = list(map(lambda x: int(x) if x else 0, parsed_dateparts[:6])) val = datetime.datetime(*dateparts) # account for ms with leading zeros if 
parsed_dateparts[6]: ms_len = len(parsed_dateparts[6]) if ms_len >= 3: millis = parsed_dateparts[6][:3] micros = parsed_dateparts[6][3:] or 0 else: millis = parsed_dateparts[6] or 0 micros = 0 # make sure each part is 3 digits by zero padding on the right if millis: millis = "{:0<3.3}".format(millis) if micros: micros = "{:0<3.3}".format(micros) val += datetime.timedelta( milliseconds=int(millis), microseconds=int(micros) ) else: raise ValueError("Cannot infer UTC datetime value from {}".format(val)) return val class BooleanType(object): @staticmethod def adapt(val): """From python you get False and True, convert those to 1/0""" return 1 if val else 0 @staticmethod def convert(val): """from the db you get values like b'0' and b'1', convert those to True/False""" return bool(int(val)) class NumericType(object): @staticmethod def adapt(val): return float(str(val)) @staticmethod def convert(val): if is_py2: ret = decimal.Decimal(str(val)) else: val = StringType.adapt(val) ret = decimal.Decimal(val) return ret class StringType(object): """this just makes sure 8-bit bytestrings get converted ok""" @staticmethod def adapt(val): #if isinstance(val, str): if isinstance(val, bytes): val = val.decode('utf-8') return val class SQLite(SQLInterface): val_placeholder = '?' _connection = None @classmethod def configure(cls, connection_config): dsn = getattr(connection_config, 'dsn', '') if dsn: host = connection_config.host db = connection_config.database if not host: path = db elif not db: path = host else: path = os.sep.join([host, db]) else: path = connection_config.database if not path: raise ValueError("no sqlite db path found in connection_config") connection_config.path = path return connection_config def _connect(self, connection_config): path = connection_config.path # https://docs.python.org/2/library/sqlite3.html#default-adapters-and-converters options = { 'isolation_level': None, 'detect_types': sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES, 'factory': SQLiteConnection, 'check_same_thread': True, # https://stackoverflow.com/a/2578401/5006 } option_keys = list(options.keys()) + ['timeout', 'cached_statements'] for k in option_keys: if k in connection_config.options: options[k] = connection_config.options[k] try: self._connection = sqlite3.connect(path, **options) except sqlite3.DatabaseError as e: path_d = os.path.dirname(path) if os.path.isdir(path_d): raise else: # let's try and make the directory path and connect again dir_util.mkpath(path_d) self._connection = sqlite3.connect(path, **options) # https://docs.python.org/2/library/sqlite3.html#row-objects self._connection.row_factory = SQLiteRowDict # https://docs.python.org/2/library/sqlite3.html#sqlite3.Connection.text_factory self._connection.text_factory = StringType.adapt # for some reason this is needed in python 3.6 in order for saved bytes # to be ran through the converter, not sure why sqlite3.register_converter(b'TEXT' if is_py2 else 'TEXT', StringType.adapt) sqlite3.register_adapter(decimal.Decimal, NumericType.adapt) sqlite3.register_converter(b'NUMERIC' if is_py2 else 'NUMERIC', NumericType.convert) sqlite3.register_adapter(bool, BooleanType.adapt) sqlite3.register_converter(b'BOOLEAN' if is_py2 else 'BOOLEAN', BooleanType.convert) # sadly, it doesn't look like these work for child classes so each class # has to be adapted even if its parent is already registered sqlite3.register_adapter(datetime.datetime, TimestampType.adapt) sqlite3.register_adapter(Datetime, TimestampType.adapt) sqlite3.register_converter(b'TIMESTAMP' if 
is_py2 else 'TIMESTAMP', TimestampType.convert) # turn on foreign keys # http://www.sqlite.org/foreignkeys.html self._query('PRAGMA foreign_keys = ON', ignore_result=True); self.readonly(self.connection_config.readonly) def get_connection(self): if not self.connected: self.connect() return self._connection def _get_thread(self): if thread: ret = str(thread.get_ident()) else: ret = "" return ret def _close(self): self._connection.close() self._connection = None def _readonly(self, readonly): self._query( # https://stackoverflow.com/a/49630725/5006 'PRAGMA query_only = {}'.format("ON" if readonly else "OFF"), ignore_result=True ) def _get_tables(self, table_name, **kwargs): query_str = 'SELECT tbl_name FROM sqlite_master WHERE type = ?' query_args = ['table'] if table_name: query_str += ' AND name = ?' query_args.append(str(table_name)) ret = self._query(query_str, query_args, **kwargs) return [r['tbl_name'] for r in ret] def get_field_SQL(self, field_name, field): """ returns the SQL for a given field with full type information http://www.sqlite.org/datatype3.html field_name -- string -- the field's name field -- Field() -- the set options for the field return -- string -- the field type (eg, foo BOOL NOT NULL) """ field_type = "" is_pk = field.options.get('pk', False) interface_type = field.interface_type if issubclass(interface_type, bool): field_type = 'BOOLEAN' elif issubclass(interface_type, long): if is_pk: field_type = 'INTEGER PRIMARY KEY' else: field_type = 'BIGINT' elif issubclass(interface_type, int): field_type = 'INTEGER' if is_pk: field_type += ' PRIMARY KEY' elif issubclass(interface_type, basestring): fo = field.options if field.is_ref(): # TODO -- 7-8-17 - this isn't a great way to do this, ideally the Field instance # would combine all the options of both the current field and the # foreign key field and return all those when Field.options is called # (with the current field's options taking precedence) but there are # lots of circular dependency things that happen when one field is # trying to get the schema of another field and I don't have time # to sort it all out right now ref_s = field.schema fo = ref_s.pk.options if 'size' in fo: field_type = 'CHARACTER({})'.format(fo['size']) elif 'max_size' in fo: field_type = 'VARCHAR({})'.format(fo['max_size']) else: field_type = 'TEXT' if fo.get('ignore_case', False): field_type += ' COLLATE NOCASE' if is_pk: field_type += ' PRIMARY KEY' elif issubclass(interface_type, datetime.datetime): #field_type = 'DATETIME' field_type = 'TIMESTAMP' elif issubclass(interface_type, datetime.date): field_type = 'DATE' elif issubclass(interface_type, float): field_type = 'REAL' size = field.options.get('size', field.options.get('max_size', 0)) if size > 6: field_type = 'DOUBLE PRECISION' elif issubclass(interface_type, decimal.Decimal): field_type = 'NUMERIC' elif issubclass(interface_type, bytearray): field_type = 'BLOB' else: raise ValueError('Unknown python type: {}'.format(interface_type.__name__)) if field.required: field_type += ' NOT NULL' else: field_type += ' NULL' if not is_pk: if field.is_ref(): ref_s = field.schema if field.required: # strong ref, it deletes on fk row removal field_type += ' REFERENCES {} ({}) ON UPDATE CASCADE ON DELETE CASCADE'.format( ref_s, ref_s.pk.name ) else: # weak ref, it sets column to null on fk row removal field_type += ' REFERENCES {} ({}) ON UPDATE CASCADE ON DELETE SET NULL'.format( ref_s, ref_s.pk.name ) return '{} {}'.format(self._normalize_name(field_name), field_type) def _set_table(self, 
schema, **kwargs): """ http://sqlite.org/lang_createtable.html """ query_str = [] query_str.append("CREATE TABLE {} (".format(self._normalize_table_name(schema))) query_fields = [] for field_name, field in schema.fields.items(): query_fields.append(' {}'.format(self.get_field_SQL(field_name, field))) query_str.append(",{}".format(os.linesep).join(query_fields)) query_str.append(')') query_str = os.linesep.join(query_str) ret = self._query(query_str, ignore_result=True, **kwargs) def _set_index(self, schema, name, fields, **index_options): """ https://www.sqlite.org/lang_createindex.html """ query_str = "CREATE {}INDEX IF NOT EXISTS '{}_{}' ON {} ({})".format( 'UNIQUE ' if index_options.get('unique', False) else '', schema, name, self._normalize_table_name(schema), ', '.join((self._normalize_name(f) for f in fields)) ) return self._query(query_str, ignore_result=True, **index_options) def _get_indexes(self, schema, **kwargs): """return all the indexes for the given schema""" # http://www.sqlite.org/pragma.html#schema # http://www.mail-archive.com/[email protected]/msg22055.html # http://stackoverflow.com/questions/604939/ ret = {} rs = self._query('PRAGMA index_list({})'.format(self._normalize_table_name(schema)), **kwargs) if rs: for r in rs: iname = r['name'] ret.setdefault(iname, []) indexes = self._query('PRAGMA index_info({})'.format(r['name']), **kwargs) for idict in indexes: ret[iname].append(idict['name']) return ret def _insert(self, schema, fields, **kwargs): """ http://www.sqlite.org/lang_insert.html """ field_formats = [] field_names = [] query_vals = [] for field_name, field_val in fields.items(): field_names.append(self._normalize_name(field_name)) field_formats.append(self.val_placeholder) query_vals.append(field_val) query_str = "INSERT INTO {} ({}) VALUES ({})".format( self._normalize_table_name(schema), ', '.join(field_names), ', '.join(field_formats) ) ret = self._query(query_str, query_vals, cursor_result=True, **kwargs) pk_name = schema.pk_name # http://stackoverflow.com/questions/6242756/ # could also do _query('SELECT last_insert_rowid()') return ret.lastrowid if pk_name not in fields else fields[pk_name] def _delete_tables(self, **kwargs): self._query('PRAGMA foreign_keys = OFF', ignore_result=True, **kwargs); ret = super(SQLite, self)._delete_tables(**kwargs) self._query('PRAGMA foreign_keys = ON', ignore_result=True, **kwargs); return ret def _delete_table(self, schema, **kwargs): #query_str = 'DROP TABLE IF EXISTS {}'.format(str(schema)) query_str = "DROP TABLE IF EXISTS {}".format(self._normalize_table_name(schema)) ret = self._query(query_str, ignore_result=True, **kwargs) def _handle_error(self, schema, e, **kwargs): ret = False if isinstance(e, sqlite3.OperationalError): e_msg = str(e) if "no such column" in e_msg or "has no column" in e_msg: #INSERT: "table yscrmiklbgdtx has no column named che" #SELECT: "no such column: che" try: ret = self._set_all_fields(schema, **kwargs) except ValueError as e: ret = False elif "no such table" in e_msg: ret = self._set_all_tables(schema, **kwargs) elif "UNIQUE" in e_msg: self.raise_error(e, e_class=UniqueError) return ret def _create_error(self, e, exc_info): if isinstance(e, sqlite3.IntegrityError): er = UniqueError(e, exc_info) else: er = super(SQLite, self)._create_error(e, exc_info) return er def _get_fields(self, table_name, **kwargs): """return all the fields for the given table""" ret = {} query_str = 'PRAGMA table_info({})'.format(self._normalize_table_name(table_name)) fields = self._query(query_str, **kwargs) 
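        # Each row from PRAGMA table_info(<table>) describes one column and has
        # the fields cid, name, type, notnull, dflt_value and pk.  Below this is
        # paired with PRAGMA foreign_key_list(<table>) so the declared SQL type
        # string (e.g. "VARCHAR(32)") can be mapped back onto a Python type.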
#pout.v([dict(d) for d in fields]) query_str = 'PRAGMA foreign_key_list({})'.format(self._normalize_table_name(table_name)) fks = {f["from"]: f for f in self._query(query_str, **kwargs)} #pout.v([dict(d) for d in fks.values()]) pg_types = { "INTEGER": int, "BIGINT": long, "DOUBLE PRECISION": float, "FLOAT": float, "REAL": float, "NUMERIC": decimal.Decimal, "BOOLEAN": bool, "DATE": datetime.date, "TIMESTAMP": datetime.datetime, "CHARACTER": str, "VARCHAR": str, "TEXT": str, "BLOB": bytearray, } # the rows we can set: field_type, name, field_required, min_size, max_size, # size, unique, pk, <foreign key info> # These keys will roughly correspond with schema.Field # TODO -- we could actually use "type" to get the size because SQLite returns # a value like VARCHAR[32] for row in fields: field = { "name": row["name"], "field_required": bool(row["notnull"]) or bool(row["pk"]), "pk": bool(row["pk"]), } for tname, ty in pg_types.items(): if row["type"].startswith(tname): field["field_type"] = ty break if field["pk"] and field["field_type"] is int: # we compensate for SQLite internally setting pk to int field["field_type"] = long if row["name"] in fks: field["schema_table_name"] = fks[row["name"]]["table"] field["ref_table_name"] = fks[row["name"]]["table"] ret[field["name"]] = field return ret def _normalize_date_SQL(self, field_name, field_kwargs, symbol): """ allow extracting information from date http://www.sqlite.org/lang_datefunc.html """ fstrs = [] k_opts = { 'day': "CAST(strftime('%d', {}) AS integer)", 'hour': "CAST(strftime('%H', {}) AS integer)", 'doy': "CAST(strftime('%j', {}) AS integer)", # day of year 'julian_day': "strftime('%J', {})", # YYYY-MM-DD 'month': "CAST(strftime('%m', {}) AS integer)", 'minute': "CAST(strftime('%M', {}) AS integer)", 'dow': "CAST(strftime('%w', {}) AS integer)", # day of week 0 = sunday 'week': "CAST(strftime('%W', {}) AS integer)", 'year': "CAST(strftime('%Y', {}) AS integer)" } for k, v in field_kwargs.items(): fstrs.append([k_opts[k].format(self._normalize_name(field_name)), self.val_placeholder, v]) return fstrs def _normalize_sort_SQL(self, field_name, field_vals, sort_dir_str): """ allow sorting by a set of values http://stackoverflow.com/questions/3303851/sqlite-and-custom-order-by """ fvi = None if sort_dir_str == 'ASC': fvi = (t for t in enumerate(field_vals)) else: fvi = (t for t in enumerate(reversed(field_vals))) query_sort_str = [' CASE {}'.format(self._normalize_name(field_name))] query_args = [] for i, v in fvi: query_sort_str.append(' WHEN {} THEN {}'.format(self.val_placeholder, i)) query_args.append(v) query_sort_str.append(' END') query_sort_str = "\n".join(query_sort_str) return query_sort_str, query_args def _normalize_bounds_SQL(self, bounds, sql_options): offset = bounds.offset if sql_options.get('one_query', False): limit = 1 else: limit, offset = bounds.get() if not bounds.has_limit(): limit = -1 return 'LIMIT {} OFFSET {}'.format( limit, offset )
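

# --- Illustrative sketch (editor's addition) -----------------------------------
# ``_normalize_sort_SQL`` above emits a ``CASE <field> WHEN ? THEN <i> ... END``
# block so rows can be ordered by an explicit list of values.  The snippet below
# demonstrates the same trick with the stdlib ``sqlite3`` module directly; the
# table name and values are assumptions for illustration only.
def _demo_sort_by_value_list():
    import sqlite3

    conn = sqlite3.connect(':memory:')
    conn.execute('CREATE TABLE fruit (name TEXT)')
    conn.executemany('INSERT INTO fruit (name) VALUES (?)',
                     [('banana',), ('apple',), ('cherry',)])

    wanted_order = ['cherry', 'banana', 'apple']
    case_sql = ' '.join('WHEN ? THEN {}'.format(i) for i, _ in enumerate(wanted_order))
    rows = conn.execute(
        'SELECT name FROM fruit ORDER BY CASE name {} END'.format(case_sql),
        wanted_order,
    ).fetchall()
    assert [r[0] for r in rows] == wanted_order
# --------------------------------------------------------------------------------
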
# Copyright 2009 Matt Chaput. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR # IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF # MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO # EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, # OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # The views and conclusions contained in the software and documentation are # those of the authors and should not be interpreted as representing official # policies, either expressed or implied, of Matt Chaput. from array import array from copy import copy from struct import calcsize from whoosh.compat import BytesIO, bytes_type from whoosh.compat import dump as dump_pickle from whoosh.compat import load as load_pickle from whoosh.compat import array_frombytes, array_tobytes from whoosh.system import _INT_SIZE, _SHORT_SIZE, _FLOAT_SIZE, _LONG_SIZE from whoosh.system import IS_LITTLE from whoosh.system import pack_byte, unpack_byte, pack_sbyte, unpack_sbyte from whoosh.system import pack_ushort, unpack_ushort from whoosh.system import pack_ushort_le, unpack_ushort_le from whoosh.system import pack_int, unpack_int, pack_uint, unpack_uint from whoosh.system import pack_uint_le, unpack_uint_le from whoosh.system import pack_long, unpack_long, pack_ulong, unpack_ulong from whoosh.system import pack_float, unpack_float from whoosh.util.varints import varint, read_varint from whoosh.util.varints import signed_varint, decode_signed_varint _SIZEMAP = dict((typecode, calcsize(typecode)) for typecode in "bBiIhHqQf") _ORDERMAP = {"little": "<", "big": ">"} _types = (("sbyte", "b"), ("ushort", "H"), ("int", "i"), ("long", "q"), ("float", "f")) # Main function class StructFile(object): """Returns a "structured file" object that wraps the given file object and provides numerous additional methods for writing structured data, such as "write_varint" and "write_long". 
""" def __init__(self, fileobj, name=None, onclose=None): self.file = fileobj self._name = name self.onclose = onclose self.is_closed = False self.is_real = hasattr(fileobj, "fileno") if self.is_real: self.fileno = fileobj.fileno def __repr__(self): return "%s(%r)" % (self.__class__.__name__, self._name) def __str__(self): return self._name def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): self.close() def __iter__(self): return iter(self.file) def raw_file(self): return self.file def read(self, *args, **kwargs): return self.file.read(*args, **kwargs) def readline(self, *args, **kwargs): return self.file.readline(*args, **kwargs) def write(self, *args, **kwargs): return self.file.write(*args, **kwargs) def tell(self, *args, **kwargs): return self.file.tell(*args, **kwargs) def seek(self, *args, **kwargs): return self.file.seek(*args, **kwargs) def truncate(self, *args, **kwargs): return self.file.truncate(*args, **kwargs) def flush(self): """Flushes the buffer of the wrapped file. This is a no-op if the wrapped file does not have a flush method. """ if hasattr(self.file, "flush"): self.file.flush() def close(self): """Closes the wrapped file. """ if self.is_closed: raise Exception("This file is already closed") if self.onclose: self.onclose(self) if hasattr(self.file, "close"): self.file.close() self.is_closed = True def subset(self, offset, length, name=None): from whoosh.filedb.compound import SubFile name = name or self._name return StructFile(SubFile(self.file, offset, length), name=name) def write_string(self, s): """Writes a string to the wrapped file. This method writes the length of the string first, so you can read the string back without having to know how long it was. """ self.write_varint(len(s)) self.write(s) def write_string2(self, s): self.write(pack_ushort(len(s)) + s) def write_string4(self, s): self.write(pack_int(len(s)) + s) def read_string(self): """Reads a string from the wrapped file. """ return self.read(self.read_varint()) def read_string2(self): l = self.read_ushort() return self.read(l) def read_string4(self): l = self.read_int() return self.read(l) def get_string2(self, pos): l = self.get_ushort(pos) base = pos + _SHORT_SIZE return self.get(base, l), base + l def get_string4(self, pos): l = self.get_int(pos) base = pos + _INT_SIZE return self.get(base, l), base + l def skip_string(self): l = self.read_varint() self.seek(l, 1) def write_varint(self, i): """Writes a variable-length unsigned integer to the wrapped file. """ self.write(varint(i)) def write_svarint(self, i): """Writes a variable-length signed integer to the wrapped file. """ self.write(signed_varint(i)) def read_varint(self): """Reads a variable-length encoded unsigned integer from the wrapped file. """ return read_varint(self.read) def read_svarint(self): """Reads a variable-length encoded signed integer from the wrapped file. """ return decode_signed_varint(read_varint(self.read)) def write_tagint(self, i): """Writes a sometimes-compressed unsigned integer to the wrapped file. This is similar to the varint methods but uses a less compressed but faster format. """ # Store numbers 0-253 in one byte. Byte 254 means "an unsigned 16-bit # int follows." Byte 255 means "An unsigned 32-bit int follows." if i <= 253: self.write(chr(i)) elif i <= 65535: self.write("\xFE" + pack_ushort(i)) else: self.write("\xFF" + pack_uint(i)) def read_tagint(self): """Reads a sometimes-compressed unsigned integer from the wrapped file. 
This is similar to the varint methods but uses a less compressed but faster format. """ tb = ord(self.read(1)) if tb == 254: return self.read_ushort() elif tb == 255: return self.read_uint() else: return tb def write_byte(self, n): """Writes a single byte to the wrapped file, shortcut for ``file.write(chr(n))``. """ self.write(pack_byte(n)) def read_byte(self): return ord(self.read(1)) def write_pickle(self, obj, protocol=-1): """Writes a pickled representation of obj to the wrapped file. """ dump_pickle(obj, self.file, protocol) def read_pickle(self): """Reads a pickled object from the wrapped file. """ return load_pickle(self.file) def write_sbyte(self, n): self.write(pack_sbyte(n)) def write_int(self, n): self.write(pack_int(n)) def write_uint(self, n): self.write(pack_uint(n)) def write_uint_le(self, n): self.write(pack_uint_le(n)) def write_ushort(self, n): self.write(pack_ushort(n)) def write_ushort_le(self, n): self.write(pack_ushort_le(n)) def write_long(self, n): self.write(pack_long(n)) def write_ulong(self, n): self.write(pack_ulong(n)) def write_float(self, n): self.write(pack_float(n)) def write_array(self, arry): if IS_LITTLE: arry = copy(arry) arry.byteswap() if self.is_real: arry.tofile(self.file) else: self.write(array_tobytes(arry)) def read_sbyte(self): return unpack_sbyte(self.read(1))[0] def read_int(self): return unpack_int(self.read(_INT_SIZE))[0] def read_uint(self): return unpack_uint(self.read(_INT_SIZE))[0] def read_uint_le(self): return unpack_uint_le(self.read(_INT_SIZE))[0] def read_ushort(self): return unpack_ushort(self.read(_SHORT_SIZE))[0] def read_ushort_le(self): return unpack_ushort_le(self.read(_SHORT_SIZE))[0] def read_long(self): return unpack_long(self.read(_LONG_SIZE))[0] def read_ulong(self): return unpack_ulong(self.read(_LONG_SIZE))[0] def read_float(self): return unpack_float(self.read(_FLOAT_SIZE))[0] def read_array(self, typecode, length): a = array(typecode) if self.is_real: a.fromfile(self.file, length) else: array_frombytes(a, self.read(length * _SIZEMAP[typecode])) if IS_LITTLE: a.byteswap() return a def get(self, position, length): self.seek(position) return self.read(length) def get_byte(self, position): return unpack_byte(self.get(position, 1))[0] def get_sbyte(self, position): return unpack_sbyte(self.get(position, 1))[0] def get_int(self, position): return unpack_int(self.get(position, _INT_SIZE))[0] def get_uint(self, position): return unpack_uint(self.get(position, _INT_SIZE))[0] def get_ushort(self, position): return unpack_ushort(self.get(position, _SHORT_SIZE))[0] def get_long(self, position): return unpack_long(self.get(position, _LONG_SIZE))[0] def get_ulong(self, position): return unpack_ulong(self.get(position, _LONG_SIZE))[0] def get_float(self, position): return unpack_float(self.get(position, _FLOAT_SIZE))[0] def get_array(self, position, typecode, length): self.seek(position) return self.read_array(typecode, length) class BufferFile(StructFile): def __init__(self, buf, name=None, onclose=None): self._buf = buf self._name = name self.file = BytesIO(buf) self.onclose = onclose self.is_real = False self.is_closed = False def subset(self, position, length, name=None): name = name or self._name return BufferFile(self.get(position, length), name=name) def get(self, position, length): return bytes_type(self._buf[position:position + length]) def get_array(self, position, typecode, length): a = array(typecode) array_frombytes(a, self.get(position, length * _SIZEMAP[typecode])) if IS_LITTLE: a.byteswap() return a class 
ChecksumFile(StructFile):
    def __init__(self, *args, **kwargs):
        StructFile.__init__(self, *args, **kwargs)
        self._check = 0
        self._crc32 = __import__("zlib").crc32

    def __iter__(self):
        for line in self.file:
            self._check = self._crc32(line, self._check)
            yield line

    def seek(self, *args):
        raise Exception("Cannot seek on a ChecksumFile")

    def read(self, *args, **kwargs):
        b = self.file.read(*args, **kwargs)
        self._check = self._crc32(b, self._check)
        return b

    def write(self, b):
        self._check = self._crc32(b, self._check)
        self.file.write(b)

    def checksum(self):
        return self._check & 0xffffffff
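

# --- Illustrative round-trip sketch (editor's addition) -------------------------
# Pack a few values through a ``StructFile`` over an in-memory buffer and read
# them back, using only methods defined in this module; the values themselves
# are arbitrary.
if __name__ == '__main__':
    from io import BytesIO

    sf = StructFile(BytesIO(), name='example')
    sf.write_varint(300)        # variable-length unsigned integer
    sf.write_string(b'hello')   # length-prefixed byte string
    sf.write_int(-42)           # fixed-size signed integer

    sf.seek(0)
    assert sf.read_varint() == 300
    assert sf.read_string() == b'hello'
    assert sf.read_int() == -42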
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union import warnings from azure.core.async_paging import AsyncItemPaged, AsyncList from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error from azure.core.pipeline import PipelineResponse from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod from azure.mgmt.core.exceptions import ARMErrorFormat from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling from ... import models as _models T = TypeVar('T') ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] class VirtualNetworksOperations: """VirtualNetworksOperations async operations. You should not instantiate this class directly. Instead, you should create a Client instance that instantiates it for you and attaches it as an attribute. :ivar models: Alias to model classes used in this operation group. :type models: ~azure.mgmt.network.v2019_08_01.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. 
""" models = _models def __init__(self, client, config, serializer, deserializer) -> None: self._client = client self._serialize = serializer self._deserialize = deserializer self._config = config async def _delete_initial( self, resource_group_name: str, virtual_network_name: str, **kwargs: Any ) -> None: cls = kwargs.pop('cls', None) # type: ClsType[None] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2019-08-01" # Construct URL url = self._delete_initial.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] request = self._client.delete(url, query_parameters, header_parameters) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 202, 204]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {}) _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}'} # type: ignore async def begin_delete( self, resource_group_name: str, virtual_network_name: str, **kwargs: Any ) -> AsyncLROPoller[None]: """Deletes the specified virtual network. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param virtual_network_name: The name of the virtual network. :type virtual_network_name: str :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. 
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response) :rtype: ~azure.core.polling.AsyncLROPoller[None] :raises ~azure.core.exceptions.HttpResponseError: """ polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod] cls = kwargs.pop('cls', None) # type: ClsType[None] lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] if cont_token is None: raw_result = await self._delete_initial( resource_group_name=resource_group_name, virtual_network_name=virtual_network_name, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) kwargs.pop('content_type', None) def get_long_running_output(pipeline_response): if cls: return cls(pipeline_response, None, {}) path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs) elif polling is False: polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}'} # type: ignore async def get( self, resource_group_name: str, virtual_network_name: str, expand: Optional[str] = None, **kwargs: Any ) -> "_models.VirtualNetwork": """Gets the specified virtual network by resource group. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param virtual_network_name: The name of the virtual network. :type virtual_network_name: str :param expand: Expands referenced resources. 
:type expand: str :keyword callable cls: A custom type or function that will be passed the direct response :return: VirtualNetwork, or the result of cls(response) :rtype: ~azure.mgmt.network.v2019_08_01.models.VirtualNetwork :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetwork"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2019-08-01" accept = "application/json" # Construct URL url = self.get.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') if expand is not None: query_parameters['$expand'] = self._serialize.query("expand", expand, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') request = self._client.get(url, query_parameters, header_parameters) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) deserialized = self._deserialize('VirtualNetwork', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}'} # type: ignore async def _create_or_update_initial( self, resource_group_name: str, virtual_network_name: str, parameters: "_models.VirtualNetwork", **kwargs: Any ) -> "_models.VirtualNetwork": cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetwork"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2019-08-01" content_type = kwargs.pop("content_type", "application/json") accept = "application/json" # Construct URL url = self._create_or_update_initial.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') body_content_kwargs = {} # type: Dict[str, Any] 
body_content = self._serialize.body(parameters, 'VirtualNetwork') body_content_kwargs['content'] = body_content request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 201]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) if response.status_code == 200: deserialized = self._deserialize('VirtualNetwork', pipeline_response) if response.status_code == 201: deserialized = self._deserialize('VirtualNetwork', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}'} # type: ignore async def begin_create_or_update( self, resource_group_name: str, virtual_network_name: str, parameters: "_models.VirtualNetwork", **kwargs: Any ) -> AsyncLROPoller["_models.VirtualNetwork"]: """Creates or updates a virtual network in the specified resource group. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param virtual_network_name: The name of the virtual network. :type virtual_network_name: str :param parameters: Parameters supplied to the create or update virtual network operation. :type parameters: ~azure.mgmt.network.v2019_08_01.models.VirtualNetwork :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. 
:return: An instance of AsyncLROPoller that returns either VirtualNetwork or the result of cls(response) :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2019_08_01.models.VirtualNetwork] :raises ~azure.core.exceptions.HttpResponseError: """ polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod] cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetwork"] lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] if cont_token is None: raw_result = await self._create_or_update_initial( resource_group_name=resource_group_name, virtual_network_name=virtual_network_name, parameters=parameters, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) kwargs.pop('content_type', None) def get_long_running_output(pipeline_response): deserialized = self._deserialize('VirtualNetwork', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs) elif polling is False: polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}'} # type: ignore async def _update_tags_initial( self, resource_group_name: str, virtual_network_name: str, parameters: "_models.TagsObject", **kwargs: Any ) -> "_models.VirtualNetwork": cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetwork"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2019-08-01" content_type = kwargs.pop("content_type", "application/json") accept = "application/json" # Construct URL url = self._update_tags_initial.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') body_content_kwargs = {} # type: Dict[str, Any] body_content = 
self._serialize.body(parameters, 'TagsObject') body_content_kwargs['content'] = body_content request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) deserialized = self._deserialize('VirtualNetwork', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized _update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}'} # type: ignore async def begin_update_tags( self, resource_group_name: str, virtual_network_name: str, parameters: "_models.TagsObject", **kwargs: Any ) -> AsyncLROPoller["_models.VirtualNetwork"]: """Updates a virtual network tags. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param virtual_network_name: The name of the virtual network. :type virtual_network_name: str :param parameters: Parameters supplied to update virtual network tags. :type parameters: ~azure.mgmt.network.v2019_08_01.models.TagsObject :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. 
:return: An instance of AsyncLROPoller that returns either VirtualNetwork or the result of cls(response) :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2019_08_01.models.VirtualNetwork] :raises ~azure.core.exceptions.HttpResponseError: """ polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod] cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetwork"] lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] if cont_token is None: raw_result = await self._update_tags_initial( resource_group_name=resource_group_name, virtual_network_name=virtual_network_name, parameters=parameters, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) kwargs.pop('content_type', None) def get_long_running_output(pipeline_response): deserialized = self._deserialize('VirtualNetwork', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) elif polling is False: polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}'} # type: ignore def list_all( self, **kwargs: Any ) -> AsyncIterable["_models.VirtualNetworkListResult"]: """Gets all virtual networks in a subscription. 
:keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either VirtualNetworkListResult or the result of cls(response) :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_08_01.models.VirtualNetworkListResult] :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetworkListResult"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2019-08-01" accept = "application/json" def prepare_request(next_link=None): # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') if not next_link: # Construct URL url = self.list_all.metadata['url'] # type: ignore path_format_arguments = { 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') request = self._client.get(url, query_parameters, header_parameters) else: url = next_link query_parameters = {} # type: Dict[str, Any] request = self._client.get(url, query_parameters, header_parameters) return request async def extract_data(pipeline_response): deserialized = self._deserialize('VirtualNetworkListResult', pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return deserialized.next_link or None, AsyncList(list_of_elem) async def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) return pipeline_response return AsyncItemPaged( get_next, extract_data ) list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/virtualNetworks'} # type: ignore def list( self, resource_group_name: str, **kwargs: Any ) -> AsyncIterable["_models.VirtualNetworkListResult"]: """Gets all virtual networks in a resource group. :param resource_group_name: The name of the resource group. 
:type resource_group_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either VirtualNetworkListResult or the result of cls(response) :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_08_01.models.VirtualNetworkListResult] :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetworkListResult"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2019-08-01" accept = "application/json" def prepare_request(next_link=None): # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') if not next_link: # Construct URL url = self.list.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') request = self._client.get(url, query_parameters, header_parameters) else: url = next_link query_parameters = {} # type: Dict[str, Any] request = self._client.get(url, query_parameters, header_parameters) return request async def extract_data(pipeline_response): deserialized = self._deserialize('VirtualNetworkListResult', pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return deserialized.next_link or None, AsyncList(list_of_elem) async def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) return pipeline_response return AsyncItemPaged( get_next, extract_data ) list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks'} # type: ignore async def check_ip_address_availability( self, resource_group_name: str, virtual_network_name: str, ip_address: str, **kwargs: Any ) -> "_models.IPAddressAvailabilityResult": """Checks whether a private IP address is available for use. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param virtual_network_name: The name of the virtual network. :type virtual_network_name: str :param ip_address: The private IP address to be verified. 
:type ip_address: str :keyword callable cls: A custom type or function that will be passed the direct response :return: IPAddressAvailabilityResult, or the result of cls(response) :rtype: ~azure.mgmt.network.v2019_08_01.models.IPAddressAvailabilityResult :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.IPAddressAvailabilityResult"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2019-08-01" accept = "application/json" # Construct URL url = self.check_ip_address_availability.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['ipAddress'] = self._serialize.query("ip_address", ip_address, 'str') query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') request = self._client.get(url, query_parameters, header_parameters) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) deserialized = self._deserialize('IPAddressAvailabilityResult', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized check_ip_address_availability.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/CheckIPAddressAvailability'} # type: ignore def list_usage( self, resource_group_name: str, virtual_network_name: str, **kwargs: Any ) -> AsyncIterable["_models.VirtualNetworkListUsageResult"]: """Lists usage stats. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param virtual_network_name: The name of the virtual network. 
:type virtual_network_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either VirtualNetworkListUsageResult or the result of cls(response) :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_08_01.models.VirtualNetworkListUsageResult] :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetworkListUsageResult"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2019-08-01" accept = "application/json" def prepare_request(next_link=None): # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') if not next_link: # Construct URL url = self.list_usage.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') request = self._client.get(url, query_parameters, header_parameters) else: url = next_link query_parameters = {} # type: Dict[str, Any] request = self._client.get(url, query_parameters, header_parameters) return request async def extract_data(pipeline_response): deserialized = self._deserialize('VirtualNetworkListUsageResult', pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return deserialized.next_link or None, AsyncList(list_of_elem) async def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) return pipeline_response return AsyncItemPaged( get_next, extract_data ) list_usage.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/usages'} # type: ignore
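# ---------------------------------------------------------------------------
# Usage sketch for the async VirtualNetworksOperations above (illustration
# only, not part of the generated code).  The credential/client classes are
# the usual azure-identity / azure-mgmt-network async entry points, and the
# subscription, resource group and network names below are placeholders.
from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.network.aio import NetworkManagementClient


async def _virtual_networks_example():
    async with DefaultAzureCredential() as credential:
        async with NetworkManagementClient(credential, "<subscription-id>") as client:
            # list_all() / list() return an AsyncItemPaged; consume with `async for`.
            async for vnet in client.virtual_networks.list_all():
                print(vnet.name)

            # begin_update_tags() returns an AsyncLROPoller; await the call to
            # start the operation, then await .result() for the VirtualNetwork.
            poller = await client.virtual_networks.begin_update_tags(
                "<resource-group>",
                "<vnet-name>",
                {"tags": {"environment": "test"}},
            )
            updated = await poller.result()
            print(updated.tags)

            # check_ip_address_availability() is a plain coroutine.
            result = await client.virtual_networks.check_ip_address_availability(
                "<resource-group>", "<vnet-name>", "10.0.0.4")
            print(result.available)

# Run with: asyncio.run(_virtual_networks_example())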
"""Copyright 2008 Orbitz WorldWide Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.""" import sys, os, urllib, time, traceback, cgi, re, socket from cPickle import load,dump from itertools import chain from django.conf import settings from django.core.exceptions import ObjectDoesNotExist from website.graphite.util import getProfile, getProfileByUsername from website.graphite.logger import log from website.graphite.account.models import Profile, MyGraph, Variable, View, Window #Utility functions def printException(): out = "<pre style='color: red'>" out += traceback.format_exc() out += "</pre>" return stdout(out) def stdout(text,lineBreak=True): text = text.replace('"',"'") text = text.replace('\n','<br/>') br = '' if lineBreak: br = "<br/>" return """$('output').innerHTML += "%s%s"; """ % (text,br) def stderr(text): return """$('output').innerHTML += "<font color='red'><b>%s</b></font><br/>"; """ % text.replace('"',"'") #Commands def _set(request,name,value): profile = getProfile(request) try: variable = profile.variable_set.get(name=name) variable.value = value except ObjectDoesNotExist: variable = Variable(profile=profile,name=name,value=value) variable.save() return '' def _unset(request,name): profile = getProfile(request) try: variable = profile.variable_set.get(name=name) variable.delete() except ObjectDoesNotExist: return stderr("Unknown variable %s" % name) return '' def _echo(request,args): return stdout(args) def _vars(request): profile = getProfile(request) out = '<font color="#77ddcc">' for variable in profile.variable_set.all(): out += '%s = %s<br/>' % (variable.name,variable.value) out += '</font>' return stdout(out) def _clear(request): return "$('output').innerHTML = '';\n" def _create(request,window): out = '' w = window.replace('.', '_') #Basic window creation out += "%s_win = new Window('%s_win', {title: '%s',width: 350, height: 225, maximizable: false});\n" % (w,w,w) out += "center = Builder.node( 'center', [Builder.node('img', {id: '%s_img',src: '/media/graphite/img/graphite.png'} )] );\n" % w out += "%s_win.getContent().appendChild( center );\n" % w out += "%s_win.setDestroyOnClose();\n" % w out += "%s_win.showCenter();\n" % w #Useful redraw function out += "function %s_redraw() {\n" % w out += " if (window.%s_timer) { clearTimeout(window.%s_timer); }\n" % (w,w) out += " img = $('%s_img');\n" % w out += " if (!img) { return false; }\n" out += " url = img.src;\n" out += " i = url.indexOf('&uniq=');\n" out += " if (i == -1) {\n" out += " url += '&uniq=' + Math.random();\n" out += " } else {\n" out += " url = url.replace(/&uniq=[^&]+/,'&uniq=' + Math.random());\n" out += " }\n" out += " img.src = url;\n" out += " window.%s_timer = setTimeout('window.%s_redraw()', window.%s_interval);\n" % (w,w,w) out += "}\n" out += "window.%s_redraw = %s_redraw;\n" % (w,w) return out def _draw(request,targets,_from=None,until=None,template=None,window=None,interval=None): out = '' params = [ ('target',t) for t in targets ] if _from: params.append( ('from',_from) ) if until: params.append( ('until',until) ) if template: 
params.append( ('template',template) ) url = '/graphite/render?' + urllib.urlencode(params) if window: w = window out += "win = %s_win;\n" % w out += "img_id = '%s_img';\n" % w out += "img = $(img_id);\n" out += "if (!win) {\n" out += " alert('No such window %s');\n" % w out += "} else {\n" out += " url = '%s';\n" % url out += " size = win.getSize();\n" out += " if (size['height'] < 100 || size['width'] < 100) {\n" out += " alert('Window is too small!');\n" out += " } else {\n" out += " url += '&height=' + (size['height']) + '&' + 'width=' + (size['width']);\n" out += " window.changeImage(win,url);\n" out += " }\n" out += "}\n" if interval: i = int(interval) out += "window.%s_interval = %d * 60000;\n" % (w,i) out += "window.%s_timer = setTimeout('window.%s_redraw()', window.%s_interval);\n" % (w,w,w) else: return stdout("<img src='%s' onload='scrollBy(0,this.height + 1000);'>" % url) return out def _redraw(request,window,interval): out = '' w = window i = int(interval) out += "img = $('%s_img');\n" % w out += "if (!img) {\n" out += " alert('No such window %s');\n" % w out += "} else {\n" out += " if (window.%s_timer) { clearTimeout(window.%s_timer); }\n" % (w,w) out += " window.%s_interval = %d * 60000;\n" % (w,i) out += " window.%s_timer = setTimeout('window.%s_redraw()', window.%s_interval);\n" % (w,w,w) out += "}\n" return out def _email(request,window,addressList): out = '' w = window addrList = ','.join(addressList) params = { 'commandInput' : 'doemail', 'recipients' : addrList, 'title' : w} paramStr = urllib.urlencode(params) out += "img = $('%s_img');\n" % w out += "if (!img) {\n" out += " alert('No such window %s');\n" % w out += "} else {\n" out += " url = img.src;\n" out += " params = '%s' + '&url=' + escape(url);\n" % paramStr out += " emailreq = new Ajax.Request('/graphite/cli/eval', {method: 'get', parameters: params, onException: handleException, onComplete: handleResponse});\n" out += "}\n" return out def _doemail(request): cgiParams = request.GET assert 'recipients' in cgiParams and 'url' in cgiParams and 'title' in cgiParams, "Incomplete doemail, requires recipients, url, and title" import smtplib, httplib, urlparse from email.MIMEMultipart import MIMEMultipart from email.MIMEText import MIMEText from email.MIMEImage import MIMEImage url = cgiParams['url'] title = cgiParams['title'] recipients = cgiParams['recipients'].split(',') proto, server, path, query, frag = urlparse.urlsplit(url) if query: path += '?' 
+ query conn = httplib.HTTPConnection(server) conn.request('GET',path) resp = conn.getresponse() assert resp.status == 200, "Failed HTTP response %s %s" % (resp.status, resp.reason) rawData = resp.read() conn.close() message = MIMEMultipart() message['Subject'] = "Graphite Image" message['To'] = ', '.join(recipients) message['From'] = 'frontend@%s' % socket.gethostname() text = MIMEText( "Image generated by the following graphite URL at %s\r\n\r\n%s" % (time.ctime(),url) ) image = MIMEImage( rawData ) image.add_header('Content-Disposition', 'attachment', filename=title + time.strftime("_%b%d_%I%M%p.png")) message.attach(text) message.attach(image) server = smtplib.SMTP(settings.SMTP_SERVER) server.sendmail('frontend@%s' % socket.gethostname(),recipients,message.as_string()) server.quit() return stdout("Successfully sent %s to %s" % (url,cgiParams['recipients'])) def _code(request,code): return code def _url(request,window): out = '' w = window out += "img = $('%s_img');\n" % w out += "if (!img) {\n" out += " alert('No such window %s');\n" % w out += "} else {\n" out += " url = img.src;\n" out += " $('output').innerHTML += '%s URL is ' + url + '<br/>';\n" % w out += "}\n" return out def _help(request): return "window.open('%s','doc');" % settings.DOCUMENTATION_URL def _change(request,window,var,value): out = '' out += "function changeWindow(win) {\n" out += " var img = $(win + '_img');\n" out += " if (!img) {\n" out += " alert('No such window ' + win);\n" out += " } else {\n" out += " var url = new String(img.src);\n" out += " var i = url.indexOf('?');\n" out += " if (i == -1) {\n" out += " alert('Invalid url in image! url=' + url);\n" out += " } else {\n" out += " var base = url.substring(0,i);\n" out += " var qs = url.substring(i+1,url.length+1);\n" out += " var found = false;\n" out += " var pairs = qs.split('&').collect( function(pair) {\n" out += " var p = pair.split('=');\n" out += " if (p[0] == '%s') {\n" % var out += " found = true;\n" out += " return p[0] + '=' + escape('%s');\n" % value out += " }\n" out += " return pair;\n" out += " });\n" out += " var newqs = pairs.join('&');\n" out += " if (!found) { newqs += '&%s=' + escape('%s'); }\n" % (var,value) out += " img.src = base + '?' 
+ newqs;\n" out += " }\n" out += " }\n" out += "}\n" if window == '*': out += "Windows.windows.each( function(winObject) {\n" out += " var name = winObject.getId().replace('_win','');\n" out += " changeWindow(name);\n" out += "});\n" else: out += "changeWindow('%s');" % window return out def _add(request,target,window): out = '' out += "img = $('%s_img');\n" % window out += "if (!img) {\n" out += " alert('No such window %s');\n" % window out += "} else {\n" out += " if (img.src.indexOf('/graphite/render') == -1) {\n" out += " img.src = '/graphite/render?';\n" out += " }\n" out += " img.src = img.src + '&target=' + encodeURIComponent('%s');\n" % target out += "}\n" return out def _remove(request,target,window): out = '' out += "img = $('%s_img');\n" % window out += "if (!img) {\n" out += " alert('No such window %s');\n" % window out += "} else {\n" out += " var url = new String(img.src);\n" out += " var beginningTarget = '?target=' + encodeURIComponent('%s');\n" % target out += " var newurl = url.replace(beginningTarget,'?');\n" out += " var laterTarget = '&target=' + escape('%s');\n" % target out += " newurl = newurl.replace(laterTarget,'');\n" out += " img.src = newurl;\n" out += "}\n" return out def _find(request,pattern): pattern = pattern.strip() r = re.compile(pattern,re.I) out = '' found = 0 displayMax = 100 rrdIndex = open(settings.STORAGE_DIR + '/rrd_index') wspIndex = open(settings.STORAGE_DIR + '/wsp_index') for line in chain(wspIndex,rrdIndex): if r.search(line): found += 1 if found <= displayMax: out += line.replace('/','.') if found >= displayMax: out += '<font color="red">Displaying %d out of %d matches, try refining your search</font>' % (displayMax,found) else: out += 'Found %d matches' % found return stdout(out) def _save(request,view): if not settings.ALLOW_ANONYMOUS_CLI and not request.user.is_authenticated(): return stderr("You must be logged in to use this functionality.") out = '' out += "allParams = {};\n" out += "Windows.windows.each( function(winObject) {\n" out += " name = winObject.getId().replace('_win','');\n" out += " winElement = $(name + '_win');\n" out += " img_id = name + '_img';\n" out += " img = $(img_id);\n" out += " url = img.src;\n" out += " _top = winElement.style.top\n" out += " left = winElement.style.left\n" out += " size = winObject.getSize();\n" out += " width = size.width;\n" out += " height = size.height;\n" out += " myParams = 'top=' + _top + '&left=' + left + '&width=' + width + '&height=' + height + '&url=' + escape(url);\n" out += " if (window[name+'_interval']) { myParams += '&interval=' + window[name+'_interval']; }\n" out += " allParams[name] = escape(myParams);\n" out += "});\n" out += "if (allParams) {\n" out += " queryString = 'commandInput=dosave%%20%s&' + $H(allParams).toQueryString();\n" % view out += " savereq = new Ajax.Request('/graphite/cli/eval', {method: 'get', parameters: queryString, onException: handleException, onComplete: handleResponse});\n" out += "}\n" return out def _dosave(request,viewName): profile = getProfile(request) #First find our View log.info("Saving view '%s' under profile '%s'" % (viewName,profile.user.username)) try: view = profile.view_set.get(name=viewName) except ObjectDoesNotExist: view = View(profile=profile,name=viewName) view.save() #Now re-associate the view with the correct Windows view.window_set.all().delete() for windowName,encodedString in request.GET.items(): try: if windowName in ('_','commandInput'): continue paramString = urllib.unquote_plus(encodedString) queryParams = 
cgi.parse_qs(paramString) modelParams = {} for key,value in queryParams.items(): #Clean up the window params key = str(key) value = str(value[0]) if key in ('top','left'): value = int(float( value.replace('px','') )) if key in ('width','height','interval'): value = int(float(value)) modelParams[key] = value if 'interval' not in modelParams: modelParams['interval'] = None win = Window(view=view,name=windowName,**modelParams) win.save() except: log.exception("Failed to process parameters for window '%s'" % windowName) return stdout('Saved view %s' % viewName) def _load(request,viewName,above=None): if above: out = stdout("Loading view %s above the current view" % viewName) else: out = stdout("Loading view %s" % viewName) profile = getProfile(request) try: view = profile.view_set.get(name=viewName) except ObjectDoesNotExist: return stderr("Unknown view %s" % viewName) if not above: out += "Windows.windows.each( function(w) {w.destroy();} );" for window in view.window_set.all(): out += _create(request,window.name) out += "win = %s_win;" % window.name out += "$('%s_img').src = '%s';" % (window.name,window.url) out += "win.show();" out += "win.setLocation(%d,%d);" % (window.top,window.left) out += "win.setSize(%d,%d);" % (window.width,window.height) if window.interval: out += "window.%s_interval = %d;" % (window.name,window.interval) out += "window.%s_timer = setTimeout('window.%s_redraw()', window.%s_interval);" % ((window.name,) * 3) return out def _gsave(request,graphName): profile = getProfile(request,allowDefault=False) if not profile: return stderr("You must be logged in to save graphs") out = "img = $('%s_img');\n" % graphName out += "if (!img) {\n" out += " alert('No such window');\n" out += "} else {\n" out += " queryString = 'commandInput=dogsave%%20%s&url=' + escape(img.src);\n" % graphName out += " savereq = new Ajax.Request('/graphite/cli/eval', {method: 'get', parameters: queryString, onException: handleException, onComplete: handleResponse});\n" out += "}\n" return out def _dogsave(request,graphName): profile = getProfile(request,allowDefault=False) if not profile: return stderr("You must be logged in to save graphs") url = request.GET.get('url') if not url: return stderr("No url specified!") try: existingGraph = profile.mygraph_set.get(name=graphName) existingGraph.url = url existingGraph.save() except ObjectDoesNotExist: try: newGraph = MyGraph(profile=profile,name=graphName,url=url) newGraph.save() except: log.exception("Failed to create new MyGraph in _dogsave(), graphName=%s" % graphName) return stderr("Failed to save graph %s" % graphName) return stdout("Saved graph %s" % graphName) def _gload(request,user=None,graphName=None): if not user: profile = getProfile(request,allowDefault=False) if not profile: return stderr("You are not logged in so you must specify a username") else: try: profile = getProfileByUsername(user) except ObjectDoesNotExist: return stderr("User does not exist") try: myGraph = profile.mygraph_set.get(name=graphName) except ObjectDoesNotExist: return stderr("Graph does not exist") out = _create(request,myGraph.name) out += "changeImage(%s_win,'%s');\n" % (myGraph.name.replace('.', '_'), myGraph.url) return out def _graphs(request,user=None): if not user: profile = getProfile(request,allowDefault=False) if not profile: return stderr("You are not logged in so you must specify a username") else: try: profile = getProfileByUsername(user) except ObjectDoesNotExist: return stderr("User does not exist") out = "" if user: prefix = "~%s/" % user else: prefix = "" 
for graph in profile.mygraph_set.all(): out += stdout(prefix + graph.name) return out def _views(request): out = '' profile = getProfile(request) for view in profile.view_set.all(): windowList = ','.join([window.name for window in view.window_set.all()]) out += stdout("%s: %s" % (view.name,windowList)) return out def _rmview(request,viewName): profile = getProfile(request) try: view = profile.view_set.get(name=viewName) except ObjectDoesNotExist: return stderr("No such view '%s'" % viewName) view.delete() return stdout("Deleted view %s" % viewName) def _rmgraph(request,graphName): profile = getProfile(request,allowDefault=False) try: graph = profile.mygraph_set.get(name=graphName) except ObjectDoesNotExist: return stderr("No such graph %s" % graphName) graph.delete() return stdout("Deleted graph %s" % graphName) def _compose(request,window): out = "var url = $('%s_img').src;\n" % window out += "var re = /target=([^&]+)/;\n" out += "if ( url.match(re) == null ) {\n" out += " alert('Image has no targets!');\n" out += "} else {\n" out += " composerURL = '/?' + url.substr(url.indexOf('?') + 1);\n"; out += " composerWin = window.open(composerURL, 'GraphiteComposer');\n" out += stdout('A new composer window has been opened.') #out += " var i = 0;" #out += " var m = true;\n" #out += " while ( m = url.substr(i).match(re) ) {\n" #out += " setTimeout(\"composerWin.Composer.toggleTarget('\" + m[1] + \"')\",2500);\n" #out += " i += m.index + m[1].length;\n" #out += " }\n" out += "}\n" return out def _login(request): if request.user.is_authenticated(): return stderr("You are already logged in as %s" % request.user.username) else: return "window.location = '/graphite/account/login/?nextPage=' + encodeURIComponent('/graphite/cli/');" def _logout(request): if not request.user.is_authenticated(): return stderr("You are not logged in!") else: return "window.location = '/graphite/account/logout/?nextPage=' + encodeURIComponent('/graphite/cli/');" def _id(request): if request.user.is_authenticated(): return stdout("You are logged in as %s" % request.user.username) else: return stdout("You are not logged in.") _whoami = _id
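# ---------------------------------------------------------------------------
# Illustration only (not part of the CLI): every handler above takes the
# Django request plus parsed arguments and returns a JavaScript snippet that
# the browser-side terminal evaluates.  A hypothetical caller chaining a few
# commands would look like this; the metric name is a placeholder.
def _example_session(request):
    out = ''
    out += _set(request, 'from', '-12hours')       # persist a profile variable
    out += _echo(request, 'drawing load graph')    # write to the output pane
    out += _draw(request, ['servers.www1.load'],   # inline <img> for the render URL
                 _from='-12hours')
    return out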
"""Miscellaneous stuff that doesn't really fit anywhere else.""" from __future__ import print_function, division import sys import os import re as _re from textwrap import fill, dedent from sympy.core.compatibility import get_function_name, range def filldedent(s, w=70): """ Strips leading and trailing empty lines from a copy of `s`, then dedents, fills and returns it. Empty line stripping serves to deal with docstrings like this one that start with a newline after the initial triple quote, inserting an empty line at the beginning of the string.""" return '\n' + fill(dedent(str(s)).strip('\n'), width=w) def rawlines(s): """Return a cut-and-pastable string that, when printed, is equivalent to the input. The string returned is formatted so it can be indented nicely within tests; in some cases it is wrapped in the dedent function which has to be imported from textwrap. Examples ======== Note: because there are characters in the examples below that need to be escaped because they are themselves within a triple quoted docstring, expressions below look more complicated than they would be if they were printed in an interpreter window. >>> from sympy.utilities.misc import rawlines >>> from sympy import TableForm >>> s = str(TableForm([[1, 10]], headings=(None, ['a', 'bee']))) >>> print(rawlines(s)) # the \\ appears as \ when printed ( 'a bee\\n' '-----\\n' '1 10 ' ) >>> print(rawlines('''this ... that''')) dedent('''\\ this that''') >>> print(rawlines('''this ... that ... ''')) dedent('''\\ this that ''') >>> s = \"\"\"this ... is a triple ''' ... \"\"\" >>> print(rawlines(s)) dedent(\"\"\"\\ this is a triple ''' \"\"\") >>> print(rawlines('''this ... that ... ''')) ( 'this\\n' 'that\\n' ' ' ) """ lines = s.split('\n') if len(lines) == 1: return repr(lines[0]) triple = ["'''" in s, '"""' in s] if any(li.endswith(' ') for li in lines) or '\\' in s or all(triple): rv = ["("] # add on the newlines trailing = s.endswith('\n') last = len(lines) - 1 for i, li in enumerate(lines): if i != last or trailing: rv.append(repr(li)[:-1] + '\\n\'') else: rv.append(repr(li)) return '\n '.join(rv) + '\n)' else: rv = '\n '.join(lines) if triple[0]: return 'dedent("""\\\n %s""")' % rv else: return "dedent('''\\\n %s''')" % rv size = getattr(sys, "maxint", None) if size is None: # Python 3 doesn't have maxint size = sys.maxsize if size > 2**32: ARCH = "64-bit" else: ARCH = "32-bit" # XXX: PyPy doesn't support hash randomization HASH_RANDOMIZATION = getattr(sys.flags, 'hash_randomization', False) _debug_tmp = [] _debug_iter = 0 def debug_decorator(func): """If SYMPY_DEBUG is True, it will print a nice execution tree with arguments and results of all decorated functions, else do nothing. """ from sympy import SYMPY_DEBUG if not SYMPY_DEBUG: return func def maketree(f, *args, **kw): global _debug_tmp global _debug_iter oldtmp = _debug_tmp _debug_tmp = [] _debug_iter += 1 def tree(subtrees): def indent(s, type=1): x = s.split("\n") r = "+-%s\n" % x[0] for a in x[1:]: if a == "": continue if type == 1: r += "| %s\n" % a else: r += " %s\n" % a return r if len(subtrees) == 0: return "" f = [] for a in subtrees[:-1]: f.append(indent(a)) f.append(indent(subtrees[-1], 2)) return ''.join(f) # If there is a bug and the algorithm enters an infinite loop, enable the # following lines. 
It will print the names and parameters of all major functions # that are called, *before* they are called #from sympy.core.compatibility import reduce #print("%s%s %s%s" % (_debug_iter, reduce(lambda x, y: x + y, \ # map(lambda x: '-', range(1, 2 + _debug_iter))), get_function_name(f), args)) r = f(*args, **kw) _debug_iter -= 1 s = "%s%s = %s\n" % (get_function_name(f), args, r) if _debug_tmp != []: s += tree(_debug_tmp) _debug_tmp = oldtmp _debug_tmp.append(s) if _debug_iter == 0: print((_debug_tmp[0])) _debug_tmp = [] return r def decorated(*args, **kwargs): return maketree(func, *args, **kwargs) return decorated def debug(*args): """ Print ``*args`` if SYMPY_DEBUG is True, else do nothing. """ from sympy import SYMPY_DEBUG if SYMPY_DEBUG: print(*args, file=sys.stderr) def find_executable(executable, path=None): """Try to find 'executable' in the directories listed in 'path' (a string listing directories separated by 'os.pathsep'; defaults to os.environ['PATH']). Returns the complete filename or None if not found """ if path is None: path = os.environ['PATH'] paths = path.split(os.pathsep) extlist = [''] if os.name == 'os2': (base, ext) = os.path.splitext(executable) # executable files on OS/2 can have an arbitrary extension, but # .exe is automatically appended if no dot is present in the name if not ext: executable = executable + ".exe" elif sys.platform == 'win32': pathext = os.environ['PATHEXT'].lower().split(os.pathsep) (base, ext) = os.path.splitext(executable) if ext.lower() not in pathext: extlist = pathext for ext in extlist: execname = executable + ext if os.path.isfile(execname): return execname else: for p in paths: f = os.path.join(p, execname) if os.path.isfile(f): return f else: return None def func_name(x): '''Return function name of `x` (if defined) else the `type(x)`. See Also ======== sympy.core.compatibility get_function_name ''' return getattr(getattr(x, 'func', x), '__name__', type(x)) def _replace(reps): """Return a function that can make the replacements, given in ``reps``, on a string. The replacements should be given as mapping. Examples ======== >>> from sympy.utilities.misc import _replace >>> f = _replace(dict(foo='bar', d='t')) >>> f('food') 'bart' >>> f = _replace({}) >>> f('food') 'food' """ if not reps: return lambda x: x D = lambda match: reps[match.group(0)] pattern = _re.compile("|".join( [_re.escape(k) for k, v in reps.items()]), _re.M) return lambda string: pattern.sub(D, string) def replace(string, *reps): """Return ``string`` with all keys in ``reps`` replaced with their corresponding values, longer strings first, irrespective of the order they are given. ``reps`` may be passed as tuples or a single mapping. Examples ======== >>> from sympy.utilities.misc import replace >>> replace('foo', {'oo': 'ar', 'f': 'b'}) 'bar' >>> replace("spamham sha", ("spam", "eggs"), ("sha","md5")) 'eggsham md5' There is no guarantee that a unique answer will be obtained if keys in a mapping overlap (i.e. are the same length and have some identical sequence at the beginning/end): >>> reps = [ ... ('ab', 'x'), ... ('bc', 'y')] >>> replace('abc', *reps) in ('xc', 'ay') True References ========== .. [1] http://stackoverflow.com/questions/6116978/python-replace-multiple-strings """ if len(reps) == 1: kv = reps[0] if type(kv) is dict: reps = kv else: return string.replace(*kv) else: reps = dict(reps) return _replace(reps)(string) def translate(s, a, b=None, c=None): """Return ``s`` where characters have been replaced or deleted. 
SYNTAX ====== translate(s, None, deletechars): all characters in ``deletechars`` are deleted translate(s, map [,deletechars]): all characters in ``deletechars`` (if provided) are deleted then the replacements defined by map are made; if the keys of map are strings then the longer ones are handled first. Multicharacter deletions should have a value of ''. translate(s, oldchars, newchars, deletechars) all characters in ``deletechars`` are deleted then each character in ``oldchars`` is replaced with the corresponding character in ``newchars`` Examples ======== >>> from sympy.utilities.misc import translate >>> from sympy.core.compatibility import unichr >>> abc = 'abc' >>> translate(abc, None, 'a') 'bc' >>> translate(abc, {'a': 'x'}, 'c') 'xb' >>> translate(abc, {'abc': 'x', 'a': 'y'}) 'x' >>> translate('abcd', 'ac', 'AC', 'd') 'AbC' There is no guarantee that a unique answer will be obtained if keys in a mapping overlap are the same length and have some identical sequences at the beginning/end: >>> translate(abc, {'ab': 'x', 'bc': 'y'}) in ('xc', 'ay') True """ from sympy.core.compatibility import maketrans # when support for Python 2 is dropped, this try/except can be #removed try: ''.translate(None, '') py3 = False except TypeError: py3 = True mr = {} if a is None: assert c is None if not b: return s c = b a = b = '' else: if type(a) is dict: short = {} for k in list(a.keys()): if (len(k) == 1 and len(a[k]) == 1): short[k] = a.pop(k) mr = a c = b if short: a, b = [''.join(i) for i in list(zip(*short.items()))] else: a = b = '' else: assert len(a) == len(b) if py3: if c: s = s.translate(maketrans('', '', c)) s = replace(s, mr) return s.translate(maketrans(a, b)) else: # when support for Python 2 is dropped, this if-else-block # can be replaced with the if-clause if c: c = list(c) rem = {} for i in range(-1, -1 - len(c), -1): if ord(c[i]) > 255: rem[c[i]] = '' c.pop(i) s = s.translate(None, ''.join(c)) s = replace(s, rem) if a: a = list(a) b = list(b) for i in range(-1, -1 - len(a), -1): if ord(a[i]) > 255 or ord(b[i]) > 255: mr[a.pop(i)] = b.pop(i) a = ''.join(a) b = ''.join(b) s = replace(s, mr) table = maketrans(a, b) # s may have become unicode which uses the py3 syntax for translate if type(table) is str and type(s) is str: s = s.translate(table) else: s = s.translate(dict( [(i, ord(c)) for i, c in enumerate(table)])) return s
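# ---------------------------------------------------------------------------
# Usage sketch for filldedent (illustrative; not one of the module's
# doctests).  Leading and trailing blank lines are stripped, the common
# indentation removed, and the text re-wrapped to ``w`` columns with a
# leading newline -- convenient for building error messages from indented
# triple-quoted strings.
def _filldedent_example():
    """Hypothetical helper showing the intended use of filldedent."""
    msg = filldedent("""
        unable to continue because the supplied arguments
        were not compatible with one another""", w=60)
    # msg starts with a newline and is wrapped to at most 60 columns
    return msg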
#!/usr/bin/env python import gevent import gevent.monkey gevent.monkey.patch_all() import json from flask import Flask, render_template, session, request from flask_socketio import SocketIO, emit, join_room, leave_room, \ close_room, rooms, disconnect import sys import argparse parser = argparse.ArgumentParser(description='Timing Server') parser.add_argument('--mock', dest='mock', action='store_true', default=False, help="use mock data for testing") args = parser.parse_args() sys.path.append('../delta5interface') if args.mock or sys.platform.lower().startswith('win'): from MockInterface import get_hardware_interface elif sys.platform.lower().startswith('linux'): from Delta5Interface import get_hardware_interface hardwareInterface = get_hardware_interface() # Set this variable to "threading", "eventlet" or "gevent" to test the # different async modes, or leave it set to None for the application to choose # the best option based on installed packages. async_mode = "gevent" app = Flask(__name__, static_url_path='/static') app.config['SECRET_KEY'] = 'secret!' socketio = SocketIO(app, async_mode=async_mode, cors_allowed_origins='*') heartbeat_thread = None firmware_version = {'major': 0, 'minor': 1} # LED Code import time from neopixel import * import signal def signal_handler(signal, frame): colorWipe(strip, Color(0,0,0)) sys.exit(0) # LED strip configuration: LED_COUNT = 150 # Number of LED pixels. #LED_PIN = 18 # GPIO pin connected to the pixels (18 uses PWM!). LED_PIN = 10 # GPIO pin connected to the pixels (10 uses SPI /dev/spidev0.0). LED_FREQ_HZ = 800000 # LED signal frequency in hertz (usually 800khz) LED_DMA = 10 # DMA channel to use for generating signal (try 10) LED_BRIGHTNESS = 255 # Set to 0 for darkest and 255 for brightest LED_INVERT = False # True to invert the signal (when using NPN transistor level shift) LED_CHANNEL = 0 # set to '1' for GPIOs 13, 19, 41, 45 or 53 LED_STRIP = ws.WS2811_STRIP_GRB # Strip type and colour ordering # LED one color ON/OFF def onoff(strip, color): for i in range(strip.numPixels()): strip.setPixelColor(i, color) strip.show() def theaterChase(strip, color, wait_ms=50, iterations=5): """Movie theater light style chaser animation.""" for j in range(iterations): for q in range(3): for i in range(0, strip.numPixels(), 3): strip.setPixelColor(i+q, color) strip.show() time.sleep(wait_ms/1000.0) for i in range(0, strip.numPixels(), 3): strip.setPixelColor(i+q, 0) def wheel(pos): """Generate rainbow colors across 0-255 positions.""" if pos < 85: return Color(pos * 3, 255 - pos * 3, 0) elif pos < 170: pos -= 85 return Color(255 - pos * 3, 0, pos * 3) else: pos -= 170 return Color(0, pos * 3, 255 - pos * 3) def rainbow(strip, wait_ms=2, iterations=1): """Draw rainbow that fades across all pixels at once.""" for j in range(256*iterations): for i in range(strip.numPixels()): strip.setPixelColor(i, wheel((i+j) & 255)) strip.show() time.sleep(wait_ms/1000.0) def rainbowCycle(strip, wait_ms=2, iterations=1): """Draw rainbow that uniformly distributes itself across all pixels.""" for j in range(256*iterations): for i in range(strip.numPixels()): strip.setPixelColor(i, wheel((int(i * 256 / strip.numPixels()) + j) & 255)) strip.show() time.sleep(wait_ms/1000.0) def theaterChaseRainbow(strip, wait_ms=25): """Rainbow movie theater light style chaser animation.""" for j in range(256): for q in range(3): for i in range(0, strip.numPixels(), 3): strip.setPixelColor(i+q, wheel((i+j) % 255)) strip.show() time.sleep(wait_ms/1000.0) for i in range(0, strip.numPixels(), 3): 
strip.setPixelColor(i+q, 0) # Create NeoPixel object with appropriate configuration. strip = Adafruit_NeoPixel(LED_COUNT, LED_PIN, LED_FREQ_HZ, LED_DMA, LED_INVERT, LED_BRIGHTNESS, LED_CHANNEL, LED_STRIP) # Intialize the library (must be called once before other functions). strip.begin() def parse_json(data): if isinstance(data, basestring): return json.loads(data) return data @app.route('/') def index(): template_data = { } return render_template('index.html', async_mode=socketio.async_mode, **template_data) @app.route('/graphs') def graphs(): return render_template('graphs.html', async_mode=socketio.async_mode) @app.route('/rssi') def rssi(): return render_template('rssi.html', async_mode=socketio.async_mode) @socketio.on('connect') def connect_handler(): print ('connected!!'); hardwareInterface.start() global heartbeat_thread if (heartbeat_thread is None): heartbeat_thread = gevent.spawn(heartbeat_thread_function) @socketio.on('disconnect') def disconnect_handler(): print ('disconnected!!'); @socketio.on('get_version') def on_get_version(): return firmware_version @socketio.on('get_timestamp') def on_get_timestamp(): print('get_timestamp') return {'timestamp': hardwareInterface.milliseconds()} @socketio.on('get_settings') def on_get_settings(): print('get_settings') return hardwareInterface.get_settings_json() @socketio.on('set_frequency') def on_set_frequency(data): data = parse_json(data) print(data) index = data['node'] frequency = data['frequency'] hardwareInterface.set_frequency(index, frequency) emit('frequency_set', hardwareInterface.get_frequency_json(index), broadcast=True) @socketio.on('set_calibration_threshold') def on_set_calibration_threshold(data): data = parse_json(data) print(data) calibration_threshold = data['calibration_threshold'] hardwareInterface.set_calibration_threshold_global(calibration_threshold) emit('calibration_threshold_set', hardwareInterface.get_calibration_threshold_json(), broadcast=True) @socketio.on('set_calibration_offset') def on_set_calibration_offset(data): data = parse_json(data) print(data) calibration_offset = data['calibration_offset'] hardwareInterface.set_calibration_offset_global(calibration_offset) emit('calibration_offset_set', hardwareInterface.get_calibration_offset_json(), broadcast=True) @socketio.on('set_trigger_threshold') def on_set_trigger_threshold(data): data = parse_json(data) print(data) trigger_threshold = data['trigger_threshold'] hardwareInterface.set_trigger_threshold_global(trigger_threshold) emit('trigger_threshold_set', hardwareInterface.get_trigger_threshold_json(), broadcast=True) @socketio.on('set_filter_ratio') def on_set_filter_ratio(data): data = parse_json(data) print(data) filter_ratio = data['filter_ratio'] hardwareInterface.set_filter_ratio_global(filter_ratio) emit('filter_ratio_set', hardwareInterface.get_filter_ratio_json(), broadcast=True) # Keep this around for a bit.. 
old version of the api # @socketio.on('reset_auto_calibration') # def on_reset_auto_calibration(): # print('reset_auto_calibration all') # hardwareInterface.enable_calibration_mode(); @socketio.on('reset_auto_calibration') def on_reset_auto_calibration(data): # onoff(strip, Color(255,0,0)) #RED ON # time.sleep(0.5) # onoff(strip, Color(0,0,0)) #OFF # time.sleep(0.5) # onoff(strip, Color(255,0,0)) #RED ON # time.sleep(0.5) # onoff(strip, Color(0,0,0)) #OFF # time.sleep(0.5) # onoff(strip, Color(255,0,0)) #RED ON # time.sleep(0.5) # onoff(strip, Color(0,0,0)) #OFF data = parse_json(data) print(data) index = data['node'] if index == -1: print('reset_auto_calibration all') hardwareInterface.enable_calibration_mode() else: print('reset_auto_calibration {0}'.format(index)) hardwareInterface.set_calibration_mode(index, True) onoff(strip, Color(0,255,0)) #GREEN ON @socketio.on('simulate_pass') def on_simulate_pass(data): data = parse_json(data) index = data['node'] # todo: how should frequency be sent? emit('pass_record', {'node': index, 'frequency': hardwareInterface.nodes[index].frequency, 'timestamp': hardwareInterface.milliseconds()}, broadcast=True) @socketio.on('LED_solid') def on_LED_solid(data): '''LED Solid Color''' led_red = data['red'] led_green = data['green'] led_blue = data['blue'] onoff(strip, Color(led_red,led_green,led_blue)) @socketio.on('LED_chase') def on_LED_chase(data): '''LED Solid Color''' led_red = data['red'] led_green = data['green'] led_blue = data['blue'] theaterChase(strip, Color(led_red,led_green,led_blue)) @socketio.on('LED_RB') def on_LED_RB(): rainbow(strip) #Rainbow @socketio.on('LED_RBCYCLE') def on_LED_RBCYCLE(): rainbowCycle(strip) #Rainbow Cycle @socketio.on('LED_RBCHASE') def on_LED_RBCHASE(): theaterChaseRainbow(strip) #Rainbow Chase def pass_record_callback(node, ms_since_lap): print('Pass record from {0}{1}: {2}, {3}'.format(node.index, node.frequency, ms_since_lap, hardwareInterface.milliseconds() - ms_since_lap)) #TODO: clean this up socketio.emit('pass_record', { 'node': node.index, 'frequency': node.frequency, 'timestamp': hardwareInterface.milliseconds() - ms_since_lap, 'trigger_rssi': node.trigger_rssi, 'peak_rssi_raw': node.peak_rssi_raw, 'peak_rssi': node.peak_rssi}) if node.index==0: theaterChase(strip, Color(0,0,255)) #BLUE theater chase elif node.index==1: theaterChase(strip, Color(255,50,0)) #ORANGE theater chase elif node.index==2: theaterChase(strip, Color(255,0,60)) #PINK theater chase elif node.index==3: theaterChase(strip, Color(255,0,150)) #PURPLE theater chase elif node.index==4: theaterChase(strip, Color(255,255,0)) #YELLOW theater chase elif node.index==5: theaterChase(strip, Color(0,255,255)) #CYAN theater chase elif node.index==6: theaterChase(strip, Color(0,255,0)) #GREEN theater chase elif node.index==7: theaterChase(strip, Color(255,0,0)) #RED theater chase hardwareInterface.pass_record_callback = pass_record_callback def hardware_log_callback(message): print(message) socketio.emit('hardware_log', message) hardwareInterface.hardware_log_callback = hardware_log_callback def heartbeat_thread_function(): while True: socketio.emit('heartbeat', hardwareInterface.get_heartbeat_json()) gevent.sleep(0.5) if __name__ == '__main__': socketio.run(app, host='0.0.0.0', debug=True)
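# ---------------------------------------------------------------------------
# Sketch of the hardware-interface contract the server above relies on,
# inferred from the calls it makes (the real MockInterface / Delta5Interface
# live in ../delta5interface and are not reproduced here).  Only a subset of
# the methods used above is shown; payload keys such as 'current_rssi' are
# assumptions for illustration.
import time as _time
import random as _random


class _SketchNode(object):
    def __init__(self, index, frequency):
        self.index = index
        self.frequency = frequency
        self.trigger_rssi = 0
        self.peak_rssi_raw = 0
        self.peak_rssi = 0


class _SketchHardwareInterface(object):
    """Illustrative stand-in mirroring the methods called by the server."""

    def __init__(self):
        self.nodes = [_SketchNode(i, 5800) for i in range(8)]
        self.pass_record_callback = None
        self.hardware_log_callback = None
        self._start = _time.time()

    def start(self):
        pass

    def milliseconds(self):
        return int((_time.time() - self._start) * 1000)

    def set_frequency(self, index, frequency):
        self.nodes[index].frequency = frequency

    def get_frequency_json(self, index):
        return {'node': index, 'frequency': self.nodes[index].frequency}

    def get_settings_json(self):
        return {'nodes': [{'frequency': n.frequency} for n in self.nodes]}

    def get_heartbeat_json(self):
        return {'current_rssi': [_random.randint(0, 255) for _ in self.nodes]}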
"Ideogenesis: Note pages." from __future__ import print_function, absolute_import import logging import tornado.web import ideogenesis from . import constants from . import neo4jdbi from . import saver from . import settings from . import utils from .requesthandler import RequestHandler class NoteSaver(saver.Saver): "Save the Note data from HTML form input." NODE_LABELS = set([constants.ANY, constants.NOTE]) def input_entry(self): try: iuid = self.rqh.get_argument('entry') self.entry = self.rqh.get_entry(iuid) if self.entry is None: raise ValueError except (tornado.web.MissingArgumentError, ValueError): raise ValueError('no entry provided') def input_title(self): try: title = self.rqh.get_argument('title') if not title: raise ValueError title_normalized = utils.normalize(title) if not title_normalized: raise ValueError except (tornado.web.MissingArgumentError, ValueError): raise ValueError('no title provided') self['title'] = title self['title_normalized'] = title_normalized def input_access(self): access = self.rqh.get_argument('access', default=constants.PRIVATE) self.public = access.lower() == constants.PUBLIC def input_text(self): self['text'] = self.rqh.get_argument('text', None) @property def node_labels(self): "Return the labels to use for a node." result = set(self.NODE_LABELS) if self.public: result.add(constants.PUBLIC) return result def after_created(self): self.rqh.neo4j.create_relationship(self.node, self.entry, constants.TARGET) self.rqh.neo4j.create_relationship(self.rqh.current_user, self.node, constants.OWNS) class NoteMixin(object): "Some useful methods for Note pages." def get_note_owner(self, note): "Get the user node for the owner of the note." statement = 'MATCH (n:Note {iuid:{iuid}})<-[:OWNS]-(u:User)' \ ' RETURN id(u)' nodes = self.neo4j.get_nodes_execute(statement, iuid=note['iuid']) if len(nodes) > 1: raise tornado.web.HTTPError(500, reason='multiple nodes matched') elif not nodes: return None return nodes[0] def get_note_entry(self, note): "Get the entry node for the note." statement = 'MATCH (n:Note {iuid:{iuid}})-[:TARGET]->(e:Entry)' \ ' RETURN id(e)' nodes = self.neo4j.get_nodes_execute(statement, iuid=note['iuid']) if len(nodes) > 1: raise tornado.web.HTTPError(500, reason='multiple nodes matched') elif not nodes: return None return nodes[0] def check_owner(self, note): owner = self.get_note_owner(note) if owner is not self.current_user: raise tornado.web.HTTPError(403, reason='user is not owner') class Note(NoteMixin, RequestHandler): "Note page." 
def get(self, iuid): note = self.get_node(constants.NOTE, iuid=iuid) if note is None: raise tornado.web.HTTPError(404, reason='no such note') owner = self.get_note_owner(note) is_owner = owner is self.current_user if constants.PUBLIC not in note.labels and not is_owner: raise tornado.web.HTTPError(403, reason='note is not public') self.render('note.html', note=note, is_owner=is_owner, owner=owner, entry=self.get_note_entry(note)) @tornado.web.authenticated def post(self, iuid): self.check_xsrf_cookie() if self.get_argument('_http_method', None) == 'delete': self.delete(iuid) return raise tornado.web.HTTPError(405, reason='POST only allowed for DELETE') @tornado.web.authenticated def delete(self, iuid): self.check_curator() note = self.get_node(constants.NOTE, iuid=iuid) if note is None: raise tornado.web.HTTPError(404) with neo4jdbi.Transaction(self.neo4j) as trx: # Delete Logs and their relationships with the Note stmt = 'MATCH (:Note {iuid:{iuid}})-[r]-(l:Log)-[o]-()' \ ' DELETE r,l,o' trx.execute(stmt, iuid=note['iuid']) stmt = 'MATCH (:Note {iuid:{iuid}})-[o]-(:User) DELETE o' trx.execute(stmt, iuid=iuid) stmt = 'MATCH (n:Note {iuid:{iuid}})-[t]-() DELETE n,t' trx.execute(stmt, iuid=iuid) try: self.redirect(self.get_argument('next'), status=303) except tornado.web.MissingArgumentError: raise tornado.web.HTTPError(204) class NoteCreate(RequestHandler): "Create a note for the entry." @tornado.web.authenticated def get(self): self.check_curator() iuid = self.get_argument('entry') if not constants.IUID_RX.match(iuid): raise tornado.web.HTTPError(400, reason='invalid entry iuid') entry = self.get_entry(iuid) if entry is None: raise tornado.web.HTTPError(400, reason='no such entry') self.render('note_create.html', entry=entry) @tornado.web.authenticated def post(self): self.check_xsrf_cookie() self.check_curator() try: with NoteSaver(self) as saver: saver.input_entry() saver.input_title() saver.input_access() saver.input_text() except ValueError, msg: self.see_other('note_create', error=str(msg)) else: self.see_other('node', saver.node['iuid']) class NoteEdit(NoteMixin, RequestHandler): "Page for editing a note." @tornado.web.authenticated def get(self, iuid): note = self.get_node(constants.NOTE, iuid=iuid) if note is None: raise tornado.web.HTTPError(404, reason='no such note') self.check_owner(note) self.render('note_edit.html', note=note, entry=self.get_note_entry(note)) @tornado.web.authenticated def post(self, iuid): self.check_xsrf_cookie() note = self.get_node(constants.NOTE, iuid=iuid) if note is None: raise tornado.web.HTTPError(404, reason='no such note') self.check_owner(note) try: with NoteSaver(self, node=note) as saver: saver.input_title() saver.input_access() saver.input_text() except ValueError, msg: self.see_other('note_edit', note['iuid'], error=str(msg)) else: self.see_other('note', note['iuid'])
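# ---------------------------------------------------------------------------
# Sketch of how these handlers could be registered with the Tornado
# application (illustrative; the project's real URL specification lives
# elsewhere, and the patterns and 32-character hex iuid below are
# assumptions).
import tornado.web


def _note_url_specs():
    return [
        tornado.web.url(r'/note/([0-9a-f]{32})', Note, name='note'),
        tornado.web.url(r'/note/([0-9a-f]{32})/edit', NoteEdit, name='note_edit'),
        tornado.web.url(r'/note', NoteCreate, name='note_create'),
    ]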
# Copyright 2017 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Helpers for :mod:`datetime`.""" import calendar import datetime import re from google.protobuf import timestamp_pb2 _UTC_EPOCH = datetime.datetime.utcfromtimestamp(0).replace(tzinfo=datetime.timezone.utc) _RFC3339_MICROS = "%Y-%m-%dT%H:%M:%S.%fZ" _RFC3339_NO_FRACTION = "%Y-%m-%dT%H:%M:%S" # datetime.strptime cannot handle nanosecond precision: parse w/ regex _RFC3339_NANOS = re.compile( r""" (?P<no_fraction> \d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2} # YYYY-MM-DDTHH:MM:SS ) ( # Optional decimal part \. # decimal point (?P<nanos>\d{1,9}) # nanoseconds, maybe truncated )? Z # Zulu """, re.VERBOSE, ) def _from_microseconds(value): """Convert timestamp in microseconds since the unix epoch to datetime. Args: value (float): The timestamp to convert, in microseconds. Returns: datetime.datetime: The datetime object equivalent to the timestamp in UTC. """ return _UTC_EPOCH + datetime.timedelta(microseconds=value) def _to_rfc3339(value, ignore_zone=True): """Convert a datetime to an RFC3339 timestamp string. Args: value (datetime.datetime): The datetime object to be converted to a string. ignore_zone (bool): If True, then the timezone (if any) of the datetime object is ignored and the datetime is treated as UTC. Returns: str: The RFC3339 formatted string representing the datetime. """ if not ignore_zone and value.tzinfo is not None: # Convert to UTC and remove the time zone info. value = value.replace(tzinfo=None) - value.utcoffset() return value.strftime(_RFC3339_MICROS) class DatetimeWithNanoseconds(datetime.datetime): """Track nanosecond in addition to normal datetime attrs. Nanosecond can be passed only as a keyword argument. """ __slots__ = ("_nanosecond",) # pylint: disable=arguments-differ def __new__(cls, *args, **kw): nanos = kw.pop("nanosecond", 0) if nanos > 0: if "microsecond" in kw: raise TypeError("Specify only one of 'microsecond' or 'nanosecond'") kw["microsecond"] = nanos // 1000 inst = datetime.datetime.__new__(cls, *args, **kw) inst._nanosecond = nanos or 0 return inst # pylint: disable=arguments-differ def replace(self, *args, **kw): """Return a date with the same value, except for those parameters given new values by whichever keyword arguments are specified. For example, if d == date(2002, 12, 31), then d.replace(day=26) == date(2002, 12, 26). NOTE: nanosecond and microsecond are mutually exclusive arguments. 
""" ms_provided = "microsecond" in kw ns_provided = "nanosecond" in kw provided_ns = kw.pop("nanosecond", 0) prev_nanos = self.nanosecond if ms_provided and ns_provided: raise TypeError("Specify only one of 'microsecond' or 'nanosecond'") if ns_provided: # if nanos were provided, manipulate microsecond kw arg to super kw["microsecond"] = provided_ns // 1000 inst = super().replace(*args, **kw) if ms_provided: # ms were provided, nanos are invalid, build from ms inst._nanosecond = inst.microsecond * 1000 elif ns_provided: # ns were provided, replace nanoseconds to match after calling super inst._nanosecond = provided_ns else: # if neither ms or ns were provided, passthru previous nanos. inst._nanosecond = prev_nanos return inst @property def nanosecond(self): """Read-only: nanosecond precision.""" return self._nanosecond or self.microsecond * 1000 def rfc3339(self): """Return an RFC3339-compliant timestamp. Returns: (str): Timestamp string according to RFC3339 spec. """ if self._nanosecond == 0: return _to_rfc3339(self) nanos = str(self._nanosecond).rjust(9, "0").rstrip("0") return "{}.{}Z".format(self.strftime(_RFC3339_NO_FRACTION), nanos) @classmethod def from_rfc3339(cls, stamp): """Parse RFC3339-compliant timestamp, preserving nanoseconds. Args: stamp (str): RFC3339 stamp, with up to nanosecond precision Returns: :class:`DatetimeWithNanoseconds`: an instance matching the timestamp string Raises: ValueError: if `stamp` does not match the expected format """ with_nanos = _RFC3339_NANOS.match(stamp) if with_nanos is None: raise ValueError( "Timestamp: {}, does not match pattern: {}".format( stamp, _RFC3339_NANOS.pattern ) ) bare = datetime.datetime.strptime( with_nanos.group("no_fraction"), _RFC3339_NO_FRACTION ) fraction = with_nanos.group("nanos") if fraction is None: nanos = 0 else: scale = 9 - len(fraction) nanos = int(fraction) * (10 ** scale) return cls( bare.year, bare.month, bare.day, bare.hour, bare.minute, bare.second, nanosecond=nanos, tzinfo=datetime.timezone.utc, ) def timestamp_pb(self): """Return a timestamp message. Returns: (:class:`~google.protobuf.timestamp_pb2.Timestamp`): Timestamp message """ inst = ( self if self.tzinfo is not None else self.replace(tzinfo=datetime.timezone.utc) ) delta = inst - _UTC_EPOCH seconds = int(delta.total_seconds()) nanos = self._nanosecond or self.microsecond * 1000 return timestamp_pb2.Timestamp(seconds=seconds, nanos=nanos) @classmethod def from_timestamp_pb(cls, stamp): """Parse RFC3339-compliant timestamp, preserving nanoseconds. Args: stamp (:class:`~google.protobuf.timestamp_pb2.Timestamp`): timestamp message Returns: :class:`DatetimeWithNanoseconds`: an instance matching the timestamp message """ microseconds = int(stamp.seconds * 1e6) bare = _from_microseconds(microseconds) return cls( bare.year, bare.month, bare.day, bare.hour, bare.minute, bare.second, nanosecond=stamp.nanos, tzinfo=datetime.timezone.utc, )
#!/usr/bin/env python # Copyright (c) 2002-2011 ActiveState Software Inc. All rights reserved. """ActivePython identification module This can be run as a script to dump version info: python .../activestate.py or to relocate this Python installation appropriately (see relocate_python() for details): python .../activestate.py --relocate """ import sys #---- ActivePython build/configuration info version = "2.7.2.5" version_info = {'bsddb_ver': None, 'build_host': 'apy-win64', 'build_num': 5, 'build_plat_fullname': 'win64-2003server-x64', 'build_plat_name': 'win64-x64', 'build_time': 'Fri Jun 24 12:59:06 2011', 'bzip2_ver': (1, 0, 5), 'compiler': 'vc9-x64', 'configuration': ['-f', 'apyconfig-apy27-rrun.py', '-p', 'apy27', '--build-tag', 'rrun', '--without-pywin32'], 'openssl_ver': (0, 9, 8, 'r'), 'platinfo': {'arch': 'x64', 'name': 'win64-x64', 'os': 'win64', 'os_csd': 'SP2', 'os_name': '2003Server', 'os_ver': '5.2.3790'}, 'platname': 'win64-x64', 'product_type': 'ActivePython', 'python_src': ('2.7.2', 'path', 'Python-2.7.2.tgz'), 'pywin32_build': '214', 'pywin32_src': ('20111216', 'path', 'pywin32-20111216-CRLF.zip'), 'pywin32_ver': '20111216', 'scm_revision': 'r64662-trunk', 'sqlite3_ver': (3, 6, 21), 'tcltk_ver': (8, 5, 9), 'tix_ver': (8, 4, 3), 'with_bsddb': False, 'with_bzip2': True, 'with_ctypes': True, 'with_docs': True, 'with_pywin32': True, 'with_sqlite3': True, 'with_ssl': True, 'with_tcltk': True, 'with_tests': True, 'zlib_ver': (1, 2, 3)} compiler_info = """Microsoft (R) C/C++ Optimizing Compiler Version 15.00.30729.01 for x64""" # Used for Python install relocation. prefixes = set([ # Prefix to which extensions were built 'F:\\as\\apy-trunk\\build\\py2_7_2-win64-x64-apy27-rrun\\ExTAcTiVePyThOnPrEfIxExTAcTiVePyThOnPrEfIxExTAcTiVePyThOnPrEfIxExTAcTiVePyThOnPrEfIxExTAcTiVePyThOnPrEfIxExTAcTiVePyThOnPrEfIxExTAcTiVePyThOnPrEfIxExTAcTiVePyThOnPrEfIxExTAcTiVePyThOnPrEfIxExTAcTiVePyThOnPrEfIx', # Prefix to which Python sources were built. 'F:\\as\\apy-trunk\\build\\py2_7_2-win64-x64-apy27-rrun\\CoReAcTiVePyThOnPrEfIxCoReAcTiVePyThOnPrEfIxCoReAcTiVePyThOnPrEfIxCoReAcTiVePyThOnPrEfIxCoReAcTiVePyThOnPrEfIxCoReAcTiVePyThOnPrEfIxCoReAcTiVePyThOnPrEfIxCoReAcTiVePyThOnPrEfIxCoReAcTiVePyThOnPrEfIxCoReAcTiVePyThOnPrEfIx', # Prefix to the Python image (sys.prefix) # (relied by pypm -- for relocation) 'F:\\as\\apy-trunk\\build\\py2_7_2-win64-x64-apy27-rrun\\image\\feature-core\\INSTALLDIR', ]) shortest_original_prefix_length = 261 #---- relocation code def _is_path_binary(path): """Return true iff the given file is binary. Raises an EnvironmentError if the file does not exist or cannot be accessed. """ fin = open(path, 'rb') try: CHUNKSIZE = 1024 while 1: chunk = fin.read(CHUNKSIZE) if '\0' in chunk: # found null byte return True if len(chunk) < CHUNKSIZE: break # done finally: fin.close() return False def _relocate_path(path, from_prefix, to_prefix, log): import sys import os from os.path import join import stat import re # Determine if this file needs to be relocated. fin = open(path, 'rb') try: content = fin.read() finally: fin.close() is_binary = _is_path_binary(path) if is_binary: from_str = join(from_prefix, "lib") to_str = join(to_prefix, "lib") else: from_str = from_prefix to_str = to_prefix if sys.version_info[0] >= 3: from_str = bytes(from_str, 'utf-8') to_str = bytes(to_str, 'utf-8') if from_str not in content: return # Relocate this file. 
log("relocate '%s'" % path) perm = stat.S_IMODE(os.stat(path).st_mode) if is_binary: if sys.platform.startswith("aix"): # On AIX the lib path _list_ is stored as one string, rather # than just the one path. This means that the integrity of # the path list must be maintained by separating with ':'. # We also change the remainder to all x's to ensure it is # a bogus path. to_str = join(to_prefix, "lib") \ + ':' + "x"*(len(from_prefix)-len(to_prefix)-1) if sys.version_info[0] >= 3: to_str = bytes(to_str, 'utf-8') #log("replace (length %d)\n\t%s\nwith (length %d)\n\t%s", # % (len(from_str), from_str, len(to_str), to_str)) content = content.replace(from_str, to_str) else: # Replace 'from_str' with 'to_str' in a null-terminated string. # Make sure to properly correct for trailing content in the # same string because: # - on HP-UX sometimes a full path to the shared lib is stored: # <from_str>/libtcl8.4.sl\0 # - on AIX a path _list_ is stored: # <from_str>:other/lib/paths\0 # NOTE: This *should* work on AIX, AFAICT, but it does # *not*. See above for special handling for AIX. #TODO: should this regex use re.DOTALL flag? pattern = re.compile(re.escape(from_str) + "([^\0]*)\0") def c_string_replace(match, before=from_str, after=to_str): lendiff = len(before) - len(after) s = after + match.group(1) + ("\0" * lendiff) + "\0" # Encode nulls as '0' instead of '\x00' so one can see # the before and after strings line up. #log("replace (length %d)\n\t%s\nwith (length %d)\n\t%s", # % (len(match.group(0)), # repr(match.group(0)).replace("\\x00", '0'), # len(s), # repr(s).replace("\\x00", '0'))) return s content = pattern.sub(c_string_replace, content) else: #log("replace (length %d)\n\t%s\nwith (length %d)\n\t%s", # % (len(from_str), from_str, len(to_str), to_str)) content = content.replace(from_str, to_str) # Sometimes get the following error. Avoid it by removing file first. # IOError: [Errno 26] Text file busy: '$path' os.remove(path) fout = open(path, 'wb') try: fout.write(content) finally: fout.close() os.chmod(path, perm) # restore permissions def relocate_python(install_prefix, verbose=False): """Relocate this Python installation. "Relocation" involves updating hardcoded shebang lines in Python scripts and (on some platforms) binary patching of built-in runtime-lib-paths to point to the given install prefix. """ import sys import os from os.path import isabs, join, splitext if verbose: def log(s): sys.stderr.write(s+"\n") else: def log(s): pass assert isabs(install_prefix) if len(install_prefix) > shortest_original_prefix_length: raise RuntimeError("cannot properly relocate this Python " "installation (prefix='%s') because install " "path (%d chars) is longer than the original " "build prefix (%d chars)" % (install_prefix, len(install_prefix), shortest_original_prefix_length)) log("relocate this Python to '%s'" % install_prefix) for prefix in prefixes: if prefix == install_prefix: continue for dirpath, dirnames, filenames in os.walk(install_prefix): for filename in filenames: if splitext(filename)[1] in (".pyo", ".pyc"): continue _relocate_path(join(dirpath, filename), prefix, install_prefix, log) #---- mainline if __name__ == "__main__": if "--relocate" in sys.argv: # Determine the install_prefix holding this module and relocate # that Python installation. 
if sys.platform == "win32": raise RuntimeError("relocating a Python install isn't " "necessary on Windows") # <prefix>\lib\pythonX.Y\site-packages\activestate.py from os.path import dirname, exists, join, basename, abspath install_prefix = dirname(dirname(dirname(dirname(abspath(__file__))))) python_exe = join(install_prefix, "bin", "python") if not exists(python_exe): raise RuntimeError("'%s' does not exist: it doesn't look like " "'%s' is in a Python site-packages dir" % (python_exe, basename(__file__))) del python_exe, dirname, exists, join, basename, abspath relocate_python(install_prefix, True) else: for key, value in sorted(version_info.items()): if value is None: continue if key.endswith("_src"): continue if key in ("platinfo", "configuration"): continue print("%s: %s" % (key, value))
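
# A toy sketch of the binary-patching rule implemented by c_string_replace()
# inside _relocate_path() above: when a shorter install prefix replaces a
# longer one inside a null-terminated string, the length difference is padded
# with NUL bytes so the binary keeps its original layout. The buffer and
# prefixes below are made up purely for illustration.
def _prefix_padding_example():
    old = b"F:\\as\\apy-trunk\\build\\image"
    new = b"C:\\Python27"
    tail = b"\\lib\\tcl8.5"
    buf = old + tail + b"\x00" + b"other-data"
    nul = buf.index(b"\x00")
    patched = (new + buf[len(old):nul]
               + b"\x00" * (len(old) - len(new)) + buf[nul:])
    # total length is unchanged, exactly as the relocation code requires
    assert len(patched) == len(buf)
    return patched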
import json import logging import inspect from .decorators import pipeline_functions, register_pipeline from indra.statements import get_statement_by_name, Statement logger = logging.getLogger(__name__) class AssemblyPipeline(): """An assembly pipeline that runs the specified steps on a given set of statements. Ways to initialize and run the pipeline (examples assume you have a list of INDRA Statements stored in the `stmts` variable.) >>> from indra.statements import * >>> map2k1 = Agent('MAP2K1', db_refs={'HGNC': '6840'}) >>> mapk1 = Agent('MAPK1', db_refs={'HGNC': '6871'}) >>> braf = Agent('BRAF') >>> stmts = [Phosphorylation(map2k1, mapk1, 'T', '185'), ... Phosphorylation(braf, map2k1)] 1) Provide a JSON file containing the steps, then use the classmethod `from_json_file`, and run it with the `run` method on a list of statements. This option allows storing pipeline versions in a separate file and reproducing the same results. All functions referenced in the JSON file have to be registered with the @register_pipeline decorator. >>> import os >>> path_this = os.path.dirname(os.path.abspath(__file__)) >>> filename = os.path.abspath( ... os.path.join(path_this, '..', 'tests', 'pipeline_test.json')) >>> ap = AssemblyPipeline.from_json_file(filename) >>> assembled_stmts = ap.run(stmts) 2) Initialize a pipeline with a list of steps and run it with the `run` method on a list of statements. All functions referenced in steps have to be registered with the @register_pipeline decorator. >>> steps = [ ... {"function": "filter_no_hypothesis"}, ... {"function": "filter_grounded_only", ... "kwargs": {"score_threshold": 0.8}} ... ] >>> ap = AssemblyPipeline(steps) >>> assembled_stmts = ap.run(stmts) 3) Initialize an empty pipeline and append/insert the steps one by one. Provide a function and its args and kwargs. For arguments that require calling a different function, use the RunnableArgument class. All functions referenced here have to be either imported and passed as function objects or registered with the @register_pipeline decorator and passed as function names (strings). The pipeline built this way can be optionally saved into a JSON file. >>> from indra.tools.assemble_corpus import * >>> from indra.ontology.world import load_world_ontology >>> from indra.belief.wm_scorer import get_eidos_scorer >>> ap = AssemblyPipeline() >>> ap.append(filter_no_hypothesis) >>> ap.append(filter_grounded_only) >>> ap.append(run_preassembly, ... belief_scorer=RunnableArgument(get_eidos_scorer), ... ontology=RunnableArgument(load_world_ontology)) >>> assembled_stmts = ap.run(stmts) >>> ap.to_json_file('filename.json') Parameters ---------- steps : list[dict] A list of dictionaries representing steps in the pipeline. Each step should have a 'function' key and, if appropriate, 'args' and 'kwargs' keys. Arguments can be simple values (strings, integers, booleans, lists, etc.) or can be functions themselves. In case an argument is a function or a result of another function, it should also be represented as a dictionary of a similar structure. If a function itself is an argument (and not its result), the dictionary should contain a key-value pair {'no_run': True}. If an argument is a type of a statement, it should be represented as a dictionary {'stmt_type': <name of a statement type>}. 
""" def __init__(self, steps=None): # This import is here to avoid circular imports # It is enough to import one function to get all registered functions from indra.tools.assemble_corpus import filter_grounded_only from indra.ontology.world import load_world_ontology from indra.ontology.bio import bio_ontology from indra.preassembler.grounding_mapper.gilda import ground_statements from indra.belief.wm_scorer import get_eidos_scorer from indra.preassembler.custom_preassembly import location_matches self.steps = steps if steps else [] @classmethod def from_json_file(cls, filename): """Create an instance of AssemblyPipeline from a JSON file with steps.""" with open(filename, 'r') as f: steps = json.load(f) ap = AssemblyPipeline(steps) return ap def to_json_file(self, filename): """Save AssemblyPipeline to a JSON file.""" with open(filename, 'w') as f: json.dump(self.steps, f, indent=1) def run(self, statements, **kwargs): """Run all steps of the pipeline. Parameters ---------- statements : list[indra.statements.Statement] A list of INDRA Statements to run the pipeline on. **kwargs : kwargs It is recommended to define all arguments for the steps functions in the steps definition, but it is also possible to provide some external objects (if it is not possible to provide them as a step argument) as kwargs to the entire pipeline here. One should be cautious to avoid kwargs name clashes between multiple functions (this value will be provided to all functions that expect an argument with the same name). To overwrite this value in other functions, provide it explicitly in the corresponding steps kwargs. Returns ------- list[indra.statements.Statement] The list of INDRA Statements resulting from running the pipeline on the list of input Statements. """ logger.info('Running the pipeline') for step in self.steps: statements = self.run_function(step, statements, **kwargs) return statements def append(self, func, *args, **kwargs): """Append a step to the end of the pipeline. Args and kwargs here can be of any type. All functions referenced here have to be either imported and passed as function objects or registered with @register_pipeline decorator and passed as function names (strings). For arguments that require calling a different function, use RunnableArgument class. Parameters ---------- func : str or function A function or the string name of a function to add to the pipeline. args : args Args that are passed to func when calling it. kwargs : kwargs Kwargs that are passed to func when calling it. """ if inspect.isfunction(func): func_name = func.__name__ if func_name not in pipeline_functions: register_pipeline(func) elif isinstance(func, str): func_name = func else: raise TypeError('Should be a function object or a string') new_step = self.create_new_step(func_name, *args, **kwargs) self.steps.append(new_step) def insert(self, ix, func, *args, **kwargs): """Insert a step to any position in the pipeline. Args and kwargs here can be of any type. All functions referenced here have to be either imported and passed as function objects or registered with @register_pipeline decorator and passed as function names (strings). For arguments that require calling a different function, use RunnableArgument class. Parameters ---------- func : str or function A function or the string name of a function to add to the pipeline. args : args Args that are passed to func when calling it. kwargs : kwargs Kwargs that are passed to func when calling it. 
""" if inspect.isfunction(func): func_name = func.__name__ if func_name not in pipeline_functions: register_pipeline(func) elif isinstance(func, str): func_name = func else: raise TypeError('Should be a function object or a string') new_step = self.create_new_step(func_name, *args, **kwargs) self.steps.insert(ix, new_step) def create_new_step(self, func_name, *args, **kwargs): """Create a dictionary representing a new step in the pipeline. Parameters ---------- func_name : str The string name of a function to create as a step. args : args Args that are passed to the function when calling it. kwargs : kwargs Kwargs that are passed to the function when calling it. Returns ------- dict A dict structure representing a step in the pipeline. """ assert self.get_function_from_name(func_name) new_step = {'function': func_name} if args: new_step['args'] = [jsonify_arg_input(arg) for arg in args] if kwargs: new_step['kwargs'] = { k: jsonify_arg_input(v) for (k, v) in kwargs.items()} return new_step @staticmethod def get_function_parameters(func_dict): """Retrieve a function name and arguments from function dictionary. Parameters ---------- func_dict : dict A dict structure representing a function and its args and kwargs. Returns ------- tuple of str, list and dict A tuple with the following elements: the name of the function, the args of the function, and the kwargs of the function. """ func_name = func_dict['function'] args = func_dict.get('args', []) kwargs = func_dict.get('kwargs', {}) return func_name, args, kwargs @staticmethod def get_function_from_name(name): """Return a function object by name if available or raise exception. Parameters ---------- name : str The name of the function. Returns ------- function The function that was found based on its name. If not found, a NotRegisteredFunctionError is raised. """ if name in pipeline_functions: return pipeline_functions[name] raise NotRegisteredFunctionError('%s is not registered' % name) @staticmethod def run_simple_function(func, *args, **kwargs): """Run a simple function and return the result. Simple here means a function all arguments of which are simple values (do not require extra function calls). Parameters ---------- func : function The function to call. args : args Args that are passed to the function when calling it. kwargs : kwargs Kwargs that are passed to the function when calling it. Returns ------- object Any value that the given function returns. """ statements = kwargs.pop('statements', None) if statements is not None: return func(statements, *args, **kwargs) return func(*args, **kwargs) def run_function(self, func_dict, statements=None, **kwargs): """Run a given function and return the results. For each of the arguments, if it requires an extra function call, recursively call the functions until we get a simple function. Parameters ---------- func_dict : dict A dict representing the function to call, its args and kwargs. args : args Args that are passed to the function when calling it. kwargs : kwargs Kwargs that are passed to the function when calling it. Returns ------- object Any value that the given function returns. 
""" func_name, func_args, func_kwargs = self.get_function_parameters( func_dict) func = self.get_function_from_name(func_name) logger.info('Calling %s' % func_name) new_args = [] new_kwargs = {} for arg in func_args: arg_value = self.get_argument_value(arg) new_args.append(arg_value) for k, v in func_kwargs.items(): kwarg_value = self.get_argument_value(v) new_kwargs[k] = kwarg_value if statements is not None: new_kwargs['statements'] = statements if kwargs: for k, v in kwargs.items(): if k not in new_kwargs and k in inspect.getargspec(func).args: new_kwargs[k] = v return self.run_simple_function(func, *new_args, **new_kwargs) @staticmethod def is_function(argument, keyword='function'): """Check if an argument should be converted to a specific object type, e.g. a function or a statement type. Parameters ---------- argument : dict or other object The argument is a dict, its keyword entry is checked, and if it is there, we return True, otherwise we return False. keyword : Optional[str] The keyword to check if it's there if the argument is a dict. Default: function """ if not isinstance(argument, dict): return False if keyword not in argument: return False return True def get_argument_value(self, arg_json): """Get a value of an argument from its json version.""" if self.is_function(arg_json, 'function'): # Argument is a function if arg_json.get('no_run', False): value = self.get_function_from_name(arg_json['function']) # Argument is a result of a function else: value = self.run_function(arg_json) # Argument is a statement type elif self.is_function(arg_json, 'stmt_type'): value = get_statement_by_name(arg_json.get('stmt_type')) # Argument is a simple value (str, int, boolean, etc.) else: value = arg_json return value def __len__(self): return len(self.steps) def __iter__(self): return iter(self.steps) class NotRegisteredFunctionError(Exception): pass class RunnableArgument(): """Class representing arguments generated by calling a function. RunnableArguments should be used as args or kwargs in AssemblyPipeline `append` and `insert` methods. Parameters ---------- func : str or function A function or a name of a function to be called to generate argument value. """ def __init__(self, func, *args, **kwargs): if inspect.isfunction(func): self.func_name = func.__name__ if self.func_name not in pipeline_functions: register_pipeline(func) elif isinstance(func, str): self.func_name = func else: raise TypeError('Should be a function object or a string') self.args = args self.kwargs = kwargs def to_json(self): """Jsonify to standard AssemblyPipeline step format.""" json_dict = {'function': self.func_name} new_args = [] new_kwargs = {} for arg in self.args: new_args.append(jsonify_arg_input(arg)) for k, v in self.kwargs.items(): new_kwargs[k] = jsonify_arg_input(v) if new_args: json_dict['args'] = new_args if new_kwargs: json_dict['kwargs'] = new_kwargs return json_dict def jsonify_arg_input(arg): """Jsonify user input (in AssemblyPipeline `append` and `insert` methods) into a standard step json.""" if isinstance(arg, RunnableArgument): return arg.to_json() # If a function object or name of a function is provided, we assume it # does not have to be run (function itself is argument). 
    if inspect.isfunction(arg):
        func_name = arg.__name__
        if func_name not in pipeline_functions:
            register_pipeline(arg)
        return {'function': func_name, 'no_run': True}
    if isinstance(arg, str) and arg in pipeline_functions:
        return {'function': arg, 'no_run': True}
    # For some functions a Statement type has to be passed as an argument
    if inspect.isclass(arg) and issubclass(arg, Statement):
        return {'stmt_type': arg.__name__}
    # Argument is a simple value and can be stored as provided
    return arg
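
# The dictionaries below sketch the step format that AssemblyPipeline stores
# in self.steps and that run_function()/get_argument_value() consume. The
# function names are the ones used in the class docstring and are assumed to
# be registered with @register_pipeline; they are illustrative only.
example_steps = [
    # plain step, optionally with args/kwargs holding simple values
    {"function": "filter_no_hypothesis"},
    {"function": "filter_grounded_only", "kwargs": {"score_threshold": 0.8}},
    # an argument can itself be a nested step whose *result* is passed on,
    # a function object passed as-is ({"no_run": True}), or an INDRA
    # Statement type ({"stmt_type": "Phosphorylation"})
    {"function": "run_preassembly",
     "kwargs": {"belief_scorer": {"function": "get_eidos_scorer"},
                "ontology": {"function": "load_world_ontology"}}},
]
# ap = AssemblyPipeline(example_steps)
# assembled_stmts = ap.run(stmts)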
# # Jasy - Web Tooling Framework # Copyright 2010-2012 Zynga Inc. # Copyright 2013-2014 Sebastian Werner # # # License: MPL 1.1/GPL 2.0/LGPL 2.1 # Authors: # - Brendan Eich <[email protected]> (Original JavaScript) (2004-2010) # - Sebastian Werner <[email protected]> (Python Port) (2010) # import jasy.script.parse.Node class VanillaBuilder: """The vanilla AST builder.""" def COMMENTS_add(self, currNode, prevNode, comments): if not comments: return currComments = [] prevComments = [] for comment in comments: # post comments - for previous node if comment.context == "inline": prevComments.append(comment) # all other comment styles are attached to the current one else: currComments.append(comment) # Merge with previously added ones if hasattr(currNode, "comments"): currNode.comments.extend(currComments) else: currNode.comments = currComments if prevNode: if hasattr(prevNode, "comments"): prevNode.comments.extend(prevComments) else: prevNode.comments = prevComments else: # Don't loose the comment in tree (if not previous node is there, attach it to this node) currNode.comments.extend(prevComments) def IF_build(self, tokenizer): return jasy.script.parse.Node.Node(tokenizer, "if") def IF_setCondition(self, node, expression): node.append(expression, "condition") def IF_setThenPart(self, node, statement): node.append(statement, "thenPart") def IF_setElsePart(self, node, statement): node.append(statement, "elsePart") def IF_finish(self, node): pass def SWITCH_build(self, tokenizer): node = jasy.script.parse.Node.Node(tokenizer, "switch") node.defaultIndex = -1 return node def SWITCH_setDiscriminant(self, node, expression): node.append(expression, "discriminant") def SWITCH_setDefaultIndex(self, node, index): node.defaultIndex = index def SWITCH_addCase(self, node, childNode): node.append(childNode) def SWITCH_finish(self, node): pass def CASE_build(self, tokenizer): return jasy.script.parse.Node.Node(tokenizer, "case") def CASE_setLabel(self, node, expression): node.append(expression, "label") def CASE_initializeStatements(self, node, tokenizer): node.append(jasy.script.parse.Node.Node(tokenizer, "block"), "statements") def CASE_addStatement(self, node, statement): node.statements.append(statement) def CASE_finish(self, node): pass def DEFAULT_build(self, tokenizer): return jasy.script.parse.Node.Node(tokenizer, "default") def DEFAULT_initializeStatements(self, node, tokenizer): node.append(jasy.script.parse.Node.Node(tokenizer, "block"), "statements") def DEFAULT_addStatement(self, node, statement): node.statements.append(statement) def DEFAULT_finish(self, node): pass def FOR_build(self, tokenizer): node = jasy.script.parse.Node.Node(tokenizer, "for") node.isLoop = True node.isEach = False return node def FOR_rebuildForEach(self, node): node.isEach = True # NB: This function is called after rebuildForEach, if that'statement called at all. def FOR_rebuildForIn(self, node): node.type = "for_in" def FOR_setCondition(self, node, expression): node.append(expression, "condition") def FOR_setSetup(self, node, expression): node.append(expression, "setup") def FOR_setUpdate(self, node, expression): node.append(expression, "update") def FOR_setObject(self, node, expression, forBlock=None): # wpbasti: not sure what forBlock stands for but it is used in the parser # JS tolerates the optinal unused parameter, but not so Python. 
node.append(expression, "object") def FOR_setIterator(self, node, expression, forBlock=None): # wpbasti: not sure what forBlock stands for but it is used in the parser # JS tolerates the optinal unused parameter, but not so Python. node.append(expression, "iterator") def FOR_setBody(self, node, statement): node.append(statement, "body") def FOR_finish(self, node): pass def WHILE_build(self, tokenizer): node = jasy.script.parse.Node.Node(tokenizer, "while") node.isLoop = True return node def WHILE_setCondition(self, node, expression): node.append(expression, "condition") def WHILE_setBody(self, node, statement): node.append(statement, "body") def WHILE_finish(self, node): pass def DO_build(self, tokenizer): node = jasy.script.parse.Node.Node(tokenizer, "do") node.isLoop = True return node def DO_setCondition(self, node, expression): node.append(expression, "condition") def DO_setBody(self, node, statement): node.append(statement, "body") def DO_finish(self, node): pass def BREAK_build(self, tokenizer): return jasy.script.parse.Node.Node(tokenizer, "break") def BREAK_setLabel(self, node, label): node.label = label def BREAK_setTarget(self, node, target): # Hint, no append() - relation, but not a child node.target = target def BREAK_finish(self, node): pass def CONTINUE_build(self, tokenizer): return jasy.script.parse.Node.Node(tokenizer, "continue") def CONTINUE_setLabel(self, node, label): node.label = label def CONTINUE_setTarget(self, node, target): # Hint, no append() - relation, but not a child node.target = target def CONTINUE_finish(self, node): pass def TRY_build(self, tokenizer): node = jasy.script.parse.Node.Node(tokenizer, "try") return node def TRY_setTryBlock(self, node, statement): node.append(statement, "tryBlock") def TRY_addCatch(self, node, childNode): node.append(childNode) def TRY_finishCatches(self, node): pass def TRY_setFinallyBlock(self, node, statement): node.append(statement, "finallyBlock") def TRY_finish(self, node): pass def CATCH_build(self, tokenizer): node = jasy.script.parse.Node.Node(tokenizer, "catch") return node def CATCH_wrapException(self, tokenizer): node = jasy.script.parse.Node.Node(tokenizer, "exception") node.value = tokenizer.token.value return node def CATCH_setException(self, node, exception): node.append(exception, "exception") def CATCH_setGuard(self, node, expression): node.append(expression, "guard") def CATCH_setBlock(self, node, statement): node.append(statement, "block") def CATCH_finish(self, node): pass def THROW_build(self, tokenizer): return jasy.script.parse.Node.Node(tokenizer, "throw") def THROW_setException(self, node, expression): node.append(expression, "exception") def THROW_finish(self, node): pass def RETURN_build(self, tokenizer): return jasy.script.parse.Node.Node(tokenizer, "return") def RETURN_setValue(self, node, expression): node.append(expression, "value") def RETURN_finish(self, node): pass def YIELD_build(self, tokenizer): return jasy.script.parse.Node.Node(tokenizer, "yield") def YIELD_setValue(self, node, expression): node.append(expression, "value") def YIELD_finish(self, node): pass def GENERATOR_build(self, tokenizer): return jasy.script.parse.Node.Node(tokenizer, "generator") def GENERATOR_setExpression(self, node, expression): node.append(expression, "expression") def GENERATOR_setTail(self, node, childNode): node.append(childNode, "tail") def GENERATOR_finish(self, node): pass def WITH_build(self, tokenizer): return jasy.script.parse.Node.Node(tokenizer, "with") def WITH_setObject(self, node, expression): 
node.append(expression, "object") def WITH_setBody(self, node, statement): node.append(statement, "body") def WITH_finish(self, node): pass def DEBUGGER_build(self, tokenizer): return jasy.script.parse.Node.Node(tokenizer, "debugger") def SEMICOLON_build(self, tokenizer): return jasy.script.parse.Node.Node(tokenizer, "semicolon") def SEMICOLON_setExpression(self, node, expression): node.append(expression, "expression") def SEMICOLON_finish(self, node): pass def LABEL_build(self, tokenizer): return jasy.script.parse.Node.Node(tokenizer, "label") def LABEL_setLabel(self, node, label): node.label = label def LABEL_setStatement(self, node, statement): node.append(statement, "statement") def LABEL_finish(self, node): pass def FUNCTION_build(self, tokenizer): node = jasy.script.parse.Node.Node(tokenizer) if node.type != "function": if tokenizer.token.value == "get": node.type = "getter" else: node.type = "setter" return node def FUNCTION_setName(self, node, identifier): node.name = identifier def FUNCTION_initParams(self, node, tokenizer): node.append(jasy.script.parse.Node.Node(tokenizer, "list"), "params") def FUNCTION_wrapParam(self, tokenizer): param = jasy.script.parse.Node.Node(tokenizer) param.value = tokenizer.token.value return param def FUNCTION_addParam(self, node, tokenizer, expression): node.params.append(expression) def FUNCTION_setExpressionClosure(self, node, expressionClosure): node.expressionClosure = expressionClosure def FUNCTION_setBody(self, node, statement): # copy over function parameters to function body params = getattr(node, "params", None) # if params: # statement.params = [param.value for param in params] node.append(statement, "body") def FUNCTION_hoistVars(self, x): pass def FUNCTION_finish(self, node, x): pass def VAR_build(self, tokenizer): return jasy.script.parse.Node.Node(tokenizer, "var") def VAR_addDecl(self, node, childNode, childContext=None): node.append(childNode) def VAR_finish(self, node): pass def CONST_build(self, tokenizer): return jasy.script.parse.Node.Node(tokenizer, "const") def CONST_addDecl(self, node, childNode, childContext=None): node.append(childNode) def CONST_finish(self, node): pass def LET_build(self, tokenizer): return jasy.script.parse.Node.Node(tokenizer, "let") def LET_addDecl(self, node, childNode, childContext=None): node.append(childNode) def LET_finish(self, node): pass def DECL_build(self, tokenizer): return jasy.script.parse.Node.Node(tokenizer, "declaration") def DECL_setNames(self, node, expression): node.append(expression, "names") def DECL_setName(self, node, identifier): node.name = identifier def DECL_setInitializer(self, node, expression): node.append(expression, "initializer") def DECL_setReadOnly(self, node, readOnly): node.readOnly = readOnly def DECL_finish(self, node): pass def LETBLOCK_build(self, tokenizer): node = jasy.script.parse.Node.Node(tokenizer, "let_block") return node def LETBLOCK_setVariables(self, node, childNode): node.append(childNode, "variables") def LETBLOCK_setExpression(self, node, expression): node.append(expression, "expression") def LETBLOCK_setBlock(self, node, statement): node.append(statement, "block") def LETBLOCK_finish(self, node): pass def BLOCK_build(self, tokenizer, id): node = jasy.script.parse.Node.Node(tokenizer, "block") # node.id = id return node def BLOCK_hoistLets(self, node): pass def BLOCK_addStatement(self, node, childNode): node.append(childNode) def BLOCK_finish(self, node): pass def EXPRESSION_build(self, tokenizer, tokenType): return 
jasy.script.parse.Node.Node(tokenizer, tokenType) def EXPRESSION_addOperand(self, node, childNode): node.append(childNode) def EXPRESSION_finish(self, node): pass def ASSIGN_build(self, tokenizer): return jasy.script.parse.Node.Node(tokenizer, "assign") def ASSIGN_addOperand(self, node, childNode): node.append(childNode) def ASSIGN_setAssignOp(self, node, operator): node.assignOp = operator def ASSIGN_finish(self, node): pass def HOOK_build(self, tokenizer): return jasy.script.parse.Node.Node(tokenizer, "hook") def HOOK_setCondition(self, node, expression): node.append(expression, "condition") def HOOK_setThenPart(self, node, childNode): node.append(childNode, "thenPart") def HOOK_setElsePart(self, node, childNode): node.append(childNode, "elsePart") def HOOK_finish(self, node): pass def OR_build(self, tokenizer): return jasy.script.parse.Node.Node(tokenizer, "or") def OR_addOperand(self, node, childNode): node.append(childNode) def OR_finish(self, node): pass def AND_build(self, tokenizer): return jasy.script.parse.Node.Node(tokenizer, "and") def AND_addOperand(self, node, childNode): node.append(childNode) def AND_finish(self, node): pass def BITWISEOR_build(self, tokenizer): return jasy.script.parse.Node.Node(tokenizer, "bitwise_or") def BITWISEOR_addOperand(self, node, childNode): node.append(childNode) def BITWISEOR_finish(self, node): pass def BITWISEXOR_build(self, tokenizer): return jasy.script.parse.Node.Node(tokenizer, "bitwise_xor") def BITWISEXOR_addOperand(self, node, childNode): node.append(childNode) def BITWISEXOR_finish(self, node): pass def BITWISEAND_build(self, tokenizer): return jasy.script.parse.Node.Node(tokenizer, "bitwise_and") def BITWISEAND_addOperand(self, node, childNode): node.append(childNode) def BITWISEAND_finish(self, node): pass def EQUALITY_build(self, tokenizer): # NB: tokenizer.token.type must be "eq", "ne", "strict_eq", or "strict_ne". return jasy.script.parse.Node.Node(tokenizer) def EQUALITY_addOperand(self, node, childNode): node.append(childNode) def EQUALITY_finish(self, node): pass def RELATIONAL_build(self, tokenizer): # NB: tokenizer.token.type must be "lt", "le", "ge", or "gt". return jasy.script.parse.Node.Node(tokenizer) def RELATIONAL_addOperand(self, node, childNode): node.append(childNode) def RELATIONAL_finish(self, node): pass def SHIFT_build(self, tokenizer): # NB: tokenizer.token.type must be "lsh", "rsh", or "ursh". return jasy.script.parse.Node.Node(tokenizer) def SHIFT_addOperand(self, node, childNode): node.append(childNode) def SHIFT_finish(self, node): pass def ADD_build(self, tokenizer): # NB: tokenizer.token.type must be "plus" or "minus". return jasy.script.parse.Node.Node(tokenizer) def ADD_addOperand(self, node, childNode): node.append(childNode) def ADD_finish(self, node): pass def MULTIPLY_build(self, tokenizer): # NB: tokenizer.token.type must be "mul", "div", or "mod". return jasy.script.parse.Node.Node(tokenizer) def MULTIPLY_addOperand(self, node, childNode): node.append(childNode) def MULTIPLY_finish(self, node): pass def UNARY_build(self, tokenizer): # NB: tokenizer.token.type must be "delete", "void", "typeof", "not", "bitwise_not", # "unary_plus", "unary_minus", "increment", or "decrement". 
if tokenizer.token.type == "plus": tokenizer.token.type = "unary_plus" elif tokenizer.token.type == "minus": tokenizer.token.type = "unary_minus" return jasy.script.parse.Node.Node(tokenizer) def UNARY_addOperand(self, node, childNode): node.append(childNode) def UNARY_setPostfix(self, node): node.postfix = True def UNARY_finish(self, node): pass def MEMBER_build(self, tokenizer, tokenType=None): node = jasy.script.parse.Node.Node(tokenizer, tokenType) if node.type == "identifier": node.value = tokenizer.token.value return node def MEMBER_rebuildNewWithArgs(self, node): node.type = "new_with_args" def MEMBER_addOperand(self, node, childNode): node.append(childNode) def MEMBER_finish(self, node): pass def PRIMARY_build(self, tokenizer, tokenType): # NB: tokenizer.token.type must be "null", "this", "true", "false", "identifier", "number", "string", or "regexp". node = jasy.script.parse.Node.Node(tokenizer, tokenType) if tokenType in ("identifier", "string", "regexp", "number"): node.value = tokenizer.token.value return node def PRIMARY_finish(self, node): pass def ARRAYINIT_build(self, tokenizer): return jasy.script.parse.Node.Node(tokenizer, "array_init") def ARRAYINIT_addElement(self, node, childNode): node.append(childNode) def ARRAYINIT_finish(self, node): pass def ARRAYCOMP_build(self, tokenizer): return jasy.script.parse.Node.Node(tokenizer, "array_comp") def ARRAYCOMP_setExpression(self, node, expression): node.append(expression, "expression") def ARRAYCOMP_setTail(self, node, childNode): node.append(childNode, "tail") def ARRAYCOMP_finish(self, node): pass def COMPTAIL_build(self, tokenizer): return jasy.script.parse.Node.Node(tokenizer, "comp_tail") def COMPTAIL_setGuard(self, node, expression): node.append(expression, "guard") def COMPTAIL_addFor(self, node, childNode): node.append(childNode, "for") def COMPTAIL_finish(self, node): pass def OBJECTINIT_build(self, tokenizer): return jasy.script.parse.Node.Node(tokenizer, "object_init") def OBJECTINIT_addProperty(self, node, childNode): node.append(childNode) def OBJECTINIT_finish(self, node): pass def PROPERTYINIT_build(self, tokenizer): return jasy.script.parse.Node.Node(tokenizer, "property_init") def PROPERTYINIT_addOperand(self, node, childNode): node.append(childNode) def PROPERTYINIT_finish(self, node): pass def COMMA_build(self, tokenizer): return jasy.script.parse.Node.Node(tokenizer, "comma") def COMMA_addOperand(self, node, childNode): node.append(childNode) def COMMA_finish(self, node): pass def LIST_build(self, tokenizer): return jasy.script.parse.Node.Node(tokenizer, "list") def LIST_addOperand(self, node, childNode): node.append(childNode) def LIST_finish(self, node): pass def setHoists(self, id, vds): pass
'''tzinfo timezone information for America/Asuncion.''' from pytz.tzinfo import DstTzInfo from pytz.tzinfo import memorized_datetime as d from pytz.tzinfo import memorized_ttinfo as i class Asuncion(DstTzInfo): '''America/Asuncion timezone definition. See datetime.tzinfo for details''' zone = 'America/Asuncion' _utc_transition_times = [ d(1,1,1,0,0,0), d(1931,10,10,3,50,40), d(1972,10,1,4,0,0), d(1974,4,1,3,0,0), d(1975,10,1,4,0,0), d(1976,3,1,3,0,0), d(1976,10,1,4,0,0), d(1977,3,1,3,0,0), d(1977,10,1,4,0,0), d(1978,3,1,3,0,0), d(1978,10,1,4,0,0), d(1979,4,1,3,0,0), d(1979,10,1,4,0,0), d(1980,4,1,3,0,0), d(1980,10,1,4,0,0), d(1981,4,1,3,0,0), d(1981,10,1,4,0,0), d(1982,4,1,3,0,0), d(1982,10,1,4,0,0), d(1983,4,1,3,0,0), d(1983,10,1,4,0,0), d(1984,4,1,3,0,0), d(1984,10,1,4,0,0), d(1985,4,1,3,0,0), d(1985,10,1,4,0,0), d(1986,4,1,3,0,0), d(1986,10,1,4,0,0), d(1987,4,1,3,0,0), d(1987,10,1,4,0,0), d(1988,4,1,3,0,0), d(1988,10,1,4,0,0), d(1989,4,1,3,0,0), d(1989,10,22,4,0,0), d(1990,4,1,3,0,0), d(1990,10,1,4,0,0), d(1991,4,1,3,0,0), d(1991,10,6,4,0,0), d(1992,3,1,3,0,0), d(1992,10,5,4,0,0), d(1993,3,31,3,0,0), d(1993,10,1,4,0,0), d(1994,2,27,3,0,0), d(1994,10,1,4,0,0), d(1995,2,26,3,0,0), d(1995,10,1,4,0,0), d(1996,3,1,3,0,0), d(1996,10,6,4,0,0), d(1997,2,23,3,0,0), d(1997,10,5,4,0,0), d(1998,3,1,3,0,0), d(1998,10,4,4,0,0), d(1999,3,7,3,0,0), d(1999,10,3,4,0,0), d(2000,3,5,3,0,0), d(2000,10,1,4,0,0), d(2001,3,4,3,0,0), d(2001,10,7,4,0,0), d(2002,4,7,3,0,0), d(2002,9,1,4,0,0), d(2003,4,6,3,0,0), d(2003,9,7,4,0,0), d(2004,4,4,3,0,0), d(2004,10,17,4,0,0), d(2005,3,13,3,0,0), d(2005,10,16,4,0,0), d(2006,3,12,3,0,0), d(2006,10,15,4,0,0), d(2007,3,11,3,0,0), d(2007,10,21,4,0,0), d(2008,3,9,3,0,0), d(2008,10,19,4,0,0), d(2009,3,8,3,0,0), d(2009,10,18,4,0,0), d(2010,3,14,3,0,0), d(2010,10,17,4,0,0), d(2011,3,13,3,0,0), d(2011,10,16,4,0,0), d(2012,3,11,3,0,0), d(2012,10,21,4,0,0), d(2013,3,10,3,0,0), d(2013,10,20,4,0,0), d(2014,3,9,3,0,0), d(2014,10,19,4,0,0), d(2015,3,8,3,0,0), d(2015,10,18,4,0,0), d(2016,3,13,3,0,0), d(2016,10,16,4,0,0), d(2017,3,12,3,0,0), d(2017,10,15,4,0,0), d(2018,3,11,3,0,0), d(2018,10,21,4,0,0), d(2019,3,10,3,0,0), d(2019,10,20,4,0,0), d(2020,3,8,3,0,0), d(2020,10,18,4,0,0), d(2021,3,14,3,0,0), d(2021,10,17,4,0,0), d(2022,3,13,3,0,0), d(2022,10,16,4,0,0), d(2023,3,12,3,0,0), d(2023,10,15,4,0,0), d(2024,3,10,3,0,0), d(2024,10,20,4,0,0), d(2025,3,9,3,0,0), d(2025,10,19,4,0,0), d(2026,3,8,3,0,0), d(2026,10,18,4,0,0), d(2027,3,14,3,0,0), d(2027,10,17,4,0,0), d(2028,3,12,3,0,0), d(2028,10,15,4,0,0), d(2029,3,11,3,0,0), d(2029,10,21,4,0,0), d(2030,3,10,3,0,0), d(2030,10,20,4,0,0), d(2031,3,9,3,0,0), d(2031,10,19,4,0,0), d(2032,3,14,3,0,0), d(2032,10,17,4,0,0), d(2033,3,13,3,0,0), d(2033,10,16,4,0,0), d(2034,3,12,3,0,0), d(2034,10,15,4,0,0), d(2035,3,11,3,0,0), d(2035,10,21,4,0,0), d(2036,3,9,3,0,0), d(2036,10,19,4,0,0), d(2037,3,8,3,0,0), d(2037,10,18,4,0,0), ] _transition_info = [ i(-13860,0,'AMT'), i(-14400,0,'PYT'), i(-10800,0,'PYT'), i(-14400,0,'PYT'), i(-10800,3600,'PYST'), i(-14400,0,'PYT'), i(-10800,3600,'PYST'), i(-14400,0,'PYT'), i(-10800,3600,'PYST'), i(-14400,0,'PYT'), i(-10800,3600,'PYST'), i(-14400,0,'PYT'), i(-10800,3600,'PYST'), i(-14400,0,'PYT'), i(-10800,3600,'PYST'), i(-14400,0,'PYT'), i(-10800,3600,'PYST'), i(-14400,0,'PYT'), i(-10800,3600,'PYST'), i(-14400,0,'PYT'), i(-10800,3600,'PYST'), i(-14400,0,'PYT'), i(-10800,3600,'PYST'), i(-14400,0,'PYT'), i(-10800,3600,'PYST'), i(-14400,0,'PYT'), i(-10800,3600,'PYST'), i(-14400,0,'PYT'), i(-10800,3600,'PYST'), 
i(-14400,0,'PYT'), i(-10800,3600,'PYST'), i(-14400,0,'PYT'), i(-10800,3600,'PYST'), i(-14400,0,'PYT'), i(-10800,3600,'PYST'), i(-14400,0,'PYT'), i(-10800,3600,'PYST'), i(-14400,0,'PYT'), i(-10800,3600,'PYST'), i(-14400,0,'PYT'), i(-10800,3600,'PYST'), i(-14400,0,'PYT'), i(-10800,3600,'PYST'), i(-14400,0,'PYT'), i(-10800,3600,'PYST'), i(-14400,0,'PYT'), i(-10800,3600,'PYST'), i(-14400,0,'PYT'), i(-10800,3600,'PYST'), i(-14400,0,'PYT'), i(-10800,3600,'PYST'), i(-14400,0,'PYT'), i(-10800,3600,'PYST'), i(-14400,0,'PYT'), i(-10800,3600,'PYST'), i(-14400,0,'PYT'), i(-10800,3600,'PYST'), i(-14400,0,'PYT'), i(-10800,3600,'PYST'), i(-14400,0,'PYT'), i(-10800,3600,'PYST'), i(-14400,0,'PYT'), i(-10800,3600,'PYST'), i(-14400,0,'PYT'), i(-10800,3600,'PYST'), i(-14400,0,'PYT'), i(-10800,3600,'PYST'), i(-14400,0,'PYT'), i(-10800,3600,'PYST'), i(-14400,0,'PYT'), i(-10800,3600,'PYST'), i(-14400,0,'PYT'), i(-10800,3600,'PYST'), i(-14400,0,'PYT'), i(-10800,3600,'PYST'), i(-14400,0,'PYT'), i(-10800,3600,'PYST'), i(-14400,0,'PYT'), i(-10800,3600,'PYST'), i(-14400,0,'PYT'), i(-10800,3600,'PYST'), i(-14400,0,'PYT'), i(-10800,3600,'PYST'), i(-14400,0,'PYT'), i(-10800,3600,'PYST'), i(-14400,0,'PYT'), i(-10800,3600,'PYST'), i(-14400,0,'PYT'), i(-10800,3600,'PYST'), i(-14400,0,'PYT'), i(-10800,3600,'PYST'), i(-14400,0,'PYT'), i(-10800,3600,'PYST'), i(-14400,0,'PYT'), i(-10800,3600,'PYST'), i(-14400,0,'PYT'), i(-10800,3600,'PYST'), i(-14400,0,'PYT'), i(-10800,3600,'PYST'), i(-14400,0,'PYT'), i(-10800,3600,'PYST'), i(-14400,0,'PYT'), i(-10800,3600,'PYST'), i(-14400,0,'PYT'), i(-10800,3600,'PYST'), i(-14400,0,'PYT'), i(-10800,3600,'PYST'), i(-14400,0,'PYT'), i(-10800,3600,'PYST'), i(-14400,0,'PYT'), i(-10800,3600,'PYST'), i(-14400,0,'PYT'), i(-10800,3600,'PYST'), i(-14400,0,'PYT'), i(-10800,3600,'PYST'), i(-14400,0,'PYT'), i(-10800,3600,'PYST'), i(-14400,0,'PYT'), i(-10800,3600,'PYST'), i(-14400,0,'PYT'), i(-10800,3600,'PYST'), i(-14400,0,'PYT'), i(-10800,3600,'PYST'), i(-14400,0,'PYT'), i(-10800,3600,'PYST'), i(-14400,0,'PYT'), i(-10800,3600,'PYST'), i(-14400,0,'PYT'), i(-10800,3600,'PYST'), ] Asuncion = Asuncion()
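
# A small usage sketch for the Asuncion tzinfo defined above, relying on the
# standard pytz localize() workflow. Mid-January 2011 falls inside the DST
# window listed in the transition tables, so the zone reports PYST (UTC-3).
def _asuncion_dst_example():
    import datetime
    local = Asuncion.localize(datetime.datetime(2011, 1, 15, 12, 0))
    assert local.tzname() == 'PYST'
    assert local.utcoffset() == datetime.timedelta(hours=-3)
    return local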
#!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import print_function import os import logging import ebstall.errors as errors import collections import re import ebstall.util as util import subprocess import types import ebstall.osutil as osutil import shutil import pkg_resources __author__ = 'dusanklinec' logger = logging.getLogger(__name__) CONFIG_LINE_BLANK = 0 CONFIG_LINE_COMMENT = 1 CONFIG_LINE_CMD_COMMENT = 2 CONFIG_LINE_CMD = 3 class ConfigLine(object): """ # One open vpn config line """ def __init__(self, idx=None, raw=None, ltype=None, cmd=None, params=None, comment=None, paired=False, newline='\n', *args, **kwargs): self.idx = idx self._raw = raw self.ltype = ltype self.cmd = cmd self.params = params self.comment = comment self.paired = paired self.newline = newline def __repr__(self): return 'ConfigLine(idx=%r, ltype=%r, cmd=%r, params=%r, comment=%r, raw=%r, paired=%r)' \ % (self.idx, self.ltype, self.cmd, self.params, self.comment, self._raw, self.paired) def __str__(self): return self.raw @property def raw(self): """ Builds raw config line :return: """ if self.ltype in [CONFIG_LINE_COMMENT, CONFIG_LINE_BLANK]: return util.defval(self._raw, '') if self.paired: res = ['<%s>' % self.cmd, self.params, '</%s>' % self.cmd] if self.ltype == CONFIG_LINE_CMD_COMMENT: return ';' + (''.join(res)).strip() return (self.newline.join(res)).strip() res = '' if self.ltype == CONFIG_LINE_CMD else ';' res += '%s %s %s' % (util.defval(self.cmd, ''), util.defval(self.params, ''), util.defval(self.comment, '')) return res.strip() @raw.setter def raw(self, val): self._raw = val @classmethod def build(cls, line, idx=0): line = line.strip() cl = cls(idx=idx, raw=line) if line is None or len(line.strip()) == 0: cl.ltype = CONFIG_LINE_BLANK return cl cmt_match = re.match(r'^\s*#.*', line) if cmt_match is not None: cl.ltype = CONFIG_LINE_COMMENT return cl cmd_cmt_match = re.match(r'^\s*;.*', line) cmd_match = re.match(r'^\s*(;)?\s*([a-zA-Z0-9\-_]+)(\s+.+?)?(\s*(#|;).+)??$', line) cmd_pair = re.match(r'^\s*(;)?\s*<([a-zA-Z0-9\-_]+)>(\s+.+?)?</([a-zA-Z0-9\-_]+)>$', line, re.MULTILINE | re.DOTALL) if cmd_pair: cl.ltype = CONFIG_LINE_CMD if cmd_pair.group(1) is None else CONFIG_LINE_CMD_COMMENT open_tag = cmd_pair.group(2) data_tag = cmd_pair.group(3) close_tag = cmd_pair.group(4) if open_tag != close_tag: raise ValueError('Open tag does not equal close tag') cl.cmd = open_tag cl.params = data_tag.strip() cl.paired = True return cl if cmd_match is None and cmd_cmt_match is None: logger.debug('VPN unrecognized config line: %s' % line) cl.ltype = CONFIG_LINE_COMMENT return cl if cmd_match is None and cmd_cmt_match is not None: cl.ltype = CONFIG_LINE_COMMENT return cl cl.ltype = CONFIG_LINE_CMD if cmd_match.group(1) is None else CONFIG_LINE_CMD_COMMENT cl.cmd = util.strip(cmd_match.group(2)) cl.params = util.strip(cmd_match.group(3)) cl.comment = util.strip(cmd_match.group(4)) return cl class OpenVpnConfig(object): """ Parses OpenVPN configuration, allows to modify the configuration and save changes back to the file. 
""" def __init__(self, config_path=None, static_config=None, audit=None, newline='\n', *args, **kwargs): self.config_path = config_path self.static_config = static_config self.config_data = None self.config_modified = False self.audit = audit self.newline = newline def load(self): """ Loads the config file :return: """ self.config_data = self.load_config_file_lines() def load_config_file_lines(self): """ Loads config file to a string :return: array of ConfigLine or None if file does not exist """ config = [] lines = [] cpath = self.config_path if cpath is None or not os.path.exists(cpath): bare = self.static_config lines = [x.strip() for x in bare.split('\n')] else: with open(cpath, 'r') as fh: for line in fh: lines.append(line.strip()) self.audit.audit_file_read(cpath) # Parsing config file line by line, with support for paired config tags, e.g., <ca>ABF...</ca> # Paired tags can be multiline, in that case we consider it as one string, with \n characters. paired_tag = None paired_buff = [] for idx, line in enumerate(lines): cline = line.strip() # Opened paired tag. Either find a closing tag or add line to the buffer and continue with reading. if paired_tag is not None: end_tag = '</%s>' % paired_tag paired_buff.append(line) if end_tag in cline and not cline.endswith(end_tag): raise ValueError('Parse error, closing tag is on the same line, but not the last element') elif end_tag in cline: ln = ConfigLine.build(line=self.newline.join(paired_buff), idx=idx) ln.newline = self.newline config.append(ln) paired_tag = None continue # Check for opening tag pair_match = re.match(r'^\s*(;)?\s*<([a-zA-Z0-9\-_]+)>(\s+.+?)?$', cline) if pair_match is not None: if paired_tag is not None: raise ValueError('Parse error, unclosed previously opened tag: %s' % paired_tag) paired_buff = [line] paired_tag = pair_match.group(2) end_tag = '</%s>' % paired_tag tail = pair_match.group(3) if tail is not None and end_tag in tail and not tail.endswith(end_tag): raise ValueError('Parse error, closing tag is on the same line, but not the last element') if tail is not None and end_tag in tail: ln = ConfigLine.build(line=line, idx=idx) config.append(ln) paired_tag = None continue if paired_tag is not None: continue # Normal one-line directive ln = ConfigLine.build(line=line, idx=idx) config.append(ln) if paired_tag is not None: raise ValueError('Parsing error, unclosed paired tag %s' % paired_tag) return config def set_config_value(self, cmd, values=None, remove=False, under_directive=None): """ Sets command to the specified value in the configuration file. Loads file from the disk if server_config_data is None (file was not yet loaded). Supports also multicommands - one command with more values. Modifies self.config_data, self.config_modified :param cmd: :param values: single value or array of values for multi-commands (e.g., push). None & remove -> remove all commands. Otherwise just commands with the given values are removed. 
:param remove: if True, configuration command is removed :param under_directive: if specified, command is placed under specified directive, if exists :return: True if file was modified """ # If file is not loaded - load if self.config_data is None: self.config_data = self.load_config_file_lines() # default position - end of the config file last_cmd_idx = len(self.config_data) - 1 file_changed = False single_directive = False # no parameter given if values is None: single_directive = True values = [None] if not isinstance(values, types.ListType): values = [values] values_set = [False] * len(values) for idx, cfg in enumerate(self.config_data): if cfg.ltype not in [CONFIG_LINE_CMD, CONFIG_LINE_CMD_COMMENT]: continue if under_directive is not None and util.equals_any(cfg.cmd, under_directive): last_cmd_idx = idx if cfg.cmd != cmd: continue # Only commands of interest here last_cmd_idx = idx is_desired_value = cfg.params in values is_desired_value |= remove and (util.is_empty(values) or single_directive) is_desired_value |= not remove and (util.is_empty(values) or single_directive) and util.is_empty(cfg.params) value_idx = values.index(cfg.params) if not remove and cfg.params in values else None if is_desired_value: if cfg.ltype == CONFIG_LINE_CMD and not remove: # Command is already set to the same value. File not modified. # Cannot quit yet, has to comment out other values if value_idx is not None: if not values_set[value_idx]: values_set[value_idx] = True else: cfg.ltype = CONFIG_LINE_CMD_COMMENT file_changed = True pass elif cfg.ltype == CONFIG_LINE_CMD: # Remove command - comment out cfg.ltype = CONFIG_LINE_CMD_COMMENT file_changed = True elif cfg.ltype == CONFIG_LINE_CMD_COMMENT and remove: # Remove && comment - leave as it is # Cannot quit yet, has to comment out other values pass else: # CONFIG_LINE_CMD_COMMENT and not remove. # Just change the type to active value - switch from comment to command # Cannot quit yet, has to comment out other values do_change = True if value_idx is not None: if not values_set[value_idx]: values_set[value_idx] = True else: do_change = False if do_change: cfg.ltype = CONFIG_LINE_CMD file_changed = True elif cfg.ltype == CONFIG_LINE_CMD and not remove: # Same command, but different value - comment this out # If remove is True, only desired values were removed. cfg.ltype = CONFIG_LINE_CMD_COMMENT file_changed = True if remove: self.config_modified |= file_changed return file_changed # Add those commands not set in the cycle above ctr = 0 for idx, cval in enumerate(values): if values_set[idx]: continue cl = ConfigLine(idx=None, raw=None, ltype=CONFIG_LINE_CMD, cmd=cmd, params=cval) self.config_data.insert(last_cmd_idx + 1 + ctr, cl) ctr += 1 file_changed = True self.config_modified |= file_changed return file_changed def dump(self): """ Dumps config to the string :return: """ data = [] for cl in self.config_data: data.append(cl.raw) return self.newline.join(data) def update_config_file(self, force=False): """ Updates server configuration file. 
Resets server_config_modified after the file update was flushed to the disk :return: True if file was modified """ if not force and not self.config_modified: return False fh, backup = util.safe_create_with_backup(self.config_path, mode='w', chmod=0o644, backup_suffix='.backup') with fh: for cl in self.config_data: fh.write(cl.raw + self.newline) self.audit.audit_file_write(self.config_path) self.config_modified = False # reset after flush return True class OpenVpn(object): """ OpenVPN server configuration & management """ SETTINGS_DIR = '/etc/openvpn' SETTINGS_FILE = 'server.conf' PORT_NUM = 1194 PORT_TCP = True def __init__(self, sysconfig=None, audit=None, write_dots=False, client_config_path=None, *args, **kwargs): self.sysconfig = sysconfig self.write_dost = write_dots self.audit = audit self.doing_reinstall = False # Result of load_config_file_lines self.config = None self.server_config = None self.client_config = None self.client_config_windows = None self.client_config_path = client_config_path self.client_config_path_windows = None # # Settings # def get_ip_net(self): """ Network address for the VPN server :return: """ return '10.8.0.0' def get_ip_vpn_server(self): """ Returns IP address of the VPN server for clients on the VPN :return: """ return '10.8.0.1' def get_ip_net_size(self): """ returns network size of the network allocated for OpenVPN :return: """ return 24 def get_ip_mask(self): """ Returns the mask of the network used by OpenVPN :return: """ return util.net_size_to_mask(self.get_ip_net_size()) def get_port(self): """ Returns port to use for OpenVPN :return: (port, tcp) """ return self.PORT_NUM, self.PORT_TCP def get_user(self): """ returns user the VPN server is going to run under :return: """ return 'nobody' def get_group(self): """ returns user the VPN server is going to run under :return: """ return 'nobody' # # server.conf reading & modification # def get_config_dir(self): return self.SETTINGS_DIR def get_config_dir_subfile(self, filename): return os.path.join(self.get_config_dir(), filename) def get_config_file_path(self): """ Returns config file path :return: server config file path """ return os.path.join(self.SETTINGS_DIR, self.SETTINGS_FILE) def load_static_config(self): """ Loads static config from the package :return: """ resource_package = __name__ resource_path = '/'.join(('..', 'consts', 'ovpn-server.conf')) return pkg_resources.resource_string(resource_package, resource_path) def init_server_config(self): """ Initializes server configuration parser :return: """ if self.server_config is None: self.server_config = OpenVpnConfig(config_path=self.get_config_file_path(), static_config=self.load_static_config(), audit=self.audit) def init_client_config(self): """ Client configuration parser :return: """ if self.client_config_path is None: logger.debug('Client configuration path not provided') return if not os.path.exists(self.client_config_path): raise errors.SetupError('Could not find client VPN configuration file: %s' % self.client_config_path) if self.client_config is None: self.client_config = OpenVpnConfig(config_path=self.client_config_path, audit=self.audit) # # Configuration # def load_from_config(self): """ Loads configuration from settings - for already configured VPN server Can load IP addresses and so on. :return: """ pass def generate_dh_group(self, overwrite=True): """ Generates a new Diffie-Hellman group for the server. 
openssl dhparam -out dh2048.pem 2048 :return: """ size = 2048 # constant for now dh_file = os.path.join(self.SETTINGS_DIR, 'dh%d.pem' % size) if not overwrite and os.path.exists(dh_file): logger.debug('VPN DH file exists, skipping') return 0 cmd = 'sudo openssl dhparam -out \'%s\' %d' % (dh_file, size) return self.sysconfig.exec_shell(cmd, write_dots=self.write_dost) def configure_crl(self, crl_path): """ Configures server with the given CRL file :param crl_path: :return: True if file was changed """ self.init_server_config() self.server_config.set_config_value('crl-verify', crl_path, remove=crl_path is None, under_directive='key') return self.server_config.update_config_file() def configure_server(self): """ Perform base server configuration. :return: True if file was changed """ port, tcp = self.get_port() self.init_server_config() self.server_config.set_config_value('port', '%s' % port) self.server_config.set_config_value('proto', 'udp' if not tcp else 'tcp') self.server_config.set_config_value('server', '%s %s' % (self.get_ip_net(), self.get_ip_mask())) self.server_config.set_config_value('dh', 'dh2048.pem') self.server_config.set_config_value('ca', 'ca.crt') self.server_config.set_config_value('cert', 'server.crt') self.server_config.set_config_value('key', 'server.key') self.server_config.set_config_value('status', 'openvpn-status.log 10') self.server_config.set_config_value('client-to-client') self.server_config.set_config_value('persist-tun') # needed to chroot the process self.server_config.set_config_value('comp-lzo', remove=True) self.server_config.set_config_value('keepalive', '2 20') self.server_config.set_config_value('topology', 'subnet') self.server_config.set_config_value('sndbuf', '0') self.server_config.set_config_value('rcvbuf', '0') # Protocol dependent if tcp: self.server_config.set_config_value('replay-window', remove=True) else: self.server_config.set_config_value('replay-window', '2048') self.server_config.set_config_value('cipher', 'AES-256-CBC') self.server_config.set_config_value('auth', 'SHA256') # This can be enabled after certificates are generated with exact usage. 
# self.server_config.set_config_value('remote-cert-tls', 'server') self.server_config.set_config_value('user', self.get_user()) self.server_config.set_config_value('group', self.get_group()) # Use internal DNS to prevent DNS leaks push_values = ['"dhcp-option DNS %s"' % self.get_ip_vpn_server(), '"redirect-gateway def1 bypass-dhcp"', '"sndbuf 393216"', '"rcvbuf 393216"', '"route 172.16.0.0 255.240.0.0 net_gateway"', '"route 192.168.0.0 255.255.0.0 net_gateway"', '"route 10.0.0.0 255.0.0.0 net_gateway"', # '"route 0.0.0.0 0.0.0.0"', # '"route-metric 512"' ] self.server_config.set_config_value('push', push_values) # Store VPN config to config self.config.vpn_server_addr = self.get_ip_vpn_server() self.config.vpn_net_addr = self.get_ip_net() self.config.vpn_net_size = self.get_ip_net_size() return self.server_config.update_config_file() def configure_server_scripts(self, connect=None, disconnect=None, up=None, down=None): """ Installs scripts to the VPN server :param connect: :param disconnect: :param up: :param down: :return: """ script_names = ['client-connect', 'client-disconnect', 'up', 'down'] script_values = [connect, disconnect, up, down] for idx, script_value in enumerate(script_values): script_name = script_names[idx] if script_value is None: self.server_config.set_config_value(script_name, remove=True) else: self.server_config.set_config_value(script_name, '"%s"' % script_value) return self.server_config.update_config_file() def _configure_client_win(self): """ Configures windows client :return: """ basename = os.path.basename(self.client_config_path) filename, file_extension = os.path.splitext(basename) self.client_config_path_windows = os.path.join(self.client_config_path.rsplit('/', 1)[0], '%s_windows%s' % (filename, file_extension)) shutil.copy(self.client_config_path, self.client_config_path_windows) self.client_config_windows = OpenVpnConfig(config_path=self.client_config_path_windows, audit=self.audit, newline='\r\n') self.client_config_windows.set_config_value('route', '0.0.0.0 0.0.0.0 vpn_gateway 999') self.client_config_windows.set_config_value('block-outside-dns') self.client_config_windows.update_config_file() def configure_client(self): """ Configures client VPN file :return: """ self.init_client_config() if self.client_config is None: logger.debug('Could not configure client - no config object') return port, tcp = self.get_port() self.client_config.set_config_value('proto', 'udp' if not tcp else 'tcp') self.client_config.set_config_value('cipher', 'AES-256-CBC') self.client_config.set_config_value('auth', 'SHA256') self.client_config.set_config_value('persist-tun', remove=True) self.client_config.set_config_value('keepalive', '2 20') self.client_config.set_config_value('comp-lzo', remove=True) self.client_config.set_config_value('block-outside-dns', remove=True) self.client_config.set_config_value('server-poll-timeout', '2') self.client_config.set_config_value('tran-window', '604800') # Protocol dependent if tcp: self.client_config.set_config_value('replay-window', remove=True) else: self.client_config.set_config_value('replay-window', '2048') ret = self.client_config.update_config_file() self._configure_client_win() return ret def store_server_cert(self, ca, cert, key): """ Stores CA, Cert, Key to the storage and fixes permissions :return: """ shutil.copy(ca, self.get_config_dir_subfile('ca.crt')) shutil.copy(cert, self.get_config_dir_subfile('server.crt')) # Key is tricky - do not expose the raw key key_file = self.get_config_dir_subfile('server.key') if 
os.path.exists(key_file): os.remove(key_file) # just UX remove, not security sensitive # Create file with correct permissions set fh = util.safe_open(key_file, 'w', chmod=0o600) fh.close() ret = self.sysconfig.exec_shell('sudo chown root:root \'%s\'' % key_file, shell=True, write_dots=self.write_dost) if ret != 0: return ret cmd_exec = 'sudo cat \'%s\' >> \'%s\'' % (key, key_file) return self.sysconfig.exec_shell(cmd_exec, write_dots=self.write_dost) # # Installation # def install(self): """ Installs itself :return: installer return code """ cmd_exec = 'sudo yum install -y openvpn' if self.sysconfig.get_packager() == osutil.PKG_APT: cmd_exec = 'sudo apt-get install -y openvpn' return self.sysconfig.exec_shell(cmd_exec, write_dots=self.write_dost) def get_svc_map(self): """ Returns service naming for different start systems :return: """ return { osutil.START_SYSTEMD: 'openvpn.service', osutil.START_INITD: 'openvpn' } def enable(self): """ Enables service after OS start :return: """ return self.sysconfig.enable_svc(self.get_svc_map()) def switch(self, start=None, stop=None, restart=None): """ Starts/stops/restarts the service :param start: :param stop: :param restart: :return: """ return self.sysconfig.switch_svc(self.get_svc_map(), start=start, stop=stop, restart=restart) def setup_os(self): """ Configures OS Enables packet forwarding, sets up the masquerade :return: """ # Enable packet forwarding ret = self.sysconfig.packet_forwarding() if ret != 0: return ret # Set the masquerade ret = self.sysconfig.masquerade(self.get_ip_net(), self.get_ip_net_size()) if ret != 0: return ret # Allow port on the firewall port, tcp = self.get_port() ret = self.sysconfig.allow_port(port=port, tcp=tcp) return ret
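# ---------------------------------------------------------------------------
# Hedged usage sketch (illustration only).  The deployer's class name and
# constructor are not part of this excerpt, so the helper below merely assumes
# an instance `vpn` exposing the methods defined above; it is not the
# project's own provisioning routine.
def _example_provision_flow(vpn, ca, cert, key):
    """Chain the typical calls for standing up an OpenVPN host."""
    if vpn.install() != 0:                 # yum / apt-get install openvpn
        return 1
    vpn.configure_server()                 # port, proto, cipher, pushed routes
    vpn.store_server_cert(ca, cert, key)   # copy CA/cert, root-owned key file
    vpn.configure_client()                 # client template + Windows variant
    if vpn.setup_os() != 0:                # ip_forward, masquerade, firewall
        return 2
    vpn.enable()                           # start the service on OS boot
    return vpn.switch(restart=True)        # (re)start the service now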
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors # MIT License. See license.txt # util __init__.py from __future__ import unicode_literals, print_function from werkzeug.test import Client import os, re, sys, json, hashlib, requests, traceback from markdown2 import markdown as _markdown from .html_utils import sanitize_html import frappe from frappe.utils.identicon import Identicon from email.utils import parseaddr, formataddr # utility functions like cint, int, flt, etc. from frappe.utils.data import * from six.moves.urllib.parse import quote from six import text_type, string_types default_fields = ['doctype', 'name', 'owner', 'creation', 'modified', 'modified_by', 'parent', 'parentfield', 'parenttype', 'idx', 'docstatus'] # used in import_docs.py # TODO: deprecate it def getCSVelement(v): """ Returns the CSV value of `v`, For example: * apple becomes "apple" * hi"there becomes "hi""there" """ v = cstr(v) if not v: return '' if (',' in v) or ('\n' in v) or ('"' in v): if '"' in v: v = v.replace('"', '""') return '"'+v+'"' else: return v or '' def get_fullname(user=None): """get the full name (first name + last name) of the user from User""" if not user: user = frappe.session.user if not hasattr(frappe.local, "fullnames"): frappe.local.fullnames = {} if not frappe.local.fullnames.get(user): p = frappe.db.get_value("User", user, ["first_name", "last_name"], as_dict=True) if p: frappe.local.fullnames[user] = " ".join(filter(None, [p.get('first_name'), p.get('last_name')])) or user else: frappe.local.fullnames[user] = user return frappe.local.fullnames.get(user) def get_formatted_email(user): """get Email Address of user formatted as: `John Doe <[email protected]>`""" if user == "Administrator": return user fullname = get_fullname(user) return formataddr((fullname, user)) def extract_email_id(email): """fetch only the email part of the Email Address""" email_id = parse_addr(email)[1] if email_id and isinstance(email_id, string_types) and not isinstance(email_id, text_type): email_id = email_id.decode("utf-8", "ignore") return email_id def validate_email_add(email_str, throw=False): """Validates the email string""" email = email_str = (email_str or "").strip() def _check(e): _valid = True if not e: _valid = False if 'undisclosed-recipient' in e: return False elif " " in e and "<" not in e: # example: "[email protected] [email protected]" will return "[email protected]" after parseaddr!!! 
_valid = False else: e = extract_email_id(e) match = re.match("[a-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\.[a-z0-9!#$%&'*+/=?^_`{|}~-]+)*@(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?", e.lower()) if e else None if not match: _valid = False else: matched = match.group(0) if match: match = matched==e.lower() if not _valid: if throw: frappe.throw(frappe._("{0} is not a valid Email Address").format(e), frappe.InvalidEmailAddressError) return None else: return matched out = [] for e in email_str.split(','): email = _check(e.strip()) if email: out.append(email) return ', '.join(out) def split_emails(txt): email_list = [] # emails can be separated by comma or newline s = re.sub(r'[\t\n\r]', ' ', cstr(txt)) for email in re.split('''[,\\n](?=(?:[^"]|"[^"]*")*$)''', s): email = strip(cstr(email)) if email: email_list.append(email) return email_list def random_string(length): """generate a random string""" import string from random import choice return ''.join([choice(string.ascii_letters + string.digits) for i in range(length)]) def has_gravatar(email): '''Returns gravatar url if user has set an avatar at gravatar.com''' if (frappe.flags.in_import or frappe.flags.in_install or frappe.flags.in_test): # no gravatar if via upload # since querying gravatar for every item will be slow return '' hexdigest = hashlib.md5(frappe.as_unicode(email).encode('utf-8')).hexdigest() gravatar_url = "https://secure.gravatar.com/avatar/{hash}?d=404&s=200".format(hash=hexdigest) try: res = requests.get(gravatar_url) if res.status_code==200: return gravatar_url else: return '' except requests.exceptions.ConnectionError: return '' def get_gravatar_url(email): return "https://secure.gravatar.com/avatar/{hash}?d=mm&s=200".format(hash=hashlib.md5(email.encode('utf-8')).hexdigest()) def get_gravatar(email): gravatar_url = has_gravatar(email) if not gravatar_url: gravatar_url = Identicon(email).base64() return gravatar_url def get_traceback(): """ Returns the traceback of the Exception """ exc_type, exc_value, exc_tb = sys.exc_info() trace_list = traceback.format_exception(exc_type, exc_value, exc_tb) body = "".join(cstr(t) for t in trace_list) return body def log(event, details): frappe.logger().info(details) def dict_to_str(args, sep='&'): """ Converts a dictionary to URL """ t = [] for k in list(args): t.append(str(k)+'='+quote(str(args[k] or ''))) return sep.join(t) # Get Defaults # ============================================================================== def get_defaults(key=None): """ Get dictionary of default values from the defaults, or a value if key is passed """ return frappe.db.get_defaults(key) def set_default(key, val): """ Set / add a default value to defaults` """ return frappe.db.set_default(key, val) def remove_blanks(d): """ Returns d with empty ('' or None) values stripped """ empty_keys = [] for key in d: if d[key]=='' or d[key]==None: # del d[key] raises runtime exception, using a workaround empty_keys.append(key) for key in empty_keys: del d[key] return d def strip_html_tags(text): """Remove html tags from text""" return re.sub("\<[^>]*\>", "", text) def get_file_timestamp(fn): """ Returns timestamp of the given file """ from frappe.utils import cint try: return str(cint(os.stat(fn).st_mtime)) except OSError as e: if e.args[0]!=2: raise else: return None # to be deprecated def make_esc(esc_chars): """ Function generator for Escaping special characters """ return lambda s: ''.join(['\\' + c if c in esc_chars else c for c in s]) # esc / unescape characters -- used for command line def 
esc(s, esc_chars): """ Escape special characters """ if not s: return "" for c in esc_chars: esc_str = '\\' + c s = s.replace(c, esc_str) return s def unesc(s, esc_chars): """ UnEscape special characters """ for c in esc_chars: esc_str = '\\' + c s = s.replace(esc_str, c) return s def execute_in_shell(cmd, verbose=0): # using Popen instead of os.system - as recommended by python docs from subprocess import Popen import tempfile with tempfile.TemporaryFile() as stdout: with tempfile.TemporaryFile() as stderr: p = Popen(cmd, shell=True, stdout=stdout, stderr=stderr) p.wait() stdout.seek(0) out = stdout.read() stderr.seek(0) err = stderr.read() if verbose: if err: print(err) if out: print(out) return err, out def get_path(*path, **kwargs): base = kwargs.get('base') if not base: base = frappe.local.site_path return os.path.join(base, *path) def get_site_base_path(sites_dir=None, hostname=None): return frappe.local.site_path def get_site_path(*path): return get_path(base=get_site_base_path(), *path) def get_files_path(*path, **kwargs): return get_site_path("private" if kwargs.get("is_private") else "public", "files", *path) def get_bench_path(): return os.path.realpath(os.path.join(os.path.dirname(frappe.__file__), '..', '..', '..')) def get_backups_path(): return get_site_path("private", "backups") def get_request_site_address(full_address=False): return get_url(full_address=full_address) def encode_dict(d, encoding="utf-8"): for key in d: if isinstance(d[key], string_types) and isinstance(d[key], text_type): d[key] = d[key].encode(encoding) return d def decode_dict(d, encoding="utf-8"): for key in d: if isinstance(d[key], string_types) and not isinstance(d[key], text_type): d[key] = d[key].decode(encoding, "ignore") return d def get_site_name(hostname): return hostname.split(':')[0] def get_disk_usage(): """get disk usage of files folder""" files_path = get_files_path() if not os.path.exists(files_path): return 0 err, out = execute_in_shell("du -hsm {files_path}".format(files_path=files_path)) return cint(out.split("\n")[-2].split("\t")[0]) def touch_file(path): with open(path, 'a'): os.utime(path, None) return path def get_test_client(): from frappe.app import application return Client(application) def get_hook_method(hook_name, fallback=None): method = (frappe.get_hooks().get(hook_name)) if method: method = frappe.get_attr(method[0]) return method if fallback: return fallback def call_hook_method(hook, *args, **kwargs): out = None for method_name in frappe.get_hooks(hook): out = out or frappe.get_attr(method_name)(*args, **kwargs) return out def update_progress_bar(txt, i, l): if not getattr(frappe.local, 'request', None): lt = len(txt) if lt < 36: txt = txt + " "*(36-lt) complete = int(float(i+1) / l * 40) sys.stdout.write("\r{0}: [{1}{2}]".format(txt, "="*complete, " "*(40-complete))) sys.stdout.flush() def get_html_format(print_path): html_format = None if os.path.exists(print_path): with open(print_path, "r") as f: html_format = f.read() for include_directive, path in re.findall("""({% include ['"]([^'"]*)['"] %})""", html_format): for app_name in frappe.get_installed_apps(): include_path = frappe.get_app_path(app_name, *path.split(os.path.sep)) if os.path.exists(include_path): with open(include_path, "r") as f: html_format = html_format.replace(include_directive, f.read()) break return html_format def is_markdown(text): if "<!-- markdown -->" in text: return True elif "<!-- html -->" in text: return False else: return not re.search("<p[\s]*>|<br[\s]*>", text) def 
get_sites(sites_path=None): if not sites_path: sites_path = getattr(frappe.local, 'sites_path', None) or '.' sites = [] for site in os.listdir(sites_path): path = os.path.join(sites_path, site) if (os.path.isdir(path) and not os.path.islink(path) and os.path.exists(os.path.join(path, 'site_config.json'))): # is a dir and has site_config.json sites.append(site) return sorted(sites) def get_request_session(max_retries=3): from urllib3.util import Retry session = requests.Session() session.mount("http://", requests.adapters.HTTPAdapter(max_retries=Retry(total=5, status_forcelist=[500]))) session.mount("https://", requests.adapters.HTTPAdapter(max_retries=Retry(total=5, status_forcelist=[500]))) return session def watch(path, handler=None, debug=True): import time from watchdog.observers import Observer from watchdog.events import FileSystemEventHandler class Handler(FileSystemEventHandler): def on_any_event(self, event): if debug: print("File {0}: {1}".format(event.event_type, event.src_path)) if not handler: print("No handler specified") return handler(event.src_path, event.event_type) event_handler = Handler() observer = Observer() observer.schedule(event_handler, path, recursive=True) observer.start() try: while True: time.sleep(1) except KeyboardInterrupt: observer.stop() observer.join() def markdown(text, sanitize=True, linkify=True): html = _markdown(text) if sanitize: html = html.replace("<!-- markdown -->", "") html = sanitize_html(html, linkify=linkify) return html def sanitize_email(emails): sanitized = [] for e in split_emails(emails): if not validate_email_add(e): continue full_name, email_id = parse_addr(e) sanitized.append(formataddr((full_name, email_id))) return ", ".join(sanitized) def parse_addr(email_string): """ Return email_id and user_name based on email string Raise error if email string is not valid """ name, email = parseaddr(email_string) if check_format(email): name = get_name_from_email_string(email_string, email, name) return (name, email) else: email_regex = re.compile(r"([a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+)") email_list = re.findall(email_regex, email_string) if len(email_list) > 0 and check_format(email_list[0]): #take only first email address email = email_list[0] name = get_name_from_email_string(email_string, email, name) return (name, email) return (None, email) def check_format(email_id): """ Check if email_id is valid. valid email:[email protected] String check ensures that email_id contains both '.' and '@' and index of '@' is less than '.' 
""" is_valid = False try: pos = email_id.rindex("@") is_valid = pos > 0 and (email_id.rindex(".") > pos) and (len(email_id) - pos > 4) except Exception: #print(e) pass return is_valid def get_name_from_email_string(email_string, email_id, name): name = email_string.replace(email_id, '') name = re.sub('[^A-Za-z0-9\u00C0-\u024F\/\_\' ]+', '', name).strip() if not name: name = email_id return name def get_installed_apps_info(): out = [] for app in frappe.get_installed_apps(): out.append({ 'app_name': app, 'version': getattr(frappe.get_module(app), '__version__', 'Unknown') }) return out def get_site_info(): from frappe.utils.user import get_system_managers from frappe.core.doctype.user.user import STANDARD_USERS from frappe.email.queue import get_emails_sent_this_month # only get system users users = frappe.get_all('User', filters={'user_type': 'System User', 'name': ('not in', STANDARD_USERS)}, fields=['name', 'enabled', 'last_login', 'last_active', 'language', 'time_zone']) system_managers = get_system_managers(only_name=True) for u in users: # tag system managers u.is_system_manager = 1 if u.name in system_managers else 0 u.full_name = get_fullname(u.name) u.email = u.name del u['name'] system_settings = frappe.db.get_singles_dict('System Settings') space_usage = frappe._dict((frappe.local.conf.limits or {}).get('space_usage', {})) site_info = { 'installed_apps': get_installed_apps_info(), 'users': users, 'country': system_settings.country, 'language': system_settings.language or 'english', 'time_zone': system_settings.time_zone, 'setup_complete': cint(system_settings.setup_complete), 'scheduler_enabled': system_settings.enable_scheduler, # usage 'emails_sent': get_emails_sent_this_month(), 'space_used': flt((space_usage.total or 0) / 1024.0, 2), 'database_size': space_usage.database_size, 'backup_size': space_usage.backup_size, 'files_size': space_usage.files_size } # from other apps for method_name in frappe.get_hooks('get_site_info'): site_info.update(frappe.get_attr(method_name)(site_info) or {}) # dumps -> loads to prevent datatype conflicts return json.loads(frappe.as_json(site_info)) def parse_json(val): """ Parses json if string else return """ if isinstance(val, string_types): return json.loads(val) return val def cast_fieldtype(fieldtype, value): if fieldtype in ("Currency", "Float", "Percent"): value = flt(value) elif fieldtype in ("Int", "Check"): value = cint(value) elif fieldtype in ("Data", "Text", "Small Text", "Long Text", "Text Editor", "Select", "Link", "Dynamic Link"): value = cstr(value) elif fieldtype == "Date": value = getdate(value) elif fieldtype == "Datetime": value = get_datetime(value) elif fieldtype == "Time": value = to_timedelta(value) return value
# Copyright (c) 2012 NetApp, Inc. All rights reserved. # Copyright (c) 2014 Ben Swartzlander. All rights reserved. # Copyright (c) 2014 Navneet Singh. All rights reserved. # Copyright (c) 2014 Clinton Knight. All rights reserved. # Copyright (c) 2014 Alex Meade. All rights reserved. # Copyright (c) 2014 Andrew Kerr. All rights reserved. # Copyright (c) 2014 Jeff Applewhite. All rights reserved. # Copyright (c) 2015 Tom Barron. All rights reserved. # Copyright (c) 2015 Goutham Pacha Ravi. All rights reserved. # Copyright (c) 2016 Mike Rooney. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Volume driver library for NetApp C-mode block storage systems. """ from oslo_log import log as logging from oslo_utils import units import six from cinder import exception from cinder.i18n import _ from cinder.objects import fields from cinder import utils from cinder.volume.drivers.netapp.dataontap import block_base from cinder.volume.drivers.netapp.dataontap.performance import perf_cmode from cinder.volume.drivers.netapp.dataontap.utils import capabilities from cinder.volume.drivers.netapp.dataontap.utils import data_motion from cinder.volume.drivers.netapp.dataontap.utils import loopingcalls from cinder.volume.drivers.netapp.dataontap.utils import utils as cmode_utils from cinder.volume.drivers.netapp import options as na_opts from cinder.volume.drivers.netapp import utils as na_utils LOG = logging.getLogger(__name__) @six.add_metaclass(utils.TraceWrapperMetaclass) class NetAppBlockStorageCmodeLibrary(block_base.NetAppBlockStorageLibrary, data_motion.DataMotionMixin): """NetApp block storage library for Data ONTAP (Cluster-mode).""" REQUIRED_CMODE_FLAGS = ['netapp_vserver'] def __init__(self, driver_name, driver_protocol, **kwargs): super(NetAppBlockStorageCmodeLibrary, self).__init__(driver_name, driver_protocol, **kwargs) self.configuration.append_config_values(na_opts.netapp_cluster_opts) self.driver_mode = 'cluster' self.failed_over_backend_name = kwargs.get('active_backend_id') self.failed_over = self.failed_over_backend_name is not None self.replication_enabled = ( True if self.get_replication_backend_names( self.configuration) else False) def do_setup(self, context): super(NetAppBlockStorageCmodeLibrary, self).do_setup(context) na_utils.check_flags(self.REQUIRED_CMODE_FLAGS, self.configuration) # cDOT API client self.zapi_client = cmode_utils.get_client_for_backend( self.failed_over_backend_name or self.backend_name) self.vserver = self.zapi_client.vserver # Performance monitoring library self.perf_library = perf_cmode.PerformanceCmodeLibrary( self.zapi_client) # Storage service catalog self.ssc_library = capabilities.CapabilitiesLibrary( self.driver_protocol, self.vserver, self.zapi_client, self.configuration) def _update_zapi_client(self, backend_name): """Set cDOT API client for the specified config backend stanza name.""" self.zapi_client = cmode_utils.get_client_for_backend(backend_name) self.vserver = self.zapi_client.vserver self.ssc_library._update_for_failover(self.zapi_client, 
self._get_flexvol_to_pool_map()) ssc = self.ssc_library.get_ssc() self.perf_library._update_for_failover(self.zapi_client, ssc) # Clear LUN table cache self.lun_table = {} def check_for_setup_error(self): """Check that the driver is working and can communicate.""" self.ssc_library.check_api_permissions() if not self._get_flexvol_to_pool_map(): msg = _('No pools are available for provisioning volumes. ' 'Ensure that the configuration option ' 'netapp_pool_name_search_pattern is set correctly.') raise exception.NetAppDriverException(msg) self._add_looping_tasks() super(NetAppBlockStorageCmodeLibrary, self).check_for_setup_error() def _add_looping_tasks(self): """Add tasks that need to be executed at a fixed interval.""" # Note(cknight): Run the update once in the current thread to prevent a # race with the first invocation of _update_volume_stats. self._update_ssc() # Add the task that updates the slow-changing storage service catalog self.loopingcalls.add_task(self._update_ssc, loopingcalls.ONE_HOUR, loopingcalls.ONE_HOUR) # Add the task that harvests soft-deleted QoS policy groups. self.loopingcalls.add_task( self.zapi_client.remove_unused_qos_policy_groups, loopingcalls.ONE_MINUTE, loopingcalls.ONE_MINUTE) self.loopingcalls.add_task( self._handle_housekeeping_tasks, loopingcalls.TEN_MINUTES, 0) super(NetAppBlockStorageCmodeLibrary, self)._add_looping_tasks() def _handle_housekeeping_tasks(self): """Handle various cleanup activities.""" # Harvest soft-deleted QoS policy groups self.zapi_client.remove_unused_qos_policy_groups() active_backend = self.failed_over_backend_name or self.backend_name LOG.debug("Current service state: Replication enabled: %(" "replication)s. Failed-Over: %(failed)s. Active Backend " "ID: %(active)s", { 'replication': self.replication_enabled, 'failed': self.failed_over, 'active': active_backend, }) # Create pool mirrors if whole-backend replication configured if self.replication_enabled and not self.failed_over: self.ensure_snapmirrors( self.configuration, self.backend_name, self.ssc_library.get_ssc_flexvol_names()) def _create_lun(self, volume_name, lun_name, size, metadata, qos_policy_group_name=None): """Creates a LUN, handling Data ONTAP differences as needed.""" self.zapi_client.create_lun( volume_name, lun_name, size, metadata, qos_policy_group_name) def _create_lun_handle(self, metadata, vserver=None): """Returns LUN handle based on filer type.""" vserver = vserver or self.vserver return '%s:%s' % (self.vserver, metadata['Path']) def _find_mapped_lun_igroup(self, path, initiator_list): """Find an igroup for a LUN mapped to the given initiator(s).""" initiator_igroups = self.zapi_client.get_igroup_by_initiators( initiator_list) lun_maps = self.zapi_client.get_lun_map(path) if initiator_igroups and lun_maps: for igroup in initiator_igroups: igroup_name = igroup['initiator-group-name'] if igroup_name.startswith(na_utils.OPENSTACK_PREFIX): for lun_map in lun_maps: if lun_map['initiator-group'] == igroup_name: return igroup_name, lun_map['lun-id'] return None, None def _clone_lun(self, name, new_name, space_reserved=None, qos_policy_group_name=None, src_block=0, dest_block=0, block_count=0, source_snapshot=None, is_snapshot=False): """Clone LUN with the given handle to the new name.""" if not space_reserved: space_reserved = self.lun_space_reservation metadata = self._get_lun_attr(name, 'metadata') volume = metadata['Volume'] self.zapi_client.clone_lun(volume, name, new_name, space_reserved, qos_policy_group_name=qos_policy_group_name, src_block=src_block, 
dest_block=dest_block, block_count=block_count, source_snapshot=source_snapshot, is_snapshot=is_snapshot) LOG.debug("Cloned LUN with new name %s", new_name) lun = self.zapi_client.get_lun_by_args(vserver=self.vserver, path='/vol/%s/%s' % (volume, new_name)) if len(lun) == 0: msg = _("No cloned LUN named %s found on the filer") raise exception.VolumeBackendAPIException(data=msg % new_name) clone_meta = self._create_lun_meta(lun[0]) self._add_lun_to_table( block_base.NetAppLun('%s:%s' % (clone_meta['Vserver'], clone_meta['Path']), new_name, lun[0].get_child_content('size'), clone_meta)) def _create_lun_meta(self, lun): """Creates LUN metadata dictionary.""" self.zapi_client.check_is_naelement(lun) meta_dict = {} meta_dict['Vserver'] = lun.get_child_content('vserver') meta_dict['Volume'] = lun.get_child_content('volume') meta_dict['Qtree'] = lun.get_child_content('qtree') meta_dict['Path'] = lun.get_child_content('path') meta_dict['OsType'] = lun.get_child_content('multiprotocol-type') meta_dict['SpaceReserved'] = \ lun.get_child_content('is-space-reservation-enabled') meta_dict['UUID'] = lun.get_child_content('uuid') return meta_dict def _get_fc_target_wwpns(self, include_partner=True): return self.zapi_client.get_fc_target_wwpns() def _update_volume_stats(self, filter_function=None, goodness_function=None): """Retrieve backend stats.""" LOG.debug('Updating volume stats') data = {} backend_name = self.configuration.safe_get('volume_backend_name') data['volume_backend_name'] = backend_name or self.driver_name data['vendor_name'] = 'NetApp' data['driver_version'] = self.VERSION data['storage_protocol'] = self.driver_protocol data['pools'] = self._get_pool_stats( filter_function=filter_function, goodness_function=goodness_function) data['sparse_copy_volume'] = True # Used for service state report data['replication_enabled'] = self.replication_enabled self.zapi_client.provide_ems(self, self.driver_name, self.app_version) self._stats = data def _get_pool_stats(self, filter_function=None, goodness_function=None): """Retrieve pool (Data ONTAP flexvol) stats. Pool statistics are assembled from static driver capabilities, the Storage Service Catalog of flexvol attributes, and real-time capacity and controller utilization metrics. The pool name is the flexvol name. 
""" pools = [] ssc = self.ssc_library.get_ssc() if not ssc: return pools # Get up-to-date node utilization metrics just once self.perf_library.update_performance_cache(ssc) # Get up-to-date aggregate capacities just once aggregates = self.ssc_library.get_ssc_aggregates() aggr_capacities = self.zapi_client.get_aggregate_capacities(aggregates) for ssc_vol_name, ssc_vol_info in ssc.items(): pool = dict() # Add storage service catalog data pool.update(ssc_vol_info) # Add driver capabilities and config info pool['QoS_support'] = True pool['multiattach'] = True pool['consistencygroup_support'] = True pool['reserved_percentage'] = self.reserved_percentage pool['max_over_subscription_ratio'] = ( self.max_over_subscription_ratio) # Add up-to-date capacity info capacity = self.zapi_client.get_flexvol_capacity( flexvol_name=ssc_vol_name) size_total_gb = capacity['size-total'] / units.Gi pool['total_capacity_gb'] = na_utils.round_down(size_total_gb) size_available_gb = capacity['size-available'] / units.Gi pool['free_capacity_gb'] = na_utils.round_down(size_available_gb) pool['provisioned_capacity_gb'] = round( pool['total_capacity_gb'] - pool['free_capacity_gb'], 2) aggregate_name = ssc_vol_info.get('netapp_aggregate') aggr_capacity = aggr_capacities.get(aggregate_name, {}) pool['netapp_aggregate_used_percent'] = aggr_capacity.get( 'percent-used', 0) # Add utilization data utilization = self.perf_library.get_node_utilization_for_pool( ssc_vol_name) pool['utilization'] = na_utils.round_down(utilization) pool['filter_function'] = filter_function pool['goodness_function'] = goodness_function # Add replication capabilities/stats pool.update( self.get_replication_backend_stats(self.configuration)) pools.append(pool) return pools def _update_ssc(self): """Refresh the storage service catalog with the latest set of pools.""" self.ssc_library.update_ssc(self._get_flexvol_to_pool_map()) def _get_flexvol_to_pool_map(self): """Get the flexvols that match the pool name search pattern. The map is of the format suitable for seeding the storage service catalog: {<flexvol_name> : {'pool_name': <flexvol_name>}} """ pool_regex = na_utils.get_pool_name_filter_regex(self.configuration) pools = {} flexvol_names = self.zapi_client.list_flexvols() for flexvol_name in flexvol_names: msg_args = { 'flexvol': flexvol_name, 'vol_pattern': pool_regex.pattern, } if pool_regex.match(flexvol_name): msg = "Volume '%(flexvol)s' matches %(vol_pattern)s" LOG.debug(msg, msg_args) pools[flexvol_name] = {'pool_name': flexvol_name} else: msg = "Volume '%(flexvol)s' does not match %(vol_pattern)s" LOG.debug(msg, msg_args) return pools def delete_volume(self, volume): """Driver entry point for destroying existing volumes.""" super(NetAppBlockStorageCmodeLibrary, self).delete_volume(volume) try: qos_policy_group_info = na_utils.get_valid_qos_policy_group_info( volume) except exception.Invalid: # Delete even if there was invalid qos policy specified for the # volume. qos_policy_group_info = None self._mark_qos_policy_group_for_deletion(qos_policy_group_info) msg = 'Deleted LUN with name %(name)s and QoS info %(qos)s' LOG.debug(msg, {'name': volume['name'], 'qos': qos_policy_group_info}) def _get_preferred_target_from_list(self, target_details_list, filter=None): # cDOT iSCSI LIFs do not migrate from controller to controller # in failover. Rather, an iSCSI LIF must be configured on each # controller and the initiator has to take responsibility for # using a LIF that is UP. 
In failover, the iSCSI LIF on the # downed controller goes DOWN until the controller comes back up. # # Currently Nova only accepts a single target when obtaining # target details from Cinder, so we pass back the first portal # with an UP iSCSI LIF. There are plans to have Nova accept # and try multiple targets. When that happens, we can and should # remove this filter and return all targets since their operational # state could change between the time we test here and the time # Nova uses the target. operational_addresses = ( self.zapi_client.get_operational_lif_addresses()) return (super(NetAppBlockStorageCmodeLibrary, self) ._get_preferred_target_from_list(target_details_list, filter=operational_addresses)) def _setup_qos_for_volume(self, volume, extra_specs): try: qos_policy_group_info = na_utils.get_valid_qos_policy_group_info( volume, extra_specs) except exception.Invalid: msg = _('Invalid QoS specification detected while getting QoS ' 'policy for volume %s') % volume['id'] raise exception.VolumeBackendAPIException(data=msg) self.zapi_client.provision_qos_policy_group(qos_policy_group_info) return qos_policy_group_info def _get_volume_model_update(self, volume): """Provide any updates necessary for a volume being created/managed.""" if self.replication_enabled: return {'replication_status': fields.ReplicationStatus.ENABLED} def _mark_qos_policy_group_for_deletion(self, qos_policy_group_info): self.zapi_client.mark_qos_policy_group_for_deletion( qos_policy_group_info) def unmanage(self, volume): """Removes the specified volume from Cinder management. Does not delete the underlying backend storage object. """ try: qos_policy_group_info = na_utils.get_valid_qos_policy_group_info( volume) except exception.Invalid: # Unmanage even if there was invalid qos policy specified for the # volume. qos_policy_group_info = None self._mark_qos_policy_group_for_deletion(qos_policy_group_info) super(NetAppBlockStorageCmodeLibrary, self).unmanage(volume) def failover_host(self, context, volumes, secondary_id=None): """Failover a backend to a secondary replication target.""" return self._failover_host(volumes, secondary_id=secondary_id) def _get_backing_flexvol_names(self): """Returns a list of backing flexvol names.""" return self.ssc_library.get_ssc().keys()
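# ---------------------------------------------------------------------------
# Hedged standalone sketch (illustration only) of the pool-map shape produced
# by _get_flexvol_to_pool_map() above: flexvols whose names match the
# configured pool-name pattern become pools keyed by their own name.  The
# pattern and flexvol names here are invented; the real code obtains both
# from the driver configuration and the ZAPI client.
def _example_flexvol_to_pool_map(flexvol_names, pool_name_pattern=r'^vol_'):
    import re
    pool_regex = re.compile(pool_name_pattern)
    pools = {}
    for flexvol_name in flexvol_names:
        if pool_regex.match(flexvol_name):
            # format expected when seeding the storage service catalog:
            # {<flexvol_name>: {'pool_name': <flexvol_name>}}
            pools[flexvol_name] = {'pool_name': flexvol_name}
    return pools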
#app = contacts_and_people from django.db import models from django.db.utils import DatabaseError from django.contrib.contenttypes import generic from django.contrib.contenttypes.models import ContentType from django.contrib.auth.models import User from django.template.defaultfilters import slugify from django.utils.functional import cached_property from django.conf import settings from django.core.urlresolvers import reverse from cms.models import Page, CMSPlugin from cms.models.fields import PlaceholderField from mptt.models import MPTTModel, TreeForeignKey from mptt.managers import TreeManager from filer.fields.image import FilerImageField from arkestra_utilities.mixins import URLModelMixin from arkestra_utilities.text import concatenate from arkestra_utilities.settings import ( MULTIPLE_ENTITY_MODE, ARKESTRA_BASE_ENTITY, DEFAULT_NEWS_PAGE_TITLE, DEFAULT_CONTACTS_PAGE_TITLE, DEFAULT_VACANCIES_PAGE_TITLE, DEFAULT_PUBLICATIONS_PAGE_TITLE ) import news_and_events class Site(models.Model): """Maintains a list of an institution's geographical sites""" site_name = models.CharField(max_length=50, unique=True) post_town = models.CharField(max_length=50) country = models.CharField(max_length=50) description = models.TextField(max_length=500, null=True, blank=True) class Meta: ordering = ('country', 'site_name', 'post_town') def __unicode__(self): return self.site_name def buildings(self): return self.place.all().count() @property def maps(self): return [ building for building in self.place.all() if building.has_map() ] class BuildingManager(models.Manager): def get_by_natural_key(self, slug): return self.get(slug=slug) class Building(models.Model): # the Building model should really be named Place objects = BuildingManager() name = models.CharField(max_length=100, null=True, blank=True) number = models.CharField(max_length=10, blank=True) street = models.CharField( "Street name", max_length=100, blank=True ) additional_street_address = models.CharField( help_text=u"If required", max_length=100, null=True, blank=True) postcode = models.CharField(max_length=9, null=True, blank=True) site = models.ForeignKey( Site, on_delete=models.PROTECT, related_name="place" ) slug = models.SlugField( blank=True, help_text=u"Leave blank to regenerate; amend only if required", max_length=255, null=True, unique=True ) image = FilerImageField( on_delete=models.SET_NULL, null=True, blank=True ) # for the place page summary = models.TextField( verbose_name="Summary", max_length=256, default="", help_text="A very short description (maximum two lines)", ) description = PlaceholderField( 'body', related_name="building_description", help_text="A fuller description" ) getting_here = PlaceholderField( 'simple', related_name="getting_here", help_text="How to get here" ) access_and_parking = PlaceholderField( 'simple', related_name="building_access_and_parking", help_text="Where to park, how to get in, etc" ) map = models.BooleanField( "Show map", default=False, help_text="Use Google Maps to <a target='_blank' \ style='text-decoration: underline;' \ href='http://universimmedia.pagesperso-orange.fr/geo/loc.htm'>look up\ Latitude & Longitude</a>") latitude = models.FloatField(null=True, blank=True) longitude = models.FloatField(null=True, blank=True) zoom = models.IntegerField(blank=True, null=True, default=17) class Meta: ordering = ('site', 'street', 'number', 'name',) def __unicode__(self): """ A text-friendly way of referring to a building """ if self.name: return self.name elif self.street: return 
concatenate([self.number, self.street], " ") else: return self.postcode @property def admin_identifier(self): return u"%s (%s)" % (self.__unicode__(), unicode(self.site)) def get_absolute_url(self): return reverse("contact-place", kwargs={"slug": self.slug}) def save(self): # if the slug is blank, regenerate it if not self.slug: self.slug = slugify(self.__unicode__()) super(Building, self).save() @property def get_postal_address(self): """ Assembles the postal (external) parts of an address """ # print "getting postal address" address = [] if self.name: address.append(self.name) if self.number or self.street: address.append( concatenate( strings=[self.number, self.street], with_string=" " ) ) if self.additional_street_address: address.append(self.additional_street_address) # there will always be a site.post_town; no need to check fragments = concatenate( strings=[self.site.post_town, self.postcode], with_string=" " ) address.append(fragments) return address def has_map(self): return ( self.latitude is not None and self.longitude is not None and self.zoom and self.map ) has_map.boolean = True @cached_property def events(self): # invoke the lister to find out more lister = news_and_events.lister.EventsPlaceLister( place=self, entity=None, display="events", order_by="date", item_format="details image", # request=instance.request ) return lister @property def get_website(self): return None class PhoneContact(models.Model): LABEL_CHOICES = ( ('', '-----'), ('Office', 'Office'), ('Laboratory', 'Laboratory'), ('Mobile', 'Mobile'), ('Fax', 'Fax'), ('Out of hours', 'Out of hours'), ('Pager', 'Pager'), ) label = models.CharField(max_length=64, null=True, blank=True) country_code = models.CharField(max_length=5, default="44") area_code = models.CharField( max_length=5, default="029", help_text="Not 02920" ) number = models.CharField(max_length=12) internal_extension = models.CharField(max_length=6, null=True, blank=True) content_type = models.ForeignKey(ContentType) object_id = models.IntegerField(db_index=True) content_object = generic.GenericForeignKey() class Meta: ordering = ('label',) def __unicode__(self): return u"%s: %s" % (self.label, self.number) class CommonFields(URLModelMixin): precise_location = models.CharField( help_text=u"Precise location <em>within</em> the building, \ for visitors", max_length=255, null=True, blank=True ) access_note = models.CharField( help_text=u"Notes on access/visiting hours/etc", max_length=255, null=True, blank=True ) email = models.EmailField( verbose_name="Email address", null=True, blank=True ) phone_contacts = generic.GenericRelation(PhoneContact) image = FilerImageField(on_delete=models.SET_NULL, null=True, blank=True) class Meta: abstract = True class EntityLite(models.Model): name = models.CharField( max_length=100, help_text="e.g. Department of Haematology" ) def __unicode__(self): return unicode(self.name) class EntityManager(TreeManager): def get_by_natural_key(self, slug): return self.get(slug=slug) def base_entity(self): try: # are Entities available at all? 
list(Entity.objects.all()) # print "** Entity objects are available from the database" except: # no - the database isn't ready # print "** Entity objects are not available from the database" pass else: # we managed to get Entity.objects.all() # we don't use default_entity (or default_entity_id) in # MULTIPLE_ENTITY_MODE try: # print "trying to match", ARKESTRA_BASE_ENTITY entity = self.model.objects.get(id=ARKESTRA_BASE_ENTITY) # it can't be found, maybe because of a misconfiguation or because # we haven't added any Entities yet except (Entity.DoesNotExist, DatabaseError): # print "** Either the Entity does not exist, or I got a # DatabaseError:" # print "**", e pass else: # print "** I successfully found a default entity:", entity return entity # only used in single-entity mode def default_entity_id(self): if self.base_entity and not MULTIPLE_ENTITY_MODE: return ARKESTRA_BASE_ENTITY class Entity(MPTTModel, EntityLite, CommonFields): objects = EntityManager() # URLModelMixin's get_absolute_url() requires a view_name view_name = "contact-entity" short_name = models.CharField( blank=True, help_text="e.g. Haematology", max_length=100, null=True, verbose_name="Short name for menus" ) abstract_entity = models.BooleanField( "abstract", default=False, help_text=u"Select if this <em>group</em> of entities, but not an \ entity itself, or if it's just a grouping of people",) parent = TreeForeignKey( 'self', null=True, blank=True, related_name='children' ) display_parent = models.BooleanField( u"Include parent entity's name in address", default=True, help_text=u"Deselect if this entity recapitulates its parent's name" ) building_recapitulates_entity_name = models.BooleanField( default=False, help_text=u""" Removes the first line of the address - use to avoid, for example:<br /><em>Department of Haematology<br />Haematology Building<br />...</em> """ ) building = models.ForeignKey( Building, null=True, blank=True, on_delete=models.SET_NULL, help_text=u"Select the place where this Entity is based" ) website = models.ForeignKey( Page, verbose_name="Home page", related_name='entity', unique=True, null=True, blank=True, on_delete=models.SET_NULL, help_text=u"Select the Page that is the home page of this Entity \ (leave blank if this is an external Entity)",) auto_news_page = models.BooleanField( u"Publish an automatic news & events page", default=False, ) news_page_menu_title = models.CharField( u"Title", max_length=50, default=DEFAULT_NEWS_PAGE_TITLE ) news_page_intro = PlaceholderField( 'body', related_name="news_page_intro", ) auto_contacts_page = models.BooleanField( u"Publish an automatic contacts & people page", default=False, ) contacts_page_menu_title = models.CharField( u"Title", max_length=50, default=DEFAULT_CONTACTS_PAGE_TITLE, ) contacts_page_intro = PlaceholderField( 'body', related_name="contacts_page_intro", help_text="Text for the Contacts & people page" ) auto_vacancies_page = models.BooleanField( u"Publish an automatic vacancies & studentships page", default=False, ) vacancies_page_menu_title = models.CharField( u"Title", max_length=50, default=DEFAULT_VACANCIES_PAGE_TITLE, ) vacancies_page_intro = PlaceholderField( 'body', related_name="vacancies_page_intro", ) if 'publications' in settings.INSTALLED_APPS: auto_publications_page = models.BooleanField( u"Publish a publications page for this entity automatcally", default=False ) publications_page_menu_title = models.CharField( u"Title", max_length=50, default=DEFAULT_PUBLICATIONS_PAGE_TITLE, ) class Meta: verbose_name_plural = "Entities" 
ordering = ['tree_id', 'lft'] # def natural_key(self): # return (self.slug) def __unicode__(self): return self.name @property def get_real_ancestor(self): """ Find the nearest non-abstract Entity amongst this Entity's ancestors """ for ancestor in self.get_ancestors(ascending=True): if not ancestor.abstract_entity: return ancestor @property def get_building(self): """ Return the Building for this Entity (or its nearest parent) """ if self.abstract_entity: return elif self.building: return self.building else: try: return self.get_real_ancestor.get_building except AttributeError: return None @property def _get_institutional_address(self): """ Lists the parts of an address within the institution (Section of YYY, Department of XXX and YYY, School of ZZZ) """ if self.abstract_entity: return else: ancestors = [] showparent = self.display_parent for entity in self.get_ancestors(ascending=True) \ .exclude(abstract_entity=True): if showparent: ancestors.append(entity.name) showparent = entity.display_parent return ancestors @property def get_full_address(self): """ Returns the full address of the entity """ if self.abstract_entity: return [] else: address = self._get_institutional_address building = self.get_building if building: if self.building_recapitulates_entity_name: address.extend(building.get_postal_address[1:]) else: address.extend(building.get_postal_address) return address @property def get_website(self): """ Return the Django CMS page that this Entity has attached to it (or to its nearest parent) """ if self.website: return self.website else: try: return self.parent.get_website except AttributeError: return None def get_website_url(self): """ Return the Django CMS page's url that this Entity has attached to it (or to its nearest parent) """ if self.website: return self.website.get_absolute_url() elif self.external_url: return self.external_url.url elif self.parent: # try return self.parent.get_website_url() else: # except return None def get_auto_page_url(self, view_name): """ Returns a URL not for the entity, but for its /contact page, /news-and-events, or whatever. If the entity is the base entity, doesn't add the entity slug to the URL """ if not view_name: return "" # external entities don't have info pages elif self.external_url: return "" # info pages for base entity elif self == Entity.objects.base_entity(): return reverse(view_name) # info pages for other entities else: return reverse(view_name, kwargs={"slug": self.slug}) def get_template(self): """ Returns a template for any pages that need to render based on this entity """ if self.get_website: return self.get_website.get_template() else: return settings.CMS_TEMPLATES[0][0] def get_contacts(self): """ Return designated contacts for the entity """ return self.members.filter( person__active=True, key_contact=True ).order_by('importance_to_entity') def get_people_with_roles(self, key_members_only=False): """ Publishes an ordered list of key members grouped by their most significant roles in the entity Ranks roles by importance to entity, then gathers people under that role Optionally, will return *all* members with roles """ memberships = self.members.\ filter(person__active=True).\ exclude(role="").\ order_by( '-importance_to_entity', 'person__surname', 'person__given_name' ) if key_members_only: memberships = memberships.filter(importance_to_entity__gte=3) # create a set with which to check for duplicates duplicates = set() membership_list = [] for membership in memberships: # if this is the first time we've seen this role... 
if membership.role not in duplicates: # put this role on the duplicates list for future reference, # and add everyone with that role to the membership_list duplicates.add(membership.role) membership_list.extend( memberships.filter(role=membership.role) ) # returns a list of memberships, in the right order - we use a regroup # tag to group them by person in the template return membership_list def get_key_people(self): return self.get_people_with_roles(key_members_only=True) def get_roles_for_members(self, members): """ Given a list of its members (as Persons), returns the best role for each. The roles returned are in alphabetical order by Person. """ for m in members: ms = m.member_of # get the best named membership in the entity named_memberships = ms.filter(entity=self) \ .exclude(role="").order_by('-importance_to_person') if named_memberships: m.membership = named_memberships[0] else: # see if there's a display_role membership - actually this one # should go first display_role_memberships = ms.filter(entity=self) \ .exclude(display_role=None). \ order_by('-importance_to_person',) if display_role_memberships: m.membership = display_role_memberships[0].display_role else: # find the best named membership anywhere we can best_named_ms = ms.exclude(role="") \ .order_by('-importance_to_person',) if best_named_ms: m.membership = best_named_ms[0] else: # add the unnamed membership for this entity - it's # all we have unnamed_mss = ms.order_by('-importance_to_person',) m.membership = unnamed_mss[0] return members def get_people(self, letter=None): """ Publishes a list of every member, and of every member of all children """ people = Person.objects.filter( active=True, member_of__entity__in=self.get_descendants(include_self=True)). \ order_by('surname', 'given_name', 'middle_names').distinct() if letter: people = people.filter(surname__istartswith=letter) return people def get_people_and_initials(self, letter=None): """ Returns a list of people and/or their initials for use in people lists More than 20 people, or a letter was provided? Return initials Fewer than 20 people? Return the people """ people = self.get_people(letter) # letter or long list? show initials if letter or people.count() > 20: initials = set(person.surname[0].upper() for person in people) initials = list(initials) initials.sort() # no letter but list is long? initials only if not letter: people = people[:0] # no letter, short list? don't show initials else: initials = None return (people, initials) class Title(models.Model): title = models.CharField(max_length=50, unique=True) abbreviation = models.CharField(max_length=20, unique=True) class Meta: ordering = ['title'] def __unicode__(self): return self.abbreviation class PersonLite(models.Model): title = models.ForeignKey( 'contacts_and_people.Title', blank=True, null=True, on_delete=models.SET_NULL) given_name = models.CharField(max_length=50, blank=True) middle_names = models.CharField(max_length=100, blank=True) surname = models.CharField(max_length=50) def __unicode__(self): # to-do: make it smarter, i.e. don't include empty/None strings return u"%s %s %s" % (self.given_name, self.middle_names, self.surname) def __getInitials(self): if self.given_name != '' and self.middle_names != '': return self.given_name[0] + '.' + self.middle_names[0] + '.' elif self.given_name != '': return self.given_name[0] + '.' 
else: return '' initials = property(__getInitials,) class PersonManager(models.Manager): def get_by_natural_key(self, slug): return self.get(slug=slug) class Person(PersonLite, CommonFields): objects = PersonManager() # URLModelMixin's get_absolute_url() requires a view_name view_name = "contact-person" user = models.ForeignKey( User, related_name='person_user', unique=True, blank=True, null=True, verbose_name='Arkestra User', on_delete=models.PROTECT ) institutional_username = models.CharField( max_length=10, blank=True, null=True ) active = models.BooleanField(default=True,) description = PlaceholderField('body') entities = models.ManyToManyField( Entity, related_name='people', through='Membership', blank=True, null=True ) building = models.ForeignKey( Building, verbose_name='Specify building', help_text=u""" <strong>Only</strong> required if this Person's <strong>Home entity</strong> has a different address """, blank=True, null=True, on_delete=models.SET_NULL ) override_entity = models.ForeignKey( Entity, verbose_name='Specify entity', help_text=u""" <strong>Temporarily specify</strong> an entity for contact information - over-rides entity and postal address """, related_name='people_override', blank=True, null=True, on_delete=models.SET_NULL ) please_contact = models.ForeignKey( 'self', help_text=u""" Publish another person's details as contact information for this person """, related_name='contact_for', blank=True, null=True, on_delete=models.SET_NULL) staff_id = models.CharField(null=True, blank=True, max_length=20) data_feed_locked = models.BooleanField(default=False) # def natural_key(self): # return (self.slug) class Meta: ordering = ['surname', 'given_name', 'user'] verbose_name_plural = "People" def __unicode__(self): title = self.title or "" return u" ".join( name_part for name_part in ( unicode(title), self.given_name, self.surname ) if name_part ) @property def get_role(self): """ Returns a Membership object. Works the Membership object representing a Person's best role, which has to be in a real, not abstract, entity, and it must be at least Significant (gte = 2) to the person If it can't find any role, it returns None. """ memberships = self.member_of.filter( entity__abstract_entity=False, importance_to_person__gte=2).order_by('-importance_to_person') if memberships: return memberships[0] else: # the poor person had no memberships return None @property def get_entity(self): """ Works out a person's best entity, based on get_role A person needs at least a named role to have an entity. 
""" if self.override_entity and not self.override_entity.abstract_entity: return self.override_entity elif self.get_role: return self.get_role.entity return None def get_entity_short_name(self): if self.get_entity: return self.get_entity.short_name else: return u"" get_entity_short_name.short_description = "Entity" @property def get_building(self): """ Returns a Person's Building, if possible """ if self.building: return self.building elif self.get_entity: return self.get_entity.get_building @property def get_full_address(self): """ Works out a person's address, based on their home/best entity or information that overrides this """ if self.get_entity: # needs an entity to work if self.building: address = self.get_entity._get_institutional_address address.extend(self.building.get_postal_address) return address else: return self.get_entity.get_full_address else: return [] def get_please_contact(self): """ Works out whether to display someone else's contact details """ if self.please_contact: return self.please_contact.get_please_contact() else: return self def get_phone_contacts(self): return self.get_please_contact().phone_contacts.all() def get_email(self): return self.get_please_contact().email @property def real_entity_memberships(self): # returns Memberships of non-abstract entities the person belongs to return self.member_of.filter(entity__abstract_entity=False) def gather_entities(self): """ Returns all the entities that a person belongs to, including implicit membership """ entitylist = set() for entity in self.entities.all(): entitylist.add(entity) entitylist.update(entity.get_ancestors()) #set(entity for entity in entitylist if not entity.abstract_entity) return entitylist def check_please_contact_has_loop(self, compare_to, person_list=None): if person_list is None: person_list = [compare_to] if not self == compare_to: person_list.append(self) if self.please_contact: if compare_to == self.please_contact: person_list.append(compare_to) return True, person_list else: return self.please_contact.check_please_contact_has_loop( compare_to, person_list ) else: return False, person_list def save(self, *args, **kwargs): do_check_please_contact_loop = kwargs.pop( 'do_check_please_contact_loop', True ) if do_check_please_contact_loop and self.check_please_contact_has_loop( compare_to=self) is True: raise Exception # TODO: raise a more appropriate exception return super(Person, self).save(*args, **kwargs) class Membership(models.Model): PERSON_DISPLAY_PRIORITY = ( (1, 'No role'), (2, 'Significant'), (3, 'More significant'), (4, 'Very significant'), (5, 'Home'), ) ENTITY_DISPLAY_PRIORITY = ( (1, 'No role'), (2, 'Has a role'), (3, 'Key member'), (4, 'Keyer member'), (5, 'Keyest member'), ) person = models.ForeignKey(Person, related_name='member_of') entity = models.ForeignKey(Entity, related_name='members') # this is currently too complex to manage - in this version it remains # unused display_role = models.ForeignKey( 'self', related_name="display_roles", null=True, blank=True, on_delete=models.SET_NULL) key_contact = models.BooleanField(default=False) role = models.CharField(max_length=50, null=True, blank=True) # how important the role is to the person importance_to_person = models.IntegerField( blank=True, null=True, choices=PERSON_DISPLAY_PRIORITY, default=1 ) # how important the role is to the entity importance_to_entity = models.IntegerField( blank=True, null=True, choices=ENTITY_DISPLAY_PRIORITY, default=1 ) class Meta: ordering = ('-importance_to_entity', 'person__surname') def 
__unicode__(self): if self.display_role: return "%s-%s" % ( unicode(self.entity.short_name), unicode(self.display_role) ) else: return unicode(self.role) def save(self, *args, **kwargs): """ The rules: order importance_to_entity --------------------- --------------------- has no role: 1 has no role: 1 has a role: 2-4 has a role: 2 home: 5 key member: 3-5 """ # if there's just one membership, make it home; if this one is home, # make home on all the others false memberships = self.person.member_of.all() if self.importance_to_person == 5: for membership in memberships: if membership.importance_to_person == 5: membership.importance_to_person = 4 super(Membership, membership).save() self.importance_to_person = 5 # if no role is set, then it can't be home or a key membership, and # orders must be the lowest if not self.role: self.importance_to_person = 1 # if there is a role set, orders must be > 1 else: # with a role, order must be at least 2 if self.importance_to_person < 2: self.importance_to_person = 2 # and importance_to_entity must be 2 if self.importance_to_entity < 2: self.importance_to_entity = 2 super(Membership, self).save(*args, **kwargs) class EntityAutoPageLinkPluginEditor(CMSPlugin): AUTO_PAGES = { 'contacts-and-people': ( u'Contacts & people', 'contact-entity', 'contacts_page_menu_title', 'auto_contacts_page' ), 'news-and-events': ( u'News & events', 'news-and-events', 'news_page_menu_title', 'auto_news_page' ), 'vacancies-and-studentships': ( u'Vacancies & studentships', 'vacancies-and-studentships', 'vacancies_page_menu_title', 'auto_vacancies_page' ), 'publications': ( u'Publications', 'publications', 'publications_page_menu_title', 'auto_publications_page'), } link_to = models.CharField( max_length=50, choices=[(x, y[0]) for x, y in sorted(AUTO_PAGES.items())] ) entity = models.ForeignKey( Entity, null=True, blank=True, help_text="Leave blank for autoselect", related_name="auto_page_plugin", on_delete=models.SET_NULL) text_override = models.CharField( max_length=256, null=True, blank=True, help_text="Override the default link text" ) class EntityDirectoryPluginEditor(CMSPlugin): DIRECTORY_TYPE = ( ('children', u'Immediate children only'), ('descendants', u'All descendants'), ) entity = models.ForeignKey( Entity, null=True, blank=True, help_text="Leave blank for autoselect", related_name="directory_plugin", on_delete=models.SET_NULL ) levels = models.PositiveSmallIntegerField( help_text=u'Leave blank/set to 0 to display all sub-levels', null=True, blank=True ) display_descriptions_to_level = models.PositiveSmallIntegerField( default=0, help_text=u'Blank for all levels, 0 for none, 1 for first', null=True, blank=True ) link_icons = models.BooleanField( help_text=u"Display link icons (first level only)", default=True ) use_short_names = models.BooleanField(default=True) class EntityMembersPluginEditor(CMSPlugin): entity = models.ForeignKey( Entity, null=True, blank=True, help_text="Leave blank for autoselect", related_name="entity_members_plugin", on_delete=models.SET_NULL ) # try: # mptt.register(Entity) # except mptt.AlreadyRegistered: # pass # default_entity_id is used to autofill the default entity where required, # when MULTIPLE_ENTITY_MODE = False # default_entity is used throughout the system # make default_entity and default_entity_id available # default_entity = Entity.objects.base_entity() # get it from the Entity # custom manager method # if default_entity and not MULTIPLE_ENTITY_MODE: # default_entity_id = ARKESTRA_BASE_ENTITY # else: # default_entity_id = None # 
# crazymaniac's wild monkeypatch #
#
# """
# THE FOLLOWING CODE IS A LOADED GUN AND MAY VERY WELL BACKFIRE.
#
# I STRONGLY ADVISE AGAINST USING THIS CODE AND IF YOU STILL WANT TO USE IT,
# YOU ARE DOING SO AT YOUR OWN RISK.
# """
#
# from cms.admin.forms import PageForm
# from cms.admin.pageadmin import PageAdmin

# set up the attributes of the meta_description in the PageForm
# PageForm.base_fields['meta_description'].required = True
# PageForm.base_fields['meta_description'].label = "Summary"
# PageForm.base_fields['meta_description'].help_text = \
#     "A <em>brief</em> (25-30 words maximum) summary of the page's message or
#     contents in the clearest, simplest language possible."

# get the SEO settings fields
# tmp = list(PageAdmin.fieldsets[4][1]['fields'])

# we can't amend the fieldsets tuple itself, so we'll just leave the SEO
# fieldset blank
# this is in fact a good metaphor for the empty nature of SEO
# tmp.remove('meta_keywords')
# tmp.remove('meta_description')
# tmp.remove('page_title')
# PageAdmin.fieldsets[4][1]['fields'] = tmp

# rescue the meta_description field from its undeserved obscurity
# and put it in the first fieldset on the page
# PageAdmin.fieldsets[0][1]['fields'].insert(1, 'meta_description')

# page_title really belongs in the Advanced settings fieldset
# PageAdmin.fieldsets[3][1]['fields'].insert(1, 'page_title')
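# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the contacts_and_people models above): the
# Person.check_please_contact_has_loop() logic walks the please_contact chain
# and reports a cycle back to the starting person.  The framework-free stub
# below uses hypothetical names and mirrors that walk so the behaviour can be
# exercised without Django.
# ---------------------------------------------------------------------------


class _PersonStub(object):
    def __init__(self, name, please_contact=None):
        self.name = name
        self.please_contact = please_contact

    def check_please_contact_has_loop(self, compare_to, person_list=None):
        # Collect the chain as we follow please_contact, stopping when it
        # points back at the person we started from.
        if person_list is None:
            person_list = [compare_to]
        if self is not compare_to:
            person_list.append(self)
        if self.please_contact:
            if compare_to is self.please_contact:
                person_list.append(compare_to)
                return True, person_list
            return self.please_contact.check_please_contact_has_loop(
                compare_to, person_list)
        return False, person_list


_alice = _PersonStub('alice')
_bob = _PersonStub('bob', please_contact=_alice)
_alice.please_contact = _bob  # alice -> bob -> alice forms a loop
_has_loop, _chain = _alice.check_please_contact_has_loop(compare_to=_alice)
assert _has_loop and [p.name for p in _chain] == ['alice', 'bob', 'alice']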
import sys import unittest2 from mock import Mock, patch import stripe from stripe.test.helper import StripeUnitTestCase VALID_API_METHODS = ('get', 'post', 'delete') class HttpClientTests(StripeUnitTestCase): def setUp(self): super(HttpClientTests, self).setUp() self.original_filters = stripe.http_client.warnings.filters[:] stripe.http_client.warnings.simplefilter('ignore') def tearDown(self): stripe.http_client.warnings.filters = self.original_filters super(HttpClientTests, self).tearDown() def check_default(self, none_libs, expected): for lib in none_libs: setattr(stripe.http_client, lib, None) inst = stripe.http_client.new_default_http_client() self.assertTrue(isinstance(inst, expected)) def test_new_default_http_client_urlfetch(self): self.check_default((), stripe.http_client.UrlFetchClient) def test_new_default_http_client_requests(self): self.check_default(('urlfetch',), stripe.http_client.RequestsClient) def test_new_default_http_client_pycurl(self): self.check_default(('urlfetch', 'requests',), stripe.http_client.PycurlClient) def test_new_default_http_client_urllib2(self): self.check_default(('urlfetch', 'requests', 'pycurl'), stripe.http_client.Urllib2Client) class ClientTestBase(): @property def request_mock(self): return self.request_mocks[self.request_client.name] @property def valid_url(self, path='/foo'): return 'https://api.stripe.com%s' % (path,) def make_request(self, method, url, headers, post_data): client = self.request_client(verify_ssl_certs=True) return client.request(method, url, headers, post_data) def mock_response(self, body, code): raise NotImplementedError( 'You must implement this in your test subclass') def mock_error(self, error): raise NotImplementedError( 'You must implement this in your test subclass') def check_call(self, meth, abs_url, headers, params): raise NotImplementedError( 'You must implement this in your test subclass') def test_request(self): self.mock_response(self.request_mock, '{"foo": "baz"}', 200) for meth in VALID_API_METHODS: abs_url = self.valid_url data = '' if meth != 'post': abs_url = '%s?%s' % (abs_url, data) data = None headers = {'my-header': 'header val'} body, code, _ = self.make_request( meth, abs_url, headers, data) self.assertEqual(200, code) self.assertEqual('{"foo": "baz"}', body) self.check_call(self.request_mock, meth, abs_url, data, headers) def test_exception(self): self.mock_error(self.request_mock) self.assertRaises(stripe.error.APIConnectionError, self.make_request, 'get', self.valid_url, {}, None) class RequestsVerify(object): def __eq__(self, other): return other and other.endswith('stripe/data/ca-certificates.crt') class RequestsClientTests(StripeUnitTestCase, ClientTestBase): request_client = stripe.http_client.RequestsClient def test_timeout(self): headers = {'my-header': 'header val'} data = '' self.make_request('POST', self.valid_url, headers, data, timeout=5) self.check_call(self.request_mock, 'POST', self.valid_url, data, headers, timeout=5) def make_request(self, method, url, headers, post_data, timeout=80): client = self.request_client(verify_ssl_certs=True, timeout=timeout, proxy='http://slap/') return client.request(method, url, headers, post_data) def mock_response(self, mock, body, code): result = Mock() result.content = body result.status_code = code mock.request = Mock(return_value=result) def mock_error(self, mock): mock.exceptions.RequestException = Exception mock.request.side_effect = mock.exceptions.RequestException() def check_call(self, mock, meth, url, post_data, headers, timeout=80): 
mock.request.assert_called_with(meth, url, headers=headers, data=post_data, verify=RequestsVerify(), proxies={"http": "http://slap/", "https": "http://slap/"}, timeout=timeout) class UrlFetchClientTests(StripeUnitTestCase, ClientTestBase): request_client = stripe.http_client.UrlFetchClient def mock_response(self, mock, body, code): result = Mock() result.content = body result.status_code = code mock.fetch = Mock(return_value=result) def mock_error(self, mock): mock.Error = mock.InvalidURLError = Exception mock.fetch.side_effect = mock.InvalidURLError() def check_call(self, mock, meth, url, post_data, headers): mock.fetch.assert_called_with( url=url, method=meth, headers=headers, validate_certificate=True, deadline=55, payload=post_data ) class Urllib2ClientTests(StripeUnitTestCase, ClientTestBase): request_client = stripe.http_client.Urllib2Client def make_request(self, method, url, headers, post_data, proxy=None): self.client = self.request_client(verify_ssl_certs=True, proxy=proxy) self.proxy = proxy return self.client.request(method, url, headers, post_data) def mock_response(self, mock, body, code): response = Mock response.read = Mock(return_value=body) response.code = code response.info = Mock(return_value={}) self.request_object = Mock() mock.Request = Mock(return_value=self.request_object) mock.urlopen = Mock(return_value=response) opener = Mock opener.open = Mock(return_value=response) mock.build_opener = Mock(return_value=opener) mock.build_opener.open = opener.open mock.ProxyHandler = Mock(return_value=opener) mock.urlopen = Mock(return_value=response) def mock_error(self, mock): mock.urlopen.side_effect = ValueError mock.build_opener().open.side_effect = ValueError mock.build_opener.reset_mock() def check_call(self, mock, meth, url, post_data, headers): if sys.version_info >= (3, 0) and isinstance(post_data, basestring): post_data = post_data.encode('utf-8') mock.Request.assert_called_with(url, post_data, headers) if (self.client._proxy): self.assertTrue(type(self.client._proxy) is dict) mock.ProxyHandler.assert_called_with(self.client._proxy) mock.build_opener.open.assert_called_with(self.request_object) self.assertTrue(not mock.urlopen.called) if (not self.client._proxy): mock.urlopen.assert_called_with(self.request_object) self.assertTrue(not mock.build_opener.called) self.assertTrue(not mock.build_opener.open.called) class Urllib2ClientHttpsProxyTests(Urllib2ClientTests): def make_request(self, method, url, headers, post_data, proxy=None): return super(Urllib2ClientHttpsProxyTests, self).make_request( method, url, headers, post_data, {"http": "http://slap/", "https": "http://slap/"}) class Urllib2ClientHttpProxyTests(Urllib2ClientTests): def make_request(self, method, url, headers, post_data, proxy=None): return super(Urllib2ClientHttpProxyTests, self).make_request( method, url, headers, post_data, "http://slap/") class PycurlClientTests(StripeUnitTestCase, ClientTestBase): request_client = stripe.http_client.PycurlClient def make_request(self, method, url, headers, post_data, proxy=None): self.client = self.request_client(verify_ssl_certs=True, proxy=proxy) self.proxy = proxy return self.client.request(method, url, headers, post_data) @property def request_mock(self): if not hasattr(self, 'curl_mock'): lib_mock = self.request_mocks[self.request_client.name] self.curl_mock = Mock() lib_mock.Curl = Mock(return_value=self.curl_mock) return self.curl_mock def setUp(self): super(PycurlClientTests, self).setUp() self.sio_patcher = patch('stripe.util.StringIO.StringIO') sio_mock 
= Mock() self.sio_patcher.start().return_value = sio_mock self.sio_getvalue = sio_mock.getvalue def tearDown(self): super(PycurlClientTests, self).tearDown() self.sio_patcher.stop() def mock_response(self, mock, body, code): self.sio_getvalue.return_value = body mock.getinfo.return_value = code def mock_error(self, mock): class FakeException(BaseException): def __getitem__(self, i): return 'foo' stripe.http_client.pycurl.error = FakeException mock.perform.side_effect = stripe.http_client.pycurl.error def check_call(self, mock, meth, url, post_data, headers): lib_mock = self.request_mocks[self.request_client.name] # A note on methodology here: we don't necessarily need to verify # _every_ call to setopt, but check a few of them to make sure the # right thing is happening. Keep an eye specifically on conditional # statements where things are more likely to go wrong. self.curl_mock.setopt.assert_any_call(lib_mock.NOSIGNAL, 1) self.curl_mock.setopt.assert_any_call(lib_mock.URL, stripe.util.utf8(url)) if meth == 'get': self.curl_mock.setopt.assert_any_call(lib_mock.HTTPGET, 1) elif meth == 'post': self.curl_mock.setopt.assert_any_call(lib_mock.POST, 1) else: self.curl_mock.setopt.assert_any_call(lib_mock.CUSTOMREQUEST, meth.upper()) self.curl_mock.perform.assert_any_call() class PycurlClientHttpProxyTests(PycurlClientTests): def make_request(self, method, url, headers, post_data, proxy=None): return super(PycurlClientHttpProxyTests, self).make_request( method, url, headers, post_data, "http://user:withPwd@slap:8888/") def check_call(self, mock, meth, url, post_data, headers): lib_mock = self.request_mocks[self.request_client.name] self.curl_mock.setopt.assert_any_call(lib_mock.PROXY, "slap") self.curl_mock.setopt.assert_any_call(lib_mock.PROXYPORT, 8888) self.curl_mock.setopt.assert_any_call(lib_mock.PROXYUSERPWD, "user:withPwd") super(PycurlClientHttpProxyTests, self).check_call( mock, meth, url, post_data, headers) class PycurlClientHttpsProxyTests(PycurlClientTests): def make_request(self, method, url, headers, post_data, proxy=None): return super(PycurlClientHttpsProxyTests, self).make_request( method, url, headers, post_data, {"http": "http://slap:8888/", "https": "http://slap2:444/"}) def check_call(self, mock, meth, url, post_data, headers): lib_mock = self.request_mocks[self.request_client.name] self.curl_mock.setopt.assert_any_call(lib_mock.PROXY, "slap2") self.curl_mock.setopt.assert_any_call(lib_mock.PROXYPORT, 444) super(PycurlClientHttpsProxyTests, self).check_call( mock, meth, url, post_data, headers) class APIEncodeTest(StripeUnitTestCase): def test_encode_dict(self): body = { 'foo': { 'dob': { 'month': 1, }, 'name': 'bat' }, } values = [t for t in stripe.api_requestor._api_encode(body)] self.assertTrue(('foo[dob][month]', 1) in values) self.assertTrue(('foo[name]', 'bat') in values) def test_encode_array(self): body = { 'foo': [{ 'dob': { 'month': 1, }, 'name': 'bat' }], } values = [t for t in stripe.api_requestor._api_encode(body)] self.assertTrue(('foo[][dob][month]', 1) in values) self.assertTrue(('foo[][name]', 'bat') in values) if __name__ == '__main__': unittest2.main()
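# ---------------------------------------------------------------------------
# Illustrative sketch (not Stripe's implementation): APIEncodeTest above
# expects nested dicts and lists to flatten into Rails-style form keys such as
# foo[dob][month] and foo[][name].  The hypothetical helper below produces the
# same (key, value) pairs; stripe.api_requestor._api_encode is the real
# function under test.
# ---------------------------------------------------------------------------


def _encode_nested(data, prefix=''):
    """Yield (key, value) pairs, bracketing keys for nested containers."""
    if isinstance(data, dict):
        for key, value in data.items():
            sub_prefix = '%s[%s]' % (prefix, key) if prefix else key
            for pair in _encode_nested(value, sub_prefix):
                yield pair
    elif isinstance(data, (list, tuple)):
        for item in data:
            for pair in _encode_nested(item, prefix + '[]'):
                yield pair
    else:
        yield (prefix, data)


# Matches the expectation in APIEncodeTest.test_encode_dict.
assert ('foo[dob][month]', 1) in list(
    _encode_nested({'foo': {'dob': {'month': 1}, 'name': 'bat'}}))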
# coding: utf-8 """ Onshape REST API The Onshape REST API consumed by all clients. # noqa: E501 The version of the OpenAPI document: 1.113 Contact: [email protected] Generated by: https://openapi-generator.tech """ from __future__ import absolute_import import re # noqa: F401 import sys # noqa: F401 import six # noqa: F401 import nulltype # noqa: F401 from onshape_client.oas.model_utils import ( # noqa: F401 ModelComposed, ModelNormal, ModelSimple, date, datetime, file_type, int, none_type, str, validate_get_composed_info, ) try: from onshape_client.oas.models import bt_inner_array_parameter_location2368 except ImportError: bt_inner_array_parameter_location2368 = sys.modules[ "onshape_client.oas.models.bt_inner_array_parameter_location2368" ] try: from onshape_client.oas.models import bt_inner_array_parameter_location2368_all_of except ImportError: bt_inner_array_parameter_location2368_all_of = sys.modules[ "onshape_client.oas.models.bt_inner_array_parameter_location2368_all_of" ] try: from onshape_client.oas.models import bt_inner_derived_parameter_location591 except ImportError: bt_inner_derived_parameter_location591 = sys.modules[ "onshape_client.oas.models.bt_inner_derived_parameter_location591" ] try: from onshape_client.oas.models import bt_inner_parameter_location1715 except ImportError: bt_inner_parameter_location1715 = sys.modules[ "onshape_client.oas.models.bt_inner_parameter_location1715" ] class BTInnerArrayParameterLocation2368(ModelComposed): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. Attributes: allowed_values (dict): The key is the tuple path to the attribute and the for var_name this is (var_name,). The value is a dict with a capitalized key describing the allowed value and an allowed value. These dicts store the allowed enum values. attribute_map (dict): The key is attribute name and the value is json key in definition. discriminator_value_class_map (dict): A dict to go from the discriminator variable value to the discriminator class name. validations (dict): The key is the tuple path to the attribute and the for var_name this is (var_name,). The value is a dict that stores validations for max_length, min_length, max_items, min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum, inclusive_minimum, and regex. additional_properties_type (tuple): A tuple of classes accepted as additional properties values. """ allowed_values = {} validations = {} additional_properties_type = None @staticmethod def openapi_types(): """ This must be a class method so a model may have properties that are of type self, this ensures that we don't create a cyclic import Returns openapi_types (dict): The key is attribute name and the value is attribute type. 
""" return { "type": (str,), # noqa: E501 "bt_type": (str,), # noqa: E501 "index": (int,), # noqa: E501 "outer_parameter_id": (str,), # noqa: E501 } @staticmethod def discriminator(): return { "type": { "BTInnerArrayParameterLocation": bt_inner_array_parameter_location2368.BTInnerArrayParameterLocation2368, "BTInnerDerivedParameterLocation": bt_inner_derived_parameter_location591.BTInnerDerivedParameterLocation591, }, } attribute_map = { "type": "@type", # noqa: E501 "bt_type": "btType", # noqa: E501 "index": "index", # noqa: E501 "outer_parameter_id": "outerParameterId", # noqa: E501 } required_properties = set( [ "_data_store", "_check_type", "_from_server", "_path_to_item", "_configuration", "_composed_instances", "_var_name_to_model_instances", "_additional_properties_model_instances", ] ) def __init__( self, _check_type=True, _from_server=False, _path_to_item=(), _configuration=None, **kwargs ): # noqa: E501 """bt_inner_array_parameter_location2368.BTInnerArrayParameterLocation2368 - a model defined in OpenAPI Keyword Args: _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be raised if the wrong type is input. Defaults to True _path_to_item (tuple/list): This is a list of keys or values to drill down to the model in received_data when deserializing a response _from_server (bool): True if the data is from the server False if the data is from the client (default) _configuration (Configuration): the instance to use when deserializing a file_type parameter. If passed, type conversion is attempted If omitted no type conversion is done. type (str): [optional] # noqa: E501 bt_type (str): [optional] # noqa: E501 index (int): [optional] # noqa: E501 outer_parameter_id (str): [optional] # noqa: E501 """ self._data_store = {} self._check_type = _check_type self._from_server = _from_server self._path_to_item = _path_to_item self._configuration = _configuration constant_args = { "_check_type": _check_type, "_path_to_item": _path_to_item, "_from_server": _from_server, "_configuration": _configuration, } required_args = {} # remove args whose value is Null because they are unset required_arg_names = list(required_args.keys()) for required_arg_name in required_arg_names: if required_args[required_arg_name] is nulltype.Null: del required_args[required_arg_name] model_args = {} model_args.update(required_args) model_args.update(kwargs) composed_info = validate_get_composed_info(constant_args, model_args, self) self._composed_instances = composed_info[0] self._var_name_to_model_instances = composed_info[1] self._additional_properties_model_instances = composed_info[2] unused_args = composed_info[3] for var_name, var_value in required_args.items(): setattr(self, var_name, var_value) for var_name, var_value in six.iteritems(kwargs): if ( var_name in unused_args and self._configuration is not None and self._configuration.discard_unknown_keys and not self._additional_properties_model_instances ): # discard variable. continue setattr(self, var_name, var_value) @staticmethod def _composed_schemas(): # we need this here to make our import statements work # we must store _composed_schemas in here so the code is only run # when we invoke this method. 
        # If we kept this at the class level we would get an error because the
        # class-level code would be run when this module is imported, and these
        # composed classes don't exist yet because their module has not
        # finished loading
        return {
            "anyOf": [],
            "allOf": [
                bt_inner_array_parameter_location2368_all_of.BTInnerArrayParameterLocation2368AllOf,
                bt_inner_parameter_location1715.BTInnerParameterLocation1715,
            ],
            "oneOf": [],
        }

    @classmethod
    def get_discriminator_class(cls, from_server, data):
        """Returns the child class specified by the discriminator"""
        discriminator = cls.discriminator()
        discr_propertyname_py = list(discriminator.keys())[0]
        discr_propertyname_js = cls.attribute_map[discr_propertyname_py]
        if from_server:
            class_name = data[discr_propertyname_js]
        else:
            class_name = data[discr_propertyname_py]
        class_name_to_discr_class = discriminator[discr_propertyname_py]
        return class_name_to_discr_class.get(class_name)
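# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the generated Onshape client): stripped of
# the generated machinery, get_discriminator_class() above is a lookup from
# the payload's discriminator value ("@type" on the wire, "type" client-side)
# to a concrete model class.  The stub classes and map below are hypothetical.
# ---------------------------------------------------------------------------


class _ArrayLocationStub(object):
    pass


class _DerivedLocationStub(object):
    pass


_DISCRIMINATOR_MAP = {
    'BTInnerArrayParameterLocation': _ArrayLocationStub,
    'BTInnerDerivedParameterLocation': _DerivedLocationStub,
}


def _pick_model(payload, from_server=True):
    # Server payloads carry the JSON key; client-side dicts use the python
    # attribute name, mirroring the from_server branch above.
    key = '@type' if from_server else 'type'
    return _DISCRIMINATOR_MAP.get(payload[key])


assert _pick_model({'@type': 'BTInnerArrayParameterLocation'}) is _ArrayLocationStub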
from core_serializers.utils import empty, get_attribute, is_html_input class ValidationError(Exception): pass class SkipField(Exception): pass class Field(object): _creation_counter = 0 MESSAGES = { 'required': 'This field is required.' } _NOT_READ_ONLY_WRITE_ONLY = 'May not set both `read_only` and `write_only`' _NOT_READ_ONLY_REQUIRED = 'May not set both `read_only` and `required`' _NOT_READ_ONLY_DEFAULT = 'May not set both `read_only` and `default`' _NOT_REQUIRED_DEFAULT = 'May not set both `required` and `default`' _MISSING_ERROR_MESSAGE = ( 'ValidationError raised by `{class_name}`, but error key `{key}` does ' 'not exist in the `MESSAGES` dictionary.' ) def __init__(self, read_only=False, write_only=False, required=None, default=empty, initial=None, source=None, label=None, style=None): self._creation_counter = Field._creation_counter Field._creation_counter += 1 # If `required` is unset, then use `True` unless a default is provided. if required is None: required = default is empty and not read_only # Some combinations of keyword arguments do not make sense. assert not (read_only and write_only), self._NOT_READ_ONLY_WRITE_ONLY assert not (read_only and required), self._NOT_READ_ONLY_REQUIRED assert not (read_only and default is not empty), self._NOT_READ_ONLY_DEFAULT assert not (required and default is not empty), self._NOT_REQUIRED_DEFAULT self.read_only = read_only self.write_only = write_only self.required = required self.default = default self.source = source self.initial = initial self.label = label self.style = {} if style is None else style def bind(self, field_name, parent, root): """ Setup the context for the field instance. """ self.field_name = field_name self.parent = parent self.root = root # `self.label` should deafult to being based on the field name. if self.label is None: self.label = self.field_name.replace('_', ' ').capitalize() # self.source should default to being the same as the field name. if self.source is None: self.source = field_name # self.source_attrs is a list of attributes that need to be looked up # when serializing the instance, or populating the validated data. if self.source == '*': self.source_attrs = [] else: self.source_attrs = self.source.split('.') def get_initial(self): """ Return a value to use when the field is being returned as a primative value, without any object instance. """ return self.initial def get_value(self, dictionary): """ Given the *incoming* primative data, return the value for this field that should be validated and transformed to a native value. """ return dictionary.get(self.field_name, empty) def get_attribute(self, instance): """ Given the *outgoing* object instance, return the value for this field that should be returned as a primative value. """ return get_attribute(instance, self.source_attrs) def get_default(self): """ Return the default value to use when validating data if no input is provided for this field. If a default has not been set for this field then this will simply return `empty`, indicating that no value should be set in the validated data for this field. """ if self.default is empty: raise SkipField() return self.default def validate(self, data=empty): """ Validate a simple representation and return the internal value. The provided data may be `empty` if no representation was included. May return `empty` if the field should not be included in the validated data. 
""" if data is empty: if self.required: self.fail('required') return self.get_default() return self.to_native(data) def to_native(self, data): """ Transform the *incoming* primative data into a native value. """ return data def to_primative(self, value): """ Transform the *outgoing* native value into primative data. """ return value def fail(self, key, **kwargs): """ A helper method that simply raises a validation error. """ try: raise ValidationError(self.MESSAGES[key].format(**kwargs)) except KeyError: class_name = self.__class__.__name__ msg = self._MISSING_ERROR_MESSAGE.format(class_name=class_name, key=key) raise AssertionError(msg) class BooleanField(Field): MESSAGES = { 'required': 'This field is required.', 'invalid_value': '`{input}` is not a valid boolean.' } TRUE_VALUES = {'t', 'T', 'true', 'True', 'TRUE', '1', 1, True} FALSE_VALUES = {'f', 'F', 'false', 'False', 'FALSE', '0', 0, 0.0, False} def get_value(self, dictionary): if is_html_input(dictionary): # HTML forms do not send a `False` value on an empty checkbox, # so we override the default empty value to be False. return dictionary.get(self.field_name, False) return dictionary.get(self.field_name, empty) def to_native(self, data): if data in self.TRUE_VALUES: return True elif data in self.FALSE_VALUES: return False self.fail('invalid_value', input=data) class CharField(Field): MESSAGES = { 'required': 'This field is required.', 'blank': 'This field may not be blank.' } def __init__(self, *args, **kwargs): self.allow_blank = kwargs.pop('allow_blank', False) super(CharField, self).__init__(*args, **kwargs) def to_native(self, data): if data == '' and not self.allow_blank: self.fail('blank') return str(data) class ChoiceField(Field): MESSAGES = { 'required': 'This field is required.', 'invalid_choice': '`{input}` is not a valid choice.' } coerce_to_type = str def __init__(self, *args, **kwargs): choices = kwargs.pop('choices') assert choices, '`choices` argument is required and may not be empty' # Allow either single or paired choices style: # choices = [1, 2, 3] # choices = [(1, 'First'), (2, 'Second'), (3, 'Third')] pairs = [ isinstance(item, (list, tuple)) and len(item) == 2 for item in choices ] if all(pairs): self.choices = {key: val for key, val in choices} else: self.choices = {item: item for item in choices} # Map the string representation of choices to the underlying value. # Allows us to deal with eg. integer choices while supporting either # integer or string input, but still get the correct datatype out. self.choice_strings_to_values = { str(key): key for key in self.choices.keys() } super(ChoiceField, self).__init__(*args, **kwargs) def to_native(self, data): try: return self.choice_strings_to_values[str(data)] except KeyError: self.fail('invalid_choice', input=data) class MultipleChoiceField(ChoiceField): MESSAGES = { 'required': 'This field is required.', 'invalid_choice': '`{input}` is not a valid choice.', 'not_a_list': 'Expected a list of items but got type `{input_type}`' } def to_native(self, data): if not hasattr(data, '__iter__'): self.fail('not_a_list', input_type=type(data).__name__) return set([ super(MultipleChoiceField, self).to_native(item) for item in data ]) class IntegerField(Field): MESSAGES = { 'required': 'This field is required.', 'invalid_integer': 'A valid integer is required.' 
    }

    def to_native(self, data):
        try:
            data = int(str(data))
        except (ValueError, TypeError):
            self.fail('invalid_integer')
        return data


class MethodField(Field):
    def __init__(self, **kwargs):
        kwargs['source'] = '*'
        kwargs['read_only'] = True
        super(MethodField, self).__init__(**kwargs)

    def to_primative(self, value):
        attr = 'get_{field_name}'.format(field_name=self.field_name)
        method = getattr(self.parent, attr)
        return method(value)
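# ---------------------------------------------------------------------------
# Illustrative sketch: ChoiceField.__init__ above accepts either a flat list
# of choices or a list of (value, display) pairs and normalises both into a
# {value: display} mapping.  The hypothetical helper below pulls that
# normalisation out on its own so the two input styles can be compared.
# ---------------------------------------------------------------------------


def _normalise_choices(choices):
    pairs = [
        isinstance(item, (list, tuple)) and len(item) == 2
        for item in choices
    ]
    if all(pairs):
        return dict(choices)
    return {item: item for item in choices}


assert _normalise_choices([1, 2, 3]) == {1: 1, 2: 2, 3: 3}
assert _normalise_choices([(1, 'First'), (2, 'Second')]) == {1: 'First', 2: 'Second'}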
from django import forms from django.contrib import admin from django.contrib.admin import AdminSite from django.contrib.contenttypes.admin import GenericStackedInline from django.core import checks from django.test import SimpleTestCase, override_settings from .models import ( Album, Author, Book, City, Influence, Song, State, TwoAlbumFKAndAnE, ) class SongForm(forms.ModelForm): pass class ValidFields(admin.ModelAdmin): form = SongForm fields = ['title'] class ValidFormFieldsets(admin.ModelAdmin): def get_form(self, request, obj=None, **kwargs): class ExtraFieldForm(SongForm): name = forms.CharField(max_length=50) return ExtraFieldForm fieldsets = ( (None, { 'fields': ('name',), }), ) class MyAdmin(admin.ModelAdmin): def check(self, **kwargs): return ['error!'] @override_settings( SILENCED_SYSTEM_CHECKS=['fields.W342'], # ForeignKey(unique=True) INSTALLED_APPS=['django.contrib.auth', 'django.contrib.contenttypes', 'admin_checks'] ) class SystemChecksTestCase(SimpleTestCase): def test_checks_are_performed(self): admin.site.register(Song, MyAdmin) try: errors = checks.run_checks() expected = ['error!'] self.assertEqual(errors, expected) finally: admin.site.unregister(Song) @override_settings(INSTALLED_APPS=['django.contrib.admin']) def test_contenttypes_dependency(self): errors = admin.checks.check_dependencies() expected = [ checks.Error( "'django.contrib.contenttypes' must be in " "INSTALLED_APPS in order to use the admin application.", id="admin.E401", ) ] self.assertEqual(errors, expected) @override_settings( INSTALLED_APPS=[ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', ], TEMPLATES=[{ 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [], }, }], ) def test_auth_contextprocessor_dependency(self): errors = admin.checks.check_dependencies() expected = [ checks.Error( "'django.contrib.auth.context_processors.auth' must be in " "TEMPLATES in order to use the admin application.", id="admin.E402", ) ] self.assertEqual(errors, expected) def test_custom_adminsite(self): class CustomAdminSite(admin.AdminSite): pass custom_site = CustomAdminSite() custom_site.register(Song, MyAdmin) try: errors = checks.run_checks() expected = ['error!'] self.assertEqual(errors, expected) finally: custom_site.unregister(Song) def test_allows_checks_relying_on_other_modeladmins(self): class MyBookAdmin(admin.ModelAdmin): def check(self, **kwargs): errors = super(MyBookAdmin, self).check(**kwargs) author_admin = self.admin_site._registry.get(Author) if author_admin is None: errors.append('AuthorAdmin missing!') return errors class MyAuthorAdmin(admin.ModelAdmin): pass admin.site.register(Book, MyBookAdmin) admin.site.register(Author, MyAuthorAdmin) try: self.assertEqual(admin.site.check(None), []) finally: admin.site.unregister(Book) admin.site.unregister(Author) def test_field_name_not_in_list_display(self): class SongAdmin(admin.ModelAdmin): list_editable = ["original_release"] errors = SongAdmin(Song, AdminSite()).check() expected = [ checks.Error( "The value of 'list_editable[0]' refers to 'original_release', " "which is not contained in 'list_display'.", obj=SongAdmin, id='admin.E122', ) ] self.assertEqual(errors, expected) def test_readonly_and_editable(self): class SongAdmin(admin.ModelAdmin): readonly_fields = ["original_release"] list_display = ["pk", "original_release"] list_editable = ["original_release"] fieldsets = [ (None, { "fields": ["title", "original_release"], }), ] errors = 
SongAdmin(Song, AdminSite()).check() expected = [ checks.Error( "The value of 'list_editable[0]' refers to 'original_release', " "which is not editable through the admin.", obj=SongAdmin, id='admin.E125', ) ] self.assertEqual(errors, expected) def test_editable(self): class SongAdmin(admin.ModelAdmin): list_display = ["pk", "title"] list_editable = ["title"] fieldsets = [ (None, { "fields": ["title", "original_release"], }), ] errors = SongAdmin(Song, AdminSite()).check() self.assertEqual(errors, []) def test_custom_modelforms_with_fields_fieldsets(self): """ # Regression test for #8027: custom ModelForms with fields/fieldsets """ errors = ValidFields(Song, AdminSite()).check() self.assertEqual(errors, []) def test_custom_get_form_with_fieldsets(self): """ The fieldsets checks are skipped when the ModelAdmin.get_form() method is overridden. """ errors = ValidFormFieldsets(Song, AdminSite()).check() self.assertEqual(errors, []) def test_fieldsets_fields_non_tuple(self): """ The first fieldset's fields must be a list/tuple. """ class NotATupleAdmin(admin.ModelAdmin): list_display = ["pk", "title"] list_editable = ["title"] fieldsets = [ (None, { "fields": "title" # not a tuple }), ] errors = NotATupleAdmin(Song, AdminSite()).check() expected = [ checks.Error( "The value of 'fieldsets[0][1]['fields']' must be a list or tuple.", obj=NotATupleAdmin, id='admin.E008', ) ] self.assertEqual(errors, expected) def test_nonfirst_fieldset(self): """ The second fieldset's fields must be a list/tuple. """ class NotATupleAdmin(admin.ModelAdmin): fieldsets = [ (None, { "fields": ("title",) }), ('foo', { "fields": "author" # not a tuple }), ] errors = NotATupleAdmin(Song, AdminSite()).check() expected = [ checks.Error( "The value of 'fieldsets[1][1]['fields']' must be a list or tuple.", obj=NotATupleAdmin, id='admin.E008', ) ] self.assertEqual(errors, expected) def test_exclude_values(self): """ Tests for basic system checks of 'exclude' option values (#12689) """ class ExcludedFields1(admin.ModelAdmin): exclude = 'foo' errors = ExcludedFields1(Book, AdminSite()).check() expected = [ checks.Error( "The value of 'exclude' must be a list or tuple.", obj=ExcludedFields1, id='admin.E014', ) ] self.assertEqual(errors, expected) def test_exclude_duplicate_values(self): class ExcludedFields2(admin.ModelAdmin): exclude = ('name', 'name') errors = ExcludedFields2(Book, AdminSite()).check() expected = [ checks.Error( "The value of 'exclude' contains duplicate field(s).", obj=ExcludedFields2, id='admin.E015', ) ] self.assertEqual(errors, expected) def test_exclude_in_inline(self): class ExcludedFieldsInline(admin.TabularInline): model = Song exclude = 'foo' class ExcludedFieldsAlbumAdmin(admin.ModelAdmin): model = Album inlines = [ExcludedFieldsInline] errors = ExcludedFieldsAlbumAdmin(Album, AdminSite()).check() expected = [ checks.Error( "The value of 'exclude' must be a list or tuple.", obj=ExcludedFieldsInline, id='admin.E014', ) ] self.assertEqual(errors, expected) def test_exclude_inline_model_admin(self): """ Regression test for #9932 - exclude in InlineModelAdmin should not contain the ForeignKey field used in ModelAdmin.model """ class SongInline(admin.StackedInline): model = Song exclude = ['album'] class AlbumAdmin(admin.ModelAdmin): model = Album inlines = [SongInline] errors = AlbumAdmin(Album, AdminSite()).check() expected = [ checks.Error( "Cannot exclude the field 'album', because it is the foreign key " "to the parent model 'admin_checks.Album'.", obj=SongInline, id='admin.E201', ) ] 
self.assertEqual(errors, expected) def test_valid_generic_inline_model_admin(self): """ Regression test for #22034 - check that generic inlines don't look for normal ForeignKey relations. """ class InfluenceInline(GenericStackedInline): model = Influence class SongAdmin(admin.ModelAdmin): inlines = [InfluenceInline] errors = SongAdmin(Song, AdminSite()).check() self.assertEqual(errors, []) def test_generic_inline_model_admin_non_generic_model(self): """ A model without a GenericForeignKey raises problems if it's included in an GenericInlineModelAdmin definition. """ class BookInline(GenericStackedInline): model = Book class SongAdmin(admin.ModelAdmin): inlines = [BookInline] errors = SongAdmin(Song, AdminSite()).check() expected = [ checks.Error( "'admin_checks.Book' has no GenericForeignKey.", obj=BookInline, id='admin.E301', ) ] self.assertEqual(errors, expected) def test_generic_inline_model_admin_bad_ct_field(self): "A GenericInlineModelAdmin raises problems if the ct_field points to a non-existent field." class InfluenceInline(GenericStackedInline): model = Influence ct_field = 'nonexistent' class SongAdmin(admin.ModelAdmin): inlines = [InfluenceInline] errors = SongAdmin(Song, AdminSite()).check() expected = [ checks.Error( "'ct_field' references 'nonexistent', which is not a field on 'admin_checks.Influence'.", obj=InfluenceInline, id='admin.E302', ) ] self.assertEqual(errors, expected) def test_generic_inline_model_admin_bad_fk_field(self): "A GenericInlineModelAdmin raises problems if the ct_fk_field points to a non-existent field." class InfluenceInline(GenericStackedInline): model = Influence ct_fk_field = 'nonexistent' class SongAdmin(admin.ModelAdmin): inlines = [InfluenceInline] errors = SongAdmin(Song, AdminSite()).check() expected = [ checks.Error( "'ct_fk_field' references 'nonexistent', which is not a field on 'admin_checks.Influence'.", obj=InfluenceInline, id='admin.E303', ) ] self.assertEqual(errors, expected) def test_generic_inline_model_admin_non_gfk_ct_field(self): """ A GenericInlineModelAdmin raises problems if the ct_field points to a field that isn't part of a GenericForeignKey. """ class InfluenceInline(GenericStackedInline): model = Influence ct_field = 'name' class SongAdmin(admin.ModelAdmin): inlines = [InfluenceInline] errors = SongAdmin(Song, AdminSite()).check() expected = [ checks.Error( "'admin_checks.Influence' has no GenericForeignKey using " "content type field 'name' and object ID field 'object_id'.", obj=InfluenceInline, id='admin.E304', ) ] self.assertEqual(errors, expected) def test_generic_inline_model_admin_non_gfk_fk_field(self): """ A GenericInlineModelAdmin raises problems if the ct_fk_field points to a field that isn't part of a GenericForeignKey. 
""" class InfluenceInline(GenericStackedInline): model = Influence ct_fk_field = 'name' class SongAdmin(admin.ModelAdmin): inlines = [InfluenceInline] errors = SongAdmin(Song, AdminSite()).check() expected = [ checks.Error( "'admin_checks.Influence' has no GenericForeignKey using " "content type field 'content_type' and object ID field 'name'.", obj=InfluenceInline, id='admin.E304', ) ] self.assertEqual(errors, expected) def test_app_label_in_admin_checks(self): """ Regression test for #15669 - Include app label in admin system check messages """ class RawIdNonexistingAdmin(admin.ModelAdmin): raw_id_fields = ('nonexisting',) errors = RawIdNonexistingAdmin(Album, AdminSite()).check() expected = [ checks.Error( "The value of 'raw_id_fields[0]' refers to 'nonexisting', " "which is not an attribute of 'admin_checks.Album'.", obj=RawIdNonexistingAdmin, id='admin.E002', ) ] self.assertEqual(errors, expected) def test_fk_exclusion(self): """ Regression test for #11709 - when testing for fk excluding (when exclude is given) make sure fk_name is honored or things blow up when there is more than one fk to the parent model. """ class TwoAlbumFKAndAnEInline(admin.TabularInline): model = TwoAlbumFKAndAnE exclude = ("e",) fk_name = "album1" class MyAdmin(admin.ModelAdmin): inlines = [TwoAlbumFKAndAnEInline] errors = MyAdmin(Album, AdminSite()).check() self.assertEqual(errors, []) def test_inline_self_check(self): class TwoAlbumFKAndAnEInline(admin.TabularInline): model = TwoAlbumFKAndAnE class MyAdmin(admin.ModelAdmin): inlines = [TwoAlbumFKAndAnEInline] errors = MyAdmin(Album, AdminSite()).check() expected = [ checks.Error( "'admin_checks.TwoAlbumFKAndAnE' has more than one ForeignKey to 'admin_checks.Album'.", obj=TwoAlbumFKAndAnEInline, id='admin.E202', ) ] self.assertEqual(errors, expected) def test_inline_with_specified(self): class TwoAlbumFKAndAnEInline(admin.TabularInline): model = TwoAlbumFKAndAnE fk_name = "album1" class MyAdmin(admin.ModelAdmin): inlines = [TwoAlbumFKAndAnEInline] errors = MyAdmin(Album, AdminSite()).check() self.assertEqual(errors, []) def test_readonly(self): class SongAdmin(admin.ModelAdmin): readonly_fields = ("title",) errors = SongAdmin(Song, AdminSite()).check() self.assertEqual(errors, []) def test_readonly_on_method(self): def my_function(obj): pass class SongAdmin(admin.ModelAdmin): readonly_fields = (my_function,) errors = SongAdmin(Song, AdminSite()).check() self.assertEqual(errors, []) def test_readonly_on_modeladmin(self): class SongAdmin(admin.ModelAdmin): readonly_fields = ("readonly_method_on_modeladmin",) def readonly_method_on_modeladmin(self, obj): pass errors = SongAdmin(Song, AdminSite()).check() self.assertEqual(errors, []) def test_readonly_dynamic_attribute_on_modeladmin(self): class SongAdmin(admin.ModelAdmin): readonly_fields = ("dynamic_method",) def __getattr__(self, item): if item == "dynamic_method": def method(obj): pass return method raise AttributeError errors = SongAdmin(Song, AdminSite()).check() self.assertEqual(errors, []) def test_readonly_method_on_model(self): class SongAdmin(admin.ModelAdmin): readonly_fields = ("readonly_method_on_model",) errors = SongAdmin(Song, AdminSite()).check() self.assertEqual(errors, []) def test_nonexistent_field(self): class SongAdmin(admin.ModelAdmin): readonly_fields = ("title", "nonexistent") errors = SongAdmin(Song, AdminSite()).check() expected = [ checks.Error( "The value of 'readonly_fields[1]' is not a callable, an attribute " "of 'SongAdmin', or an attribute of 'admin_checks.Song'.", 
obj=SongAdmin, id='admin.E035', ) ] self.assertEqual(errors, expected) def test_nonexistent_field_on_inline(self): class CityInline(admin.TabularInline): model = City readonly_fields = ['i_dont_exist'] # Missing attribute errors = CityInline(State, AdminSite()).check() expected = [ checks.Error( "The value of 'readonly_fields[0]' is not a callable, an attribute " "of 'CityInline', or an attribute of 'admin_checks.City'.", obj=CityInline, id='admin.E035', ) ] self.assertEqual(errors, expected) def test_extra(self): class SongAdmin(admin.ModelAdmin): def awesome_song(self, instance): if instance.title == "Born to Run": return "Best Ever!" return "Status unknown." errors = SongAdmin(Song, AdminSite()).check() self.assertEqual(errors, []) def test_readonly_lambda(self): class SongAdmin(admin.ModelAdmin): readonly_fields = (lambda obj: "test",) errors = SongAdmin(Song, AdminSite()).check() self.assertEqual(errors, []) def test_graceful_m2m_fail(self): """ Regression test for #12203/#12237 - Fail more gracefully when a M2M field that specifies the 'through' option is included in the 'fields' or the 'fieldsets' ModelAdmin options. """ class BookAdmin(admin.ModelAdmin): fields = ['authors'] errors = BookAdmin(Book, AdminSite()).check() expected = [ checks.Error( "The value of 'fields' cannot include the ManyToManyField 'authors', " "because that field manually specifies a relationship model.", obj=BookAdmin, id='admin.E013', ) ] self.assertEqual(errors, expected) def test_cannot_include_through(self): class FieldsetBookAdmin(admin.ModelAdmin): fieldsets = ( ('Header 1', {'fields': ('name',)}), ('Header 2', {'fields': ('authors',)}), ) errors = FieldsetBookAdmin(Book, AdminSite()).check() expected = [ checks.Error( "The value of 'fieldsets[1][1][\"fields\"]' cannot include the ManyToManyField " "'authors', because that field manually specifies a relationship model.", obj=FieldsetBookAdmin, id='admin.E013', ) ] self.assertEqual(errors, expected) def test_nested_fields(self): class NestedFieldsAdmin(admin.ModelAdmin): fields = ('price', ('name', 'subtitle')) errors = NestedFieldsAdmin(Book, AdminSite()).check() self.assertEqual(errors, []) def test_nested_fieldsets(self): class NestedFieldsetAdmin(admin.ModelAdmin): fieldsets = ( ('Main', {'fields': ('price', ('name', 'subtitle'))}), ) errors = NestedFieldsetAdmin(Book, AdminSite()).check() self.assertEqual(errors, []) def test_explicit_through_override(self): """ Regression test for #12209 -- If the explicitly provided through model is specified as a string, the admin should still be able use Model.m2m_field.through """ class AuthorsInline(admin.TabularInline): model = Book.authors.through class BookAdmin(admin.ModelAdmin): inlines = [AuthorsInline] errors = BookAdmin(Book, AdminSite()).check() self.assertEqual(errors, []) def test_non_model_fields(self): """ Regression for ensuring ModelAdmin.fields can contain non-model fields that broke with r11737 """ class SongForm(forms.ModelForm): extra_data = forms.CharField() class FieldsOnFormOnlyAdmin(admin.ModelAdmin): form = SongForm fields = ['title', 'extra_data'] errors = FieldsOnFormOnlyAdmin(Song, AdminSite()).check() self.assertEqual(errors, []) def test_non_model_first_field(self): """ Regression for ensuring ModelAdmin.field can handle first elem being a non-model field (test fix for UnboundLocalError introduced with r16225). 
""" class SongForm(forms.ModelForm): extra_data = forms.CharField() class Meta: model = Song fields = '__all__' class FieldsOnFormOnlyAdmin(admin.ModelAdmin): form = SongForm fields = ['extra_data', 'title'] errors = FieldsOnFormOnlyAdmin(Song, AdminSite()).check() self.assertEqual(errors, []) def test_check_sublists_for_duplicates(self): class MyModelAdmin(admin.ModelAdmin): fields = ['state', ['state']] errors = MyModelAdmin(Song, AdminSite()).check() expected = [ checks.Error( "The value of 'fields' contains duplicate field(s).", obj=MyModelAdmin, id='admin.E006' ) ] self.assertEqual(errors, expected) def test_check_fieldset_sublists_for_duplicates(self): class MyModelAdmin(admin.ModelAdmin): fieldsets = [ (None, { 'fields': ['title', 'album', ('title', 'album')] }), ] errors = MyModelAdmin(Song, AdminSite()).check() expected = [ checks.Error( "There are duplicate field(s) in 'fieldsets[0][1]'.", obj=MyModelAdmin, id='admin.E012' ) ] self.assertEqual(errors, expected) def test_list_filter_works_on_through_field_even_when_apps_not_ready(self): """ Ensure list_filter can access reverse fields even when the app registry is not ready; refs #24146. """ class BookAdminWithListFilter(admin.ModelAdmin): list_filter = ['authorsbooks__featured'] # Temporarily pretending apps are not ready yet. This issue can happen # if the value of 'list_filter' refers to a 'through__field'. Book._meta.apps.ready = False try: errors = BookAdminWithListFilter(Book, AdminSite()).check() self.assertEqual(errors, []) finally: Book._meta.apps.ready = True
# Copyright 2014 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import unittest from telemetry.core import exceptions from telemetry.core import util from telemetry import decorators from telemetry.internal.actions import page_action from telemetry.page import action_runner as action_runner_module from telemetry.testing import tab_test_case from telemetry.timeline import model from telemetry.timeline import tracing_category_filter from telemetry.timeline import tracing_options from telemetry.web_perf import timeline_interaction_record as tir_module util.AddDirToPythonPath(util.GetTelemetryDir(), 'third_party', 'mock') import mock # pylint:disable=import-error class ActionRunnerInteractionTest(tab_test_case.TabTestCase): def GetInteractionRecords(self, trace_data): timeline_model = model.TimelineModel(trace_data) renderer_thread = timeline_model.GetRendererThreadFromTabId(self._tab.id) return [ tir_module.TimelineInteractionRecord.FromAsyncEvent(e) for e in renderer_thread.async_slices if tir_module.IsTimelineInteractionRecord(e.name) ] def VerifyIssuingInteractionRecords(self, **interaction_kwargs): action_runner = action_runner_module.ActionRunner(self._tab, skip_waits=True) self.Navigate('interaction_enabled_page.html') action_runner.Wait(1) options = tracing_options.TracingOptions() options.enable_chrome_trace = True self._browser.platform.tracing_controller.Start( options, tracing_category_filter.CreateNoOverheadFilter()) with action_runner.CreateInteraction('InteractionName', **interaction_kwargs): pass trace_data = self._browser.platform.tracing_controller.Stop() records = self.GetInteractionRecords(trace_data) self.assertEqual( 1, len(records), 'Failed to issue the interaction record on the tracing timeline.' 
' Trace data:\n%s' % repr(trace_data._raw_data)) self.assertEqual('InteractionName', records[0].label) for attribute_name in interaction_kwargs: self.assertTrue(getattr(records[0], attribute_name)) # Test disabled for android: crbug.com/437057 @decorators.Disabled('android', 'chromeos') def testIssuingMultipleMeasurementInteractionRecords(self): self.VerifyIssuingInteractionRecords(repeatable=True) class ActionRunnerTest(tab_test_case.TabTestCase): def testExecuteJavaScript(self): action_runner = action_runner_module.ActionRunner(self._tab, skip_waits=True) self.Navigate('blank.html') action_runner.ExecuteJavaScript('var testing = 42;') self.assertEqual(42, self._tab.EvaluateJavaScript('testing')) def testWaitForNavigate(self): self.Navigate('page_with_link.html') action_runner = action_runner_module.ActionRunner(self._tab, skip_waits=True) action_runner.ClickElement('#clickme') action_runner.WaitForNavigate() self.assertTrue(self._tab.EvaluateJavaScript( 'document.readyState == "interactive" || ' 'document.readyState == "complete"')) self.assertEqual( self._tab.EvaluateJavaScript('document.location.pathname;'), '/blank.html') def testWait(self): action_runner = action_runner_module.ActionRunner(self._tab) self.Navigate('blank.html') action_runner.ExecuteJavaScript( 'window.setTimeout(function() { window.testing = 101; }, 50);') action_runner.Wait(0.1) self.assertEqual(101, self._tab.EvaluateJavaScript('window.testing')) action_runner.ExecuteJavaScript( 'window.setTimeout(function() { window.testing = 102; }, 100);') action_runner.Wait(0.2) self.assertEqual(102, self._tab.EvaluateJavaScript('window.testing')) def testWaitForJavaScriptCondition(self): action_runner = action_runner_module.ActionRunner(self._tab, skip_waits=True) self.Navigate('blank.html') action_runner.ExecuteJavaScript('window.testing = 219;') action_runner.WaitForJavaScriptCondition( 'window.testing == 219', timeout_in_seconds=0.1) action_runner.ExecuteJavaScript( 'window.setTimeout(function() { window.testing = 220; }, 50);') action_runner.WaitForJavaScriptCondition( 'window.testing == 220', timeout_in_seconds=0.1) self.assertEqual(220, self._tab.EvaluateJavaScript('window.testing')) def testWaitForElement(self): action_runner = action_runner_module.ActionRunner(self._tab, skip_waits=True) self.Navigate('blank.html') action_runner.ExecuteJavaScript( '(function() {' ' var el = document.createElement("div");' ' el.id = "test1";' ' el.textContent = "foo";' ' document.body.appendChild(el);' '})()') action_runner.WaitForElement('#test1', timeout_in_seconds=0.1) action_runner.WaitForElement(text='foo', timeout_in_seconds=0.1) action_runner.WaitForElement( element_function='document.getElementById("test1")') action_runner.ExecuteJavaScript( 'window.setTimeout(function() {' ' var el = document.createElement("div");' ' el.id = "test2";' ' document.body.appendChild(el);' '}, 50)') action_runner.WaitForElement('#test2', timeout_in_seconds=0.1) action_runner.ExecuteJavaScript( 'window.setTimeout(function() {' ' document.getElementById("test2").textContent = "bar";' '}, 50)') action_runner.WaitForElement(text='bar', timeout_in_seconds=0.1) action_runner.ExecuteJavaScript( 'window.setTimeout(function() {' ' var el = document.createElement("div");' ' el.id = "test3";' ' document.body.appendChild(el);' '}, 50)') action_runner.WaitForElement( element_function='document.getElementById("test3")') def testWaitForElementWithWrongText(self): action_runner = action_runner_module.ActionRunner(self._tab, skip_waits=True) 
self.Navigate('blank.html') action_runner.ExecuteJavaScript( '(function() {' ' var el = document.createElement("div");' ' el.id = "test1";' ' el.textContent = "foo";' ' document.body.appendChild(el);' '})()') action_runner.WaitForElement('#test1', timeout_in_seconds=0.2) def WaitForElement(): action_runner.WaitForElement(text='oo', timeout_in_seconds=0.2) self.assertRaises(exceptions.TimeoutException, WaitForElement) def testClickElement(self): self.Navigate('page_with_clickables.html') action_runner = action_runner_module.ActionRunner(self._tab, skip_waits=True) action_runner.ExecuteJavaScript('valueSettableByTest = 1;') action_runner.ClickElement('#test') self.assertEqual(1, action_runner.EvaluateJavaScript('valueToTest')) action_runner.ExecuteJavaScript('valueSettableByTest = 2;') action_runner.ClickElement(text='Click/tap me') self.assertEqual(2, action_runner.EvaluateJavaScript('valueToTest')) action_runner.ExecuteJavaScript('valueSettableByTest = 3;') action_runner.ClickElement( element_function='document.body.firstElementChild;') self.assertEqual(3, action_runner.EvaluateJavaScript('valueToTest')) def WillFail(): action_runner.ClickElement('#notfound') self.assertRaises(exceptions.EvaluateException, WillFail) @decorators.Disabled('android', 'debug', # crbug.com/437068 'chromeos') # crbug.com/483212 def testTapElement(self): self.Navigate('page_with_clickables.html') action_runner = action_runner_module.ActionRunner(self._tab, skip_waits=True) action_runner.ExecuteJavaScript('valueSettableByTest = 1;') action_runner.TapElement('#test') self.assertEqual(1, action_runner.EvaluateJavaScript('valueToTest')) action_runner.ExecuteJavaScript('valueSettableByTest = 2;') action_runner.TapElement(text='Click/tap me') self.assertEqual(2, action_runner.EvaluateJavaScript('valueToTest')) action_runner.ExecuteJavaScript('valueSettableByTest = 3;') action_runner.TapElement( element_function='document.body.firstElementChild') self.assertEqual(3, action_runner.EvaluateJavaScript('valueToTest')) def WillFail(): action_runner.TapElement('#notfound') self.assertRaises(exceptions.EvaluateException, WillFail) @decorators.Disabled('android', # crbug.com/437065. 'chromeos') # crbug.com/483212. def testScroll(self): if not page_action.IsGestureSourceTypeSupported( self._tab, 'touch'): return self.Navigate('page_with_swipeables.html') action_runner = action_runner_module.ActionRunner(self._tab, skip_waits=True) action_runner.ScrollElement( selector='#left-right', direction='right', left_start_ratio=0.9) self.assertTrue(action_runner.EvaluateJavaScript( 'document.querySelector("#left-right").scrollLeft') > 75) action_runner.ScrollElement( selector='#top-bottom', direction='down', top_start_ratio=0.9) self.assertTrue(action_runner.EvaluateJavaScript( 'document.querySelector("#top-bottom").scrollTop') > 75) action_runner.ScrollPage(direction='right', left_start_ratio=0.9, distance=100) self.assertTrue(action_runner.EvaluateJavaScript( 'document.body.scrollLeft') > 75) @decorators.Disabled('android', # crbug.com/437065. 'chromeos') # crbug.com/483212. 
def testSwipe(self): if not page_action.IsGestureSourceTypeSupported( self._tab, 'touch'): return self.Navigate('page_with_swipeables.html') action_runner = action_runner_module.ActionRunner(self._tab, skip_waits=True) action_runner.SwipeElement( selector='#left-right', direction='left', left_start_ratio=0.9) self.assertTrue(action_runner.EvaluateJavaScript( 'document.querySelector("#left-right").scrollLeft') > 75) action_runner.SwipeElement( selector='#top-bottom', direction='up', top_start_ratio=0.9) self.assertTrue(action_runner.EvaluateJavaScript( 'document.querySelector("#top-bottom").scrollTop') > 75) action_runner.SwipePage(direction='left', left_start_ratio=0.9) self.assertTrue(action_runner.EvaluateJavaScript( 'document.body.scrollLeft') > 75) class InteractionTest(unittest.TestCase): def setUp(self): self.mock_action_runner = mock.Mock(action_runner_module.ActionRunner) def testIssuingInteractionRecordCommand(self): with action_runner_module.Interaction( self.mock_action_runner, label='ABC', flags=[]): pass expected_calls = [ mock.call.ExecuteJavaScript('console.time("Interaction.ABC");'), mock.call.ExecuteJavaScript('console.timeEnd("Interaction.ABC");')] self.assertEqual(expected_calls, self.mock_action_runner.mock_calls) def testExceptionRaisedInWithInteraction(self): class FooException(Exception): pass # Test that the Foo exception raised in the with block is propagated to the # caller. with self.assertRaises(FooException): with action_runner_module.Interaction( self.mock_action_runner, label='ABC', flags=[]): raise FooException() # Test that the end console.timeEnd(...) isn't called because exception was # raised. expected_calls = [ mock.call.ExecuteJavaScript('console.time("Interaction.ABC");')] self.assertEqual(expected_calls, self.mock_action_runner.mock_calls)
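# ---------------------------------------------------------------------------
# Illustrative sketch (not the telemetry ActionRunner): InteractionTest above
# verifies that entering an interaction emits console.time("Interaction.ABC")
# and that console.timeEnd(...) is only emitted when the with-block exits
# cleanly.  The hypothetical context manager below reduces that contract to
# plain Python.
# ---------------------------------------------------------------------------


class _InteractionSketch(object):
    def __init__(self, runner, label):
        self._runner = runner  # anything exposing ExecuteJavaScript()
        self._label = label

    def __enter__(self):
        self._runner.ExecuteJavaScript(
            'console.time("Interaction.%s");' % self._label)
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        if exc_type is None:
            # Skipped on exception, as testExceptionRaisedInWithInteraction expects.
            self._runner.ExecuteJavaScript(
                'console.timeEnd("Interaction.%s");' % self._label)
        return False  # never swallow exceptions raised inside the block


class _RecorderStub(object):
    """Collects ExecuteJavaScript calls, standing in for the mocked runner."""
    def __init__(self):
        self.calls = []

    def ExecuteJavaScript(self, script):
        self.calls.append(script)


_recorder = _RecorderStub()
with _InteractionSketch(_recorder, 'ABC'):
    pass
assert _recorder.calls == [
    'console.time("Interaction.ABC");',
    'console.timeEnd("Interaction.ABC");',
]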
# Copyright 2015 Fortinet Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from neutron.tests import base from oslo_config import cfg from oslo_db.sqlalchemy import session import six from networking_fortinet.ml2 import mech_fortinet from networking_fortinet.tests.unit import ( test_fortinet_common as mocked) TEST_SEG1 = 'seg1' SUPPORTED_DR = ['vlan'] class TestFortinetMechDriver(base.BaseTestCase, mocked.ConfigMixin): def setUp(self): super(TestFortinetMechDriver, self).setUp() mocked.ConfigMixin.set_up_mocks(self) self.driver = mech_fortinet.FortinetMechanismDriver() self.driver.sync_conf_to_db = mock.Mock() self.driver.sync_conf_to_db.return_value = 'ok' self.patcher1 = mock.patch( 'networking_fortinet.db.models.Fortinet_ML2_Namespace') self.patcher2 = mock.patch( 'networking_fortinet.common.resources.Vdom') self.patcher3 = mock.patch( 'networking_fortinet.db.models.Fortinet_Interface') self.addCleanup(self.patcher1.stop) self.mock_db_namespace = self.patcher1.start() self.addCleanup(self.patcher2.stop) self.mock_res_vdom = self.patcher2.start() self.addCleanup(self.patcher3.stop) self.mock_db_inf = self.patcher3.start() def test_initialize(self): self.driver.initialize() def _setup_network_context(self): net = { 'name': 'test', 'tenant_id': 'test', 'provider: network_type': '', 'router:external': False, 'id': '123', 'provider:segmentation_id': 0 } segment = { 'segmentation_id': 0, 'physical_network': 'physnet1', 'id': '123', 'network_type': 'vlan' } context = Fake_context() mech_context = Fake_mech_context(_plugin_context=context, current=net, network_segments=[segment]) return mech_context def _setup_subnet_context(self): subnet = { 'allocation_pools': [{ 'start': '172.20.21.2', 'end': '172.20.21.254' }], 'cidr': '172.20.21.0/24', 'id': 'ee1506dc-d1a9-45b3-840e-137bdaebce52', 'enable_dhcp': True, 'network_id': u'ad47f7b8-4bb7-4591-b8ed-f720237dd24f', 'tenant_id': u'11513667f4ee4a14acb0985659456f24', 'dns_nameservers': [], 'gateway_ip': u'172.20.21.1', 'shared': False } context = Fake_context() mech_context = Fake_mech_context(_plugin_context=context, current=subnet) return mech_context def _setup_port_context(self): port = { 'device_owner': 'network:router_interface', 'fixed_ips': [{ 'subnet_id': u'f645b09c-a34a-42fb-9c14-b999e43a54c7', 'ip_address': u'172.20.21.1' }], 'id': 'fb66def6-bd5e-44a0-a3f7-7c0e8e08d9ff', 'device_id': u'e4020c65-7003-468b-a34d-31af297397a0', 'admin_state_up': True, 'network_id': u'f8e34426-ccf7-429c-b726-3809d54cabdc', 'tenant_id': u'11513667f4ee4a14acb0985659456f24', 'mac_address': u'00: 0c: 29: d9: 18: 3f' } context = Fake_context() mech_context = Fake_mech_context(_plugin_context=context, current=port) return mech_context @mock.patch('networking_fortinet.common.resources.VlanInterface') def test_create_network_postcommit(self, VlanInterface): self.driver.initialize() # query_inf_db_none = mock.Mock(return_value=None) # self.inf_db.query = query_inf_db_none # add_inf_db_ok = mock.Mock(return_value='cool') # self.inf_db.add_record = add_inf_db_ok # 
        # resources.VlanInterface is patched by the decorator above, so
        # instantiating it here only produces a mock resource.
        self.inf_res = VlanInterface()
        mech_context = self._setup_network_context()
        self.driver.create_network_postcommit(mech_context)

    @mock.patch('networking_fortinet.common.resources.VlanInterface')
    def test_delete_network_precommit(self, VlanInterface):
        self.driver.initialize()
        mech_context = self._setup_network_context()
        namespace = mock.Mock()
        namespace.vdom = 'osvdm123'
        with mock.patch('networking_fortinet.db.models.query_record',
                        side_effect=[None, namespace, 'fgt_intf']):
            self.driver.delete_network_precommit(mech_context)

    @mock.patch('networking_fortinet.common.resources.VlanInterface')
    @mock.patch('networking_fortinet.db.models.Fortinet_Vdom_Vlink')
    @mock.patch('networking_fortinet.db.models.Fortinet_Vlink_Vlan_Allocation')
    @mock.patch('networking_fortinet.db.models.Fortinet_Vlink_IP_Allocation')
    @mock.patch('networking_fortinet.db.models.Fortinet_Static_Router')
    @mock.patch('networking_fortinet.common.resources.RouterStatic')
    @mock.patch('networking_fortinet.common.resources.VdomLink')
    def test_delete_network_postcommit(self,
                                       # mock.patch decorators apply bottom-up,
                                       # so the injected mocks are listed in
                                       # reverse decorator order.
                                       VdomLink,
                                       RouterStatic,
                                       Fortinet_Static_Router,
                                       Fortinet_Vlink_IP_Allocation,
                                       Fortinet_Vlink_Vlan_Allocation,
                                       Fortinet_Vdom_Vlink,
                                       VlanInterface):
        self.driver.initialize()
        mech_context = self._setup_network_context()
        namespace = mock.Mock()
        namespace.tenant_id = mech_context.current['tenant_id']
        with mock.patch('networking_fortinet.db.models.query_count',
                        return_value=0):
            with mock.patch('networking_fortinet.db.models.query_record',
                            return_value=namespace):
                with mock.patch(
                        'networking_fortinet.db.models.query_records',
                        return_value='cool'):
                    self.driver.delete_network_postcommit(mech_context)

    @mock.patch('networking_fortinet.db.models.Fortinet_Static_Router')
    @mock.patch('networking_fortinet.common.resources.RouterStatic')
    @mock.patch('networking_fortinet.db.models.Fortinet_ML2_Subnet')
    @mock.patch('networking_fortinet.common.resources.DhcpServer')
    @mock.patch('networking_fortinet.common.resources.VlanInterface')
    def test_create_subnet_postcommit(self, VlanInterface, DhcpServer,
                                      Fortinet_ML2_Subnet, RouterStatic,
                                      Fortinet_Static_Router):
        self.driver.initialize()
        mech_context = self._setup_subnet_context()
        namespace = mock.Mock()
        namespace.vdom = 'osvdm123'
        with mock.patch('networking_fortinet.db.models.query_record',
                        return_value='external network'):
            self.driver.create_subnet_postcommit(mech_context)

        with mock.patch('networking_fortinet.db.models.query_record',
                        side_effect=[None, namespace, 101, 'fortinet_inf']):
            self.driver.create_subnet_postcommit(mech_context)

    @mock.patch('networking_fortinet.db.models.Fortinet_Static_Router')
    @mock.patch('networking_fortinet.common.resources.RouterStatic')
    @mock.patch('networking_fortinet.db.models.Fortinet_ML2_Subnet')
    @mock.patch('networking_fortinet.common.resources.DhcpServer')
    def test_delete_subnet_postcommit(self, DhcpServer, Fortinet_ML2_Subnet,
                                      RouterStatic, Fortinet_Static_Router):
        self.driver.initialize()
        mech_context = self._setup_subnet_context()
        router_record = mock.Mock()
        router_record.edit_id = 123
        with mock.patch('networking_fortinet.db.models.query_record',
                        return_value=router_record):
            self.driver.delete_subnet_postcommit(mech_context)

    @mock.patch('networking_fortinet.db.models.Fortinet_Firewall_Address')
    @mock.patch('networking_fortinet.common.resources.FirewallAddress')
    @mock.patch('networking_fortinet.common.resources.FirewallAddrgrp')
    @mock.patch('networking_fortinet.db.models.Fortinet_Firewall_Policy')
    @mock.patch('networking_fortinet.common.resources.FirewallPolicy')
    @mock.patch('networking_fortinet.db.models.Fortinet_Vlink_Vlan_Allocation')
    @mock.patch('networking_fortinet.db.models.Fortinet_Vlink_IP_Allocation')
    @mock.patch('networking_fortinet.db.models.Fortinet_Vdom_Vlink')
    @mock.patch('networking_fortinet.common.resources.VdomLink')
    @mock.patch('networking_fortinet.common.resources.VlanInterface')
    @mock.patch('networking_fortinet.db.models.Fortinet_Static_Router')
    @mock.patch('networking_fortinet.common.resources.RouterStatic')
    @mock.patch('networking_fortinet.db.models.Fortinet_Firewall_IPPool')
    @mock.patch('networking_fortinet.common.resources.FirewallIppool')
    @mock.patch('networking_fortinet.db.models.Fortinet_ML2_ReservedIP')
    @mock.patch('networking_fortinet.common.resources.DhcpServerRsvAddr')
    @mock.patch('networking_fortinet.db.models.Fortinet_Interface_subip')
    def test_create_port_precommit_and_del_port_postcommit(
            self,
            # arguments arrive in reverse decorator order
            # (mock.patch decorators apply bottom-up)
            Fortinet_Interface_subip,
            DhcpServerRsvAddr,
            Fortinet_ML2_ReservedIP,
            FirewallIppool,
            Fortinet_Firewall_IPPool,
            RouterStatic,
            Fortinet_Static_Router,
            VlanInterface,
            VdomLink,
            Fortinet_Vdom_Vlink,
            Fortinet_Vlink_IP_Allocation,
            Fortinet_Vlink_Vlan_Allocation,
            FirewallPolicy,
            Fortinet_Firewall_Policy,
            FirewallAddrgrp,
            FirewallAddress,
            Fortinet_Firewall_Address):
        self.driver.initialize()
        mech_context = self._setup_port_context()
        namespace = mock.Mock()
        namespace.vdom = 'osvdm1234'
        subnet = mock.Mock()
        subnet.cidr = '172.20.21.0/24'
        subnet.edit_id = '123'
        subnet.vdom = 'osvdm123'
        fwaddr = mock.Mock()
        fwaddr.name = 'cool'
        fwaddr.group = 'addrgrp1'
        fwpolicy = mock.Mock()
        fwpolicy.edit_id = '123'
        fwpolicy.vdom = 'osvdm123'
        fwippool = mock.Mock()
        fwippool.edit_id = '123'
        fwippool.vdom = 'osvdmext'
        fwippool.name = '172.20.21.1'
        router = mock.Mock()
        router.tenant_id = 'test'
        router.edit_id = '123'
        router.vdom = 'osvdm123'
        router.gw_port_id = None
        vlink = mock.Mock()
        vlink.inf_name_ext_vdom = 'vlink_1'
        vlink.id = '1234'
        vlink.ip = '169.254.0.10'
        vlink.edit_id = '123'
        vlink.vdom = 'osvdm123'
        vlink.inf_name_int_vdom = 'vlink_0'
        fgt_intf = mock.Mock()
        fgt_intf.name = 'port32'
        fgt_intf.ip = '1.1.1.1'
        subip = mock.Mock()
        subip.ip = '172.20.21.1 255.255.255.0'
        reserveip = mock.Mock()
        reserveip.edit_id = '123'
        reserveip.ip = '172.20.21.123'
        reserveip.mac = 'aa:aa:aa:aa:aa:aa'

        # router interface port (device_owner set in _setup_port_context)
        with mock.patch('networking_fortinet.db.models.query_record',
                        side_effect=[namespace, subnet, fwaddr]):
            with mock.patch('networking_fortinet.db.models.query_records',
                            side_effect=[[fwaddr]]):
                self.driver.create_port_precommit(mech_context)
        with mock.patch('networking_fortinet.db.models.query_record',
                        side_effect=[subnet, subnet, fwpolicy, fwaddr]):
            with mock.patch('networking_fortinet.db.models.query_records',
                            side_effect=[[fwaddr]]):
                self.driver.delete_port_postcommit(mech_context)

        # router gateway port
        mech_context.current['device_owner'] = 'network:router_gateway'
        with mock.patch('networking_fortinet.db.models.query_record',
                        side_effect=[namespace, subnet, 'external_net',
                                     router, vlink, subnet, fgt_intf]):
            with mock.patch('networking_fortinet.common.utils.getip',
                            side_effect=['169.254.0.10', '169.254.0.11']):
                with mock.patch(
                        'networking_fortinet.db.models.query_records',
                        side_effect=[[subip]]):
                    self.driver.create_port_precommit(mech_context)
        with mock.patch('networking_fortinet.db.models.query_record',
                        side_effect=[subnet, subnet, 'external', subnet,
                                     fwpolicy, fwippool, router, namespace,
                                     vlink, vlink.ip, router, vlink,
                                     namespace]):
            with mock.patch('networking_fortinet.db.models.query_records',
                            side_effect=[[subip], [router]]):
                with mock.patch('networking_fortinet.db.models.query_count',
                                return_value=0):
                    self.driver.delete_port_postcommit(mech_context)

        # VM port
        mech_context.current['device_owner'] = 'network:compute:None'
        with mock.patch('networking_fortinet.db.models.query_record',
                        side_effect=[namespace, subnet, [reserveip], subnet]):
            self.driver.create_port_precommit(mech_context)
        with mock.patch('networking_fortinet.db.models.query_records',
                        side_effect=[[reserveip]]):
            with mock.patch('networking_fortinet.db.models.query_record',
                            side_effect=[subnet] * 3):
                self.driver.delete_port_postcommit(mech_context)

    def test_create_port_postcommit(self):
        mech_context = self._setup_port_context()
        with mock.patch('networking_fortinet.tasks.tasks.TaskManager'):
            self.driver.create_port_postcommit(mech_context)


class Fake_context(object):
    """Minimal stand-in for a neutron plugin context."""

    def __init__(self):
        engine = session.EngineFacade.from_config(cfg.CONF)
        # Bail out early if none of the configured ML2 type drivers is
        # supported by the Fortinet mechanism driver (currently 'vlan' only).
        if not [driver for driver in cfg.CONF.ml2.type_drivers
                if driver in SUPPORTED_DR]:
            exit()
        self.session = engine.get_session(autocommit=True,
                                          expire_on_commit=False)
        self.request_id = 'fake_migration_context'


class Fake_mech_context(object):
    """Minimal stand-in for an ML2 mechanism driver context."""

    def __init__(self, **kwargs):
        for key, value in six.iteritems(kwargs):
            setattr(self, key, value)