the-stack_0_14155
#!/usr/bin/python # Classification (U) """Program: get_status.py Description: Unit testing of get_status in elastic_db_admin.py. Usage: test/unit/elastic_db_admin/get_status.py Arguments: """ # Libraries and Global Variables # Standard import sys import os if sys.version_info < (2, 7): import unittest2 as unittest else: import unittest # Third-party import mock # Local sys.path.append(os.getcwd()) import elastic_db_admin import lib.gen_libs as gen_libs import version __version__ = version.__version__ class ElasticSearchStatus(object): """Class: ElasticSearchStatus Description: Class representation of the ElasticSearchStatus class. Methods: __init__ -> Initialize configuration environment. get_mem_status -> Holder for ElasticSearchStatus.get_mem_status method. get_nodes -> Stub holder for ElasticSearchStatus.get_nodes method. get_cluster -> Stub holder for ElasticSearchStatus.get_cluster method. get_all -> Stub holder for ElasticSearchStatus.get_all method. """ def __init__(self, hosts, port): """Method: __init__ Description: Initialization instance of the class. Arguments: (input) hosts -> Host name. (input) port -> Port number. """ self.hosts = hosts self.port = port def get_mem_status(self): """Method: get_mem_status Description: Holder for ElasticSearchStatus.get_mem_status method. Arguments: """ return {"memory": "memory_status"} def get_nodes(self): """Method: get_nodes Description: Stub holder for ElasticSearchStatus.get_nodes method. Arguments: """ return {"node": "node_name"} def get_cluster(self): """Method: get_cluster Description: Stub holder for ElasticSearchStatus.get_cluster method. Arguments: """ return {"cluster": "cluster_name"} def get_all(self): """Method: get_all Description: Stub holder for ElasticSearchStatus.get_all method. Arguments: """ return True class ElasticSearch(object): """Class: ElasticSearch Description: Class representation of the ElasticSearch class. Methods: __init__ -> Initialize configuration environment. """ def __init__(self): """Method: __init__ Description: Initialization instance of the class. Arguments: """ self.hosts = ["nodename1", "nodename2"] self.port = 9200 class UnitTest(unittest.TestCase): """Class: UnitTest Description: Class which is a representation of a unit testing. Methods: setUp -> Initialization for unit testing. test_empty_display_list -> Test with empty display list. test_incorrect_option -> Test with incorrect option. test_one_option -> Test with one option. test_all -> Test with all option. test_no_options -> Test with no options. test_display_all -> Test with display all option. test_display_default -> Test with display default option. """ def setUp(self): """Function: setUp Description: Initialization for unit testing. Arguments: """ self.els = ElasticSearch() self.args_array = {"-D": ["all"]} self.args_array2 = {"-D": ["memory"]} self.args_array3 = {"-D": []} self.args_array4 = {"-D": [], "-j": True} self.args_array5 = {"-D": ["all"], "-j": True} self.args_array6 = {"-D": ["memory"], "-j": True} self.args_array7 = {"-D": ["incorrect"], "-j": True} self.args_array8 = {"-D": []} self.status_call = {"memory": "get_mem_status"} @mock.patch("elastic_db_admin.elastic_class.ElasticSearchStatus") def test_empty_display_list(self, mock_class): """Function: test_empty_display_list Description: Test with empty display list. 
Arguments: """ mock_class.return_value = ElasticSearchStatus(self.els.hosts, self.els.port) with gen_libs.no_std_out(): self.assertFalse( elastic_db_admin.get_status( self.els, status_call=self.status_call, args_array=self.args_array8)) @mock.patch("elastic_db_admin.elastic_class.ElasticSearchStatus") def test_incorrect_option(self, mock_class): """Function: test_incorrect_option Description: Test with incorrect option. Arguments: """ mock_class.return_value = ElasticSearchStatus(self.els.hosts, self.els.port) with gen_libs.no_std_out(): self.assertFalse( elastic_db_admin.get_status( self.els, status_call=self.status_call, args_array=self.args_array7)) @mock.patch("elastic_db_admin.elastic_class.ElasticSearchStatus") def test_one_option(self, mock_class): """Function: test_one_option Description: Test with one option. Arguments: """ mock_class.return_value = ElasticSearchStatus(self.els.hosts, self.els.port) with gen_libs.no_std_out(): self.assertFalse( elastic_db_admin.get_status( self.els, status_call=self.status_call, args_array=self.args_array6)) @mock.patch("elastic_db_admin.elastic_class.ElasticSearchStatus") def test_all(self, mock_class): """Function: test_all Description: Test with all option. Arguments: """ mock_class.return_value = ElasticSearchStatus(self.els.hosts, self.els.port) with gen_libs.no_std_out(): self.assertFalse( elastic_db_admin.get_status( self.els, status_call=self.status_call, args_array=self.args_array5)) @mock.patch("elastic_db_admin.elastic_class.ElasticSearchStatus") def test_no_options(self, mock_class): """Function: test_no_options Description: Test with no options. Arguments: """ mock_class.return_value = ElasticSearchStatus(self.els.hosts, self.els.port) with gen_libs.no_std_out(): self.assertFalse( elastic_db_admin.get_status( self.els, status_call=self.status_call, args_array=self.args_array4)) @mock.patch("elastic_db_admin.elastic_class.ElasticSearchStatus") def test_display_all(self, mock_class): """Function: test_display_all Description: Test with display all option. Arguments: """ mock_class.return_value = ElasticSearchStatus(self.els.hosts, self.els.port) with gen_libs.no_std_out(): self.assertFalse( elastic_db_admin.get_status( self.els, status_call=self.status_call, args_array=self.args_array)) @mock.patch("elastic_db_admin.elastic_class.ElasticSearchStatus") def test_display_default(self, mock_class): """Function: test_display_default Description: Test with display default option. Arguments: """ mock_class.return_value = ElasticSearchStatus(self.els.hosts, self.els.port) with gen_libs.no_std_out(): self.assertFalse( elastic_db_admin.get_status( self.els, status_call=self.status_call, args_array={})) if __name__ == "__main__": unittest.main()
the-stack_0_14156
from enum import Enum
from typing import Optional

import numpy as np
from pydantic import PrivateAttr, validator

from ..events import EventedModel
from ..events.custom_types import Array
from ..translations import trans
from .colorbars import make_colorbar
from .standardize_color import transform_color


class ColormapInterpolationMode(str, Enum):
    """INTERPOLATION: Interpolation mode for colormaps.

    Selects an interpolation mode for the colormap.
        * linear: colors are defined by linear interpolation between
          colors of neighboring control points.
        * zero: colors are defined by the value of the color in the
          bin between neighboring control points.
    """

    LINEAR = 'linear'
    ZERO = 'zero'


class Colormap(EventedModel):
    """Colormap that relates intensity values to colors.

    Attributes
    ----------
    colors : array, shape (N, 4)
        Data used in the colormap.
    name : str
        Name of the colormap.
    display_name : str
        Display name of the colormap.
    controls : array, shape (N,) or (N+1,)
        Control points of the colormap.
    interpolation : str
        Colormap interpolation mode, either 'linear' or 'zero'.
        If 'linear', ncontrols = ncolors (one color per control point).
        If 'zero', ncontrols = ncolors+1 (one color per bin).
    """

    # fields
    colors: Array[float, (-1, 4)]
    name: str = 'custom'
    _display_name: Optional[str] = PrivateAttr(None)
    interpolation: ColormapInterpolationMode = ColormapInterpolationMode.LINEAR
    controls: Array[float, (-1,)] = None

    def __init__(self, colors, display_name: Optional[str] = None, **data):
        if display_name is None:
            display_name = data.get('name', 'custom')

        super().__init__(colors=colors, **data)
        self._display_name = display_name

    # validators
    @validator('colors', pre=True)
    def _ensure_color_array(cls, v):
        return transform_color(v)

    # controls validator must be called even if None for correct initialization
    @validator('controls', pre=True, always=True)
    def _check_controls(cls, v, values):
        if v is None or len(v) == 0:
            n_controls = len(values['colors']) + int(
                values['interpolation'] == ColormapInterpolationMode.ZERO
            )
            return np.linspace(0, 1, n_controls)
        return v

    def __iter__(self):
        yield from (self.colors, self.controls, self.interpolation)

    def map(self, values):
        values = np.atleast_1d(values)
        if self.interpolation == ColormapInterpolationMode.LINEAR:
            # One color per control point
            cols = [
                np.interp(values, self.controls, self.colors[:, i])
                for i in range(4)
            ]
            cols = np.stack(cols, axis=1)
        elif self.interpolation == ColormapInterpolationMode.ZERO:
            # One color per bin
            indices = np.clip(
                np.searchsorted(self.controls, values) - 1, 0, len(self.colors)
            )
            cols = self.colors[indices.astype(np.int32)]
        else:
            raise ValueError(
                trans._(
                    'Unrecognized Colormap Interpolation Mode',
                    deferred=True,
                )
            )

        return cols

    @property
    def colorbar(self):
        return make_colorbar(self)
the-stack_0_14157
import sys
import gzip
import json

if __name__ == '__main__':
    if len(sys.argv) != 3:
        print('Usage: python3 convert_mrqa_to_fgc.py <mrqa-format-input-fpath> <fgc-format-output-fpath>')
        exit(1)
    input_fpath = sys.argv[1]
    output_fpath = sys.argv[2]

    # Read MRQA-format data
    with gzip.open(input_fpath) as f:
        jsonl_data = f.readlines()
    data_info = json.loads(jsonl_data[0])
    dataset = data_info['header']['dataset']
    line_count = len(jsonl_data) - 1

    # Convert MRQA-format to FGC-format
    new_data = []
    for di, jsonl_line in enumerate(jsonl_data[1:], start=1):
        # PQA (Outer loop)
        new_PQA = {}
        PQA = json.loads(jsonl_line)
        DID = '%d' % di
        DTEXT = PQA['context']
        new_PQA['DID'] = DID
        new_PQA['DTEXT'] = DTEXT
        new_PQA['QUESTIONS'] = []

        # QA (Middle loop)
        for qi, QA in enumerate(PQA['qas'], start=1):
            new_QA = {'AMODE': 'Single-Span-Extraction', 'ATYPE': ''}
            QID = '%s-%d' % (DID, qi)
            QTEXT = QA['question']
            new_QA['QID'] = QID
            new_QA['QTEXT'] = QTEXT

            # Inner A (Inner loop)
            answer_map = {}
            new_ANSWER, new_ASPAN = [], []
            for A in QA['detected_answers']:
                ATEXT = A['text']
                start = A['char_spans'][0][0]
                end = A['char_spans'][0][1]

                # ANSWER
                if ATEXT not in answer_map:
                    answer_map[ATEXT] = len(answer_map)
                    new_ANSWER.append({'ATEXT': ATEXT, 'ATOKEN': [{'text': ATEXT, 'start': start}]})
                else:
                    ai = answer_map[ATEXT]
                    atoken_info = {'text': ATEXT, 'start': start}
                    if atoken_info not in new_ANSWER[ai]['ATOKEN']:
                        new_ANSWER[ai]['ATOKEN'].append(atoken_info)

                # ASPAN
                aspan_info = {'text': ATEXT, 'start': start, 'end': end}
                if aspan_info not in new_ASPAN:
                    new_ASPAN.append(aspan_info)

            new_QA['ANSWER'] = new_ANSWER
            new_QA['ASPAN'] = new_ASPAN
            new_PQA['QUESTIONS'].append(new_QA)

        new_data.append(new_PQA)
        print('%s: %d/%d (%.2f%%)\r' % (dataset, di, line_count, 100*di/line_count), end='')
    print()

    # Save FGC-format data as JSON
    with open(output_fpath, 'w') as f:
        json.dump(new_data, f)
the-stack_0_14159
'''OpenGL extension EXT.separate_shader_objects

This module customises the behaviour of the
OpenGL.raw.GLES2.EXT.separate_shader_objects to provide a more
Python-friendly API

The official definition of this extension is available here:
http://www.opengl.org/registry/specs/EXT/separate_shader_objects.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.EXT.separate_shader_objects import *
from OpenGL.raw.GLES2.EXT.separate_shader_objects import _EXTENSION_NAME


def glInitSeparateShaderObjectsEXT():
    '''Return boolean indicating whether this extension is available'''
    from OpenGL import extensions
    return extensions.hasGLExtension( _EXTENSION_NAME )


# INPUT glCreateShaderProgramvEXT.strings size not checked against count
glCreateShaderProgramvEXT = wrapper.wrapper(glCreateShaderProgramvEXT).setInputArraySize('strings', None)
# INPUT glDeleteProgramPipelinesEXT.pipelines size not checked against n
glDeleteProgramPipelinesEXT = wrapper.wrapper(glDeleteProgramPipelinesEXT).setInputArraySize('pipelines', None)
# INPUT glGenProgramPipelinesEXT.pipelines size not checked against n
glGenProgramPipelinesEXT = wrapper.wrapper(glGenProgramPipelinesEXT).setInputArraySize('pipelines', None)
# INPUT glGetProgramPipelineInfoLogEXT.infoLog size not checked against bufSize
glGetProgramPipelineInfoLogEXT = wrapper.wrapper(glGetProgramPipelineInfoLogEXT).setInputArraySize('infoLog', None).setInputArraySize('length', 1)
# INPUT glProgramUniform1fvEXT.value size not checked against count
glProgramUniform1fvEXT = wrapper.wrapper(glProgramUniform1fvEXT).setInputArraySize('value', None)
# INPUT glProgramUniform1ivEXT.value size not checked against count
glProgramUniform1ivEXT = wrapper.wrapper(glProgramUniform1ivEXT).setInputArraySize('value', None)
# INPUT glProgramUniform2fvEXT.value size not checked against count*2
glProgramUniform2fvEXT = wrapper.wrapper(glProgramUniform2fvEXT).setInputArraySize('value', None)
# INPUT glProgramUniform2ivEXT.value size not checked against count*2
glProgramUniform2ivEXT = wrapper.wrapper(glProgramUniform2ivEXT).setInputArraySize('value', None)
# INPUT glProgramUniform3fvEXT.value size not checked against count*3
glProgramUniform3fvEXT = wrapper.wrapper(glProgramUniform3fvEXT).setInputArraySize('value', None)
# INPUT glProgramUniform3ivEXT.value size not checked against count*3
glProgramUniform3ivEXT = wrapper.wrapper(glProgramUniform3ivEXT).setInputArraySize('value', None)
# INPUT glProgramUniform4fvEXT.value size not checked against count*4
glProgramUniform4fvEXT = wrapper.wrapper(glProgramUniform4fvEXT).setInputArraySize('value', None)
# INPUT glProgramUniform4ivEXT.value size not checked against count*4
glProgramUniform4ivEXT = wrapper.wrapper(glProgramUniform4ivEXT).setInputArraySize('value', None)
# INPUT glProgramUniformMatrix2fvEXT.value size not checked against count*4
glProgramUniformMatrix2fvEXT = wrapper.wrapper(glProgramUniformMatrix2fvEXT).setInputArraySize('value', None)
# INPUT glProgramUniformMatrix3fvEXT.value size not checked against count*9
glProgramUniformMatrix3fvEXT = wrapper.wrapper(glProgramUniformMatrix3fvEXT).setInputArraySize('value', None)
# INPUT glProgramUniformMatrix4fvEXT.value size not checked against count*16
glProgramUniformMatrix4fvEXT = wrapper.wrapper(glProgramUniformMatrix4fvEXT).setInputArraySize('value', None)
# INPUT glProgramUniform1uivEXT.value size not checked against count
glProgramUniform1uivEXT = wrapper.wrapper(glProgramUniform1uivEXT).setInputArraySize('value', None)
# INPUT glProgramUniform2uivEXT.value size not checked against count*2
glProgramUniform2uivEXT = wrapper.wrapper(glProgramUniform2uivEXT).setInputArraySize('value', None)
# INPUT glProgramUniform3uivEXT.value size not checked against count*3
glProgramUniform3uivEXT = wrapper.wrapper(glProgramUniform3uivEXT).setInputArraySize('value', None)
# INPUT glProgramUniform4uivEXT.value size not checked against count*4
glProgramUniform4uivEXT = wrapper.wrapper(glProgramUniform4uivEXT).setInputArraySize('value', None)
# INPUT glProgramUniformMatrix4fvEXT.value size not checked against count*16
glProgramUniformMatrix4fvEXT = wrapper.wrapper(glProgramUniformMatrix4fvEXT).setInputArraySize('value', None)
# INPUT glProgramUniformMatrix2x3fvEXT.value size not checked against count*6
glProgramUniformMatrix2x3fvEXT = wrapper.wrapper(glProgramUniformMatrix2x3fvEXT).setInputArraySize('value', None)
# INPUT glProgramUniformMatrix3x2fvEXT.value size not checked against count*6
glProgramUniformMatrix3x2fvEXT = wrapper.wrapper(glProgramUniformMatrix3x2fvEXT).setInputArraySize('value', None)
# INPUT glProgramUniformMatrix2x4fvEXT.value size not checked against count*8
glProgramUniformMatrix2x4fvEXT = wrapper.wrapper(glProgramUniformMatrix2x4fvEXT).setInputArraySize('value', None)
# INPUT glProgramUniformMatrix4x2fvEXT.value size not checked against count*8
glProgramUniformMatrix4x2fvEXT = wrapper.wrapper(glProgramUniformMatrix4x2fvEXT).setInputArraySize('value', None)
# INPUT glProgramUniformMatrix3x4fvEXT.value size not checked against count*12
glProgramUniformMatrix3x4fvEXT = wrapper.wrapper(glProgramUniformMatrix3x4fvEXT).setInputArraySize('value', None)
# INPUT glProgramUniformMatrix4x3fvEXT.value size not checked against count*12
glProgramUniformMatrix4x3fvEXT = wrapper.wrapper(glProgramUniformMatrix4x3fvEXT).setInputArraySize('value', None)
### END AUTOGENERATED SECTION
the-stack_0_14160
# py-motmetrics - Metrics for multiple object tracker (MOT) benchmarking. # https://github.com/cheind/py-motmetrics/ # # MIT License # Copyright (c) 2017-2020 Christoph Heindl, Jack Valmadre and others. # See LICENSE file for terms. """Functions for loading data and writing summaries.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from enum import Enum import io import numpy as np import pandas as pd import scipy.io import xmltodict class Format(Enum): """Enumerates supported file formats.""" MOT16 = 'mot16' """Milan, Anton, et al. "Mot16: A benchmark for multi-object tracking." arXiv preprint arXiv:1603.00831 (2016).""" MOT15_2D = 'mot15-2D' """Leal-Taixe, Laura, et al. "MOTChallenge 2015: Towards a benchmark for multi-target tracking." arXiv preprint arXiv:1504.01942 (2015).""" VATIC_TXT = 'vatic-txt' """Vondrick, Carl, Donald Patterson, and Deva Ramanan. "Efficiently scaling up crowdsourced video annotation." International Journal of Computer Vision 101.1 (2013): 184-204. https://github.com/cvondrick/vatic """ DETRAC_MAT = 'detrac-mat' """Wen, Longyin et al. "UA-DETRAC: A New Benchmark and Protocol for Multi-Object Detection and Tracking." arXiv preprint arXiv:arXiv:1511.04136 (2016). http://detrac-db.rit.albany.edu/download """ DETRAC_XML = 'detrac-xml' """Wen, Longyin et al. "UA-DETRAC: A New Benchmark and Protocol for Multi-Object Detection and Tracking." arXiv preprint arXiv:arXiv:1511.04136 (2016). http://detrac-db.rit.albany.edu/download """ def load_motchallenge(fname, **kwargs): r"""Load MOT challenge data. Params ------ fname : str Filename to load data from Kwargs ------ sep : str Allowed field separators, defaults to '\s+|\t+|,' min_confidence : float Rows with confidence less than this threshold are removed. Defaults to -1. You should set this to 1 when loading ground truth MOTChallenge data, so that invalid rectangles in the ground truth are not considered during matching. Returns ------ df : pandas.DataFrame The returned dataframe has the following columns 'X', 'Y', 'Width', 'Height', 'Confidence', 'ClassId', 'Visibility' The dataframe is indexed by ('FrameId', 'Id') """ sep = kwargs.pop('sep', r'\s+|\t+|,') min_confidence = kwargs.pop('min_confidence', -1) df = pd.read_csv( fname, sep=sep, index_col=[0, 1], skipinitialspace=True, header=None, names=['FrameId', 'Id', 'X', 'Y', 'Width', 'Height', 'Confidence', 'ClassId', 'Visibility', 'unused'], engine='python' ) # Account for matlab convention. df[['X', 'Y']] -= (1, 1) # Removed trailing column del df['unused'] # Remove all rows without sufficient confidence return df[df['Confidence'] >= min_confidence] def load_vatictxt(fname, **kwargs): """Load Vatic text format. Loads the vatic CSV text having the following columns per row 0 Track ID. All rows with the same ID belong to the same path. 1 xmin. The top left x-coordinate of the bounding box. 2 ymin. The top left y-coordinate of the bounding box. 3 xmax. The bottom right x-coordinate of the bounding box. 4 ymax. The bottom right y-coordinate of the bounding box. 5 frame. The frame that this annotation represents. 6 lost. If 1, the annotation is outside of the view screen. 7 occluded. If 1, the annotation is occluded. 8 generated. If 1, the annotation was automatically interpolated. 9 label. The label for this annotation, enclosed in quotation marks. 10+ attributes. 
Each column after this is an attribute set in the current frame Params ------ fname : str Filename to load data from Returns ------ df : pandas.DataFrame The returned dataframe has the following columns 'X', 'Y', 'Width', 'Height', 'Lost', 'Occluded', 'Generated', 'ClassId', '<Attr1>', '<Attr2>', ... where <Attr1> is placeholder for the actual attribute name capitalized (first letter). The order of attribute columns is sorted in attribute name. The dataframe is indexed by ('FrameId', 'Id') """ # pylint: disable=too-many-locals sep = kwargs.pop('sep', ' ') with io.open(fname) as f: # First time going over file, we collect the set of all variable activities activities = set() for line in f: for c in line.rstrip().split(sep)[10:]: activities.add(c) activitylist = sorted(list(activities)) # Second time we construct artificial binary columns for each activity data = [] f.seek(0) for line in f: fields = line.rstrip().split() attrs = ['0'] * len(activitylist) for a in fields[10:]: attrs[activitylist.index(a)] = '1' fields = fields[:10] fields.extend(attrs) data.append(' '.join(fields)) strdata = '\n'.join(data) dtype = { 'Id': np.int64, 'X': np.float32, 'Y': np.float32, 'Width': np.float32, 'Height': np.float32, 'FrameId': np.int64, 'Lost': bool, 'Occluded': bool, 'Generated': bool, 'ClassId': str, } # Remove quotes from activities activitylist = [a.replace('\"', '').capitalize() for a in activitylist] # Add dtypes for activities for a in activitylist: dtype[a] = bool # Read from CSV names = ['Id', 'X', 'Y', 'Width', 'Height', 'FrameId', 'Lost', 'Occluded', 'Generated', 'ClassId'] names.extend(activitylist) df = pd.read_csv(io.StringIO(strdata), names=names, index_col=['FrameId', 'Id'], header=None, sep=' ') # Correct Width and Height which are actually XMax, Ymax in files. w = df['Width'] - df['X'] h = df['Height'] - df['Y'] df['Width'] = w df['Height'] = h return df def load_detrac_mat(fname): """Loads UA-DETRAC annotations data from mat files Competition Site: http://detrac-db.rit.albany.edu/download File contains a nested structure of 2d arrays for indexed by frame id and Object ID. Separate arrays for top, left, width and height are given. Params ------ fname : str Filename to load data from Kwargs ------ Currently none of these arguments used. Returns ------ df : pandas.DataFrame The returned dataframe has the following columns 'X', 'Y', 'Width', 'Height', 'Confidence', 'ClassId', 'Visibility' The dataframe is indexed by ('FrameId', 'Id') """ matData = scipy.io.loadmat(fname) frameList = matData['gtInfo'][0][0][4][0] leftArray = matData['gtInfo'][0][0][0].astype(np.float32) topArray = matData['gtInfo'][0][0][1].astype(np.float32) widthArray = matData['gtInfo'][0][0][3].astype(np.float32) heightArray = matData['gtInfo'][0][0][2].astype(np.float32) parsedGT = [] for f in frameList: ids = [i + 1 for i, v in enumerate(leftArray[f - 1]) if v > 0] for i in ids: row = [] row.append(f) row.append(i) row.append(leftArray[f - 1, i - 1] - widthArray[f - 1, i - 1] / 2) row.append(topArray[f - 1, i - 1] - heightArray[f - 1, i - 1]) row.append(widthArray[f - 1, i - 1]) row.append(heightArray[f - 1, i - 1]) row.append(1) row.append(-1) row.append(-1) row.append(-1) parsedGT.append(row) df = pd.DataFrame(parsedGT, columns=['FrameId', 'Id', 'X', 'Y', 'Width', 'Height', 'Confidence', 'ClassId', 'Visibility', 'unused']) df.set_index(['FrameId', 'Id'], inplace=True) # Account for matlab convention. 
df[['X', 'Y']] -= (1, 1) # Removed trailing column del df['unused'] return df def load_detrac_xml(fname): """Loads UA-DETRAC annotations data from xml files Competition Site: http://detrac-db.rit.albany.edu/download Params ------ fname : str Filename to load data from Kwargs ------ Currently none of these arguments used. Returns ------ df : pandas.DataFrame The returned dataframe has the following columns 'X', 'Y', 'Width', 'Height', 'Confidence', 'ClassId', 'Visibility' The dataframe is indexed by ('FrameId', 'Id') """ with io.open(fname) as fd: doc = xmltodict.parse(fd.read()) frameList = doc['sequence']['frame'] parsedGT = [] for f in frameList: fid = int(f['@num']) targetList = f['target_list']['target'] if not isinstance(targetList, list): targetList = [targetList] for t in targetList: row = [] row.append(fid) row.append(int(t['@id'])) row.append(float(t['box']['@left'])) row.append(float(t['box']['@top'])) row.append(float(t['box']['@width'])) row.append(float(t['box']['@height'])) row.append(1) row.append(-1) row.append(-1) row.append(-1) parsedGT.append(row) df = pd.DataFrame(parsedGT, columns=['FrameId', 'Id', 'X', 'Y', 'Width', 'Height', 'Confidence', 'ClassId', 'Visibility', 'unused']) df.set_index(['FrameId', 'Id'], inplace=True) # Account for matlab convention. df[['X', 'Y']] -= (1, 1) # Removed trailing column del df['unused'] return df def loadtxt(fname, fmt=Format.MOT15_2D, **kwargs): """Load data from any known format.""" fmt = Format(fmt) switcher = { Format.MOT16: load_motchallenge, Format.MOT15_2D: load_motchallenge, Format.VATIC_TXT: load_vatictxt, Format.DETRAC_MAT: load_detrac_mat, Format.DETRAC_XML: load_detrac_xml } func = switcher.get(fmt) return func(fname, **kwargs) def render_summary(summary, formatters=None, namemap=None, buf=None): """Render metrics summary to console friendly tabular output. Params ------ summary : pd.DataFrame Dataframe containing summaries in rows. Kwargs ------ buf : StringIO-like, optional Buffer to write to formatters : dict, optional Dicionary defining custom formatters for individual metrics. I.e `{'mota': '{:.2%}'.format}`. You can get preset formatters from MetricsHost.formatters namemap : dict, optional Dictionary defining new metric names for display. I.e `{'num_false_positives': 'FP'}`. Returns ------- string Formatted string """ if namemap is not None: summary = summary.rename(columns=namemap) if formatters is not None: formatters = {namemap.get(c, c): f for c, f in formatters.items()} output = summary.to_string( buf=buf, formatters=formatters, ) return output motchallenge_metric_names = { 'idf1': 'IDF1', 'idp': 'IDP', 'idr': 'IDR', 'recall': 'Rcll', 'precision': 'Prcn', 'num_unique_objects': 'GT', 'mostly_tracked': 'MT', 'partially_tracked': 'PT', 'mostly_lost': 'ML', 'num_false_positives': 'FP', 'num_misses': 'FN', 'num_switches': 'IDs', 'num_fragmentations': 'FM', 'mota': 'MOTA', 'motp': 'MOTP', 'num_transfer': 'IDt', 'num_ascend': 'IDa', 'num_migrate': 'IDm', } """A list mappings for metric names to comply with MOTChallenge."""
the-stack_0_14161
# # Copyright 2014 Quantopian, Inc. # Modifications Copyright 2018 Alpaca # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import pytest import pandas as pd from pylivetrader.errors import ( SymbolNotFound, OrderDuringInitialize, TradingControlViolation, RegisterTradingControlPostInit, ) import pylivetrader.protocol as proto from pylivetrader.misc import events from pylivetrader.algorithm import Algorithm from pylivetrader.executor.executor import AlgorithmExecutor from pylivetrader.misc.api_context import LiveTraderAPI from pylivetrader.loader import get_functions from unittest.mock import Mock def get_algo(script, **kwargs): functions = get_functions(script) return Algorithm( backend='pylivetrader.testing.fixtures', **functions, **kwargs, ) def simulate_init_and_handle(algo): algo._assets_from_source = \ algo.asset_finder.retrieve_all(algo.asset_finder.sids) if not algo.initialized: algo.initialize() algo.initialized = True algo.executor = AlgorithmExecutor(algo, algo.data_portal) dt_to_use = pd.Timestamp( '2018/08/13 9:30', tz='America/New_York').tz_convert('UTC') with LiveTraderAPI(algo): algo.on_dt_changed(dt_to_use) algo.executor.current_data.datetime = dt_to_use algo.before_trading_start(algo.executor.current_data) algo.handle_data(algo.executor.current_data) def test_algorithm_init(): # check init algo = Algorithm(backend='pylivetrader.testing.fixtures') assert not algo.initialized algo = get_algo(''' def initialize(ctx): pass def handle_data(ctx, data): pass ''') simulate_init_and_handle(algo) def test_algorithm_get_datetime(): algo = get_algo(''' import pandas as pd def initialize(ctx): pass def handle_data(ctx, data): dt = get_datetime() assert dt == pd.Timestamp( '2018/08/13 9:30', tz='America/New_York').tz_convert('UTC') ''') simulate_init_and_handle(algo) def test_before_trading_start(): algo = get_algo(''' def before_trading_start(ctx, data): record(value=1) ''') simulate_init_and_handle(algo) assert algo.recorded_vars['value'] == 1 def test_datetime_bad_params(): algo = get_algo(""" from pytz import timezone def initialize(context): pass def handle_data(context, data): get_datetime(timezone) """) with pytest.raises(TypeError): simulate_init_and_handle(algo) def test_schedule(): algo = get_algo(""" def scheduled(context, data): pass def initialize(context): schedule_function( scheduled, date_rules.every_day(), time_rules.market_open(minutes=1) ) """) simulate_init_and_handle(algo) assert algo.event_manager._events[-1].callback.__name__ == 'scheduled' assert isinstance(algo.event_manager._events[-1].rule, events.OncePerDay) def test_asset_lookup(): algo = get_algo(""" def initialize(context): assert symbol('ASSET1').sid == 'asset-1' """) simulate_init_and_handle(algo) algo = get_algo(""" def initialize(context): symbol('INVALID') """) with pytest.raises(SymbolNotFound): simulate_init_and_handle(algo) with pytest.raises(TypeError): algo.symbol(1) with pytest.raises(TypeError): algo.symbol((1,)) with pytest.raises(TypeError): algo.symbol([1]) with pytest.raises(TypeError): algo.symbol({1}) 
with pytest.raises(TypeError): algo.symbol({"foo": "bar"}) @pytest.mark.parametrize('func, amt, expect', [ ('order', 1, 1), ('order_value', 1, 1), ('order_target', 1, 1), ('order_percent', 0.1, 1), ('order_percent', 0.2, 2), ('order_target_percent', 0.1, 1), ('order_target_value', 1, 1), ]) def test_order(func, amt, expect): algo = get_algo('') simulate_init_and_handle(algo) target = algo.sid('asset-1') def assert_order(asset, amount, style): assert asset == target assert amount == expect class portfolio: portfolio_value = 1000.0 positions = proto.Positions() algo._backend.portfolio = portfolio() algo._backend.order = assert_order getattr(algo, func)(target, amt) def test_order_in_init(): """ Test that calling order in initialize will raise an error. """ with pytest.raises(OrderDuringInitialize): algo = get_algo(''' def initialize(ctx): order(sid('asset-1'), 1) ''') simulate_init_and_handle(algo) def test_portfolio_in_init(): """ Test that accessing portfolio in init doesn't break. """ algo = get_algo(''' def initialize(ctx): ctx.portfolio ''') algo._backend.portfolio = {} simulate_init_and_handle(algo) def test_account_in_init(): """ Test that accessing portfolio in init doesn't break. """ algo = get_algo(''' def initialize(ctx): ctx.account ''') algo._backend.account = {} simulate_init_and_handle(algo) def test_long_only(): algo = get_algo(''' def initialize(ctx): set_long_only() ''') simulate_init_and_handle(algo) class portfolio: portfolio_value = 1000.0 positions = proto.Positions() class order: id = 'oid' algo._backend.portfolio = portfolio algo._backend.order = lambda *args, **kwrags: order() with pytest.raises(TradingControlViolation): algo.order(algo.sid('asset-1'), -1) algo.order(algo.sid('asset-1'), 1) def test_post_init(): algo = get_algo('') simulate_init_and_handle(algo) with pytest.raises(RegisterTradingControlPostInit): algo.set_max_position_size(algo.sid('asset-1'), 1, 1) with pytest.raises(RegisterTradingControlPostInit): algo.set_max_order_size(algo.sid('asset-1'), 1, 1) with pytest.raises(RegisterTradingControlPostInit): algo.set_max_order_count(1) with pytest.raises(RegisterTradingControlPostInit): algo.set_long_only() def test_state_restore(): algo = get_algo(''' def handle_data(ctx, data): ctx.value = 1 ''') simulate_init_and_handle(algo) algo = get_algo(''' def handle_data(ctx, data): ctx.value = 1 ''') algo.initialize() assert algo.value == 1 # should fail with checksum check algo = get_algo(''' def handle_data(ctx, data): ctx.value = 1 ''', algoname='invalid', statefile='algo-state.pkl') with pytest.raises(ValueError): algo.initialize() def test_pipeline(): algo = get_algo('') pipe = Mock() algo.attach_pipeline(pipe, 'mock') import sys pkg = 'pipeline_live.engine' if pkg in sys.modules: del sys.modules[pkg] with pytest.raises(RuntimeError): algo.pipeline_output('mock') mod = Mock() sys.modules[pkg] = mod eng = Mock() def ctor(list_symbols): symbols = list_symbols() assert symbols[0] == 'ASSET0' return eng mod.LivePipelineEngine = ctor eng.run_pipeline.return_value = pd.DataFrame( [[42.0]], index=['ASSET0'], columns=['close']) res = algo.pipeline_output('mock') assert res.index[0].symbol == 'ASSET0' del sys.modules[pkg] def test_backend_param(): class Backend: pass bknd = Backend() algo = Algorithm(backend=bknd) assert algo._backend == bknd with pytest.raises(RuntimeError): Algorithm(backend='foo.does.not.exist')
the-stack_0_14166
import sys
import threading
import time
from io import StringIO
from typing import Optional

from labml.internal.api import ApiCaller, ApiDataSource, Packet

WARMUP_COMMITS = 5


class ApiLogs(ApiDataSource):
    api_caller: Optional[ApiCaller]
    frequency: float

    def __init__(self):
        super().__init__()

        self.api_caller = None
        self.frequency = 1
        self.last_committed = time.time()
        self.commits_count = 0
        self.data = {}
        self.lock = threading.Lock()

    def set_api(self, api_caller: ApiCaller, *, frequency: float):
        self.api_caller = api_caller
        self.frequency = frequency
        self.check_and_flush()

    def check_and_flush(self):
        if self.api_caller is None:
            return
        with self.lock:
            if not self.data:
                return
            t = time.time()
            freq = self.frequency
            if self.commits_count < WARMUP_COMMITS:
                freq /= 2 ** (WARMUP_COMMITS - self.commits_count)
            if self.data.get('stderr', '') != '' or self.commits_count == 0 or t - self.last_committed > freq:
                self.commits_count += 1
                self.api_caller.has_data(self)

    def _clean(self, data: str):
        last_newline = None
        remove = []
        for i in range(len(data)):
            if data[i] == '\r':
                if i + 1 < len(data) and data[i + 1] == '\n':
                    remove.append((i, i))
                elif last_newline is not None:
                    remove.append((last_newline + 1, i))
                last_newline = i
            elif data[i] == '\n':
                last_newline = i

        res = []
        offset = 0
        for r in remove:
            if offset < r[0]:
                res.append(data[offset: r[0]])
            offset = r[1] + 1
        res.append(data[offset:])

        return ''.join(res)

    def get_data_packet(self) -> Packet:
        with self.lock:
            self.last_committed = time.time()
            self.data['time'] = time.time()
            for type_ in ['stdout', 'logger']:
                if type_ not in self.data:
                    continue
                self.data[type_] = self._clean(self.data[type_])
            packet = Packet(self.data)
            self.data = {}
            return packet

    def outputs(self, *, stdout_: str = '', stderr_: str = '', logger_: str = ''):
        with self.lock:
            if stdout_ != '':
                self.data['stdout'] = self.data.get('stdout', '') + stdout_
            if stderr_ != '':
                self.data['stderr'] = self.data.get('stderr', '') + stderr_
            if logger_ != '':
                self.data['logger'] = self.data.get('logger', '') + logger_

        self.check_and_flush()


API_LOGS = ApiLogs()


class OutputStream(StringIO):
    def write(self, *args, **kwargs):  # real signature unknown
        super().write(*args, **kwargs)
        save = StringIO()
        save.write(*args, **kwargs)
        API_LOGS.outputs(**{self.type_: save.getvalue()})
        self.original.write(*args, **kwargs)

    def __init__(self, original, type_):  # real signature unknown
        super().__init__()
        self.type_ = type_
        self.original = original


_original_stdout_write = sys.stdout.write
_original_stderr_write = sys.stderr.write


def _write_stdout(*args, **kwargs):
    _original_stdout_write(*args, **kwargs)
    save = StringIO()
    save.write(*args, **kwargs)
    API_LOGS.outputs(stdout_=save.getvalue())


def _write_stderr(*args, **kwargs):
    _original_stderr_write(*args, **kwargs)
    save = StringIO()
    save.write(*args, **kwargs)
    API_LOGS.outputs(stderr_=save.getvalue())


def capture():
    sys.stdout.write = _write_stdout
    sys.stderr.write = _write_stderr


capture()
the-stack_0_14168
class NFCTransactionDialog(AnimatedPopup):

    mode = OptionProperty('send', options=('send', 'receive'))
    scanner = ObjectProperty(None)

    def __init__(self, **kwargs):
        # Delayed Init
        global NFCSCanner
        if NFCSCanner is None:
            from electrum_xuez_gui.kivy.nfc_scanner import NFCScanner
        self.scanner = NFCSCanner

        super(NFCTransactionDialog, self).__init__(**kwargs)
        self.scanner.nfc_init()
        self.scanner.bind()

    def on_parent(self, instance, value):
        sctr = self.ids.sctr
        if value:
            def _cmp(*l):
                anim = Animation(rotation=2, scale=1, opacity=1)
                anim.start(sctr)
                anim.bind(on_complete=_start)

            def _start(*l):
                anim = Animation(rotation=350, scale=2, opacity=0)
                anim.start(sctr)
                anim.bind(on_complete=_cmp)

            _start()
            return
        Animation.cancel_all(sctr)
the-stack_0_14169
""" Деменчук Г.М., вариант 6, стандартные задания """ import math import matplotlib.pyplot as plt class GraphClass: """ Вариант задания для операторов цикла + график Класс для вывода графика варианта задания для операторов цикла """ def __init__(self): self.graph() def graph(self): args = ([], []) x = 0.2 end_cycle = 0.8 while x != end_cycle: obj = MathUpper(x) args[0].append(x) args[1].append(obj.result) x = round(x + 0.1, 2) ax = plt.figure().gca() ax.plot(args[0], args[1], linewidth=2, marker="o") plt.show() class MathUpper: """ Вариант задания на условные операторы Класс для работы с первым заданием на условные операторы """ def __init__(self, x): self.x = x self.getter() def getter(self): x = self.x upper = x ** 3 * math.e ** (x - 1) lower = x ** 3 - math.fabs(x) if lower == 0: print("Знаменатель равен нулю, деление на 0!") self.result = 0 return first = upper / lower log_sqrt = math.sqrt(x) - x if log_sqrt >= 0: buf_log = math.log(log_sqrt, 2) else: print("Выражение в log[sqrt(x)-x,2] меньше 0!") self.result = 0 return self.result = first - buf_log class CycleClass: """ Вариант задания для операторов цикла Класс для вызова MathUpper в цикле """ def __init__(self): self.cycle() def cycle(self): x = 0.2 end_cycle = 0.8 while x != end_cycle: obj = MathUpper(x) print("x=", x, "result = ", obj.result) x = round(x + 0.1, 2) pass def main(): try: x = float(input("Введите x: ")) except: print("Проблема ввода данных!") return obj = MathUpper(x) print("\n*Условные операторы*") print("Результат:" + str(obj.result)) print("\n*Операторы цикла*") CycleClass() GraphClass() if __name__ == "__main__": main()
the-stack_0_14170
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- import uuid from msrest.pipeline import ClientRawResponse from .. import models class MetricDefinitionsOperations(object): """MetricDefinitionsOperations operations. You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. :ivar api_version: Client Api Version. Constant value: "2018-01-01". """ models = models def __init__(self, client, config, serializer, deserializer): self._client = client self._serialize = serializer self._deserialize = deserializer self.api_version = "2018-01-01" self.config = config def list( self, resource_uri, metricnamespace=None, custom_headers=None, raw=False, **operation_config): """Lists the metric definitions for the resource. :param resource_uri: The identifier of the resource. :type resource_uri: str :param metricnamespace: Metric namespace to query metric definitions for. :type metricnamespace: str :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. 
:return: An iterator like instance of MetricDefinition :rtype: ~azure.mgmt.monitor.v2018_01_01.models.MetricDefinitionPaged[~azure.mgmt.monitor.v2018_01_01.models.MetricDefinition] :raises: :class:`ErrorResponseException<azure.mgmt.monitor.v2018_01_01.models.ErrorResponseException>` """ def prepare_request(next_link=None): if not next_link: # Construct URL url = self.list.metadata['url'] path_format_arguments = { 'resourceUri': self._serialize.url("resource_uri", resource_uri, 'str', skip_quote=True) } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') if metricnamespace is not None: query_parameters['metricnamespace'] = self._serialize.query("metricnamespace", metricnamespace, 'str') else: url = next_link query_parameters = {} # Construct headers header_parameters = {} header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: header_parameters.update(custom_headers) if self.config.accept_language is not None: header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request request = self._client.get(url, query_parameters, header_parameters) return request def internal_paging(next_link=None): request = prepare_request(next_link) response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: raise models.ErrorResponseException(self._deserialize, response) return response # Deserialize response header_dict = None if raw: header_dict = {} deserialized = models.MetricDefinitionPaged(internal_paging, self._deserialize.dependencies, header_dict) return deserialized list.metadata = {'url': '/{resourceUri}/providers/microsoft.insights/metricDefinitions'}
the-stack_0_14172
from topaz.module import ClassDef
from topaz.objects.objectobject import W_Object


class W_ProcObject(W_Object):
    classdef = ClassDef("Proc", W_Object.classdef)

    def __init__(self, space, bytecode, w_self, lexical_scope, cells, block,
                 parent_interp, top_parent_interp, regexp_match_cell, is_lambda):
        W_Object.__init__(self, space)
        self.bytecode = bytecode
        self.w_self = w_self
        self.lexical_scope = lexical_scope
        self.cells = cells
        self.block = block
        self.parent_interp = parent_interp
        self.top_parent_interp = top_parent_interp
        self.regexp_match_cell = regexp_match_cell
        self.is_lambda = is_lambda

    def copy(self, space, w_self=None, lexical_scope=None, is_lambda=False):
        return W_ProcObject(
            space, self.bytecode, w_self or self.w_self,
            lexical_scope or self.lexical_scope, self.cells, self.block,
            self.parent_interp, self.top_parent_interp,
            self.regexp_match_cell, is_lambda or self.is_lambda
        )

    @classdef.singleton_method("new")
    def method_new(self, space, block):
        if block is None:
            raise space.error(space.w_ArgumentError, "tried to create Proc object without a block")
        return block.copy(space)

    method_allocate = classdef.undefine_allocator()

    @classdef.method("yield")
    @classdef.method("===")
    @classdef.method("[]")
    @classdef.method("call")
    def method_call(self, space, args_w, block):
        from topaz.interpreter import RaiseReturn, RaiseBreak
        try:
            return space.invoke_block(self, args_w, block_arg=block)
        except RaiseReturn as e:
            if self.is_lambda:
                return e.w_value
            else:
                raise
        except RaiseBreak as e:
            if self.is_lambda:
                return e.w_value
            else:
                raise space.error(space.w_LocalJumpError, "break from proc-closure")

    @classdef.method("lambda?")
    def method_lambda(self, space):
        return space.newbool(self.is_lambda)

    @classdef.method("arity")
    def method_arity(self, space):
        return space.newint(self.bytecode.arity(negative_defaults=self.is_lambda))

    @classdef.method("binding")
    def method_binding(self, space):
        return space.newbinding_fromblock(self)
the-stack_0_14173
# # Copyright 2013-2014 eNovance <[email protected]> # # Authors: Mehdi Abaakouk <[email protected]> # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid from ceilometerclient.v2 import alarms import eventlet from oslo.config import fixture as fixture_config from oslo.utils import timeutils import six from ceilometer.alarm import rpc as rpc_alarm from ceilometer.alarm.storage import models from ceilometer import messaging from ceilometer.tests import base as tests_base class FakeNotifier(object): def __init__(self, transport): self.rpc = messaging.get_rpc_server( transport, "alarm_notifier", self) self.notified = [] def start(self, expected_length): self.expected_length = expected_length self.rpc.start() def notify_alarm(self, context, data): self.notified.append(data) if len(self.notified) == self.expected_length: self.rpc.stop() class TestRPCAlarmNotifier(tests_base.BaseTestCase): def setUp(self): super(TestRPCAlarmNotifier, self).setUp() self.CONF = self.useFixture(fixture_config.Config()).conf self.setup_messaging(self.CONF) self.notifier_server = FakeNotifier(self.transport) self.notifier = rpc_alarm.RPCAlarmNotifier() self.alarms = [ alarms.Alarm(None, info={ 'name': 'instance_running_hot', 'meter_name': 'cpu_util', 'comparison_operator': 'gt', 'threshold': 80.0, 'evaluation_periods': 5, 'statistic': 'avg', 'state': 'ok', 'ok_actions': ['http://host:8080/path'], 'user_id': 'foobar', 'project_id': 'snafu', 'period': 60, 'alarm_id': str(uuid.uuid4()), 'matching_metadata':{'resource_id': 'my_instance'} }), alarms.Alarm(None, info={ 'name': 'group_running_idle', 'meter_name': 'cpu_util', 'comparison_operator': 'le', 'threshold': 10.0, 'statistic': 'max', 'evaluation_periods': 4, 'state': 'insufficient data', 'insufficient_data_actions': ['http://other_host/path'], 'user_id': 'foobar', 'project_id': 'snafu', 'period': 300, 'alarm_id': str(uuid.uuid4()), 'matching_metadata':{'metadata.user_metadata.AS': 'my_group'} }), ] def test_rpc_target(self): topic = self.notifier.client.target.topic self.assertEqual('alarm_notifier', topic) def test_notify_alarm(self): self.notifier_server.start(2) previous = ['alarm', 'ok'] for i, a in enumerate(self.alarms): self.notifier.notify(a, previous[i], "what? %d" % i, {'fire': '%d' % i}) self.notifier_server.rpc.wait() self.assertEqual(2, len(self.notifier_server.notified)) for i, a in enumerate(self.alarms): actions = getattr(a, models.Alarm.ALARM_ACTIONS_MAP[a.state]) self.assertEqual(self.alarms[i].alarm_id, self.notifier_server.notified[i]["alarm_id"]) self.assertEqual(actions, self.notifier_server.notified[i]["actions"]) self.assertEqual(previous[i], self.notifier_server.notified[i]["previous"]) self.assertEqual(self.alarms[i].state, self.notifier_server.notified[i]["current"]) self.assertEqual("what? 
%d" % i, self.notifier_server.notified[i]["reason"]) self.assertEqual({'fire': '%d' % i}, self.notifier_server.notified[i]["reason_data"]) def test_notify_non_string_reason(self): self.notifier_server.start(1) self.notifier.notify(self.alarms[0], 'ok', 42, {}) self.notifier_server.rpc.wait() reason = self.notifier_server.notified[0]['reason'] self.assertIsInstance(reason, six.string_types) def test_notify_no_actions(self): alarm = alarms.Alarm(None, info={ 'name': 'instance_running_hot', 'meter_name': 'cpu_util', 'comparison_operator': 'gt', 'threshold': 80.0, 'evaluation_periods': 5, 'statistic': 'avg', 'state': 'ok', 'user_id': 'foobar', 'project_id': 'snafu', 'period': 60, 'ok_actions': [], 'alarm_id': str(uuid.uuid4()), 'matching_metadata': {'resource_id': 'my_instance'} }) self.notifier.notify(alarm, 'alarm', "what?", {}) self.assertEqual(0, len(self.notifier_server.notified)) class FakeCoordinator(object): def __init__(self, transport): self.rpc = messaging.get_rpc_server( transport, "alarm_partition_coordination", self) self.notified = [] def presence(self, context, data): self._record('presence', data) def allocate(self, context, data): self._record('allocate', data) def assign(self, context, data): self._record('assign', data) def _record(self, method, data): self.notified.append((method, data)) self.rpc.stop() class TestRPCAlarmPartitionCoordination(tests_base.BaseTestCase): def setUp(self): super(TestRPCAlarmPartitionCoordination, self).setUp() self.CONF = self.useFixture(fixture_config.Config()).conf self.setup_messaging(self.CONF) self.coordinator_server = FakeCoordinator(self.transport) self.coordinator_server.rpc.start() eventlet.sleep() # must be sure that fanout queue is created self.coordination = rpc_alarm.RPCAlarmPartitionCoordination() self.alarms = [ alarms.Alarm(None, info={ 'name': 'instance_running_hot', 'meter_name': 'cpu_util', 'comparison_operator': 'gt', 'threshold': 80.0, 'evaluation_periods': 5, 'statistic': 'avg', 'state': 'ok', 'ok_actions': ['http://host:8080/path'], 'user_id': 'foobar', 'project_id': 'snafu', 'period': 60, 'alarm_id': str(uuid.uuid4()), 'matching_metadata':{'resource_id': 'my_instance'} }), alarms.Alarm(None, info={ 'name': 'group_running_idle', 'meter_name': 'cpu_util', 'comparison_operator': 'le', 'threshold': 10.0, 'statistic': 'max', 'evaluation_periods': 4, 'state': 'insufficient data', 'insufficient_data_actions': ['http://other_host/path'], 'user_id': 'foobar', 'project_id': 'snafu', 'period': 300, 'alarm_id': str(uuid.uuid4()), 'matching_metadata':{'metadata.user_metadata.AS': 'my_group'} }), ] def test_coordination_presence(self): id = str(uuid.uuid4()) priority = float(timeutils.utcnow().strftime('%s.%f')) self.coordination.presence(id, priority) self.coordinator_server.rpc.wait() method, args = self.coordinator_server.notified[0] self.assertEqual(id, args['uuid']) self.assertEqual(priority, args['priority']) self.assertEqual('presence', method) def test_coordination_assign(self): id = str(uuid.uuid4()) self.coordination.assign(id, self.alarms) self.coordinator_server.rpc.wait() method, args = self.coordinator_server.notified[0] self.assertEqual(id, args['uuid']) self.assertEqual(2, len(args['alarms'])) self.assertEqual('assign', method) def test_coordination_allocate(self): id = str(uuid.uuid4()) self.coordination.allocate(id, self.alarms) self.coordinator_server.rpc.wait() method, args = self.coordinator_server.notified[0] self.assertEqual(id, args['uuid']) self.assertEqual(2, len(args['alarms'])) self.assertEqual('allocate', 
method)
the-stack_0_14175
import datetime
import os

# =================================================
# Background Information
# -------------------------------------------------
mip = "cmip5"
exp = "historical"
frequency = "mo"
realm = "atm"

# =================================================
# Analysis Options
# -------------------------------------------------
variability_mode = "NAM"  # Available domains: NAM, NAO, SAM, PNA, PDO
seasons = ["DJF", "MAM", "JJA", "SON"]  # Available seasons: DJF, MAM, JJA, SON, monthly, yearly

RemoveDomainMean = True  # Remove Domain Mean from each time step (default=True)
EofScaling = False  # Convert EOF pattern as unit variance (default=False)
landmask = False  # Maskout land region thus consider only ocean grid (default=False)

ConvEOF = True  # Calculate conventional EOF for model
CBF = True  # Calculate Common Basis Function (CBF) for model

# =================================================
# Miscellaneous
# -------------------------------------------------
update_json = True  # False
debug = False  # False

# =================================================
# Observation
# -------------------------------------------------
reference_data_name = "NOAA-CIRES_20CR"
reference_data_path = os.path.join(
    "/p/user_pub/PCMDIobs/PCMDIobs2/atmos/mon/psl/20CR/gn/v20200707",
    "psl_mon_20CR_BE_gn_v20200707_187101-201212.nc",
)

varOBS = "psl"
ObsUnitsAdjust = (True, "divide", 100.0)  # Pa to hPa; or (False, 0, 0)

osyear = 1900
oeyear = 2005

eofn_obs = 1

# =================================================
# Models
# -------------------------------------------------
modpath = os.path.join(
    "/p/user_pub/pmp/pmp_results/pmp_v1.1.2/additional_xmls/latest/v20200116",
    "%(mip)/%(exp)/atmos/mon/%(variable)",
    "%(mip).%(exp).%(model).%(realization).mon.%(variable).xml",
)

modnames = [
    "ACCESS1-0", "ACCESS1-3", "BCC-CSM1-1", "BCC-CSM1-1-M", "BNU-ESM", "CanCM4",
    "CanESM2", "CCSM4", "CESM1-BGC", "CESM1-CAM5", "CESM1-FASTCHEM", "CESM1-WACCM",
    "CMCC-CESM", "CMCC-CM", "CMCC-CMS", "CNRM-CM5", "CNRM-CM5-2", "CSIRO-Mk3-6-0",
    "EC-EARTH", "FGOALS-g2", "FGOALS-s2", "FIO-ESM", "FIO-ESM", "GFDL-CM2p1",
    "GFDL-CM3", "GFDL-ESM2G", "GFDL-ESM2M", "GISS-E2-H", "GISS-E2-H-CC", "GISS-E2-R",
    "GISS-E2-R-CC", "HadCM3", "HadGEM2-AO", "HadGEM2-CC", "HadGEM2-ES", "INMCM4",
    "IPSL-CM5A-LR", "IPSL-CM5A-MR", "IPSL-CM5B-LR", "MIROC-ESM", "MIROC-ESM-CHEM",
    "MIROC4h", "MIROC5", "MPI-ESM-LR", "MPI-ESM-MR", "MPI-ESM-P", "NorESM1-M",
    "NorESM1-ME",
]

modnames = ["all"]
# modnames = ['ACCESS1-0']

realization = "*"  # realizations
# realization = 'r1i1p1'

varModel = "psl"
ModUnitsAdjust = (True, "divide", 100.0)  # Pa to hPa

msyear = 1900
meyear = 2005

eofn_mod = 1

# =================================================
# Output
# -------------------------------------------------
case_id = "{:v%Y%m%d}".format(datetime.datetime.now())
pmprdir = "/p/user_pub/pmp/pmp_results/pmp_v1.1.2"
if debug:
    pmprdir = "/work/lee1043/imsi/result_test"

results_dir = os.path.join(
    pmprdir,
    "%(output_type)",
    "variability_modes",
    "%(mip)",
    "%(exp)",
    "%(case_id)",
    "%(variability_mode)",
    "%(reference_data_name)",
)

nc_out = True  # Write output in NetCDF
plot = True  # Create map graphics
the-stack_0_14176
import numpy as np


def get_samples_complex(fp, n):
    z = fp.read(2 * n)
    if len(z) != 2 * n:
        return None
    s = np.fromstring(z, dtype='int8')
    s.shape = (n, 2)
    x = np.empty(n, dtype='c8')
    x.real = s[:, 0]
    x.imag = s[:, 1]
    return x
the-stack_0_14177
import sys
import os
import cfnresponse
import boto3
import botocore
import json
import logging

logger = logging.getLogger()
logger.setLevel(os.getenv("LOG_LEVEL", "DEBUG"))


def lambda_handler(event, context):
    try:
        logger.debug("Received event: {}".format(json.dumps(event)))
        result = cfnresponse.SUCCESS
        reason = None
        client = boto3.client("iam")

        # Pull identifiers from the request (passed as Properties in the custom resource)
        role_names = event["ResourceProperties"].get("RoleNames", [])
        role_arns = {}
        missing_roles = []

        if event["RequestType"] in ["Create", "Update"]:
            for name in role_names:
                key = name.split("-")[-1]  # Strip the leading ProjectName from role name
                try:
                    logger.debug(f"Checking Account Roles for {name}")
                    role = client.get_role(RoleName=name)["Role"]
                    role_arn = role["Arn"]
                    logger.debug(f"Role already exists: {role_arn}")
                    role_arns[key + "Arn"] = role_arn
                    role_arns[key + "Name"] = role["RoleName"]
                except botocore.exceptions.ClientError as e:
                    if e.response["Error"]["Code"] in ["NoSuchEntity", "AccessDenied"]:
                        logger.error(f"{name} Role does not exist")
                        # The roles should be deployed all at once or not at all (via the supplied template);
                        # therefore, it does not make sense to proceed with the deployment if one of them is missing
                        result = cfnresponse.FAILED
                        missing_roles.append(name)
                    else:
                        logger.error("Uncaught boto exception", e)
                        result = cfnresponse.FAILED
        elif event["RequestType"] == "Delete":
            logger.info("Delete request - NOOP")
            result = cfnresponse.SUCCESS
    except Exception as e:
        logger.error("Error: {}".format(e))
        result = cfnresponse.FAILED

    responseData = role_arns
    if result == cfnresponse.FAILED:
        reason = ("Required roles were not found in account; please use or refer to the ast-iam-role template for a "
                  "list of required roles. The following roles were not found: " + ", ".join(missing_roles))

    logger.info("Returning response of: {}, with result of: {}".format(result, responseData))
    sys.stdout.flush()
    # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cfn-lambda-function-code-cfnresponsemodule.html
    cfnresponse.send(event, context, result, responseData, reason=reason)
the-stack_0_14178
import math from typing import Sequence, Tuple import torch from torch import nn from torch.nn import functional as F from tensorfn.config import config_model from pydantic import StrictInt, StrictFloat from .layer import DropPath, tuple2, PositionwiseFeedForward LayerNorm = lambda x: nn.LayerNorm(x, eps=1e-6) def patchify(input, size): batch, height, width, dim = input.shape return ( input.view(batch, height // size, size, width // size, size, dim) .permute(0, 1, 3, 2, 4, 5) .reshape(batch, height // size, width // size, -1) ) class PositionalEncodingGenerator(nn.Module): def __init__(self, dim): super().__init__() self.proj = nn.Conv2d(dim, dim, 3, padding=1, bias=False, groups=dim) def forward(self, input): out = input.permute(0, 3, 1, 2) out = self.proj(out) + out out = out.permute(0, 2, 3, 1) return out class MultiHeadedAttention(nn.Module): def __init__(self, dim, n_head, reduction=1, dropout=0): super().__init__() self.dim_head = dim // n_head self.n_head = n_head self.linear_q = nn.Linear(dim, dim, bias=False) self.linear_kv = nn.Linear(dim, dim * 2, bias=False) self.linear = nn.Linear(dim, dim) self.dropout = dropout self.reduction = reduction if self.reduction > 1: self.reduce_conv = nn.Conv2d( dim, dim, self.reduction, stride=self.reduction ) def forward(self, input): batch_size, height, width, _ = input.shape def reshape(input): return input.reshape(batch_size, -1, self.n_head, self.dim_head).transpose( 1, 2 ) query = reshape(self.linear_q(input)) if self.reduction > 1: dim = input.shape[-1] reduc = input.transpose(1, 2).reshape(batch_size, dim, height, width) reduc = self.reduce_conv(reduc).reshape(batch_size, dim, -1).transpose(1, 2) kv = reduc else: kv = input key, value = self.linear_kv(kv).chunk(2, dim=2) key = reshape(key).transpose(2, 3) value = reshape(value) score = query @ key / math.sqrt(self.dim_head) attn = F.softmax(score, 3) attn = F.dropout(attn, self.dropout, training=self.training) out = attn @ value out = out.transpose(1, 2).reshape( batch_size, height, width, self.dim_head * self.n_head ) out = self.linear(out) return out class MultiHeadedLocalAttention(nn.Module): def __init__(self, dim, n_head, dim_head, window_size, dropout=0): super().__init__() self.dim_head = dim_head self.n_head = n_head self.weight = nn.Linear(dim, n_head * dim_head * 3, bias=True) self.linear = nn.Linear(n_head * dim_head, dim) self.window_size = window_size self.dropout = dropout def forward(self, input): batch, height, width, dim = input.shape h_stride = height // self.window_size w_stride = width // self.window_size window = self.window_size def reshape(input): return ( input.reshape( batch, h_stride, window, w_stride, window, self.n_head, self.dim_head, ) .permute(0, 1, 3, 5, 2, 4, 6) .reshape(batch, -1, self.n_head, window * window, self.dim_head) ) query, key, value = self.weight(input).chunk(3, dim=-1) # B, S, H, W^2, D query = reshape(query) key = reshape(key).transpose(-2, -1) value = reshape(value) score = query @ key / math.sqrt(self.dim_head) # B, S, H, W^2, W^2 attn = F.softmax(score, -1) attn = F.dropout(attn, self.dropout, training=self.training) out = attn @ value # B, S, H, W^2, D out = ( out.view( batch, h_stride, w_stride, self.n_head, window, window, self.dim_head ) .permute(0, 1, 4, 2, 5, 3, 6) .reshape(batch, height, width, self.n_head * self.dim_head) ) out = self.linear(out) return out class TransformerLayer(nn.Module): def __init__( self, dim, n_head, dim_head, dim_ff, window_size, activation=nn.SiLU, drop_ff=0, drop_attn=0, drop_path=0, ): 
super().__init__() self.norm_attn_local = LayerNorm(dim) self.attn_local = MultiHeadedLocalAttention( dim, n_head, dim_head, window_size, drop_attn ) self.norm_ff_local = LayerNorm(dim) self.ff_local = PositionwiseFeedForward( dim, dim_ff, activation=activation, dropout=drop_ff ) self.norm_attn_global = LayerNorm(dim) self.attn_global = MultiHeadedAttention(dim, n_head, window_size, drop_attn) self.norm_ff_global = LayerNorm(dim) self.ff_global = PositionwiseFeedForward( dim, dim_ff, activation=activation, dropout=drop_ff ) self.drop_path = DropPath(drop_path) def set_drop_path(self, p): self.drop_path.p = p def forward(self, input): out = input + self.drop_path(self.attn_local(self.norm_attn_local(input))) out = out + self.drop_path(self.ff_local(self.norm_ff_local(out))) out = out + self.drop_path(self.attn_global(self.norm_attn_global(out))) out = out + self.drop_path(self.ff_global(self.norm_ff_global(out))) return out class PatchEmbedding(nn.Module): def __init__(self, in_dim, out_dim, window_size): super().__init__() self.window_size = window_size self.linear = nn.Linear(in_dim * window_size * window_size, out_dim) self.norm = nn.LayerNorm(out_dim) def forward(self, input): out = patchify(input, self.window_size) out = self.linear(out) out = self.norm(out) return out def reduce_size(size, reduction): return (size[0] // reduction, size[1] // reduction) @config_model(name="twins_svt", namespace="model", use_type=True) class TwinsSVT(nn.Module): def __init__( self, n_class: StrictInt, depths: Tuple[StrictInt, StrictInt, StrictInt, StrictInt], dims: Tuple[StrictInt, StrictInt, StrictInt, StrictInt], dim_head: StrictInt, n_heads: Tuple[StrictInt, StrictInt, StrictInt, StrictInt], dim_ffs: Tuple[StrictInt, StrictInt, StrictInt, StrictInt], window_size: StrictInt, drop_ff: StrictFloat = 0.0, drop_attn: StrictFloat = 0.0, drop_path: StrictFloat = 0.0, ): super().__init__() self.depths = depths def make_block(i, in_dim, reduction): return self.make_block( depths[i], in_dim, dims[i], n_heads[i], dim_head, dim_ffs[i], window_size, reduction, drop_ff, drop_attn, ) self.block1 = make_block(0, 3, 4) self.block2 = make_block(1, dims[0], 2) self.block3 = make_block(2, dims[1], 2) self.block4 = make_block(3, dims[2], 2) self.final_linear = nn.Sequential(nn.LayerNorm(dims[-1])) linear = nn.Linear(dims[-1], n_class) nn.init.normal_(linear.weight, std=0.02) nn.init.zeros_(linear.bias) self.classifier = nn.Sequential(nn.AdaptiveAvgPool2d(1), nn.Flatten(1), linear) self.apply(self.init_weights) self.set_dropout(None, drop_path) def set_dropout(self, dropout, drop_path): n_blocks = sum(self.depths) dp_rate = [drop_path * float(i) / n_blocks for i in range(n_blocks)] i = 0 for block in self.block1: try: block.set_drop_path(dp_rate[i]) i += 1 except: continue for block in self.block2: try: block.set_drop_path(dp_rate[i]) i += 1 except: continue for block in self.block3: try: block.set_drop_path(dp_rate[i]) i += 1 except: continue for block in self.block4: try: block.set_drop_path(dp_rate[i]) i += 1 except: continue def init_weights(self, module): if isinstance(module, nn.Linear): nn.init.normal_(module.weight, std=0.02) if module.bias is not None: nn.init.zeros_(module.bias) elif isinstance(module, nn.LayerNorm): nn.init.ones_(module.weight) nn.init.zeros_(module.bias) def make_block( self, depth, in_dim, dim, n_head, dim_head, dim_ff, window_size, reduction, drop_ff, drop_attn, ): block = [PatchEmbedding(in_dim, dim, reduction)] for i in range(depth): block.append( TransformerLayer( dim, n_head, dim_head, 
dim_ff, window_size, drop_ff=drop_ff, drop_attn=drop_attn, ) ) if i == 0: block.append(PositionalEncodingGenerator(dim)) return nn.Sequential(*block) def forward(self, input): out = self.block1(input.permute(0, 2, 3, 1)) out = self.block2(out) out = self.block3(out) out = self.block4(out) out = self.final_linear(out).permute(0, 3, 1, 2) out = self.classifier(out) return out
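# A minimal shape-check sketch for the pieces defined above; it only assumes this
# module imports cleanly and exercises `patchify` and `PatchEmbedding` on random
# NHWC tensors. All sizes below are arbitrary illustration values.
if __name__ == "__main__":
    x = torch.randn(2, 32, 32, 3)  # batch, height, width, channels (NHWC)

    patches = patchify(x, 4)
    # Every 4x4 window is folded into the channel axis:
    # (2, 32, 32, 3) -> (2, 8, 8, 48)
    print(patches.shape)

    embed = PatchEmbedding(in_dim=3, out_dim=64, window_size=4)
    tokens = embed(x)
    # Linear projection of each 4x4 patch to a 64-dim token: (2, 8, 8, 64)
    print(tokens.shape)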
the-stack_0_14179
#!/usr/bin/env python # # esp-idf NVS partition generation tool. Tool helps in generating NVS-compatible # partition binary, with key-value pair entries provided via a CSV file. # # Copyright 2018 Espressif Systems (Shanghai) PTE LTD # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from __future__ import division, print_function import argparse import array import binascii import codecs import datetime import distutils.dir_util import os import random import struct import sys import zlib from builtins import bytes, int, range from io import open from itertools import zip_longest try: from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes except ImportError: print('The cryptography package is not installed.' 'Please refer to the Get Started section of the ESP-IDF Programming Guide for ' 'setting up the required packages.') raise VERSION1_PRINT = 'V1 - Multipage Blob Support Disabled' VERSION2_PRINT = 'V2 - Multipage Blob Support Enabled' def reverse_hexbytes(addr_tmp): addr = [] reversed_bytes = '' for i in range(0, len(addr_tmp), 2): addr.append(addr_tmp[i:i + 2]) reversed_bytes = ''.join(reversed(addr)) return reversed_bytes """ Class for standard NVS page structure """ class Page(object): PAGE_PARAMS = { 'max_size': 4096, 'max_old_blob_size': 1984, 'max_new_blob_size': 4000, 'max_entries': 126 } # Item type codes U8 = 0x01 I8 = 0x11 U16 = 0x02 I16 = 0x12 U32 = 0x04 I32 = 0x14 U64 = 0x08 I64 = 0x18 SZ = 0x21 BLOB = 0x41 BLOB_DATA = 0x42 BLOB_IDX = 0x48 # Few Page constants HEADER_SIZE = 32 BITMAPARRAY_OFFSET = 32 BITMAPARRAY_SIZE_IN_BYTES = 32 FIRST_ENTRY_OFFSET = 64 SINGLE_ENTRY_SIZE = 32 CHUNK_ANY = 0xFF ACTIVE = 0xFFFFFFFE FULL = 0xFFFFFFFC VERSION1 = 0xFF VERSION2 = 0xFE def __init__(self, page_num, version, is_rsrv_page=False): self.entry_num = 0 self.bitmap_array = array.array('B') self.version = version self.page_buf = bytearray(b'\xff') * Page.PAGE_PARAMS['max_size'] if not is_rsrv_page: self.bitmap_array = self.create_bitmap_array() self.set_header(page_num, version) def set_header(self, page_num, version): # set page state to active page_header = bytearray(b'\xff') * 32 page_state_active_seq = Page.ACTIVE struct.pack_into('<I', page_header, 0, page_state_active_seq) # set page sequence number struct.pack_into('<I', page_header, 4, page_num) # set version if version == Page.VERSION2: page_header[8] = Page.VERSION2 elif version == Page.VERSION1: page_header[8] = Page.VERSION1 # set header's CRC crc_data = bytes(page_header[4:28]) crc = zlib.crc32(crc_data, 0xFFFFFFFF) struct.pack_into('<I', page_header, 28, crc & 0xFFFFFFFF) self.page_buf[0:len(page_header)] = page_header def create_bitmap_array(self): bitarray = array.array('B') charsize = 32 # bitmaparray has 256 bits, hence 32 bytes fill = 255 # Fill all 8 bits with 1's bitarray.extend((fill,) * charsize) return bitarray def write_bitmaparray(self): bitnum = self.entry_num * 2 byte_idx = bitnum // 8 # Find byte index in the array bit_offset = bitnum & 7 # Find bit 
offset in given byte index mask = ~(1 << bit_offset) self.bitmap_array[byte_idx] &= mask start_idx = Page.BITMAPARRAY_OFFSET end_idx = Page.BITMAPARRAY_OFFSET + Page.BITMAPARRAY_SIZE_IN_BYTES self.page_buf[start_idx:end_idx] = self.bitmap_array def encrypt_entry(self, data_arr, tweak_arr, encr_key): # Encrypt 32 bytes of data using AES-XTS encryption backend = default_backend() plain_text = codecs.decode(data_arr, 'hex') tweak = codecs.decode(tweak_arr, 'hex') cipher = Cipher(algorithms.AES(encr_key), modes.XTS(tweak), backend=backend) encryptor = cipher.encryptor() encrypted_data = encryptor.update(plain_text) return encrypted_data def encrypt_data(self, data_input, no_of_entries, nvs_obj): # Set values needed for encryption and encrypt data byte wise encr_data_to_write = bytearray() data_len_needed = 64 # in hex tweak_len_needed = 32 # in hex key_len_needed = 64 init_tweak_val = '0' init_data_val = 'f' tweak_tmp = '' encr_key_input = None # Extract encryption key and tweak key from given key input if len(nvs_obj.encr_key) == key_len_needed: encr_key_input = nvs_obj.encr_key else: encr_key_input = codecs.decode(nvs_obj.encr_key, 'hex') rel_addr = nvs_obj.page_num * Page.PAGE_PARAMS['max_size'] + Page.FIRST_ENTRY_OFFSET if not isinstance(data_input, bytearray): byte_arr = bytearray(b'\xff') * 32 byte_arr[0:len(data_input)] = data_input data_input = byte_arr data_input = binascii.hexlify(data_input) entry_no = self.entry_num start_idx = 0 end_idx = start_idx + 64 for _ in range(0, no_of_entries): # Set tweak value offset = entry_no * Page.SINGLE_ENTRY_SIZE addr = hex(rel_addr + offset)[2:] addr_len = len(addr) if addr_len > 2: if not addr_len % 2: addr_tmp = addr else: addr_tmp = init_tweak_val + addr tweak_tmp = reverse_hexbytes(addr_tmp) tweak_val = tweak_tmp + (init_tweak_val * (tweak_len_needed - (len(tweak_tmp)))) else: tweak_val = addr + (init_tweak_val * (tweak_len_needed - len(addr))) # Encrypt data data_bytes = data_input[start_idx:end_idx] if type(data_bytes) == bytes: data_bytes = data_bytes.decode() data_val = data_bytes + (init_data_val * (data_len_needed - len(data_bytes))) encr_data_ret = self.encrypt_entry(data_val, tweak_val, encr_key_input) encr_data_to_write = encr_data_to_write + encr_data_ret # Update values for encrypting next set of data bytes start_idx = end_idx end_idx = start_idx + 64 entry_no += 1 return encr_data_to_write def write_entry_to_buf(self, data, entrycount,nvs_obj): encr_data = bytearray() if nvs_obj.encrypt: encr_data_ret = self.encrypt_data(data, entrycount,nvs_obj) encr_data[0:len(encr_data_ret)] = encr_data_ret data = encr_data data_offset = Page.FIRST_ENTRY_OFFSET + (Page.SINGLE_ENTRY_SIZE * self.entry_num) start_idx = data_offset end_idx = data_offset + len(data) self.page_buf[start_idx:end_idx] = data # Set bitmap array for entries in current page for i in range(0, entrycount): self.write_bitmaparray() self.entry_num += 1 def set_crc_header(self, entry_struct): crc_data = bytearray(b'28') crc_data[0:4] = entry_struct[0:4] crc_data[4:28] = entry_struct[8:32] crc_data = bytes(crc_data) crc = zlib.crc32(crc_data, 0xFFFFFFFF) struct.pack_into('<I', entry_struct, 4, crc & 0xFFFFFFFF) return entry_struct def write_varlen_binary_data(self, entry_struct, ns_index, key, data, data_size, total_entry_count, encoding, nvs_obj): chunk_start = 0 chunk_count = 0 chunk_index = Page.CHUNK_ANY offset = 0 remaining_size = data_size tailroom = None while True: chunk_size = 0 # Get the size available in current page tailroom = (Page.PAGE_PARAMS['max_entries'] - 
self.entry_num - 1) * Page.SINGLE_ENTRY_SIZE assert tailroom >= 0, 'Page overflow!!' # Split the binary data into two and store a chunk of available size onto curr page if tailroom < remaining_size: chunk_size = tailroom else: chunk_size = remaining_size remaining_size = remaining_size - chunk_size # Change type of data to BLOB_DATA entry_struct[1] = Page.BLOB_DATA # Calculate no. of entries data chunk will require datachunk_rounded_size = (chunk_size + 31) & ~31 datachunk_entry_count = datachunk_rounded_size // 32 datachunk_total_entry_count = datachunk_entry_count + 1 # +1 for the entry header # Set Span entry_struct[2] = datachunk_total_entry_count # Update the chunkIndex chunk_index = chunk_start + chunk_count entry_struct[3] = chunk_index # Set data chunk data_chunk = data[offset:offset + chunk_size] # Compute CRC of data chunk struct.pack_into('<H', entry_struct, 24, chunk_size) if type(data) != bytes: data_chunk = bytes(data_chunk, encoding='utf8') crc = zlib.crc32(data_chunk, 0xFFFFFFFF) struct.pack_into('<I', entry_struct, 28, crc & 0xFFFFFFFF) # compute crc of entry header entry_struct = self.set_crc_header(entry_struct) # write entry header self.write_entry_to_buf(entry_struct, 1,nvs_obj) # write actual data self.write_entry_to_buf(data_chunk, datachunk_entry_count,nvs_obj) chunk_count = chunk_count + 1 if remaining_size or (tailroom - chunk_size) < Page.SINGLE_ENTRY_SIZE: nvs_obj.create_new_page() self = nvs_obj.cur_page offset = offset + chunk_size # All chunks are stored, now store the index if not remaining_size: # Initialise data field to 0xff data_array = bytearray(b'\xff') * 8 entry_struct[24:32] = data_array # change type of data to BLOB_IDX entry_struct[1] = Page.BLOB_IDX # Set Span entry_struct[2] = 1 # Update the chunkIndex chunk_index = Page.CHUNK_ANY entry_struct[3] = chunk_index struct.pack_into('<I', entry_struct, 24, data_size) entry_struct[28] = chunk_count entry_struct[29] = chunk_start # compute crc of entry header entry_struct = self.set_crc_header(entry_struct) # write last entry self.write_entry_to_buf(entry_struct, 1,nvs_obj) break return entry_struct def write_single_page_entry(self, entry_struct, data, datalen, data_entry_count, nvs_obj): # compute CRC of data struct.pack_into('<H', entry_struct, 24, datalen) if type(data) != bytes: data = bytes(data, encoding='utf8') crc = zlib.crc32(data, 0xFFFFFFFF) struct.pack_into('<I', entry_struct, 28, crc & 0xFFFFFFFF) # compute crc of entry header entry_struct = self.set_crc_header(entry_struct) # write entry header self.write_entry_to_buf(entry_struct, 1, nvs_obj) # write actual data self.write_entry_to_buf(data, data_entry_count, nvs_obj) """ Low-level function to write variable length data into page buffer. Data should be formatted according to encoding specified. """ def write_varlen_data(self, key, data, encoding, ns_index,nvs_obj): # Set size of data datalen = len(data) if datalen > Page.PAGE_PARAMS['max_old_blob_size']: if self.version == Page.VERSION1: raise InputError(' Input File: Size (%d) exceeds max allowed length `%s` bytes for key `%s`.' % (datalen, Page.PAGE_PARAMS['max_old_blob_size'], key)) else: if encoding == 'string': raise InputError(' Input File: Size (%d) exceeds max allowed length `%s` bytes for key `%s`.' % (datalen, Page.PAGE_PARAMS['max_old_blob_size'], key)) # Calculate no. 
of entries data will require rounded_size = (datalen + 31) & ~31 data_entry_count = rounded_size // 32 total_entry_count = data_entry_count + 1 # +1 for the entry header # Check if page is already full and new page is needed to be created right away if self.entry_num >= Page.PAGE_PARAMS['max_entries']: raise PageFullError() elif (self.entry_num + total_entry_count) >= Page.PAGE_PARAMS['max_entries']: if not (self.version == Page.VERSION2 and encoding in ['hex2bin', 'binary', 'base64']): raise PageFullError() # Entry header entry_struct = bytearray(b'\xff') * 32 # Set Namespace Index entry_struct[0] = ns_index # Set Span if self.version == Page.VERSION2: if encoding == 'string': entry_struct[2] = data_entry_count + 1 # Set Chunk Index chunk_index = Page.CHUNK_ANY entry_struct[3] = chunk_index else: entry_struct[2] = data_entry_count + 1 # set key key_array = b'\x00' * 16 entry_struct[8:24] = key_array entry_struct[8:8 + len(key)] = key.encode() # set Type if encoding == 'string': entry_struct[1] = Page.SZ elif encoding in ['hex2bin', 'binary', 'base64']: entry_struct[1] = Page.BLOB if self.version == Page.VERSION2 and (encoding in ['hex2bin', 'binary', 'base64']): entry_struct = self.write_varlen_binary_data(entry_struct,ns_index,key,data, datalen,total_entry_count, encoding, nvs_obj) else: self.write_single_page_entry(entry_struct, data, datalen, data_entry_count, nvs_obj) """ Low-level function to write data of primitive type into page buffer. """ def write_primitive_data(self, key, data, encoding, ns_index,nvs_obj): # Check if entry exceeds max number of entries allowed per page if self.entry_num >= Page.PAGE_PARAMS['max_entries']: raise PageFullError() entry_struct = bytearray(b'\xff') * 32 entry_struct[0] = ns_index # namespace index entry_struct[2] = 0x01 # Span chunk_index = Page.CHUNK_ANY entry_struct[3] = chunk_index # write key key_array = b'\x00' * 16 entry_struct[8:24] = key_array entry_struct[8:8 + len(key)] = key.encode() if encoding == 'u8': entry_struct[1] = Page.U8 struct.pack_into('<B', entry_struct, 24, data) elif encoding == 'i8': entry_struct[1] = Page.I8 struct.pack_into('<b', entry_struct, 24, data) elif encoding == 'u16': entry_struct[1] = Page.U16 struct.pack_into('<H', entry_struct, 24, data) elif encoding == 'i16': entry_struct[1] = Page.I16 struct.pack_into('<h', entry_struct, 24, data) elif encoding == 'u32': entry_struct[1] = Page.U32 struct.pack_into('<I', entry_struct, 24, data) elif encoding == 'i32': entry_struct[1] = Page.I32 struct.pack_into('<i', entry_struct, 24, data) elif encoding == 'u64': entry_struct[1] = Page.U64 struct.pack_into('<Q', entry_struct, 24, data) elif encoding == 'i64': entry_struct[1] = Page.I64 struct.pack_into('<q', entry_struct, 24, data) # Compute CRC crc_data = bytearray(b'28') crc_data[0:4] = entry_struct[0:4] crc_data[4:28] = entry_struct[8:32] crc_data = bytes(crc_data) crc = zlib.crc32(crc_data, 0xFFFFFFFF) struct.pack_into('<I', entry_struct, 4, crc & 0xFFFFFFFF) # write to file self.write_entry_to_buf(entry_struct, 1,nvs_obj) """ Get page buffer data of a given page """ def get_data(self): return self.page_buf """ NVS class encapsulates all NVS specific operations to create a binary with given key-value pairs. Binary can later be flashed onto device via a flashing utility. 
""" class NVS(object): def __init__(self, fout, input_size, version, encrypt=False, key_input=None): self.size = input_size self.encrypt = encrypt self.encr_key = None self.namespace_idx = 0 self.page_num = -1 self.pages = [] self.version = version self.fout = fout if self.encrypt: self.encr_key = key_input self.cur_page = self.create_new_page(version) def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): if exc_type is None and exc_value is None: # Create pages for remaining available size while True: try: self.create_new_page() except InsufficientSizeError: self.size = None # Creating the last reserved page self.create_new_page(is_rsrv_page=True) break result = self.get_binary_data() self.fout.write(result) def create_new_page(self, version=None, is_rsrv_page=False): # Set previous page state to FULL before creating new page if self.pages: curr_page_state = struct.unpack('<I', self.cur_page.page_buf[0:4])[0] if curr_page_state == Page.ACTIVE: page_state_full_seq = Page.FULL struct.pack_into('<I', self.cur_page.page_buf, 0, page_state_full_seq) # Set version for NVS binary generated version = self.version # Update available size as each page is created if self.size == 0: raise InsufficientSizeError('Error: Size parameter is less than the size of data in csv.Please increase size.') if not is_rsrv_page: self.size = self.size - Page.PAGE_PARAMS['max_size'] self.page_num += 1 # Set version for each page and page header new_page = Page(self.page_num, version, is_rsrv_page) self.pages.append(new_page) self.cur_page = new_page return new_page """ Write namespace entry and subsequently increase namespace count so that all upcoming entries will be mapped to a new namespace. """ def write_namespace(self, key): self.namespace_idx += 1 try: self.cur_page.write_primitive_data(key, self.namespace_idx, 'u8', 0,self) except PageFullError: new_page = self.create_new_page() new_page.write_primitive_data(key, self.namespace_idx, 'u8', 0,self) """ Write key-value pair. Function accepts value in the form of ascii character and converts it into appropriate format before calling Page class's functions to write entry into NVS format. Function handles PageFullError and creates a new page and re-invokes the function on a new page. We don't have to guard re-invocation with try-except since no entry can span multiple pages. """ def write_entry(self, key, value, encoding): if encoding == 'hex2bin': value = value.strip() if len(value) % 2 != 0: raise InputError('%s: Invalid data length. Should be multiple of 2.' 
% key) value = binascii.a2b_hex(value) if encoding == 'base64': value = binascii.a2b_base64(value) if encoding == 'string': if type(value) == bytes: value = value.decode() value += '\0' encoding = encoding.lower() varlen_encodings = ['string', 'binary', 'hex2bin', 'base64'] primitive_encodings = ['u8', 'i8', 'u16', 'i16', 'u32', 'i32', 'u64', 'i64'] if encoding in varlen_encodings: try: self.cur_page.write_varlen_data(key, value, encoding, self.namespace_idx,self) except PageFullError: new_page = self.create_new_page() new_page.write_varlen_data(key, value, encoding, self.namespace_idx,self) elif encoding in primitive_encodings: try: self.cur_page.write_primitive_data(key, int(value), encoding, self.namespace_idx,self) except PageFullError: new_page = self.create_new_page() new_page.write_primitive_data(key, int(value), encoding, self.namespace_idx,self) else: raise InputError('%s: Unsupported encoding' % encoding) """ Return accumulated data of all pages """ def get_binary_data(self): data = bytearray() for page in self.pages: data += page.get_data() return data class PageFullError(RuntimeError): """ Represents error when current page doesn't have sufficient entries left to accommodate current request """ def __init__(self): super(PageFullError, self).__init__() class InputError(RuntimeError): """ Represents error on the input """ def __init__(self, e): print('\nError:') super(InputError, self).__init__(e) class InsufficientSizeError(RuntimeError): """ Represents error when NVS Partition size given is insufficient to accomodate the data in the given csv file """ def __init__(self, e): super(InsufficientSizeError, self).__init__(e) def nvs_open(result_obj, input_size, version=None, is_encrypt=False, key=None): """ Wrapper to create and NVS class object. This object can later be used to set key-value pairs :param result_obj: File/Stream object to dump resultant binary. If data is to be dumped into memory, one way is to use BytesIO object :param input_size: Size of Partition :return: NVS class instance """ return NVS(result_obj, input_size, version, encrypt=is_encrypt, key_input=key) def write_entry(nvs_instance, key, datatype, encoding, value): """ Wrapper to set key-value pair in NVS format :param nvs_instance: Instance of an NVS class returned by nvs_open() :param key: Key of the data :param datatype: Data type. Valid values are "file", "data" and "namespace" :param encoding: Data encoding. 
Valid values are "u8", "i8", "u16", "i16", "u32", "i32", "u64", "i64", "string", "binary", "hex2bin" and "base64" :param value: Data value in ascii encoded string format for "data" datatype and filepath for "file" datatype :return: None """ if datatype == 'file': abs_file_path = value if os.path.isabs(value) is False: script_dir = os.getcwd() abs_file_path = os.path.join(script_dir, value) with open(abs_file_path, 'rb') as f: value = f.read() if datatype == 'namespace': nvs_instance.write_namespace(key) else: nvs_instance.write_entry(key, value, encoding) def nvs_close(nvs_instance): """ Wrapper to finish writing to NVS and write data to file/stream object provided to nvs_open method :param nvs_instance: Instance of NVS class returned by nvs_open() :return: None """ nvs_instance.__exit__(None, None, None) def check_size(size): ''' Checks for input partition size :param size: Input partition size ''' try: # Set size input_size = int(size, 0) if input_size % 4096 != 0: sys.exit('Size of partition must be multiple of 4096') # Update size as a page needs to be reserved of size 4KB input_size = input_size - Page.PAGE_PARAMS['max_size'] if input_size < (2 * Page.PAGE_PARAMS['max_size']): sys.exit('Minimum NVS partition size needed is 0x3000 bytes.') return input_size except Exception as e: print(e) sys.exit(0) def set_target_filepath(outdir, filepath): ''' Set target file path: <outdir>/<filepath> :param outdir: Target output dir to store files :param filepath: Path of target file ''' bin_ext = '.bin' # Expand if tilde(~) provided in path outdir = os.path.expanduser(outdir) if filepath: key_file_name, ext = os.path.splitext(filepath) if not ext: filepath = key_file_name + bin_ext elif bin_ext not in ext: sys.exit('Error: `%s`. Only `%s` extension allowed.' % (filepath, bin_ext)) # Create dir if does not exist if not (os.path.isdir(outdir)): distutils.dir_util.mkpath(outdir) filedir, filename = os.path.split(filepath) filedir = os.path.join(outdir,filedir,'') if filedir and not os.path.isdir(filedir): distutils.dir_util.mkpath(filedir) if os.path.isabs(filepath): if not outdir == os.getcwd(): print('\nWarning: `%s` \n\t==> absolute path given so outdir is ignored for this file.' % filepath) # Set to empty as outdir is ignored here outdir = '' # Set full path - outdir + filename filepath = os.path.join(outdir, '') + filepath return outdir, filepath def encrypt(args): ''' Generate encrypted NVS Partition :param args: Command line arguments given ''' key = None bin_ext = '.bin' check_size(args.size) if (args.keygen is False) and (not args.inputkey): sys.exit('Error. --keygen or --inputkey argument needed.') elif args.keygen and args.inputkey: sys.exit('Error. --keygen and --inputkey both are not allowed.') elif not args.keygen and args.keyfile: print('\nWarning:','--inputkey argument is given. --keyfile argument will be ignored...') if args.inputkey: # Check if key file has .bin extension filename, ext = os.path.splitext(args.inputkey) if bin_ext not in ext: sys.exit('Error: `%s`. Only `%s` extension allowed.' 
% (args.inputkey, bin_ext)) key = bytearray() with open(args.inputkey, 'rb') as key_f: key = key_f.read(64) # Generate encrypted NVS Partition generate(args, is_encr_enabled=True, encr_key=key) def decrypt_data(data_input, decr_key, page_num, entry_no, entry_size): ''' Decrypt NVS data entry ''' page_max_size = 4096 first_entry_offset = 64 init_tweak_val = '0' tweak_len_needed = 32 # in hex tweak_tmp = '' data_input = binascii.hexlify(data_input) rel_addr = page_num * page_max_size + first_entry_offset # Set tweak value offset = entry_no * entry_size addr = hex(rel_addr + offset)[2:] addr_len = len(addr) if addr_len > 2: if not addr_len % 2: addr_tmp = addr else: addr_tmp = init_tweak_val + addr tweak_tmp = reverse_hexbytes(addr_tmp) tweak_val = tweak_tmp + (init_tweak_val * (tweak_len_needed - (len(tweak_tmp)))) else: tweak_val = addr + (init_tweak_val * (tweak_len_needed - len(addr))) if type(data_input) == bytes: data_input = data_input.decode() # Decrypt 32 bytes of data using AES-XTS decryption backend = default_backend() plain_text = codecs.decode(data_input, 'hex') tweak = codecs.decode(tweak_val, 'hex') cipher = Cipher(algorithms.AES(decr_key), modes.XTS(tweak), backend=backend) decryptor = cipher.decryptor() decrypted_data = decryptor.update(plain_text) return decrypted_data def decrypt(args): ''' Decrypt encrypted NVS Partition :param args: Command line arguments given ''' bin_ext = '.bin' nvs_read_bytes = 32 decrypted_entry_no = 0 file_entry_no = 0 page_num = 0 page_max_size = 4096 start_entry_offset = 0 empty_data_entry = bytearray(b'\xff') * nvs_read_bytes # Check if key file has .bin extension input_files = [args.input, args.key, args.output] for filepath in input_files: filename, ext = os.path.splitext(filepath) if bin_ext not in ext: sys.exit('Error: `%s`. Only `%s` extension allowed.' 
% (filepath, bin_ext)) with open(args.key,'rb') as decr_key_file: decr_key = decr_key_file.read(64) args.outdir, args.output = set_target_filepath(args.outdir, args.output) output_buf = bytearray(b'\xff') with open(args.input, 'rb') as input_file, open(args.output,'wb') as output_file: while True: if file_entry_no == 128: decrypted_entry_no = 0 file_entry_no = 0 page_num += 1 data_entry = input_file.read(nvs_read_bytes) if not data_entry: break if data_entry != empty_data_entry and file_entry_no not in [0,1]: data_entry = decrypt_data(data_entry, decr_key, page_num, decrypted_entry_no, nvs_read_bytes) decrypted_entry_no += 1 write_entry_no = ((page_num * page_max_size) + file_entry_no) start_idx = start_entry_offset + (write_entry_no * nvs_read_bytes) end_idx = nvs_read_bytes output_buf[start_idx:end_idx] = data_entry file_entry_no += 1 start_entry_offset += nvs_read_bytes output_file.write(output_buf) print('\nCreated NVS decrypted binary: ===>', args.output) def generate_key(args): ''' Generate encryption keys :param args: Command line arguments given ''' page_max_size = 4096 keys_dir = 'keys' output_keyfile = None bin_ext = '.bin' if not args.keyfile: timestamp = datetime.datetime.now().strftime('%m-%d_%H-%M') args.keyfile = 'keys-' + timestamp + bin_ext keys_outdir = os.path.join(args.outdir,keys_dir, '') # Create keys/ dir in <outdir> if does not exist if not (os.path.isdir(keys_outdir)): distutils.dir_util.mkpath(keys_outdir) keys_outdir, output_keyfile = set_target_filepath(keys_outdir, args.keyfile) key = ''.join(random.choice('0123456789abcdef') for _ in range(128)).strip() encr_key_bytes = codecs.decode(key, 'hex') key_len = len(encr_key_bytes) keys_buf = bytearray(b'\xff') * page_max_size keys_buf[0:key_len] = encr_key_bytes crc_data = keys_buf[0:key_len] crc_data = bytes(crc_data) crc = zlib.crc32(crc_data, 0xFFFFFFFF) struct.pack_into('<I', keys_buf, key_len, crc & 0xFFFFFFFF) with open(output_keyfile, 'wb') as output_keys_file: output_keys_file.write(keys_buf) print('\nCreated encryption keys: ===> ', output_keyfile) return key def generate(args, is_encr_enabled=False, encr_key=None): ''' Generate NVS Partition :param args: Command line arguments given :param is_encr_enabled: Encryption enabled/disabled :param encr_key: Key to encrypt NVS partition ''' is_dir_new = False bin_ext = '.bin' input_size = check_size(args.size) if args.version == 1: args.version = Page.VERSION1 elif args.version == 2: args.version = Page.VERSION2 # Check if key file has .bin extension filename, ext = os.path.splitext(args.output) if bin_ext not in ext: sys.exit('Error: `%s`. Only `.bin` extension allowed.' 
% args.output) args.outdir, args.output = set_target_filepath(args.outdir, args.output) if is_encr_enabled and not encr_key: encr_key = generate_key(args) input_file = open(args.input, 'rt', encoding='utf8') output_file = open(args.output, 'wb') with open(args.input, 'rt', encoding='utf8') as input_file,\ open(args.output, 'wb') as output_file,\ nvs_open(output_file, input_size, args.version, is_encrypt=is_encr_enabled, key=encr_key) as nvs_obj: if nvs_obj.version == Page.VERSION1: version_set = VERSION1_PRINT else: version_set = VERSION2_PRINT print('\nCreating NVS binary with version:', version_set) line = input_file.readline().strip() # Comments are skipped while line.startswith('#'): line = input_file.readline().strip() if not isinstance(line, str): line = line.encode('utf-8') header = line.split(',') while True: line = input_file.readline().strip() if not isinstance(line, str): line = line.encode('utf-8') value = line.split(',') if len(value) == 1 and '' in value: break data = dict(zip_longest(header, value)) try: # Check key length if len(data['key']) > 15: raise InputError('Length of key `{}` should be <= 15 characters.'.format(data['key'])) write_entry(nvs_obj, data['key'], data['type'], data['encoding'], data['value']) except InputError as e: print(e) filedir, filename = os.path.split(args.output) if filename: print('\nWarning: NVS binary not created...') os.remove(args.output) if is_dir_new and not filedir == os.getcwd(): print('\nWarning: Output dir not created...') os.rmdir(filedir) sys.exit(-2) print('\nCreated NVS binary: ===>', args.output) def main(): parser = argparse.ArgumentParser(description='\nESP NVS partition generation utility', formatter_class=argparse.RawTextHelpFormatter) subparser = parser.add_subparsers(title='Commands', dest='command', help='\nRun nvs_partition_gen.py {command} -h for additional help\n\n') parser_gen = subparser.add_parser('generate', help='Generate NVS partition', formatter_class=argparse.RawTextHelpFormatter) parser_gen.set_defaults(func=generate) parser_gen.add_argument('input', default=None, help='Path to CSV file to parse') parser_gen.add_argument('output', default=None, help='Path to output NVS binary file') parser_gen.add_argument('size', default=None, help='Size of NVS partition in bytes\ \n(must be multiple of 4096)') parser_gen.add_argument('--version', choices=[1,2], default=2, type=int, help='''Set multipage blob version.\ \nVersion 1 - Multipage blob support disabled.\ \nVersion 2 - Multipage blob support enabled.\ \nDefault: Version 2''') parser_gen.add_argument('--outdir', default=os.getcwd(), help='Output directory to store files created\ \n(Default: current directory)') parser_gen_key = subparser.add_parser('generate-key', help='Generate keys for encryption', formatter_class=argparse.RawTextHelpFormatter) parser_gen_key.set_defaults(func=generate_key) parser_gen_key.add_argument('--keyfile', default=None, help='Path to output encryption keys file') parser_gen_key.add_argument('--outdir', default=os.getcwd(), help='Output directory to store files created.\ \n(Default: current directory)') parser_encr = subparser.add_parser('encrypt', help='Generate NVS encrypted partition', formatter_class=argparse.RawTextHelpFormatter) parser_encr.set_defaults(func=encrypt) parser_encr.add_argument('input', default=None, help='Path to CSV file to parse') parser_encr.add_argument('output', default=None, help='Path to output NVS binary file') parser_encr.add_argument('size', default=None, help='Size of NVS partition in bytes\ \n(must be multiple 
of 4096)') parser_encr.add_argument('--version', choices=[1,2], default=2, type=int, help='''Set multipage blob version.\ \nVersion 1 - Multipage blob support disabled.\ \nVersion 2 - Multipage blob support enabled.\ \nDefault: Version 2''') parser_encr.add_argument('--keygen', action='store_true', default=False, help='Generates key for encrypting NVS partition') parser_encr.add_argument('--keyfile', default=None, help='Path to output encryption keys file') parser_encr.add_argument('--inputkey', default=None, help='File having key for encrypting NVS partition') parser_encr.add_argument('--outdir', default=os.getcwd(), help='Output directory to store files created.\ \n(Default: current directory)') parser_decr = subparser.add_parser('decrypt', help='Decrypt NVS encrypted partition', formatter_class=argparse.RawTextHelpFormatter) parser_decr.set_defaults(func=decrypt) parser_decr.add_argument('input', default=None, help='Path to encrypted NVS partition file to parse') parser_decr.add_argument('key', default=None, help='Path to file having keys for decryption') parser_decr.add_argument('output', default=None, help='Path to output decrypted binary file') parser_decr.add_argument('--outdir', default=os.getcwd(), help='Output directory to store files created.\ \n(Default: current directory)') args = parser.parse_args() args.func(args) if __name__ == '__main__': main()
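# A minimal, illustrative sketch of driving the wrapper API above from Python
# instead of the command line. It is never called; the partition size, namespace,
# keys and values are made-up examples.
def _example_programmatic_use():
    from io import BytesIO

    buf = BytesIO()
    usable_size = check_size('0x6000')   # size left once the reserved page is deducted
    nvs = nvs_open(buf, usable_size, version=Page.VERSION2)
    write_entry(nvs, 'storage', 'namespace', '', '')          # open a namespace
    write_entry(nvs, 'counter', 'data', 'u32', '42')          # primitive entry
    write_entry(nvs, 'greeting', 'data', 'string', 'hello')   # variable-length entry
    nvs_close(nvs)                       # flushes the accumulated pages into `buf`
    return buf.getvalue()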
the-stack_0_14182
import os

from sendgrid import SendGridAPIClient
from sendgrid.helpers.mail import Mail

message = Mail(
    from_email='[email protected]',
    to_emails='[email protected]',
    subject='Sending with Twilio SendGrid is Fun',
    html_content='<strong>and easy to do anywhere, even with Python</strong>')

try:
    sg = SendGridAPIClient(os.environ.get('SENDGRID_API_KEY'))
    response = sg.send(message)
    print(response.status_code)
    print(response.body)
    print(response.headers)
except Exception as e:
    # Python 3 exceptions have no `.message` attribute; print the string form instead.
    print(str(e))
the-stack_0_14183
""" Density fitting and interpolation classes """ import numpy as np from scipy.optimize import leastsq, least_squares, curve_fit from scipy.interpolate import PchipInterpolator, CubicSpline import pdb # Idealised models def sech(z): return 2./(np.exp(z) + np.exp(-z)) def ideal_rho_tanh(z, rho0, drho, dp, L): #return drho/2 - drho/2*np.tanh(dp + dp*z/L ) return drho/2 * (1 - np.tanh(dp + dp*z/L ) ) + rho0 #return drho/2 * (1 - np.tanh(z/L + 1 ) ) def lamb_tanh_rho(z, rho0, dp, z1, h1, H=None): # Assumes z is negative down if H is None: H = z.min() zhat = z-H return rho0*(1 - dp*(1 + np.tanh( (zhat-z1)/h1) ) ) def single_tanh_rho(z, rho0, rho1, z1, h1,): #return rho0 + rho1/2*(1-np.tanh( (z+z1)/h1)) return rho0 - rho1*np.tanh((z+z1)/h1) def double_tanh_rho_orig(z, rho0, rho1, rho2, z1, z2, h1, h2): """ Seven parameter model """ return rho0 + rho1/2*(1-np.tanh( (z+z1)/h1)) +\ rho2/2*(1-np.tanh( (z+z2)/h2)) def double_tanh_rho(z, rho0, rho1, rho2, z1, z2, h1, h2): """ Seven parameter model """ #return rho0 + rho1/2*(1-np.tanh( (z+z1)/h1)) +\ # rho2/2*(1-np.tanh( (z+z2)/h2)) return rho0 - rho1*np.tanh((z+z1)/h1) -\ rho2*np.tanh((z+z2)/h2) def double_tanh_rho_new(z, rho0, rho1, z1, z2, h1, h2): """ Six parameter model proposed by Andrew Manderson and Ed Cripps, UWA Stats """ return rho0 - rho1* (np.tanh((z+z1)/h1) +\ np.tanh((z+z1+z2)/h2)) def fdiff(coeffs, rho, z, density_func): if density_func=='double_tanh': soln = double_tanh_rho(z, *coeffs) elif density_func=='double_tanh_new': soln = double_tanh_rho_new(z, *coeffs) elif density_func=='single_tanh': soln = single_tanh_rho(z, *coeffs) else: soln = density_func(z, coeffs) #print coeffs[-4], coeffs[-3], coeffs[-2], coeffs[-1] return rho - soln def fit_rho(rho, z, density_func='single_tanh', errmax=1.0, bounds=None, initguess=None): """ Fits an analytical density profile to data Uses a robust linear regression Inputs: --- rho: vector of density [Nz] z : depth [Nz] w/ negative values i.e. 0 at surface, positive: up Returns: --- rhofit: best fit function at z locations f0: tuple with analytical parameters """ status = 0 rho0 = rho.min() #rhotry = rho # Use "least_squares" at it allows bounds on fitted parameters to be input rhotry = rho # - rho0 H = np.abs(z).max() if density_func=='double_tanh': initguess = [rho0, 0.01, 0.01, 1., 2., H/10., H/10.] # double tanh guess #bounds = [(0,10.),(0,10.),(0,H),(0,H),(0,H/2),(0,H/2)] bounds = [(rho0-5,0.,0.,0.,0.,H/20.,H/20.),(rho0+5,10.,10.,H,H,H/2,H/2)] elif density_func=='double_tanh_new': initguess = [rho0, 0.01, 1., 2., H/10., H/10.] # double tanh guess #bounds = [(0,10.),(0,10.),(0,H),(0,H),(0,H/2),(0,H/2)] bounds = [(rho0-5,0.,0.,0.,H/20.,H/20.),(rho0+5,10.,H,H,H/2,H/2)] elif density_func=='single_tanh': initguess = [rho0, 1e-3, 40., 100.] # single stratification function bounds = [(rho0-5,0.,0.,0.),(rho0+5,10.,2*H,2*H)] #else: User must set bounds soln =\ least_squares(fdiff, initguess, args=(rhotry, z, density_func), \ bounds=bounds,\ xtol=1e-10, ftol=1e-10, loss='cauchy', f_scale=0.1, # Robust verbose=0, ) f0 = soln['x'] #soln = leastsq(fdiff, initguess, args=(rhotry, z), \ # full_output=True) #f0 = soln[0] # This could be changed to pass a function directly... 
if density_func=='double_tanh': rhofit = double_tanh_rho(z, *f0)# + rho0 elif density_func=='double_tanh_new': rhofit = double_tanh_rho_new(z, *f0)# + rho0 elif density_func=='single_tanh': rhofit = single_tanh_rho(z, *f0) else: rhofit = density_func(z, f0) err = np.linalg.norm(rhofit - rhotry) if err > errmax: print('Warning in density fit -- large error: %f'%err) status = -1 #raise Exception('maximum fitting error exceeded') return rhofit, f0, status class FitDensity(object): """ Interpolate by fitting an analytical profile first """ density_func = 'single_tanh' bounds = None initguess = None def __init__(self, rho, z, **kwargs): self.__dict__.update(**kwargs) self.rho0 = rho.min() rhofit, self.f0, self.status = fit_rho(rho, z, density_func=self.density_func, bounds=self.bounds, initguess=self.initguess) def __call__(self, Z): f0 = self.f0 if self.density_func=='double_tanh': return double_tanh_rho(Z, *f0)# + self.rho0 elif self.density_func=='double_tanh_new': return double_tanh_rho_new(Z, *f0)# + self.rho0 elif self.density_func=='single_tanh': return single_tanh_rho(Z, *f0) else: return self.density_func(Z, f0) class InterpDensity(object): """ Wrapper class for pchip function """ density_func = None def __init__(self, rho ,z, **kwargs): self.__dict__.update(**kwargs) self.Fi = PchipInterpolator(z, rho, axis=0, extrapolate=True) #self.Fi = CubicSpline(z, rho, axis=0, bc_type='natural') def __call__(self, Z): return self.Fi(Z) class ChebyFitDensity(object): """ Wrapper class for Chebyshev Polynomial fit """ order=None def __init__(self, rho ,z, **kwargs): self.__dict__.update(**kwargs) nz = z.size if self.order is None: self.order = int(max(3,nz -2)) self.f0 = coefs = np.polynomial.chebyshev.chebfit(z, rho, self.order) def __call__(self, Z): return np.polynomial.chebyshev.chebval(Z, self.f0)
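# A minimal usage sketch for the fitting helpers above, run on a synthetic
# single-tanh profile with a little added noise. All parameter values and grid
# sizes are arbitrary illustration choices.
if __name__ == "__main__":
    z = np.linspace(-250., 0., 100)          # depth, negative down
    rho = single_tanh_rho(z, 1024., 1.5, 60., 20.)
    rho = rho + 0.05 * np.random.randn(z.size)

    # Robust analytical fit, then evaluate the fitted profile on a finer grid
    fitter = FitDensity(rho, z, density_func='single_tanh')
    znew = np.linspace(-250., 0., 400)
    rho_fit = fitter(znew)

    # Monotonic (pchip) interpolation of the raw data for comparison
    interp = InterpDensity(rho, z)
    rho_interp = interp(znew)
    print(rho_fit[:3], rho_interp[:3])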
the-stack_0_14186
import math
import json

import numpy as np
import torch
from mmcv.runner import get_dist_info
from torch.utils.data import Sampler


class DistributedClassAwareSampler(Sampler):

    def __init__(self,
                 dataset,
                 samples_per_gpu=1,
                 num_replicas=None,
                 rank=None,
                 seed=1,
                 sample_weight_path=None):
        _rank, _num_replicas = get_dist_info()
        if num_replicas is None:
            num_replicas = _num_replicas
        if rank is None:
            rank = _rank
        self.dataset = dataset
        self.samples_per_gpu = samples_per_gpu
        self.num_replicas = num_replicas
        self.rank = rank
        self.epoch = 0
        self.seed = seed if seed is not None else 1

        assert hasattr(self.dataset, 'flag')
        assert len(self.dataset.flag) == len(self.dataset)

        self.num_samples = math.ceil(len(self.dataset) / self.num_replicas)

        with open(sample_weight_path, "r") as f:
            sample_weight = json.load(f)
        self.sample_weights = torch.tensor(
            list(sample_weight.values()), dtype=torch.float)

        self.indices = None
        self.set_epoch(-1)

    def __iter__(self):
        return iter(self.indices)

    def __len__(self):
        return self.num_samples

    def set_epoch(self, epoch):
        self.epoch = epoch

        g = torch.Generator()
        g.manual_seed(self.seed + self.epoch)
        indices = torch.multinomial(
            self.sample_weights,
            len(self.dataset),
            generator=g,
            replacement=True
        ).numpy()

        self.flag = self.dataset.flag[indices]
        self.group_sizes = np.bincount(self.flag)
        self.num_samples = 0
        for i, j in enumerate(self.group_sizes):
            self.num_samples += math.ceil(
                self.group_sizes[i] / self.samples_per_gpu /
                self.num_replicas) * self.samples_per_gpu
        self.total_size = self.num_samples * self.num_replicas

        indices_group = []
        for i, size in enumerate(self.group_sizes):
            if size > 0:
                flag_i_indice = np.where(self.flag == i)[0]
                assert len(flag_i_indice) == size
                indice = indices[flag_i_indice].tolist()
                extra = math.ceil(
                    size / self.samples_per_gpu / self.num_replicas
                ) * self.samples_per_gpu * self.num_replicas - len(indice)

                tmp = indice.copy()
                for _ in range(extra // size):
                    indice.extend(tmp)
                indice.extend(tmp[:extra % size])
                indices_group.extend(indice)

        assert len(indices_group) == self.total_size

        indices_group = [
            indices_group[j] for i in list(
                torch.randperm(
                    len(indices_group) // self.samples_per_gpu, generator=g))
            for j in range(i * self.samples_per_gpu,
                           (i + 1) * self.samples_per_gpu)
        ]

        offset = self.num_samples * self.rank
        indices_group = indices_group[offset:offset + self.num_samples]
        assert len(indices_group) == self.num_samples

        self.indices = indices_group
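# An illustrative sketch of plugging the sampler into a DataLoader. The dummy
# dataset, the uniform per-sample weight file and the batch size are all made
# up; in practice the dataset supplies `flag` and the JSON file holds
# precomputed per-sample weights.
def _example_usage():
    import os
    import tempfile
    from torch.utils.data import DataLoader, Dataset

    class DummyDataset(Dataset):
        def __init__(self, n=16):
            self.flag = np.zeros(n, dtype=np.uint8)   # aspect-ratio group 0
            self.flag[n // 2:] = 1                    # aspect-ratio group 1

        def __len__(self):
            return len(self.flag)

        def __getitem__(self, idx):
            return idx

    dataset = DummyDataset()
    weight_path = os.path.join(tempfile.gettempdir(), 'sample_weights.json')
    with open(weight_path, 'w') as f:
        # one (index -> weight) entry per sample; rarer classes would get larger weights
        json.dump({str(i): 1.0 for i in range(len(dataset))}, f)

    sampler = DistributedClassAwareSampler(
        dataset, samples_per_gpu=4, sample_weight_path=weight_path)
    loader = DataLoader(dataset, batch_size=4, sampler=sampler)

    for epoch in range(2):
        sampler.set_epoch(epoch)      # deterministic reshuffle per epoch
        for batch in loader:
            pass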
the-stack_0_14187
#!/usr/bin/env python
"""
get_result.py
"""
import click
from urllib.parse import parse_qsl, urljoin, urlparse

import requests
from bs4 import BeautifulSoup

DAUM_DICT_HOST = "https://dic.daum.net/"
LANG = 'eng'
COMMAND_SET = {
    'a': 'antonym',
    'e': 'example sentences',
    's': 'synonym',
    'q': 'quit'
}
COMMANDS = "more: " + ' | '.join(
    [f'{COMMAND_SET[key]}({key})' for key in COMMAND_SET]
)


def example_url(wordid: str, page: int = 1):
    example_host = f'{DAUM_DICT_HOST}/word/view_example_more.do'
    qsl = f'?wordid={wordid}&summaryid=etc&page={page}'
    return urljoin(example_host, qsl)


def parse(html: str):
    bs = BeautifulSoup(html, 'html.parser')
    content = bs.findAll('meta', attrs={'property': 'og:description'})[0]\
        .get('content')
    if not content:
        return 'No results found.', ''

    try:
        redir_url = bs.findAll('meta', attrs={'http-equiv': 'Refresh'})[0]\
            .get('content').split('URL=')[1]
    except IndexError:
        # the result comes with polysemic words
        redir_url = bs.findAll('a', attrs={'txt_cleansch'})[0].attrs['href']

    dic_query = urlparse(redir_url).query
    wordid = dict(parse_qsl(dic_query))['wordid']

    return content, wordid


def parse_detail(html: str, wordid: str, category: str):
    """ parse once more to get the detailed view """
    bs = BeautifulSoup(html, 'html.parser')
    id_set = {
        'antonym': 'OPPOSITE_WORD',
        'synonym': 'SIMILAR_WORD'
    }

    if category not in id_set.keys():
        pass
    else:
        words = bs.find(id=id_set[category])
        if not words:
            # there's no antonym of this keyword
            return 'No results found.'
        tags = words.findAll('li')
        result = [
            f"{tag.find('a').text}: {tag.find('span').text}" for tag in tags
        ]
        return '\n'.join(result)


def parse_example(url: str):
    """ extract the example sentences """
    html = requests.get(url).text
    bs = BeautifulSoup(html, 'html.parser')
    list_ = bs.findAll('li')

    sentences = []
    for l in list_:
        eng_phrase = l.find('span', attrs={'txt_example'}).text.split('\n')[0]
        mean_phrase = l.find('span', attrs={'mean_example'}).text
        phrase_set = f'{eng_phrase}\n -> {mean_phrase}\n\n'
        sentences.append(phrase_set)

    return ''.join(sentences)


@click.command()
@click.argument('keyword', metavar='<keyword>')
def main(keyword):
    """ Use DAUM Dictionary via terminal """
    click.echo('Searching...')
    url = f'{DAUM_DICT_HOST}search.do?q={keyword}&dic={LANG}'
    response = requests.get(url)
    meanings, wordid = parse(response.text)
    detailed_url = f'https://dic.daum.net/word/view.do?wordid={wordid}'
    detailed_text = None
    click.echo(meanings)

    if meanings == 'No results found.' and wordid == '':
        return

    while True:
        value = click.prompt(click.style(COMMANDS, fg='white', bg='blue'))
        try:
            command = COMMAND_SET[value]
        except KeyError:
            click.echo("Sorry, I don't understand.")
            continue

        if value != 'q':
            if value == 'e':
                result = parse_example(example_url(wordid))
                click.echo(result)
            else:  # a / s
                if detailed_text is None:
                    detailed_text = requests.get(detailed_url).text
                result = parse_detail(detailed_text, wordid, command)
                click.secho(command, fg='green')
                click.echo(result)
        else:
            break


if __name__ == "__main__":
    main()
the-stack_0_14188
import numpy as np
import torch
import torch.nn.functional as F


def restore_bn(kernel, bn, conv_bias):
    gamma = bn.weight
    std = (bn.running_var + bn.eps).sqrt()
    bias = -bn.running_mean
    new_bias = (conv_bias - bn.bias) / gamma * std - bias
    new_weight = kernel * (std / gamma).reshape(-1, 1, 1, 1)
    return new_weight, new_bias


def transI_fusebn(kernel, bn, conv_bias):
    gamma = bn.weight
    std = (bn.running_var + bn.eps).sqrt()
    bias = -bn.running_mean
    if conv_bias is not None:
        bias += conv_bias
    return kernel * (
        (gamma / std).reshape(-1, 1, 1, 1)), bn.bias + bias * gamma / std


def transII_addbranch(kernels, biases):
    return torch.sum(kernels, dim=0), torch.sum(biases, dim=0)


def transIII_1x1_kxk(k1, b1, k2, b2, groups=1):
    if groups == 1:
        k = F.conv2d(k2, k1.permute(1, 0, 2, 3))
        b_hat = (k2 * b1.reshape(1, -1, 1, 1)).sum((1, 2, 3))
    else:
        k_slices = []
        b_slices = []
        k1_T = k1.permute(1, 0, 2, 3)
        k1_group_width = k1.size(0) // groups
        k2_group_width = k2.size(0) // groups
        for g in range(groups):
            k1_T_slice = k1_T[:, g * k1_group_width:(g + 1) * k1_group_width, :, :]
            k2_slice = k2[g * k2_group_width:(g + 1) * k2_group_width, :, :, :]
            k_slices.append(F.conv2d(k2_slice, k1_T_slice))
            b_slices.append(
                (k2_slice *
                 b1[g * k1_group_width:(g + 1) * k1_group_width].reshape(
                     1, -1, 1, 1)).sum((1, 2, 3)))
        k, b_hat = transIV_depthconcat(k_slices, b_slices)
    return k, b_hat + b2


def transIV_depthconcat(kernels, biases):
    return torch.cat(kernels), torch.cat(biases)


def transV_avg(channels, kernel_size, groups):
    input_dim = channels // groups
    k = torch.zeros((channels, input_dim, kernel_size, kernel_size))
    k[np.arange(channels).tolist(),
      np.tile(np.arange(input_dim), groups).tolist(), :, :] = 1.0 / kernel_size**2
    return k


def transVI_multiscale(kernel, target_kernel_size):
    """
    NOTE: This has not been tested with non-square kernels
    (kernel.size(2) != kernel.size(3)) nor even-size kernels
    """
    W_pixels_to_pad = (target_kernel_size - kernel.size(2)) // 2
    H_pixels_to_pad = (target_kernel_size - kernel.size(3)) // 2
    return F.pad(
        kernel,
        [H_pixels_to_pad, H_pixels_to_pad, W_pixels_to_pad, W_pixels_to_pad])


def transVII_kxk_1x1(k1, b1, k2, b2):
    return F.conv2d(k1.permute(1, 0, 2, 3), k2).permute(1, 0, 2, 3), (
        k2 * b1.reshape(-1, 1, 1, 1)).sum((1, 2, 3)) + b2


def transIIX_kxk_kxk(k1, b1, k2, b2, groups=1):
    k1 = torch.from_numpy(
        np.flip(np.flip(np.array(k1), axis=3), axis=2).copy())
    k_size = k1.size(2)
    padding = k_size // 2 + 1
    if groups == 1:
        k = F.conv2d(k2, k1.permute(1, 0, 2, 3), padding=padding)
        b_hat = (k2 * b1.reshape(1, -1, 1, 1)).sum((1, 2, 3))
    else:
        k_slices = []
        b_slices = []
        k1_T = k1.permute(1, 0, 2, 3)
        k1_group_width = k1.size(0) // groups
        k2_group_width = k2.size(0) // groups
        for g in range(groups):
            k1_T_slice = k1_T[:, g * k1_group_width:(g + 1) * k1_group_width, :, :]
            k2_slice = k2[g * k2_group_width:(g + 1) * k2_group_width, :, :, :]
            k_slices.append(F.conv2d(k2_slice, k1_T_slice, padding=padding))
            b_slices.append(
                (k2_slice *
                 b1[g * k1_group_width:(g + 1) * k1_group_width].reshape(
                     1, -1, 1, 1)).sum((1, 2, 3)))
        k, b_hat = transIV_depthconcat(k_slices, b_slices)
    return k, b_hat + b2


def transIX_bn_to_1x1(bn, in_channels, groups=1):
    input_dim = in_channels // groups
    kernel_value = np.zeros((in_channels, input_dim, 3, 3), dtype=np.float32)
    for i in range(in_channels):
        kernel_value[i, i % input_dim, 1, 1] = 1
    id_tensor = torch.from_numpy(kernel_value).to(bn.weight.device)
    kernel = id_tensor
    running_mean = bn.running_mean
    running_var = bn.running_var
    gamma = bn.weight
    beta = bn.bias
    eps = bn.eps
    std = (running_var + eps).sqrt()
    t = (gamma / std).reshape(-1, 1, 1, 1)
    return kernel * t, beta - running_mean * gamma / std
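# A small self-check sketch for transI_fusebn: in eval mode a conv followed by
# batch norm should match a single conv built from the fused kernel and bias.
# Layer sizes and inputs below are arbitrary.
if __name__ == "__main__":
    torch.manual_seed(0)
    conv = torch.nn.Conv2d(3, 8, kernel_size=3, padding=1, bias=False)
    bn = torch.nn.BatchNorm2d(8)

    # give BN non-trivial running statistics, then freeze both layers
    bn(conv(torch.randn(4, 3, 16, 16)))
    conv.eval()
    bn.eval()

    with torch.no_grad():
        fused_w, fused_b = transI_fusebn(conv.weight, bn, conv.bias)
        fused = torch.nn.Conv2d(3, 8, kernel_size=3, padding=1, bias=True)
        fused.weight.copy_(fused_w)
        fused.bias.copy_(fused_b)

        x = torch.randn(2, 3, 16, 16)
        # expect True: the fused conv reproduces conv -> BN
        print(torch.allclose(bn(conv(x)), fused(x), atol=1e-5))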
the-stack_0_14189
import numpy as np import pandas as pd import matplotlib.pyplot as plt import matplotlib.image as mpimg import cv2 """ image = mpimg.imread('./images/waymo_car.jpg') #Image Dimensions print("Image Dimensions: ", image.shape) """ """ Starting with B & W """ """ #Covert to GrayScale convGrayScale = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY) print("Converted Image Dimensions: ", convGrayScale.shape) plt.imshow(convGrayScale, cmap='gray') # Print the value at the centre of the image x = convGrayScale.shape[1]//2 y = convGrayScale.shape[0]//2 print(convGrayScale[y,x]) # Finds the maximum and minimum grayscale values in this image max_val = np.amax(convGrayScale) min_val = np.amin(convGrayScale) print('Max: ', max_val) print('Min: ', min_val) """ """ With Colour Images """ """ #image = mpimg.imread('images/wa_state_highway.jpg') plt.imshow(image) # Copying RGB Channels into separate arrays red = image[:,:,0] green = image[:,:,1] blue = image[:,:,2] f, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(20,10)) ax1.set_title('Red channel') ax1.imshow(red, cmap='gray') ax2.set_title('Green channel') ax2.imshow(green, cmap='gray') ax3.set_title('Blue channel') ax3.imshow(blue, cmap='gray') """ """ Creating Blue Screen """ """ pizzaImage = cv2.imread("./images/pizza_bluescreen.jpg") print("This image is a(n)", type(pizzaImage)) #Please remember that the image dimensions are displayed as Height x Width x Colour Components print("Image Dimensions", pizzaImage.shape) #We need to make a copy and convert the image to RGB pizzaCopy = np.copy(pizzaImage) pizzaCopy = cv2.cvtColor(pizzaCopy, cv2.COLOR_BGR2RGB) plt.imshow(pizzaCopy) #Identifying Colour thresholds for Blue lowerBlue = np.array([0,0,210]) upperBlue = np.array([70,70,255]) #Creating masks for Blue area mask = cv2.inRange(pizzaCopy, lowerBlue, upperBlue) #Visualize the mask - Black area means that the mask isn't effective there plt.imshow(mask, cmap='gray') maskedImage = np.copy(pizzaCopy) maskedImage[mask != 0] = [0, 0, 0] plt.imshow(maskedImage, cmap='gray') #Adding the background backgroundImage = cv2.imread('./images/space_background.jpg') backgroundImage = cv2.cvtColor(backgroundImage, cv2.COLOR_BGR2RGB) croppedImage = backgroundImage[0:514, 0:816] croppedImage[mask == 0] = [0,0,0] plt.imshow(croppedImage) completeImage = croppedImage + maskedImage plt.imshow(completeImage) """ """ Coding for Green Screen """ """ carImage = cv2.imread("./images/car_green_screen.jpg") print("This image is a(n)", type(carImage)) #Please remember that the image dimensions are displayed as Height x Width x Colour Components print("Image Dimensions", carImage.shape) #We need to make a copy and convert the image to RGB carCopy = np.copy(carImage) carCopy = cv2.cvtColor(carCopy, cv2.COLOR_BGR2RGB) plt.imshow(carCopy) #Identifying Colour thresholds for Green lowerGreen = np.array([36, 25, 25]) upperGreen = np.array([70, 255, 255]) #Creating masks for Green area mask = cv2.inRange(carCopy, lowerGreen, upperGreen) #Visualize the mask - Black area means that the mask isn't effective there plt.imshow(mask, cmap='gray') maskedImage = np.copy(carCopy) maskedImage[mask != 0] = [0, 0, 0] plt.imshow(maskedImage, cmap='gray') #Adding the background backgroundImage = cv2.imread('./images/space_background.jpg') backgroundImage = cv2.cvtColor(backgroundImage, cv2.COLOR_BGR2RGB) plt.imshow(backgroundImage, cmap='gray') croppedImage = backgroundImage[0:450, 0:660] croppedImage[mask == 0] = [0,0,0] plt.imshow(croppedImage) completeImage = croppedImage + maskedImage plt.imshow(completeImage) 
""" """ Converting to HSV format """ image = cv2.imread('images/water_balloons.jpg') image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) plt.imshow(image) # RGB channels r = image[:,:,0] g = image[:,:,1] b = image[:,:,2] f, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(20,10)) ax1.set_title('Red') ax1.imshow(r, cmap='gray') ax2.set_title('Green') ax2.imshow(g, cmap='gray') ax3.set_title('Blue') ax3.imshow(b, cmap='gray') # Convert from RGB to HSV hsv = cv2.cvtColor(image, cv2.COLOR_RGB2HSV) # HSV channels h = hsv[:,:,0] s = hsv[:,:,1] v = hsv[:,:,2] f, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(20,10)) ax1.set_title('Hue') ax1.imshow(h, cmap='gray') ax2.set_title('Saturation') ax2.imshow(s, cmap='gray') ax3.set_title('Value') ax3.imshow(v, cmap='gray') # Define our color selection criteria in HSV values lower_hue = np.array([160,0,0]) upper_hue = np.array([180,255,255]) # Define our color selection criteria in RGB values lower_pink = np.array([180,0,100]) upper_pink = np.array([255,255,230]) # Define the masked area in RGB space mask_rgb = cv2.inRange(image, lower_pink, upper_pink) # mask the image masked_image = np.copy(image) masked_image[mask_rgb==0] = [0,0,0] # Vizualize the mask plt.imshow(masked_image) # Now try HSV! # Define the masked area in HSV space mask_hsv = cv2.inRange(hsv, lower_hue, upper_hue) # mask the image masked_image = np.copy(image) masked_image[mask_hsv==0] = [0,0,0] # Vizualize the mask plt.imshow(masked_image)
the-stack_0_14194
# (C) Copyright 2005- ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
#
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation
# nor does it submit to any jurisdiction.
#
#
# Python implementation: bufr_clone
#
# Description: how to create a new BUFR message by cloning
#              an existing message.
#
from __future__ import absolute_import

import traceback
import sys

from eccodes import *

INPUT = '../../data/bufr/syno_1.bufr'
OUTPUT = 'bufr_clone_test_p.clone.bufr'
VERBOSE = 1  # verbose error reporting


def example():
    # open BUFR file
    fin = open(INPUT, 'rb')

    # open output BUFR file
    fout = open(OUTPUT, 'wb')

    # get handle for message
    bufr = codes_bufr_new_from_file(fin)

    # create several clones of this message and alter them
    # in different ways
    for centre in range(0, 3):
        # clone the message
        clone_id = codes_clone(bufr)

        # this is the place where you may wish to modify the clone
        codes_set(clone_id, 'bufrHeaderCentre', centre)

        # write the cloned message to a file
        codes_write(clone_id, fout)

        # release the clone's handle
        codes_release(clone_id)

    # release the source's handle
    codes_release(bufr)

    fin.close()
    fout.close()


def main():
    try:
        example()
    except CodesInternalError as err:
        if VERBOSE:
            traceback.print_exc(file=sys.stderr)
        else:
            sys.stderr.write(err.msg + '\n')

        return 1


if __name__ == "__main__":
    sys.exit(main())
the-stack_0_14195
import datetime as dt import functools import itertools import logging from logging.handlers import RotatingFileHandler import os import sys import threading import traceback # pylint: disable=redefined-builtin from codecs import open from collections import namedtuple from time import time from cli_helpers.tabular_output import TabularOutputFormatter from cli_helpers.tabular_output.preprocessors import (align_decimals, format_numbers) import humanize import click from prompt_toolkit.shortcuts import PromptSession, CompleteStyle from prompt_toolkit.completion import DynamicCompleter, ThreadedCompleter from prompt_toolkit.enums import DEFAULT_BUFFER, EditingMode from prompt_toolkit.document import Document from prompt_toolkit.filters import HasFocus, IsDone from prompt_toolkit.lexers import PygmentsLexer from prompt_toolkit.layout.processors import (ConditionalProcessor, HighlightMatchingBracketProcessor, TabsProcessor) from prompt_toolkit.history import FileHistory from prompt_toolkit.auto_suggest import AutoSuggestFromHistory from pygments.lexers.sql import PostgresLexer from mssqlcli.config import ( get_casing_file, config_location, ensure_dir_exists, get_config, ) from mssqlcli.completion_refresher import CompletionRefresher from mssqlcli.__init__ import __version__ from mssqlcli.encodingutils import text_type from mssqlcli.key_bindings import mssqlcli_bindings from mssqlcli.mssqlcliclient import MssqlCliClient from mssqlcli.mssqlcompleter import MssqlCompleter from mssqlcli.mssqlstyle import style_factory, style_factory_output from mssqlcli.mssqltoolbar import create_toolbar_tokens_func from mssqlcli.sqltoolsclient import SqlToolsClient from mssqlcli.packages import special from mssqlcli.mssqlbuffer import mssql_is_multiline from mssqlcli.util import is_command_valid import mssqlcli.localized_strings as localized # Query tuples are used for maintaining history MetaQuery = namedtuple( 'Query', [ 'query', # The entire text of the command 'successful', # True If all subqueries were successful 'total_time', # Time elapsed executing the query 'meta_changed', # True if any subquery executed create/alter/drop 'db_changed', # True if any subquery changed the database 'path_changed', # True if any subquery changed the search path 'mutated', # True if any subquery executed insert/update/delete 'contains_secure_statement', # True if any subquery contains the security statement ]) MetaQuery.__new__.__defaults__ = ('', False, 0, False, False, False, False, False) OutputSettings = namedtuple( 'OutputSettings', 'table_format dcmlfmt floatfmt missingval expanded max_width case_function' ) OutputSettings.__new__.__defaults__ = ( None, None, None, '<null>', False, None, lambda x: x ) security_keywords = ['password', 'secret', 'encrypted_value'] def security_words_found_in(query): try: tokens = query.lower() return any([keyword for keyword in security_keywords if keyword in tokens]) except AttributeError: return False class MssqlFileHistory(FileHistory): def append_string(self, string): if security_words_found_in(string): return super(MssqlFileHistory, self).append_string(string) class MssqlCli(object): # pylint: disable=too-many-instance-attributes, useless-object-inheritance max_len_prompt = 30 default_prompt = '\\d> ' def set_default_pager(self, config): configured_pager = config['main'].get('pager') os_environ_pager = os.environ.get('PAGER') is_less_installed = is_command_valid(['less', '--version']) default_pager = configured_pager or os_environ_pager or \ ('less -SRXF' if is_less_installed else 
False) or None if configured_pager: self.logger.info( 'Default pager found in config file: "%s"', configured_pager) elif os_environ_pager: self.logger.info('Default pager found in PAGER environment variable: "%s"', os_environ_pager) elif is_less_installed: self.logger.info('Default pager set to Less') else: self.logger.info( 'No default pager found in environment. Using os default pager') # Set default set of less recommended options, if they are not already set. # They are ignored if pager is different than less. if not os.environ.get('LESS'): os.environ['LESS'] = '-SRXF' if default_pager is not None: os.environ['PAGER'] = default_pager return default_pager def __init__(self, options): # Load config. c = self.config = get_config(options.mssqlclirc_file) self.initialize_logging() self.logger = logging.getLogger(u'mssqlcli.main') self.interactive_mode = options.interactive_mode self.table_format = c['main']['table_format'] self.decimal_format = c['data_formats']['decimal'] self.float_format = c['data_formats']['float'] self.null_string = c['main'].get('null_string', '<null>') self.expanded_output = c['main']['expand'] == 'always' self.integrated_auth = options.integrated_auth self.less_chatty = bool( options.less_chatty) or c['main'].as_bool('less_chatty') or self.interactive_mode keyword_casing = c['main']['keyword_casing'] self.settings = { 'casing_file': get_casing_file(c), 'generate_casing_file': c['main'].as_bool('generate_casing_file'), 'generate_aliases': c['main'].as_bool('generate_aliases'), 'asterisk_column_order': c['main']['asterisk_column_order'], 'qualify_columns': c['main']['qualify_columns'], 'case_column_headers': c['main'].as_bool('case_column_headers'), 'search_path_filter': c['main'].as_bool('search_path_filter'), 'single_connection': False, 'less_chatty': self.less_chatty, 'keyword_casing': keyword_casing, } if self.interactive_mode: pager = self.set_default_pager(c) self.prompt_session = None # set auto_expand to false if less is detected with auto expand self.auto_expand = options.auto_vertical_output \ or (c['main']['expand'] == 'auto' and pager != 'less -SRXF') self.multiline = c['main'].as_bool('multi_line') self.multiline_mode = c['main'].get('multi_line_mode', 'tsql') self.vi_mode = c['main'].as_bool('vi') self.prompt_format = options.prompt or c['main'].get('prompt', self.default_prompt) self.row_limit = options.row_limit self.min_num_menu_lines = c['main'].as_int('min_num_menu_lines') self.multiline_continuation_char = c['main']['multiline_continuation_char'] self.syntax_style = c['main']['syntax_style'] self.cli_style = c['colors'] self.output_style = style_factory_output(self.syntax_style, self.cli_style) self.wider_completion_menu = c['main'].as_bool('wider_completion_menu') self.on_error = c['main']['on_error'].upper() self.now = dt.datetime.today() self.completion_refresher = CompletionRefresher() self.query_history = [] # Initialize completer smart_completion = c['main'].get('smart_completion', 'True').lower() == 'true' self.completer = MssqlCompleter(smart_completion=smart_completion, settings=self.settings) self._completer_lock = threading.Lock() # input and output file are for non-interactive mode self.input_file = options.input_file self.output_file = options.output_file self.query = options.query self.sqltoolsclient = SqlToolsClient(enable_logging=options.enable_sqltoolsservice_logging) self.mssqlcliclient_main = MssqlCliClient(options, self.sqltoolsclient) # exit and return error if user enters interactive mode with -i or -o arguments enabled if 
self.interactive_mode and (self.input_file or self.output_file): raise ValueError("Invalid arguments: -i and -o can only be used in non-interactive " "mode.") # exit and return error if both query text and an input file are specified if self.query and self.input_file: raise ValueError("Invalid arguments: either query [-Q] or input file [-i] may be " "specified.") def __del__(self): # Shut-down sqltoolsservice if self.sqltoolsclient: self.sqltoolsclient.shutdown() # TODO: possibly use at a later date for expanded output file functionality # def write_to_file(self, pattern, **_): # if not pattern: # self.output_file = None # message = 'File output disabled' # return [(None, None, None, message, '', True)] # filename = os.path.abspath(os.path.expanduser(pattern)) # if not os.path.isfile(filename): # try: # open(filename, 'w').close() # except IOError as e: # self.output_file = None # message = str(e) + '\nFile output disabled' # return [(None, None, None, message, '', False)] # self.output_file = filename # message = 'Writing to file "%s"' % self.output_file # return [(None, None, None, message, '', True)] def initialize_logging(self): log_file = self.config['main']['log_file'] if log_file == 'default': log_file = config_location() + 'mssqlcli.log' ensure_dir_exists(log_file) log_level = self.config['main']['log_level'] # Disable logging if value is NONE by switching to a no-op handler. # Set log level to a high value so it doesn't even waste cycles getting # called. if log_level.upper() == 'NONE': handler = logging.NullHandler() else: # creates a log buffer with max size of 20 MB and 5 backup files handler = RotatingFileHandler(os.path.expanduser(log_file), encoding='utf-8', maxBytes=1024*1024*20, backupCount=5) level_map = {'CRITICAL': logging.CRITICAL, 'ERROR': logging.ERROR, 'WARNING': logging.WARNING, 'INFO': logging.INFO, 'DEBUG': logging.DEBUG, 'NONE': logging.CRITICAL } log_level = level_map[log_level.upper()] formatter = logging.Formatter( '%(asctime)s (%(process)d/%(threadName)s) ' '%(name)s %(levelname)s - %(message)s') handler.setFormatter(formatter) root_logger = logging.getLogger('mssqlcli') root_logger.addHandler(handler) root_logger.setLevel(log_level) root_logger.info('Initializing mssqlcli logging.') root_logger.debug('Log file %r.', log_file) def set_main_mssqlcli_client(self, mssqlcli_client): self.mssqlcliclient_main = mssqlcli_client def connect_to_database(self): owner_uri, error_messages = self.mssqlcliclient_main.connect_to_database() if not owner_uri and error_messages: click.secho('\n'.join(error_messages), err=True, fg='yellow') self.logger.debug('Database connection failed: %r.', error_messages) sys.exit(1) def handle_editor_command(self, text): r""" Editor command is any query that is prefixed or suffixed by a '\e'. The reason for a while loop is because a user might edit a query multiple times. For eg: "select * from \e"<enter> to edit it in vim, then come back to the prompt with the edited query "select * from blah where q = 'abc'\e" to edit it again. :param text: Document :return: Document """ # FIXME: using application.pre_run_callables like this here is not the best solution. # It's internal api of prompt_toolkit that may change. This was added to fix #668. # We may find a better way to do it in the future. 
# pylint: disable=no-member editor_command = special.editor_command(text) while editor_command: filename = special.get_filename(text) query = (special.get_editor_query(text) or self.get_last_query()) sql, message = special.open_external_editor(filename, sql=query) if message: # Something went wrong. Raise an exception and bail. raise RuntimeError(message) while True: try: text = self.prompt_session.prompt(default=sql) break except KeyboardInterrupt: sql = "" editor_command = special.editor_command(text) return text def _execute_interactive_command(self, text): """ Runs commands in the interactive CLI mode. """ logger = self.logger # Initialize default metaquery in case execution fails query = MetaQuery(query=text, successful=False) try: output, query = self._evaluate_command(text) except KeyboardInterrupt: # Issue where Ctrl+C propagates to sql tools service process and kills it, # so that query/cancel request can't be sent. # Right now the sql_tools_service process is killed and we restart # it with a new connection. click.secho(u'Cancelling query...', err=True, fg='red') self.reset() logger.debug("cancelled query, sql: %r", text) click.secho("Query cancelled.", err=True, fg='red') except NotImplementedError: click.secho('Not Yet Implemented.', fg="yellow") else: if query.total_time > 1: # pylint: disable=no-member print('Time: %0.03fs (%s)' % (query.total_time, humanize.time.naturaldelta(query.total_time))) else: print('Time: %0.03fs' % query.total_time) # Check if we need to update completions, in order of most # to least drastic changes if query.db_changed: with self._completer_lock: self.completer.reset_completions() self.refresh_completions(persist_priorities='keywords') elif query.meta_changed: self.refresh_completions(persist_priorities='all') if not query.contains_secure_statement: # Allow MssqlCompleter to learn user's preferred keywords, etc. with self._completer_lock: self.completer.extend_query_history(text) self.query_history.append(query) return output def execute_query(self, text): """ Processes a query string and outputs to file or terminal """ if self.interactive_mode: output = self._execute_interactive_command(text) else: # non-interactive mode output, _ = self._evaluate_command(text) self._output_query(output) return output def _output_query(self, output): """ Specifies how query output is handled """ if self.interactive_mode: click.echo_via_pager('\n'.join(output)) else: if self.output_file: try: with open(self.output_file, 'w', encoding='utf-8') as f: click.echo('\n'.join(output), file=f) except IOError as e: click.secho(str(e), err=True, fg='red') sys.exit(1) else: click.echo('\n'.join(output)) def run(self): """ Spins up CLI. """ # raise error if interactive mode is set to false here if not self.interactive_mode: raise ValueError("Invalid arguments: 'run' must be used in interactive mode! 
Please set " "interactive_mode to True.") # exit and return error if user enters interactive mode with -o argument enabled if self.output_file: raise ValueError("Invalid arguments: -o must be used with interactive mode set to " "false.") history_file = self.config['main']['history_file'] if history_file == 'default': history_file = config_location() + 'history' history = MssqlFileHistory(os.path.expanduser(history_file)) self.refresh_completions(history=history, persist_priorities='none') self.prompt_session = self._build_cli(history) if not self.less_chatty: print('Version: {}'.format(__version__)) print('Mail: [email protected]') print('Home: http://github.com/dbcli/mssql-cli') try: while True: try: text = self.prompt_session.prompt() except KeyboardInterrupt: continue # The reason we check here instead of inside the mssqlcliclient is # because we want to raise the Exit exception which will be # caught by the try/except block that wraps the mssqlcliclient execute # statement. if self.quit_command(text): raise EOFError try: text = self.handle_editor_command(text) except RuntimeError as e: self.logger.error("sql: %r, error: %r", text, e) self.logger.error("traceback: %r", traceback.format_exc()) click.secho(str(e), err=True, fg='red') continue self.execute_query(text) self.now = dt.datetime.today() except EOFError: self.mssqlcliclient_main.shutdown() if not self.less_chatty: print(localized.goodbye()) def _build_cli(self, history): """ Builds prompt session. NOTE: PROMPT-SESSION USES THIS AS DEPENDENCY. """ def get_message(): prompt = self.get_prompt(self.prompt_format) return [(u'class:prompt', prompt)] def get_continuation(width, line_number, is_soft_wrap): """ NOTE: updating parameters will cause prompt session to crash. """ # pylint: disable=unused-argument continuation = self.multiline_continuation_char * (width - 1) + ' ' return [(u'class:continuation', continuation)] get_toolbar_tokens = create_toolbar_tokens_func(self) if self.wider_completion_menu: complete_style = CompleteStyle.MULTI_COLUMN else: complete_style = CompleteStyle.COLUMN with self._completer_lock: self.prompt_session = PromptSession( message=get_message, style=style_factory(self.syntax_style, self.cli_style), # Layout options. lexer=PygmentsLexer(PostgresLexer), prompt_continuation=get_continuation, bottom_toolbar=get_toolbar_tokens, complete_style=complete_style, input_processors=[ ConditionalProcessor( processor=HighlightMatchingBracketProcessor( chars='[](){}'), #pylint: disable=invalid-unary-operand-type filter=HasFocus(DEFAULT_BUFFER) & ~IsDone()), # Render \t as 4 spaces instead of "^I" TabsProcessor(char1=u' ', char2=u' ')], reserve_space_for_menu=self.min_num_menu_lines, # Buffer options. multiline=mssql_is_multiline(self), completer=ThreadedCompleter( DynamicCompleter(lambda: self.completer)), history=history, auto_suggest=AutoSuggestFromHistory(), complete_while_typing=True, # Key bindings. enable_system_prompt=True, enable_open_in_editor=True, # Other options. key_bindings=mssqlcli_bindings(self), editing_mode=EditingMode.VI if self.vi_mode else EditingMode.EMACS, search_ignore_case=True) return self.prompt_session def _should_show_limit_prompt(self, status, rows): """ Returns True if limit prompt should be shown, False otherwise. NOTE: updating parameters will cause prompt session to crash. 
""" # pylint: disable=unused-argument if not rows: return False return self.interactive_mode and self.row_limit > 0 and len(rows) > self.row_limit def _evaluate_command(self, text): """ Used to run a command entered by the user during CLI operation (Puts the E in REPL) returns (results, MetaQuery) """ # pylint: disable=too-many-locals all_success = True meta_changed = False # CREATE, ALTER, DROP, etc mutated = False # INSERT, DELETE, etc db_changed = False contains_secure_statement = False path_changed = False output = [] total = 0 # Run the query. start = time() # mssql-cli if not self.mssqlcliclient_main.connect_to_database(): click.secho(u'No connection to server. Exiting.') sys.exit(1) for rows, columns, status, sql, is_error in \ self.mssqlcliclient_main.execute_query(text): total = time() - start if self._should_show_limit_prompt(status, rows): click.secho('The result set has more than %s rows.' % self.row_limit, fg='red') if not click.confirm('Do you want to continue?'): click.secho("Aborted!", err=True, fg='red') break contains_secure_statement = security_words_found_in(sql) if is_error: output.append(status) all_success = False continue if self.interactive_mode and self.auto_expand and self.prompt_session: max_width = self.prompt_session.output.get_size().columns else: max_width = None settings = OutputSettings( table_format=self.table_format, dcmlfmt=self.decimal_format, floatfmt=self.float_format, missingval=self.null_string, expanded=self.expanded_output, max_width=max_width, case_function=( self.completer.case if self.interactive_mode and self.settings['case_column_headers'] else lambda x: x ) ) formatted = self.format_output(None, rows, columns, status, settings) output.extend(formatted) db_changed, new_db_name = self.has_change_db_cmd(sql) if new_db_name: self.logger.info('Database context changed.') self.mssqlcliclient_main.connected_database = new_db_name if all_success: meta_changed = meta_changed or self.has_meta_cmd(text) return output, MetaQuery( text, all_success, total, meta_changed, db_changed, path_changed, mutated, contains_secure_statement) def _handle_server_closed_connection(self): """Used during CLI execution""" reconnect = click.prompt( 'Connection reset. Reconnect (Y/n)', show_default=False, type=bool, default=True) if reconnect: self.reset() click.secho('Reconnected!\nTry the command again.', fg='green') def shutdown(self): """ API for shutting down client """ self.mssqlcliclient_main.shutdown() def reset(self): """ Reset mssqlcli client with a new sql tools service and connection. """ self.sqltoolsclient.shutdown() self.sqltoolsclient = SqlToolsClient() self.mssqlcliclient_main = self.mssqlcliclient_main.clone(self.sqltoolsclient) database_response = self.mssqlcliclient_main.connect_to_database() if not database_response: click.secho('Unable reconnect to server %s; database %s.' % ( self.mssqlcliclient_main.server_name, self.mssqlcliclient_main.connected_database), err=True, fg='yellow') self.logger.info(u'Unable to reset connection to server %s; database %s', self.mssqlcliclient_main.server_name, self.mssqlcliclient_main.connected_database) sys.exit(1) else: owner_uri, error_messages = database_response if not owner_uri and error_messages: # can occur if database credentials change during reset self.logger.error(u'Error in reset : %s', error_messages) raise ConnectionResetError(error_messages) def refresh_completions(self, history=None, persist_priorities='all'): # Clone mssqlcliclient to create a new connection with a new owner Uri. 
mssqlclclient_completion_refresher = self.mssqlcliclient_main.clone() callback = functools.partial(self._on_completions_refreshed, persist_priorities=persist_priorities) self.completion_refresher.refresh(mssqcliclient=mssqlclclient_completion_refresher, callbacks=callback, history=history, settings=self.settings) return [(None, None, None, 'Auto-completion refresh started in the background.')] def _on_completions_refreshed(self, new_completer, persist_priorities): self._swap_completer_objects(new_completer, persist_priorities) if self.prompt_session: # After refreshing, redraw the CLI to clear the statusbar # "Refreshing completions..." indicator self.prompt_session.app.invalidate() def _swap_completer_objects(self, new_completer, persist_priorities): """Swap the completer object with the newly created completer. persist_priorities is a string specifying how the old completer's learned prioritizer should be transferred to the new completer. 'none' - The new prioritizer is left in a new/clean state 'all' - The new prioritizer is updated to exactly reflect the old one 'keywords' - The new prioritizer is updated with old keyword priorities, but not any other. """ with self._completer_lock: old_completer = self.completer self.completer = new_completer if persist_priorities == 'all': # Just swap over the entire prioritizer new_completer.prioritizer = old_completer.prioritizer elif persist_priorities == 'keywords': # Swap over the entire prioritizer, but clear name priorities, # leaving learned keyword priorities alone new_completer.prioritizer = old_completer.prioritizer new_completer.prioritizer.clear_names() elif persist_priorities == 'none': # Leave the new prioritizer as is pass # When mssql-cli is first launched we call refresh_completions before # instantiating the cli object. So it is necessary to check if cli # exists before trying the replace the completer object in cli. 
self.completer = new_completer def get_completions(self, text, cursor_position): with self._completer_lock: return self.completer.get_completions( Document(text=text, cursor_position=cursor_position), None) def get_prompt(self, string): string = string.replace('\\t', self.now.strftime('%x %X')) string = string.replace('\\u', self.mssqlcliclient_main.user_name or '(none)') string = string.replace('\\h', self.mssqlcliclient_main.prompt_host or '(none)') string = string.replace('\\d', self.mssqlcliclient_main.connected_database or '(none)') string = string.replace('\\p', str(self.mssqlcliclient_main.prompt_port) or '(none)') string = string.replace('\\n', "\n") return string def get_last_query(self): """Get the last query executed or None.""" return self.query_history[-1][0] if self.query_history else None @staticmethod def has_meta_cmd(query): """Determines if the completion needs a refresh by checking if the sql statement is an alter, create, drop, commit or rollback.""" if query and isinstance(query, str): first_token = query.split()[0] if first_token.lower() in ('alter', 'create', 'drop'): return True return False @staticmethod def has_change_db_cmd(query): """Determines if the statement is a database switch such as 'use' or '\\c' Returns (True, DBName) or (False, None) """ if query and isinstance(query, str): first_token = query.split()[0] if first_token.lower() in ('use', '\\c', '\\connect'): return True, query.split()[1].strip('"') return False, None @staticmethod def quit_command(sql): return (sql.strip().lower() == 'exit' or sql.strip().lower() == 'quit' or sql.strip() == r'\q' or sql.strip() == ':q') @staticmethod def format_output(title, cur, headers, status, settings): # pylint: disable=too-many-locals output = [] expanded = (settings.expanded or settings.table_format == 'vertical') table_format = ('vertical' if settings.expanded else settings.table_format) max_width = settings.max_width case_function = settings.case_function formatter = TabularOutputFormatter(format_name=table_format) def format_array(val): if val is None: return settings.missingval if not isinstance(val, list): return val return '{' + ','.join(text_type(format_array(e)) for e in val) + '}' def format_arrays(data, headers, **_): data = list(data) for row in data: row[:] = [ format_array(val) if isinstance(val, list) else val for val in row ] return data, headers output_kwargs = { 'sep_title': 'RECORD {n}', 'sep_character': '-', 'sep_length': (1, 25), 'missing_value': settings.missingval, 'integer_format': settings.dcmlfmt, 'float_format': settings.floatfmt, 'preprocessors': (format_numbers, format_arrays), 'disable_numparse': True, 'preserve_whitespace': True } if not settings.floatfmt: output_kwargs['preprocessors'] = (align_decimals, ) if title: output.append(title) if cur: headers = [case_function(x) for x in headers] if max_width is not None: cur = list(cur) formatted = formatter.format_output(cur, headers, **output_kwargs) if isinstance(formatted, text_type): formatted = iter(formatted.splitlines()) first_line = next(formatted) formatted = itertools.chain([first_line], formatted) if (not expanded and max_width and len( first_line) > max_width and headers): formatted = formatter.format_output( cur, headers, format_name='vertical', column_types=None, **output_kwargs) if isinstance(formatted, text_type): formatted = iter(formatted.splitlines()) output = itertools.chain(output, formatted) if status: # Only print the status if it's not None. output = itertools.chain(output, [status]) return output
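# --- Illustrative sketch (editor's addition, not part of the original mssql-cli source) ---
# MssqlFileHistory above refuses to persist any statement that mentions a
# security-sensitive keyword, so credentials never land in the on-disk prompt
# history. The minimal, standalone sketch below shows the same idea without
# prompt_toolkit; the keyword list mirrors security_keywords defined earlier in
# this file, but the class and function names here are hypothetical and the
# logic is a simplified assumption rather than the real implementation.

SECURITY_KEYWORDS = ['password', 'secret', 'encrypted_value']


def contains_security_keyword(statement):
    """Return True if the statement mentions any sensitive keyword."""
    try:
        lowered = statement.lower()
    except AttributeError:
        # Non-string input (e.g. None) is treated as safe to record.
        return False
    return any(keyword in lowered for keyword in SECURITY_KEYWORDS)


class ScrubbedHistory:
    """Append-only history that silently skips sensitive statements."""

    def __init__(self):
        self._entries = []

    def append(self, statement):
        if contains_security_keyword(statement):
            return  # never persist credentials
        self._entries.append(statement)


if __name__ == '__main__':
    # Usage example: only the non-sensitive query is recorded.
    history = ScrubbedHistory()
    history.append("SELECT name FROM sys.databases")
    history.append("ALTER LOGIN app WITH PASSWORD = 'hunter2'")
    assert history._entries == ["SELECT name FROM sys.databases"]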
the-stack_0_14196
import copy import logging import os import time from collections import Counter from statistics import mean import numpy as np import pandas as pd from .fold_fitting_strategy import AbstractFoldFittingStrategy, SequentialLocalFoldFittingStrategy from ..abstract.abstract_model import AbstractModel from ...constants import MULTICLASS, REGRESSION, SOFTCLASS, QUANTILE, REFIT_FULL_SUFFIX from ...utils.exceptions import TimeLimitExceeded from ...utils.loaders import load_pkl from ...utils.savers import save_pkl from ...utils.utils import CVSplitter, _compute_fi_with_stddev logger = logging.getLogger(__name__) # TODO: Add metadata object with info like score on each model, train time on each model, etc. class BaggedEnsembleModel(AbstractModel): """ Bagged ensemble meta-model which fits a given model multiple times across different splits of the training data. For certain child models such as KNN, this may only train a single model and instead rely on the child model to generate out-of-fold predictions. """ _oof_filename = 'oof.pkl' def __init__(self, model_base: AbstractModel, random_state=0, **kwargs): self.model_base = model_base self._child_type = type(self.model_base) self.models = [] self._oof_pred_proba = None self._oof_pred_model_repeats = None self._n_repeats = 0 # Number of n_repeats with at least 1 model fit, if kfold=5 and 8 models have been fit, _n_repeats is 2 self._n_repeats_finished = 0 # Number of n_repeats finished, if kfold=5 and 8 models have been fit, _n_repeats_finished is 1 self._k_fold_end = 0 # Number of models fit in current n_repeat (0 if completed), if kfold=5 and 8 models have been fit, _k_fold_end is 3 self._k = None # k models per n_repeat, equivalent to kfold value self._k_per_n_repeat = [] # k-fold used for each n_repeat. == [5, 10, 3] if first kfold was 5, second was 10, and third was 3 self._random_state = random_state self.low_memory = True self._bagged_mode = None # _child_oof currently is only set to True for KNN models, that are capable of LOO prediction generation to avoid needing bagging. # TODO: Consider moving `_child_oof` logic to a separate class / refactor OOF logic. # FIXME: Avoid unnecessary refit during refit_full on `_child_oof=True` models, just re-use the original model. self._child_oof = False # Whether the OOF preds were taken from a single child model (Assumes child can produce OOF preds without bagging). self._cv_splitters = [] # Keeps track of the CV splitter used for each bagged repeat. super().__init__(problem_type=self.model_base.problem_type, eval_metric=self.model_base.eval_metric, **kwargs) def _set_default_params(self): default_params = { # 'use_child_oof': False, # [Advanced] Whether to defer to child model for OOF preds and only train a single child. 'save_bag_folds': True, # 'refit_folds': False, # [Advanced, Experimental] Whether to refit bags immediately to a refit_full model in a single .fit call. 
} for param, val in default_params.items(): self._set_default_param_value(param, val) super()._set_default_params() def _get_default_auxiliary_params(self) -> dict: default_auxiliary_params = super()._get_default_auxiliary_params() extra_auxiliary_params = dict( drop_unique=False, # TODO: Get the value from child instead ) default_auxiliary_params.update(extra_auxiliary_params) return default_auxiliary_params def is_valid(self): return self.is_fit() and (self._n_repeats == self._n_repeats_finished) def can_infer(self): return self.is_fit() and self.params.get('save_bag_folds', True) def is_stratified(self): if self.problem_type in [REGRESSION, QUANTILE, SOFTCLASS]: return False else: return True def is_fit(self): return len(self.models) != 0 def can_fit(self) -> bool: return not self.is_fit() or self._bagged_mode def is_valid_oof(self): return self.is_fit() and (self._child_oof or self._bagged_mode) def get_oof_pred_proba(self, **kwargs): # TODO: Require is_valid == True (add option param to ignore is_valid) return self._oof_pred_proba_func(self._oof_pred_proba, self._oof_pred_model_repeats) @staticmethod def _oof_pred_proba_func(oof_pred_proba, oof_pred_model_repeats): oof_pred_model_repeats_without_0 = np.where(oof_pred_model_repeats == 0, 1, oof_pred_model_repeats) if oof_pred_proba.ndim == 2: oof_pred_model_repeats_without_0 = oof_pred_model_repeats_without_0[:, None] return oof_pred_proba / oof_pred_model_repeats_without_0 def _init_misc(self, **kwargs): child = self._get_model_base().convert_to_template() child.initialize(**kwargs) self.eval_metric = child.eval_metric self.stopping_metric = child.stopping_metric self.quantile_levels = child.quantile_levels self.normalize_pred_probas = child.normalize_pred_probas def preprocess(self, X, preprocess_nonadaptive=True, model=None, **kwargs): if preprocess_nonadaptive: if model is None: if not self.models: return X model = self.models[0] model = self.load_child(model) return model.preprocess(X, preprocess_stateful=False) else: return X def _get_cv_splitter(self, n_splits, n_repeats, groups=None): return CVSplitter(n_splits=n_splits, n_repeats=n_repeats, groups=groups, stratified=self.is_stratified(), random_state=self._random_state) def _fit(self, X, y, X_val=None, y_val=None, X_pseudo=None, y_pseudo=None, k_fold=None, k_fold_start=0, k_fold_end=None, n_repeats=1, n_repeat_start=0, groups=None, **kwargs): use_child_oof = self.params.get('use_child_oof', False) if use_child_oof: if self.is_fit(): # TODO: We may want to throw an exception instead and avoid calling fit more than once return self k_fold = 1 k_fold_end = None groups = None if k_fold is None and groups is None: k_fold = 5 if k_fold is not None and k_fold < 1: k_fold = 1 if k_fold is None or k_fold > 1: k_fold = self._get_cv_splitter(n_splits=k_fold, n_repeats=n_repeats, groups=groups).n_splits self._validate_bag_kwargs( k_fold=k_fold, k_fold_start=k_fold_start, k_fold_end=k_fold_end, n_repeats=n_repeats, n_repeat_start=n_repeat_start, groups=groups, ) if k_fold_end is None: k_fold_end = k_fold model_base = self._get_model_base() model_base.rename(name='') kwargs['feature_metadata'] = self.feature_metadata kwargs['num_classes'] = self.num_classes # TODO: maybe don't pass num_classes to children if self.model_base is not None: self.save_model_base(self.model_base) self.model_base = None if self._oof_pred_proba is None and self.is_fit(): self._load_oof() save_bag_folds = self.params.get('save_bag_folds', True) if k_fold == 1: self._fit_single(X=X, y=y, model_base=model_base, 
use_child_oof=use_child_oof, **kwargs) return self else: refit_folds = self.params.get('refit_folds', False) if refit_folds: save_bag_folds = False if kwargs.get('time_limit', None) is not None: fold_start = n_repeat_start * k_fold + k_fold_start fold_end = (n_repeats - 1) * k_fold + k_fold_end folds_to_fit = fold_end - fold_start # Reserve time for final refit model kwargs['time_limit'] = kwargs['time_limit'] * folds_to_fit / (folds_to_fit + 1.2) self._fit_folds(X=X, y=y, model_base=model_base, X_pseudo=X_pseudo, y_pseudo=y_pseudo, k_fold=k_fold, k_fold_start=k_fold_start, k_fold_end=k_fold_end, n_repeats=n_repeats, n_repeat_start=n_repeat_start, save_folds=save_bag_folds, groups=groups, **kwargs) # FIXME: Don't save folds except for refit # FIXME: Cleanup self # FIXME: Don't add `_FULL` to name if refit_folds: refit_template = self.convert_to_refit_full_template() refit_template.params['use_child_oof'] = False kwargs['time_limit'] = None refit_template.fit(X=X, y=y, k_fold=1, **kwargs) refit_template._oof_pred_proba = self._oof_pred_proba refit_template._oof_pred_model_repeats = self._oof_pred_model_repeats refit_template._child_oof = True refit_template.fit_time += self.fit_time + self.predict_time return refit_template else: return self def _validate_bag_kwargs(self, *, k_fold, k_fold_start, k_fold_end, n_repeats, n_repeat_start, groups): if groups is not None: if self._n_repeats_finished != 0: raise AssertionError('Bagged models cannot call fit with `groups` specified when a full k-fold set has already been fit.') if n_repeats > 1: raise AssertionError('Cannot perform repeated bagging with `groups` specified.') return if k_fold_end is None: k_fold_end = k_fold if k_fold is None: raise ValueError('k_fold cannot be None.') if k_fold < 1: raise ValueError(f'k_fold must be equal or greater than 1, value: ({k_fold})') if n_repeat_start != self._n_repeats_finished: raise ValueError(f'n_repeat_start must equal self._n_repeats_finished, values: ({n_repeat_start}, {self._n_repeats_finished})') if n_repeats <= n_repeat_start: raise ValueError(f'n_repeats must be greater than n_repeat_start, values: ({n_repeats}, {n_repeat_start})') if k_fold_start != self._k_fold_end: raise ValueError(f'k_fold_start must equal previous k_fold_end, values: ({k_fold_start}, {self._k_fold_end})') if k_fold_start >= k_fold_end: # TODO: Remove this limitation if n_repeats > 1 raise ValueError(f'k_fold_end must be greater than k_fold_start, values: ({k_fold_end}, {k_fold_start})') if (n_repeats - n_repeat_start) > 1 and k_fold_end != k_fold: # TODO: Remove this limitation raise ValueError(f'k_fold_end must equal k_fold when (n_repeats - n_repeat_start) > 1, values: ({k_fold_end}, {k_fold})') if self._k is not None and self._k != k_fold: raise ValueError(f'k_fold must equal previously fit k_fold value for the current n_repeat, values: (({k_fold}, {self._k})') def predict_proba(self, X, normalize=None, **kwargs): model = self.load_child(self.models[0]) X = self.preprocess(X, model=model, **kwargs) pred_proba = model.predict_proba(X=X, preprocess_nonadaptive=False, normalize=normalize) for model in self.models[1:]: model = self.load_child(model) pred_proba += model.predict_proba(X=X, preprocess_nonadaptive=False, normalize=normalize) pred_proba = pred_proba / len(self.models) if self.temperature_scalar is not None: pred_proba = self._apply_temperature_scaling(pred_proba) return pred_proba def _predict_proba(self, X, normalize=False, **kwargs): return self.predict_proba(X=X, normalize=normalize, **kwargs) def 
score_with_oof(self, y, sample_weight=None): self._load_oof() valid_indices = self._oof_pred_model_repeats > 0 y = y[valid_indices] y_pred_proba = self.get_oof_pred_proba()[valid_indices] if sample_weight is not None: sample_weight = sample_weight[valid_indices] return self.score_with_y_pred_proba(y=y, y_pred_proba=y_pred_proba, sample_weight=sample_weight) def _fit_single(self, X, y, model_base, use_child_oof, time_limit=None, **kwargs): if self.is_fit(): raise AssertionError('Model is already fit.') if self._n_repeats != 0: raise ValueError(f'n_repeats must equal 0 when fitting a single model with k_fold == 1, value: {self._n_repeats}') model_base.name = f'{model_base.name}S1F1' model_base.set_contexts(path_context=self.path + model_base.name + os.path.sep) time_start_fit = time.time() model_base.fit(X=X, y=y, time_limit=time_limit, **kwargs) model_base.fit_time = time.time() - time_start_fit model_base.predict_time = None X_len = len(X) # Check if pred_proba is going to take too long if time_limit is not None and X_len >= 10000: max_allowed_time = time_limit * 1.3 # allow some buffer time_left = max( max_allowed_time - model_base.fit_time, time_limit * 0.1, # At least 10% of time_limit 10, # At least 10 seconds ) # Sample at most 500 rows to estimate prediction time of all rows # TODO: Consider moving this into end of abstract model fit for all models. # Currently this only fixes problem when in bagged mode, if not bagging, then inference could still be problamatic n_sample = min(500, round(X_len * 0.1)) frac = n_sample / X_len X_sample = X.sample(n=n_sample) time_start_predict = time.time() model_base.predict_proba(X_sample) time_predict_frac = time.time() - time_start_predict time_predict_estimate = time_predict_frac / frac logger.log(15, f'\t{round(time_predict_estimate, 2)}s\t= Estimated out-of-fold prediction time...') if time_predict_estimate > time_left: logger.warning(f'\tNot enough time to generate out-of-fold predictions for model. Estimated time required was {round(time_predict_estimate, 2)}s compared to {round(time_left, 2)}s of available time.') raise TimeLimitExceeded if use_child_oof: logger.log(15, '\t`use_child_oof` was specified for this model. It will function similarly to a bagged model, but will only fit one child model.') time_start_predict = time.time() if model_base._get_tags().get('valid_oof', False): self._oof_pred_proba = model_base.get_oof_pred_proba(X=X, y=y) else: logger.warning('\tWARNING: `use_child_oof` was specified but child model does not have a dedicated `get_oof_pred_proba` method. 
This model may have heavily overfit validation scores.') self._oof_pred_proba = model_base.predict_proba(X=X) self._child_oof = True model_base.predict_time = time.time() - time_start_predict model_base.val_score = model_base.score_with_y_pred_proba(y=y, y_pred_proba=self._oof_pred_proba) else: self._oof_pred_proba = model_base.predict_proba(X=X) # TODO: Cheater value, will be overfit to valid set self._oof_pred_model_repeats = np.ones(shape=len(X), dtype=np.uint8) self._n_repeats = 1 self._n_repeats_finished = 1 self._k_per_n_repeat = [1] self._bagged_mode = False model_base.reduce_memory_size(remove_fit=True, remove_info=False, requires_save=True) if not self.params.get('save_bag_folds', True): model_base.model = None if self.low_memory: self.save_child(model_base, verbose=False) self.models = [model_base.name] else: self.models = [model_base] self._add_child_times_to_bag(model=model_base) def _fit_folds(self, X, y, model_base, X_pseudo=None, y_pseudo=None, k_fold=None, k_fold_start=0, k_fold_end=None, n_repeats=1, n_repeat_start=0, time_limit=None, sample_weight=None, save_folds=True, groups=None, **kwargs): fold_fitting_strategy = self.params.get('fold_fitting_strategy', SequentialLocalFoldFittingStrategy) # TODO: Preprocess data here instead of repeatedly # FIXME: Raise exception if multiclass/binary and a single val fold contains all instances of a class. (Can happen if custom groups is specified) time_start = time.time() if k_fold_start != 0: cv_splitter = self._cv_splitters[n_repeat_start] else: cv_splitter = self._get_cv_splitter(n_splits=k_fold, n_repeats=n_repeats, groups=groups) if k_fold != cv_splitter.n_splits: k_fold = cv_splitter.n_splits if k_fold_end is None: k_fold_end = k_fold if cv_splitter.n_repeats < n_repeats: # If current cv_splitter doesn't have enough n_repeats for all folds, then create a new one. 
cv_splitter = self._get_cv_splitter(n_splits=k_fold, n_repeats=n_repeats, groups=groups) fold_fit_args_list, n_repeats_started, n_repeats_finished = self._generate_fold_configs( X=X, y=y, cv_splitter=cv_splitter, k_fold_start=k_fold_start, k_fold_end=k_fold_end, n_repeat_start=n_repeat_start, n_repeat_end=n_repeats, ) fold_fit_args_list = [dict(model_base=model_base, fold_ctx=fold_ctx, kwargs=kwargs) for fold_ctx in fold_fit_args_list] logger.log(20, f'\tFitting {len(fold_fit_args_list)} child models ' f'({fold_fit_args_list[0]["fold_ctx"]["model_name_suffix"]} - {fold_fit_args_list[-1]["fold_ctx"]["model_name_suffix"]})') oof_pred_proba, oof_pred_model_repeats = self._construct_empty_oof(X=X, y=y) models = [] # noinspection PyCallingNonCallable fold_fitting_strategy: AbstractFoldFittingStrategy = fold_fitting_strategy( bagged_ensemble_model=self, X=X, y=y, X_pseudo=X_pseudo, y_pseudo=y_pseudo, sample_weight=sample_weight, time_limit=time_limit, time_start=time_start, models=models, oof_pred_proba=oof_pred_proba, oof_pred_model_repeats=oof_pred_model_repeats, save_folds=save_folds) for fold_fit_args in fold_fit_args_list: fold_fitting_strategy.schedule_fold_model_fit(**fold_fit_args) fold_fitting_strategy.after_all_folds_scheduled() self.models += models self._bagged_mode = True if self._oof_pred_proba is None: self._oof_pred_proba = oof_pred_proba self._oof_pred_model_repeats = oof_pred_model_repeats else: self._oof_pred_proba += oof_pred_proba self._oof_pred_model_repeats += oof_pred_model_repeats self._cv_splitters += [cv_splitter for _ in range(n_repeats_started)] self._k_per_n_repeat += [k_fold for _ in range(n_repeats_finished)] self._n_repeats = n_repeats if k_fold == k_fold_end: self._k = None self._k_fold_end = 0 self._n_repeats_finished = self._n_repeats else: self._k = k_fold self._k_fold_end = k_fold_end self._n_repeats_finished = self._n_repeats - 1 @staticmethod def _generate_fold_configs(*, X, y, cv_splitter, k_fold_start, k_fold_end, n_repeat_start, n_repeat_end) -> (list, int, int): """ Generates fold configs given a cv_splitter, k_fold start-end and n_repeat start-end. Fold configs are used by inheritors of AbstractFoldFittingStrategy when fitting fold models. Returns a list of fold configs, the number of started repeats, and the number of finished repeats. """ k_fold = cv_splitter.n_splits kfolds = cv_splitter.split(X=X, y=y) fold_start = n_repeat_start * k_fold + k_fold_start fold_end = (n_repeat_end - 1) * k_fold + k_fold_end folds_to_fit = fold_end - fold_start fold_fit_args_list = [] n_repeats_started = 0 n_repeats_finished = 0 for repeat in range(n_repeat_start, n_repeat_end): # For each repeat is_first_set = repeat == n_repeat_start is_last_set = repeat == (n_repeat_end - 1) if (not is_first_set) or (k_fold_start == 0): n_repeats_started += 1 fold_in_set_start = k_fold_start if repeat == n_repeat_start else 0 fold_in_set_end = k_fold_end if is_last_set else k_fold for fold_in_set in range(fold_in_set_start, fold_in_set_end): # For each fold fold = fold_in_set + (repeat * k_fold) fold_ctx = dict( model_name_suffix=f'S{repeat + 1}F{fold_in_set + 1}', # S5F3 = 3rd fold of the 5th repeat set fold=kfolds[fold], is_last_fold=fold == (fold_end - 1), folds_to_fit=folds_to_fit, folds_finished=fold - fold_start, folds_left=fold_end - fold, ) fold_fit_args_list.append(fold_ctx) if fold_in_set_end == k_fold: n_repeats_finished += 1 assert len(fold_fit_args_list) == folds_to_fit, "fold_fit_args_list is not the expected length!" 
return fold_fit_args_list, n_repeats_started, n_repeats_finished # TODO: Augment to generate OOF after shuffling each column in X (Batching), this is the fastest way. # TODO: Reduce logging clutter during OOF importance calculation (Currently logs separately for each child) # Generates OOF predictions from pre-trained bagged models, assuming X and y are in the same row order as used in .fit(X, y) def compute_feature_importance(self, X, y, features=None, silent=False, time_limit=None, is_oof=False, **kwargs) -> pd.DataFrame: if features is None: # FIXME: use FULL features (children can have different features) features = self.load_child(model=self.models[0]).features if not is_oof: return super().compute_feature_importance(X, y, features=features, time_limit=time_limit, silent=silent, **kwargs) fi_fold_list = [] model_index = 0 num_children = len(self.models) if time_limit is not None: time_limit_per_child = time_limit / num_children else: time_limit_per_child = None if not silent: logging_message = f'Computing feature importance via permutation shuffling for {len(features)} features using out-of-fold (OOF) data aggregated across {num_children} child models...' if time_limit is not None: logging_message = f'{logging_message} Time limit: {time_limit}s...' logger.log(20, logging_message) time_start = time.time() early_stop = False children_completed = 0 log_final_suffix = '' for n_repeat, k in enumerate(self._k_per_n_repeat): if is_oof: if self._child_oof or not self._bagged_mode: raise AssertionError('Model trained with no validation data cannot get feature importances on training data, please specify new test data to compute feature importances (model=%s)' % self.name) kfolds = self._cv_splitters[n_repeat].split(X=X, y=y) cur_kfolds = kfolds[n_repeat * k:(n_repeat + 1) * k] else: cur_kfolds = [(None, list(range(len(X))))] * k for i, fold in enumerate(cur_kfolds): _, test_index = fold model = self.load_child(self.models[model_index + i]) fi_fold = model.compute_feature_importance(X=X.iloc[test_index, :], y=y.iloc[test_index], features=features, time_limit=time_limit_per_child, silent=silent, log_prefix='\t', importance_as_list=True, **kwargs) fi_fold_list.append(fi_fold) children_completed += 1 if time_limit is not None and children_completed != num_children: time_now = time.time() time_left = time_limit - (time_now - time_start) time_child_average = (time_now - time_start) / children_completed if time_left < (time_child_average * 1.1): log_final_suffix = f' (Early stopping due to lack of time...)' early_stop = True break if early_stop: break model_index += k # TODO: DON'T THROW AWAY SAMPLES! USE LARGER N fi_list_dict = dict() for val in fi_fold_list: val = val['importance'].to_dict() # TODO: Don't throw away stddev information of children for key in val: if key not in fi_list_dict: fi_list_dict[key] = [] fi_list_dict[key] += val[key] fi_df = _compute_fi_with_stddev(fi_list_dict) if not silent: logger.log(20, f'\t{round(time.time() - time_start, 2)}s\t= Actual runtime (Completed {children_completed} of {num_children} children){log_final_suffix}') return fi_df def get_features(self): assert self.is_fit(), "The model must be fit before calling the get_features method." 
return self.load_child(self.models[0]).get_features() def load_child(self, model, verbose=False) -> AbstractModel: if isinstance(model, str): child_path = self.create_contexts(self.path + model + os.path.sep) return self._child_type.load(path=child_path, verbose=verbose) else: return model def save_child(self, model, verbose=False): child = self.load_child(model) child.set_contexts(self.path + child.name + os.path.sep) child.save(verbose=verbose) # TODO: Multiply epochs/n_iterations by some value (such as 1.1) to account for having more training data than bagged models def convert_to_refit_full_template(self): init_args = self.get_params() init_args['hyperparameters']['save_bag_folds'] = True # refit full models must save folds init_args['model_base'] = self.convert_to_refit_full_template_child() init_args['name'] = init_args['name'] + REFIT_FULL_SUFFIX model_full_template = self.__class__(**init_args) return model_full_template def convert_to_refit_full_template_child(self): refit_params_trained = self._get_compressed_params_trained() refit_params = copy.deepcopy(self._get_model_base().get_params()) refit_params['hyperparameters'].update(refit_params_trained) refit_child_template = self._child_type(**refit_params) return refit_child_template def get_params(self): init_args = dict( model_base=self._get_model_base(), random_state=self._random_state, ) init_args.update(super().get_params()) init_args.pop('eval_metric') init_args.pop('problem_type') return init_args def convert_to_template_child(self): return self._get_model_base().convert_to_template() def _get_compressed_params(self, model_params_list=None): if model_params_list is None: model_params_list = [ self.load_child(child).get_trained_params() for child in self.models ] model_params_compressed = dict() for param in model_params_list[0].keys(): model_param_vals = [model_params[param] for model_params in model_params_list] if all(isinstance(val, bool) for val in model_param_vals): counter = Counter(model_param_vals) compressed_val = counter.most_common(1)[0][0] elif all(isinstance(val, int) for val in model_param_vals): compressed_val = round(mean(model_param_vals)) elif all(isinstance(val, float) for val in model_param_vals): compressed_val = mean(model_param_vals) else: try: counter = Counter(model_param_vals) compressed_val = counter.most_common(1)[0][0] except TypeError: compressed_val = model_param_vals[0] model_params_compressed[param] = compressed_val return model_params_compressed def _get_compressed_params_trained(self): model_params_list = [ self.load_child(child).params_trained for child in self.models ] return self._get_compressed_params(model_params_list=model_params_list) def _get_model_base(self): if self.model_base is None: return self.load_model_base() else: return self.model_base def _add_child_times_to_bag(self, model): if self.fit_time is None: self.fit_time = model.fit_time else: self.fit_time += model.fit_time if self.predict_time is None: self.predict_time = model.predict_time else: self.predict_time += model.predict_time @classmethod def load(cls, path: str, reset_paths=True, low_memory=True, load_oof=False, verbose=True): model = super().load(path=path, reset_paths=reset_paths, verbose=verbose) if not low_memory: model.persist_child_models(reset_paths=reset_paths) if load_oof: model._load_oof() return model @classmethod def load_oof(cls, path, verbose=True): try: oof = load_pkl.load(path=path + 'utils' + os.path.sep + cls._oof_filename, verbose=verbose) oof_pred_proba = oof['_oof_pred_proba'] 
oof_pred_model_repeats = oof['_oof_pred_model_repeats'] except FileNotFoundError: model = cls.load(path=path, reset_paths=True, verbose=verbose) model._load_oof() oof_pred_proba = model._oof_pred_proba oof_pred_model_repeats = model._oof_pred_model_repeats return cls._oof_pred_proba_func(oof_pred_proba=oof_pred_proba, oof_pred_model_repeats=oof_pred_model_repeats) def _load_oof(self): if self._oof_pred_proba is not None: pass else: oof = load_pkl.load(path=self.path + 'utils' + os.path.sep + self._oof_filename) self._oof_pred_proba = oof['_oof_pred_proba'] self._oof_pred_model_repeats = oof['_oof_pred_model_repeats'] def persist_child_models(self, reset_paths=True): for i, model_name in enumerate(self.models): if isinstance(model_name, str): child_path = self.create_contexts(self.path + model_name + os.path.sep) child_model = self._child_type.load(path=child_path, reset_paths=reset_paths, verbose=True) self.models[i] = child_model def load_model_base(self): return load_pkl.load(path=self.path + 'utils' + os.path.sep + 'model_template.pkl') def save_model_base(self, model_base): save_pkl.save(path=self.path + 'utils' + os.path.sep + 'model_template.pkl', object=model_base) def save(self, path=None, verbose=True, save_oof=True, save_children=False) -> str: if path is None: path = self.path if save_children: model_names = [] for child in self.models: child = self.load_child(child) child.set_contexts(path + child.name + os.path.sep) child.save(verbose=False) model_names.append(child.name) self.models = model_names if save_oof and self._oof_pred_proba is not None: save_pkl.save(path=path + 'utils' + os.path.sep + self._oof_filename, object={ '_oof_pred_proba': self._oof_pred_proba, '_oof_pred_model_repeats': self._oof_pred_model_repeats, }) self._oof_pred_proba = None self._oof_pred_model_repeats = None return super().save(path=path, verbose=verbose) # If `remove_fit_stack=True`, variables will be removed that are required to fit more folds and to fit new stacker models which use this model as a base model. # This includes OOF variables. 
def reduce_memory_size(self, remove_fit_stack=False, remove_fit=True, remove_info=False, requires_save=True, reduce_children=False, **kwargs): super().reduce_memory_size(remove_fit=remove_fit, remove_info=remove_info, requires_save=requires_save, **kwargs) if remove_fit_stack: try: os.remove(self.path + 'utils' + os.path.sep + self._oof_filename) except FileNotFoundError: pass if requires_save: self._oof_pred_proba = None self._oof_pred_model_repeats = None try: os.remove(self.path + 'utils' + os.path.sep + 'model_template.pkl') except FileNotFoundError: pass if requires_save: self.model_base = None try: os.rmdir(self.path + 'utils') except OSError: pass if reduce_children: for model in self.models: model = self.load_child(model) model.reduce_memory_size(remove_fit=remove_fit, remove_info=remove_info, requires_save=requires_save, **kwargs) if requires_save and self.low_memory: self.save_child(model=model) def _get_model_names(self): model_names = [] for model in self.models: if isinstance(model, str): model_names.append(model) else: model_names.append(model.name) return model_names def get_info(self): info = super().get_info() children_info = self._get_child_info() child_memory_sizes = [child['memory_size'] for child in children_info.values()] sum_memory_size_child = sum(child_memory_sizes) if child_memory_sizes: max_memory_size_child = max(child_memory_sizes) else: max_memory_size_child = 0 if self.low_memory: max_memory_size = info['memory_size'] + sum_memory_size_child min_memory_size = info['memory_size'] + max_memory_size_child else: max_memory_size = info['memory_size'] min_memory_size = info['memory_size'] - sum_memory_size_child + max_memory_size_child # Necessary if save_space is used as save_space deletes model_base. if len(self.models) > 0: child_model = self.load_child(self.models[0]) else: child_model = self._get_model_base() child_hyperparameters = child_model.params child_ag_args_fit = child_model.params_aux bagged_info = dict( child_model_type=self._child_type.__name__, num_child_models=len(self.models), child_model_names=self._get_model_names(), _n_repeats=self._n_repeats, # _n_repeats_finished=self._n_repeats_finished, # commented out because these are too technical # _k_fold_end=self._k_fold_end, # _k=self._k, _k_per_n_repeat=self._k_per_n_repeat, _random_state=self._random_state, low_memory=self.low_memory, # If True, then model will attempt to use at most min_memory_size memory by having at most one child in memory. If False, model will use max_memory_size memory. bagged_mode=self._bagged_mode, max_memory_size=max_memory_size, # Memory used when all children are loaded into memory at once. min_memory_size=min_memory_size, # Memory used when only the largest child is loaded into memory. 
child_hyperparameters=child_hyperparameters, child_hyperparameters_fit=self._get_compressed_params_trained(), child_ag_args_fit=child_ag_args_fit, ) info['bagged_info'] = bagged_info info['children_info'] = children_info child_features_full = list(set().union(*[child['features'] for child in children_info.values()])) info['features'] = child_features_full info['num_features'] = len(child_features_full) return info def get_memory_size(self): models = self.models self.models = None memory_size = super().get_memory_size() self.models = models return memory_size def _get_child_info(self): child_info_dict = dict() for model in self.models: if isinstance(model, str): child_path = self.create_contexts(self.path + model + os.path.sep) child_info_dict[model] = self._child_type.load_info(child_path) else: child_info_dict[model.name] = model.get_info() return child_info_dict def _construct_empty_oof(self, X, y): if self.problem_type == MULTICLASS: oof_pred_proba = np.zeros(shape=(len(X), len(y.unique())), dtype=np.float32) elif self.problem_type == SOFTCLASS: oof_pred_proba = np.zeros(shape=y.shape, dtype=np.float32) elif self.problem_type == QUANTILE: oof_pred_proba = np.zeros(shape=(len(X), len(self.quantile_levels)), dtype=np.float32) else: oof_pred_proba = np.zeros(shape=len(X), dtype=np.float32) oof_pred_model_repeats = np.zeros(shape=len(X), dtype=np.uint8) return oof_pred_proba, oof_pred_model_repeats def _preprocess_fit_resources(self, silent=False, **kwargs): """Pass along to child models to avoid altering up-front""" return kwargs # TODO: Currently double disk usage, saving model in HPO and also saving model in bag # FIXME: with use_bag_holdout=True, the fold-1 scores that are logged are of the inner validation score, not the holdout score. # Fix this by passing X_val, y_val into this method def _hyperparameter_tune(self, X, y, k_fold, scheduler_options, preprocess_kwargs=None, groups=None, **kwargs): if len(self.models) != 0: raise ValueError('self.models must be empty to call hyperparameter_tune, value: %s' % self.models) kwargs['feature_metadata'] = self.feature_metadata kwargs['num_classes'] = self.num_classes # TODO: maybe don't pass num_classes to children self.model_base.set_contexts(self.path + 'hpo' + os.path.sep) # TODO: Preprocess data here instead of repeatedly if preprocess_kwargs is None: preprocess_kwargs = dict() use_child_oof = self.params.get('use_child_oof', False) X = self.preprocess(X=X, preprocess=False, fit=True, **preprocess_kwargs) if use_child_oof: k_fold = 1 X_fold = X y_fold = y X_val_fold = None y_val_fold = None train_index = list(range(len(X))) test_index = train_index cv_splitter = None else: cv_splitter = self._get_cv_splitter(n_splits=k_fold, n_repeats=1, groups=groups) if k_fold != cv_splitter.n_splits: k_fold = cv_splitter.n_splits kfolds = cv_splitter.split(X=X, y=y) train_index, test_index = kfolds[0] X_fold, X_val_fold = X.iloc[train_index, :], X.iloc[test_index, :] y_fold, y_val_fold = y.iloc[train_index], y.iloc[test_index] orig_time = scheduler_options[1]['time_out'] if orig_time: scheduler_options[1]['time_out'] = orig_time * 0.8 # TODO: Scheduler doesn't early stop on final model, this is a safety net. 
Scheduler should be updated to early stop hpo_models, hpo_model_performances, hpo_results = self.model_base.hyperparameter_tune(X=X_fold, y=y_fold, X_val=X_val_fold, y_val=y_val_fold, scheduler_options=scheduler_options, **kwargs) scheduler_options[1]['time_out'] = orig_time bags = {} bags_performance = {} for i, (model_name, model_path) in enumerate(hpo_models.items()): child: AbstractModel = self._child_type.load(path=model_path) # TODO: Create new Ensemble Here bag = copy.deepcopy(self) bag.rename(f"{bag.name}{os.path.sep}T{i}") bag.set_contexts(self.path_root + bag.name + os.path.sep) oof_pred_proba, oof_pred_model_repeats = self._construct_empty_oof(X=X, y=y) if child._get_tags().get('valid_oof', False): y_pred_proba = child.get_oof_pred_proba(X=X, y=y) bag._n_repeats_finished = 1 bag._k_per_n_repeat = [1] bag._bagged_mode = False bag._child_oof = True # TODO: Consider a separate tag for refit_folds vs efficient OOF else: y_pred_proba = child.predict_proba(X_val_fold) oof_pred_proba[test_index] += y_pred_proba oof_pred_model_repeats[test_index] += 1 bag.model_base = None child.rename('') child.set_contexts(bag.path + child.name + os.path.sep) bag.save_model_base(child.convert_to_template()) bag._k = k_fold bag._k_fold_end = 1 bag._n_repeats = 1 bag._oof_pred_proba = oof_pred_proba bag._oof_pred_model_repeats = oof_pred_model_repeats child.rename('S1F1') child.set_contexts(bag.path + child.name + os.path.sep) if not self.params.get('save_bag_folds', True): child.model = None if bag.low_memory: bag.save_child(child, verbose=False) bag.models.append(child.name) else: bag.models.append(child) bag.val_score = child.val_score bag._add_child_times_to_bag(model=child) if cv_splitter is not None: bag._cv_splitters = [cv_splitter] bag.save() bags[bag.name] = bag.path bags_performance[bag.name] = bag.val_score # TODO: hpo_results likely not correct because no renames return bags, bags_performance, hpo_results def _more_tags(self): return {'valid_oof': True}
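# --- Illustrative sketch (editor's addition, not part of the original AutoGluon source) ---
# BaggedEnsembleModel accumulates summed out-of-fold (OOF) predictions in
# _oof_pred_proba and per-row prediction counts in _oof_pred_model_repeats,
# then averages them in _oof_pred_proba_func, substituting 1 for zero counts so
# rows that were never held out do not cause a division by zero. The short
# numpy example below reproduces that averaging rule in isolation; the function
# name and the sample values are made up purely for illustration.

import numpy as np


def average_oof(oof_pred_sum, oof_repeats):
    """Average summed OOF predictions by how often each row was predicted."""
    safe_repeats = np.where(oof_repeats == 0, 1, oof_repeats)
    if oof_pred_sum.ndim == 2:
        # Multiclass case: broadcast the per-row counts across class columns.
        safe_repeats = safe_repeats[:, None]
    return oof_pred_sum / safe_repeats


if __name__ == '__main__':
    # Usage example: 3 rows, 2 classes; the last row was never held out.
    pred_sum = np.array([[0.2, 0.8], [1.0, 1.0], [0.0, 0.0]])
    repeats = np.array([1, 2, 0], dtype=np.uint8)
    avg = average_oof(pred_sum, repeats)
    assert np.allclose(avg, [[0.2, 0.8], [0.5, 0.5], [0.0, 0.0]])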
the-stack_0_14197
""" Account (OOC) commands. These are stored on the Account object and self.caller is thus always an Account, not an Object/Character. These commands go in the AccountCmdset and are accessible also when puppeting a Character (although with lower priority) These commands use the account_caller property which tells the command parent (MuxCommand, usually) to setup caller correctly. They use self.account to make sure to always use the account object rather than self.caller (which change depending on the level you are calling from) The property self.character can be used to access the character when these commands are triggered with a connected character (such as the case of the `ooc` command), it is None if we are OOC. Note that under MULTISESSION_MODE > 2, Account commands should use self.msg() and similar methods to reroute returns to the correct method. Otherwise all text will be returned to all connected sessions. """ from builtins import range import time from codecs import lookup as codecs_lookup from django.conf import settings from evennia.server.sessionhandler import SESSIONS from evennia.utils import utils, create, logger, search COMMAND_DEFAULT_CLASS = utils.class_from_module(settings.COMMAND_DEFAULT_CLASS) _MAX_NR_CHARACTERS = settings.MAX_NR_CHARACTERS _MULTISESSION_MODE = settings.MULTISESSION_MODE # limit symbol import for API __all__ = ("CmdOOCLook", "CmdIC", "CmdOOC", "CmdPassword", "CmdQuit", "CmdCharCreate", "CmdOption", "CmdSessions", "CmdWho", "CmdColorTest", "CmdQuell") class MuxAccountLookCommand(COMMAND_DEFAULT_CLASS): """ Custom parent (only) parsing for OOC looking, sets a "playable" property on the command based on the parsing. """ def parse(self): """Custom parsing""" super().parse() if _MULTISESSION_MODE < 2: # only one character allowed - not used in this mode self.playable = None return playable = self.account.db._playable_characters if playable is not None: # clean up list if character object was deleted in between if None in playable: playable = [character for character in playable if character] self.account.db._playable_characters = playable # store playable property if self.args: self.playable = dict((utils.to_str(char.key.lower()), char) for char in playable).get(self.args.lower(), None) else: self.playable = playable # Obs - these are all intended to be stored on the Account, and as such, # use self.account instead of self.caller, just to be sure. Also self.msg() # is used to make sure returns go to the right session # note that this is inheriting from MuxAccountLookCommand, # and has the .playable property. class CmdOOCLook(MuxAccountLookCommand): """ look while out-of-character Usage: look Look in the ooc state. """ # This is an OOC version of the look command. Since a # Account doesn't have an in-game existence, there is no # concept of location or "self". If we are controlling # a character, pass control over to normal look. key = "look" aliases = ["l", "ls"] locks = "cmd:all()" help_category = "General" # this is used by the parent account_caller = True def func(self): """implement the ooc look command""" if _MULTISESSION_MODE < 2: # only one character allowed self.msg("You are out-of-character (OOC).\nUse |wic|n to get back into the game.") return # call on-account look helper method self.msg(self.account.at_look(target=self.playable, session=self.session)) class CmdCharCreate(COMMAND_DEFAULT_CLASS): """ create a new character Usage: charcreate <charname> [= desc] Create a new character, optionally giving it a description. 
You may use upper-case letters in the name - you will nevertheless always be able to access your character using lower-case letters if you want. """ key = "charcreate" locks = "cmd:pperm(Player)" help_category = "General" # this is used by the parent account_caller = True def func(self): """create the new character""" account = self.account if not self.args: self.msg("Usage: charcreate <charname> [= description]") return key = self.lhs desc = self.rhs charmax = _MAX_NR_CHARACTERS if not account.is_superuser and \ (account.db._playable_characters and len(account.db._playable_characters) >= charmax): self.msg("You may only create a maximum of %i characters." % charmax) return from evennia.objects.models import ObjectDB typeclass = settings.BASE_CHARACTER_TYPECLASS if ObjectDB.objects.filter(db_typeclass_path=typeclass, db_key__iexact=key): # check if this Character already exists. Note that we are only # searching the base character typeclass here, not any child # classes. self.msg("|rA character named '|w%s|r' already exists.|n" % key) return # create the character start_location = ObjectDB.objects.get_id(settings.START_LOCATION) default_home = ObjectDB.objects.get_id(settings.DEFAULT_HOME) permissions = settings.PERMISSION_ACCOUNT_DEFAULT new_character = create.create_object(typeclass, key=key, location=start_location, home=default_home, permissions=permissions) # only allow creator (and developers) to puppet this char new_character.locks.add("puppet:id(%i) or pid(%i) or perm(Developer) or pperm(Developer);delete:id(%i) or perm(Admin)" % (new_character.id, account.id, account.id)) account.db._playable_characters.append(new_character) if desc: new_character.db.desc = desc elif not new_character.db.desc: new_character.db.desc = "This is a character." self.msg("Created new character %s. Use |wic %s|n to enter the game as this character." % (new_character.key, new_character.key)) logger.log_sec('Character Created: %s (Caller: %s, IP: %s).' % (new_character, account, self.session.address)) class CmdCharDelete(COMMAND_DEFAULT_CLASS): """ delete a character - this cannot be undone! Usage: chardelete <charname> Permanently deletes one of your characters. """ key = "chardelete" locks = "cmd:pperm(Player)" help_category = "General" def func(self): """delete the character""" account = self.account if not self.args: self.msg("Usage: chardelete <charactername>") return # use the playable_characters list to search match = [char for char in utils.make_iter(account.db._playable_characters) if char.key.lower() == self.args.lower()] if not match: self.msg("You have no such character to delete.") return elif len(match) > 1: self.msg("Aborting - there are two characters with the same name. Ask an admin to delete the right one.") return else: # one match from evennia.utils.evmenu import get_input def _callback(caller, callback_prompt, result): if result.lower() == "yes": # only take action delobj = caller.ndb._char_to_delete key = delobj.key caller.db._playable_characters = [pc for pc in caller.db._playable_characters if pc != delobj] delobj.delete() self.msg("Character '%s' was permanently deleted." % key) logger.log_sec('Character Deleted: %s (Caller: %s, IP: %s).' 
% (key, account, self.session.address)) else: self.msg("Deletion was aborted.") del caller.ndb._char_to_delete match = match[0] account.ndb._char_to_delete = match # Return if caller has no permission to delete this if not match.access(account, 'delete'): self.msg("You do not have permission to delete this character.") return prompt = "|rThis will permanently destroy '%s'. This cannot be undone.|n Continue yes/[no]?" get_input(account, prompt % match.key, _callback) class CmdIC(COMMAND_DEFAULT_CLASS): """ control an object you have permission to puppet Usage: ic <character> Go in-character (IC) as a given Character. This will attempt to "become" a different object assuming you have the right to do so. Note that it's the ACCOUNT character that puppets characters/objects and which needs to have the correct permission! You cannot become an object that is already controlled by another account. In principle <character> can be any in-game object as long as you the account have access right to puppet it. """ key = "ic" # lock must be all() for different puppeted objects to access it. locks = "cmd:all()" aliases = "puppet" help_category = "General" # this is used by the parent account_caller = True def func(self): """ Main puppet method """ account = self.account session = self.session new_character = None if not self.args: new_character = account.db._last_puppet if not new_character: self.msg("Usage: ic <character>") return if not new_character: # search for a matching character new_character = [char for char in search.object_search(self.args) if char.access(account, "puppet")] if not new_character: self.msg("That is not a valid character choice.") return if len(new_character) > 1: self.msg("Multiple targets with the same name:\n %s" % ", ".join("%s(#%s)" % (obj.key, obj.id) for obj in new_character)) return else: new_character = new_character[0] try: account.puppet_object(session, new_character) account.db._last_puppet = new_character logger.log_sec('Puppet Success: (Caller: %s, Target: %s, IP: %s).' % (account, new_character, self.session.address)) except RuntimeError as exc: self.msg("|rYou cannot become |C%s|n: %s" % (new_character.name, exc)) logger.log_sec('Puppet Failed: %s (Caller: %s, Target: %s, IP: %s).' % (exc, account, new_character, self.session.address)) # note that this is inheriting from MuxAccountLookCommand, # and as such has the .playable property. class CmdOOC(MuxAccountLookCommand): """ stop puppeting and go ooc Usage: ooc Go out-of-character (OOC). This will leave your current character and put you in a incorporeal OOC state. """ key = "ooc" locks = "cmd:pperm(Player)" aliases = "unpuppet" help_category = "General" # this is used by the parent account_caller = True def func(self): """Implement function""" account = self.account session = self.session old_char = account.get_puppet(session) if not old_char: string = "You are already OOC." self.msg(string) return account.db._last_puppet = old_char # disconnect try: account.unpuppet_object(session) self.msg("\n|GYou go OOC.|n\n") if _MULTISESSION_MODE < 2: # only one character allowed self.msg("You are out-of-character (OOC).\nUse |wic|n to get back into the game.") return self.msg(account.at_look(target=self.playable, session=session)) except RuntimeError as exc: self.msg("|rCould not unpuppet from |c%s|n: %s" % (old_char, exc)) class CmdSessions(COMMAND_DEFAULT_CLASS): """ check your connected session(s) Usage: sessions Lists the sessions currently connected to your account. 
""" key = "sessions" locks = "cmd:all()" help_category = "General" # this is used by the parent account_caller = True def func(self): """Implement function""" account = self.account sessions = account.sessions.all() table = self.styled_table("|wsessid", "|wprotocol", "|whost", "|wpuppet/character", "|wlocation") for sess in sorted(sessions, key=lambda x: x.sessid): char = account.get_puppet(sess) table.add_row(str(sess.sessid), str(sess.protocol_key), isinstance(sess.address, tuple) and sess.address[0] or sess.address, char and str(char) or "None", char and str(char.location) or "N/A") self.msg("|wYour current session(s):|n\n%s" % table) class CmdWho(COMMAND_DEFAULT_CLASS): """ list who is currently online Usage: who doing Shows who is currently online. Doing is an alias that limits info also for those with all permissions. """ key = "who" aliases = "doing" locks = "cmd:all()" # this is used by the parent account_caller = True def func(self): """ Get all connected accounts by polling session. """ account = self.account session_list = SESSIONS.get_sessions() session_list = sorted(session_list, key=lambda o: o.account.key) if self.cmdstring == "doing": show_session_data = False else: show_session_data = account.check_permstring("Developer") or account.check_permstring("Admins") naccounts = SESSIONS.account_count() if show_session_data: # privileged info table = self.styled_table("|wAccount Name", "|wOn for", "|wIdle", "|wPuppeting", "|wRoom", "|wCmds", "|wProtocol", "|wHost") for session in session_list: if not session.logged_in: continue delta_cmd = time.time() - session.cmd_last_visible delta_conn = time.time() - session.conn_time account = session.get_account() puppet = session.get_puppet() location = puppet.location.key if puppet and puppet.location else "None" table.add_row(utils.crop(account.get_display_name(account), width=25), utils.time_format(delta_conn, 0), utils.time_format(delta_cmd, 1), utils.crop(puppet.get_display_name(account) if puppet else "None", width=25), utils.crop(location, width=25), session.cmd_total, session.protocol_key, isinstance(session.address, tuple) and session.address[0] or session.address) else: # unprivileged table = self.styled_table("|wAccount name", "|wOn for", "|wIdle") for session in session_list: if not session.logged_in: continue delta_cmd = time.time() - session.cmd_last_visible delta_conn = time.time() - session.conn_time account = session.get_account() table.add_row(utils.crop(account.get_display_name(account), width=25), utils.time_format(delta_conn, 0), utils.time_format(delta_cmd, 1)) is_one = naccounts == 1 self.msg("|wAccounts:|n\n%s\n%s unique account%s logged in." % (table, "One" if is_one else naccounts, "" if is_one else "s")) class CmdOption(COMMAND_DEFAULT_CLASS): """ Set an account option Usage: option[/save] [name = value] Switches: save - Save the current option settings for future logins. clear - Clear the saved options. This command allows for viewing and setting client interface settings. Note that saved options may not be able to be used if later connecting with a client with different capabilities. 
""" key = "option" aliases = "options" switch_options = ("save", "clear") locks = "cmd:all()" # this is used by the parent account_caller = True def func(self): """ Implements the command """ if self.session is None: return flags = self.session.protocol_flags # Display current options if not self.args: # list the option settings if "save" in self.switches: # save all options self.caller.db._saved_protocol_flags = flags self.msg("|gSaved all options. Use option/clear to remove.|n") if "clear" in self.switches: # clear all saves self.caller.db._saved_protocol_flags = {} self.msg("|gCleared all saved options.") options = dict(flags) # make a copy of the flag dict saved_options = dict(self.caller.attributes.get("_saved_protocol_flags", default={})) if "SCREENWIDTH" in options: if len(options["SCREENWIDTH"]) == 1: options["SCREENWIDTH"] = options["SCREENWIDTH"][0] else: options["SCREENWIDTH"] = " \n".join("%s : %s" % (screenid, size) for screenid, size in options["SCREENWIDTH"].items()) if "SCREENHEIGHT" in options: if len(options["SCREENHEIGHT"]) == 1: options["SCREENHEIGHT"] = options["SCREENHEIGHT"][0] else: options["SCREENHEIGHT"] = " \n".join("%s : %s" % (screenid, size) for screenid, size in options["SCREENHEIGHT"].items()) options.pop("TTYPE", None) header = ("Name", "Value", "Saved") if saved_options else ("Name", "Value") table = self.styled_table(*header) for key in sorted(options): row = [key, options[key]] if saved_options: saved = " |YYes|n" if key in saved_options else "" changed = "|y*|n" if key in saved_options and flags[key] != saved_options[key] else "" row.append("%s%s" % (saved, changed)) table.add_row(*row) self.msg("|wClient settings (%s):|n\n%s|n" % (self.session.protocol_key, table)) return if not self.rhs: self.msg("Usage: option [name = [value]]") return # Try to assign new values def validate_encoding(new_encoding): # helper: change encoding try: codecs_lookup(new_encoding) except LookupError: raise RuntimeError("The encoding '|w%s|n' is invalid. " % new_encoding) return val def validate_size(new_size): return {0: int(new_size)} def validate_bool(new_bool): return True if new_bool.lower() in ("true", "on", "1") else False def update(new_name, new_val, validator): # helper: update property and report errors try: old_val = flags.get(new_name, False) new_val = validator(new_val) if old_val == new_val: self.msg("Option |w%s|n was kept as '|w%s|n'." % (new_name, old_val)) else: flags[new_name] = new_val self.msg("Option |w%s|n was changed from '|w%s|n' to '|w%s|n'." % (new_name, old_val, new_val)) return {new_name: new_val} except Exception as err: self.msg("|rCould not set option |w%s|r:|n %s" % (new_name, err)) return False validators = {"ANSI": validate_bool, "CLIENTNAME": utils.to_str, "ENCODING": validate_encoding, "MCCP": validate_bool, "NOGOAHEAD": validate_bool, "MXP": validate_bool, "NOCOLOR": validate_bool, "NOPKEEPALIVE": validate_bool, "OOB": validate_bool, "RAW": validate_bool, "SCREENHEIGHT": validate_size, "SCREENWIDTH": validate_size, "SCREENREADER": validate_bool, "TERM": utils.to_str, "UTF-8": validate_bool, "XTERM256": validate_bool, "INPUTDEBUG": validate_bool, "FORCEDENDLINE": validate_bool} name = self.lhs.upper() val = self.rhs.strip() optiondict = False if val and name in validators: optiondict = update(name, val, validators[name]) else: self.msg("|rNo option named '|w%s|r'." 
% name) if optiondict: # a valid setting if "save" in self.switches: # save this option only saved_options = self.account.attributes.get("_saved_protocol_flags", default={}) saved_options.update(optiondict) self.account.attributes.add("_saved_protocol_flags", saved_options) for key in optiondict: self.msg("|gSaved option %s.|n" % key) if "clear" in self.switches: # clear this save for key in optiondict: self.account.attributes.get("_saved_protocol_flags", {}).pop(key, None) self.msg("|gCleared saved %s." % key) self.session.update_flags(**optiondict) class CmdPassword(COMMAND_DEFAULT_CLASS): """ change your password Usage: password <old password> = <new password> Changes your password. Make sure to pick a safe one. """ key = "password" locks = "cmd:pperm(Player)" # this is used by the parent account_caller = True def func(self): """hook function.""" account = self.account if not self.rhs: self.msg("Usage: password <oldpass> = <newpass>") return oldpass = self.lhslist[0] # Both of these are newpass = self.rhslist[0] # already stripped by parse() # Validate password validated, error = account.validate_password(newpass) if not account.check_password(oldpass): self.msg("The specified old password isn't correct.") elif not validated: errors = [e for suberror in error.messages for e in error.messages] string = "\n".join(errors) self.msg(string) else: account.set_password(newpass) account.save() self.msg("Password changed.") logger.log_sec('Password Changed: %s (Caller: %s, IP: %s).' % (account, account, self.session.address)) class CmdQuit(COMMAND_DEFAULT_CLASS): """ quit the game Usage: quit Switch: all - disconnect all connected sessions Gracefully disconnect your current session from the game. Use the /all switch to disconnect from all sessions. """ key = "quit" switch_options = ("all",) locks = "cmd:all()" # this is used by the parent account_caller = True def func(self): """hook function""" account = self.account if 'all' in self.switches: account.msg("|RQuitting|n all sessions. Hope to see you soon again.", session=self.session) reason = "quit/all" for session in account.sessions.all(): account.disconnect_session_from_account(session, reason) else: nsess = len(account.sessions.all()) reason = "quit" if nsess == 2: account.msg("|RQuitting|n. One session is still connected.", session=self.session) elif nsess > 2: account.msg("|RQuitting|n. %i sessions are still connected." % (nsess - 1), session=self.session) else: # we are quitting the last available session account.msg("|RQuitting|n. Hope to see you again, soon.", session=self.session) account.disconnect_session_from_account(self.session, reason) class CmdColorTest(COMMAND_DEFAULT_CLASS): """ testing which colors your client support Usage: color ansi||xterm256 Prints a color map along with in-mud color codes to use to produce them. It also tests what is supported in your client. Choices are 16-color ansi (supported in most muds) or the 256-color xterm256 standard. No checking is done to determine your client supports color - if not you will see rubbish appear. """ key = "color" locks = "cmd:all()" help_category = "General" # this is used by the parent account_caller = True # the slices of the ANSI_PARSER lists to use for retrieving the # relevant color tags to display. Replace if using another schema. # This command can only show one set of markup. 
slice_bright_fg = slice(7, 15) # from ANSI_PARSER.ansi_map slice_dark_fg = slice(15, 23) # from ANSI_PARSER.ansi_map slice_dark_bg = slice(-8, None) # from ANSI_PARSER.ansi_map slice_bright_bg = slice(None, None) # from ANSI_PARSER.ansi_xterm256_bright_bg_map def table_format(self, table): """ Helper method to format the ansi/xterm256 tables. Takes a table of columns [[val,val,...],[val,val,...],...] """ if not table: return [[]] extra_space = 1 max_widths = [max([len(str(val)) for val in col]) for col in table] ftable = [] for irow in range(len(table[0])): ftable.append([str(col[irow]).ljust(max_widths[icol]) + " " * extra_space for icol, col in enumerate(table)]) return ftable def func(self): """Show color tables""" if self.args.startswith("a"): # show ansi 16-color table from evennia.utils import ansi ap = ansi.ANSI_PARSER # ansi colors # show all ansi color-related codes bright_fg = ["%s%s|n" % (code, code.replace("|", "||")) for code, _ in ap.ansi_map[self.slice_bright_fg]] dark_fg = ["%s%s|n" % (code, code.replace("|", "||")) for code, _ in ap.ansi_map[self.slice_dark_fg]] dark_bg = ["%s%s|n" % (code.replace("\\", ""), code.replace("|", "||").replace("\\", "")) for code, _ in ap.ansi_map[self.slice_dark_bg]] bright_bg = ["%s%s|n" % (code.replace("\\", ""), code.replace("|", "||").replace("\\", "")) for code, _ in ap.ansi_xterm256_bright_bg_map[self.slice_bright_bg]] dark_fg.extend(["" for _ in range(len(bright_fg) - len(dark_fg))]) table = utils.format_table([bright_fg, dark_fg, bright_bg, dark_bg]) string = "ANSI colors:" for row in table: string += "\n " + " ".join(row) self.msg(string) self.msg("||X : black. ||/ : return, ||- : tab, ||_ : space, ||* : invert, ||u : underline\n" "To combine background and foreground, add background marker last, e.g. ||r||[B.\n" "Note: bright backgrounds like ||[r requires your client handling Xterm256 colors.") elif self.args.startswith("x"): # show xterm256 table table = [[], [], [], [], [], [], [], [], [], [], [], []] for ir in range(6): for ig in range(6): for ib in range(6): # foreground table table[ir].append("|%i%i%i%s|n" % (ir, ig, ib, "||%i%i%i" % (ir, ig, ib))) # background table table[6 + ir].append("|%i%i%i|[%i%i%i%s|n" % (5 - ir, 5 - ig, 5 - ib, ir, ig, ib, "||[%i%i%i" % (ir, ig, ib))) table = self.table_format(table) string = "Xterm256 colors (if not all hues show, your client might not report that it can handle xterm256):" string += "\n" + "\n".join("".join(row) for row in table) table = [[], [], [], [], [], [], [], [], [], [], [], []] for ibatch in range(4): for igray in range(6): letter = chr(97 + (ibatch * 6 + igray)) inverse = chr(122 - (ibatch * 6 + igray)) table[0 + igray].append("|=%s%s |n" % (letter, "||=%s" % letter)) table[6 + igray].append("|=%s|[=%s%s |n" % (inverse, letter, "||[=%s" % letter)) for igray in range(6): # the last row (y, z) has empty columns if igray < 2: letter = chr(121 + igray) inverse = chr(98 - igray) fg = "|=%s%s |n" % (letter, "||=%s" % letter) bg = "|=%s|[=%s%s |n" % (inverse, letter, "||[=%s" % letter) else: fg, bg = " ", " " table[0 + igray].append(fg) table[6 + igray].append(bg) table = self.table_format(table) string += "\n" + "\n".join("".join(row) for row in table) self.msg(string) else: # malformed input self.msg("Usage: color ansi||xterm256") class CmdQuell(COMMAND_DEFAULT_CLASS): """ use character's permissions instead of account's Usage: quell unquell Normally the permission level of the Account is used when puppeting a Character/Object to determine access. 
This command will switch the lock system to make use of the puppeted Object's permissions instead. This is useful mainly for testing. Hierarchical permission quelling only work downwards, thus an Account cannot use a higher-permission Character to escalate their permission level. Use the unquell command to revert back to normal operation. """ key = "quell" aliases = ["unquell"] locks = "cmd:pperm(Player)" help_category = "General" # this is used by the parent account_caller = True def _recache_locks(self, account): """Helper method to reset the lockhandler on an already puppeted object""" if self.session: char = self.session.puppet if char: # we are already puppeting an object. We need to reset # the lock caches (otherwise the superuser status change # won't be visible until repuppet) char.locks.reset() account.locks.reset() def func(self): """Perform the command""" account = self.account permstr = account.is_superuser and " (superuser)" or "(%s)" % (", ".join(account.permissions.all())) if self.cmdstring in ('unquell', 'unquell'): if not account.attributes.get('_quell'): self.msg("Already using normal Account permissions %s." % permstr) else: account.attributes.remove('_quell') self.msg("Account permissions %s restored." % permstr) else: if account.attributes.get('_quell'): self.msg("Already quelling Account %s permissions." % permstr) return account.attributes.add('_quell', True) puppet = self.session.puppet if puppet: cpermstr = "(%s)" % ", ".join(puppet.permissions.all()) cpermstr = "Quelling to current puppet's permissions %s." % cpermstr cpermstr += "\n(Note: If this is higher than Account permissions %s," \ " the lowest of the two will be used.)" % permstr cpermstr += "\nUse unquell to return to normal permission usage." self.msg(cpermstr) else: self.msg("Quelling Account permissions%s. Use unquell to get them back." % permstr) self._recache_locks(account) class CmdStyle(COMMAND_DEFAULT_CLASS): """ In-game style options Usage: style style <option> = <value> Configure stylings for in-game display elements like table borders, help entriest etc. Use without arguments to see all available options. """ key = "style" switch_options = ['clear'] def func(self): if not self.args: self.list_styles() return self.set() def list_styles(self): table = self.styled_table('Option', 'Description', 'Type', 'Value', width=78) for op_key in self.account.options.options_dict.keys(): op_found = self.account.options.get(op_key, return_obj=True) table.add_row(op_key, op_found.description, op_found.__class__.__name__, op_found.display()) self.msg(str(table)) def set(self): try: result = self.account.options.set(self.lhs, self.rhs) except ValueError as e: self.msg(str(e)) return self.msg('Style %s set to %s' % (self.lhs, result))
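
# --- Illustrative usage sketch (not part of the original module) -------------
# Commands like the ones above only take effect once they are added to a
# command set.  A minimal account command set wiring in two of them could look
# like the class below; in a real Evennia game dir this normally lives in a
# separate default_cmdsets.py module, so treat the placement here as an
# example only.
from evennia import default_cmds as _default_cmds


class ExampleAccountCmdSet(_default_cmds.AccountCmdSet):
    """Hypothetical cmdset bundling a couple of the commands defined above."""
    key = "ExampleAccount"

    def at_cmdset_creation(self):
        super().at_cmdset_creation()
        self.add(CmdStyle())
        self.add(CmdQuell())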
the-stack_0_14200
import mxnet as mx import numpy as np import cv2 from tools.rand_sampler import RandSampler class DetIter(mx.io.DataIter): """ Detection Iterator, which will feed data and label to network Optional data augmentation is performed when providing batch Parameters: ---------- imdb : Imdb image database batch_size : int batch size data_shape : int or (int, int) image shape to be resized mean_pixels : float or float list [R, G, B], mean pixel values rand_samplers : list random cropping sampler list, if not specified, will use original image only rand_mirror : bool whether to randomly mirror input images, default False shuffle : bool whether to shuffle initial image list, default False rand_seed : int or None whether to use fixed random seed, default None max_crop_trial : bool if random crop is enabled, defines the maximum trial time if trial exceed this number, will give up cropping is_train : bool whether in training phase, default True, if False, labels might be ignored """ def __init__(self, imdb, batch_size, data_shape, \ mean_pixels=[128, 128, 128], rand_samplers=[], \ rand_mirror=False, shuffle=False, rand_seed=None, \ is_train=True, max_crop_trial=50): super(DetIter, self).__init__() self._imdb = imdb self.batch_size = batch_size if isinstance(data_shape, int): data_shape = (data_shape, data_shape) self._data_shape = data_shape self._mean_pixels = mx.nd.array(mean_pixels).reshape((3,1,1)) if not rand_samplers: self._rand_samplers = [] else: if not isinstance(rand_samplers, list): rand_samplers = [rand_samplers] assert isinstance(rand_samplers[0], RandSampler), "Invalid rand sampler" self._rand_samplers = rand_samplers self.is_train = is_train self._rand_mirror = rand_mirror self._shuffle = shuffle if rand_seed: np.random.seed(rand_seed) # fix random seed self._max_crop_trial = max_crop_trial self._current = 0 self._size = imdb.num_images self._index = np.arange(self._size) self._data = None self._label = None self._get_batch() @property def provide_data(self): return [(k, v.shape) for k, v in self._data.items()] @property def provide_label(self): if self.is_train: return [(k, v.shape) for k, v in self._label.items()] else: return [] def reset(self): self._current = 0 if self._shuffle: np.random.shuffle(self._index) def iter_next(self): return self._current < self._size def next(self): if self.iter_next(): self._get_batch() data_batch = mx.io.DataBatch(data=self._data.values(), label=self._label.values(), pad=self.getpad(), index=self.getindex()) self._current += self.batch_size return data_batch else: raise StopIteration def getindex(self): return self._current // self.batch_size def getpad(self): pad = self._current + self.batch_size - self._size return 0 if pad < 0 else pad def _get_batch(self): """ Load data/label from dataset """ batch_data = mx.nd.zeros((self.batch_size, 3, self._data_shape[0], self._data_shape[1])) batch_label = [] for i in range(self.batch_size): if (self._current + i) >= self._size: if not self.is_train: continue # use padding from middle in each epoch idx = (self._current + i + self._size // 2) % self._size index = self._index[idx] else: index = self._index[self._current + i] # index = self.debug_index im_path = self._imdb.image_path_from_index(index) with open(im_path, 'rb') as fp: img_content = fp.read() img = mx.img.imdecode(img_content) gt = self._imdb.label_from_index(index).copy() if self.is_train else None data, label = self._data_augmentation(img, gt) batch_data[i] = data if self.is_train: batch_label.append(label) self._data = {'data': batch_data} if 
self.is_train: self._label = {'label': mx.nd.array(np.array(batch_label))} else: self._label = {'label': None} def _data_augmentation(self, data, label): """ perform data augmentations: crop, mirror, resize, sub mean, swap channels... """ if self.is_train and self._rand_samplers: rand_crops = [] for rs in self._rand_samplers: rand_crops += rs.sample(label) num_rand_crops = len(rand_crops) # randomly pick up one as input data if num_rand_crops > 0: index = int(np.random.uniform(0, 1) * num_rand_crops) width = data.shape[1] height = data.shape[0] crop = rand_crops[index][0] xmin = int(crop[0] * width) ymin = int(crop[1] * height) xmax = int(crop[2] * width) ymax = int(crop[3] * height) if xmin >= 0 and ymin >= 0 and xmax <= width and ymax <= height: data = mx.img.fixed_crop(data, xmin, ymin, xmax-xmin, ymax-ymin) else: # padding mode new_width = xmax - xmin new_height = ymax - ymin offset_x = 0 - xmin offset_y = 0 - ymin data_bak = data data = mx.nd.full((new_height, new_width, 3), 128, dtype='uint8') data[offset_y:offset_y+height, offset_x:offset_x + width, :] = data_bak label = rand_crops[index][1] if self.is_train: interp_methods = [cv2.INTER_LINEAR, cv2.INTER_CUBIC, cv2.INTER_AREA, \ cv2.INTER_NEAREST, cv2.INTER_LANCZOS4] else: interp_methods = [cv2.INTER_LINEAR] interp_method = interp_methods[int(np.random.uniform(0, 1) * len(interp_methods))] data = mx.img.imresize(data, self._data_shape[0], self._data_shape[1], interp_method) if self.is_train and self._rand_mirror: if np.random.uniform(0, 1) > 0.5: data = mx.nd.flip(data, axis=1) valid_mask = np.where(label[:, 0] > -1)[0] tmp = 1.0 - label[valid_mask, 1] label[valid_mask, 1] = 1.0 - label[valid_mask, 3] label[valid_mask, 3] = tmp data = mx.nd.transpose(data, (2,0,1)) data = data.astype('float32') data = data - self._mean_pixels return data, label
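
# --- Illustrative sketch (not part of the original module) -------------------
# The horizontal-mirror branch in _data_augmentation() above remaps normalized
# box labels of the form [cls, xmin, ymin, xmax, ymax]: after flipping the
# image, the new xmin is 1 - old xmax and the new xmax is 1 - old xmin.  The
# standalone helper below applies exactly that remapping to a plain numpy
# array, so the transform can be checked without building an Imdb or iterator.
def _flip_boxes_horizontally(label):
    """Mirror normalized [cls, xmin, ymin, xmax, ymax] rows; cls < 0 marks padding."""
    label = label.copy()
    valid_mask = np.where(label[:, 0] > -1)[0]
    tmp = 1.0 - label[valid_mask, 1]
    label[valid_mask, 1] = 1.0 - label[valid_mask, 3]
    label[valid_mask, 3] = tmp
    return label


if __name__ == '__main__':
    # e.g. a box spanning x in [0.1, 0.4] becomes [0.6, 0.9] after the flip
    _demo = np.array([[0.0, 0.1, 0.2, 0.4, 0.6],
                      [-1.0, 0.0, 0.0, 0.0, 0.0]])  # second row is padding
    print(_flip_boxes_horizontally(_demo))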
the-stack_0_14204
from parse_tree import parse_sentence from statement import * import statement def reit(logic_val, reference): if len(reference) > 1: return False logic_tree = parse_sentence(logic_val) ref_tree = parse_sentence(reference[0]) return compareTree(logic_tree, ref_tree) def orIntro(logic_val, reference): if len(reference) > 1: return False logic_tree = parse_sentence(logic_val) ref_tree = parse_sentence(reference[0]) temp_tree = logic_tree # TODO: Make sure all top level are ors #while isinstance(temp_tree, BinaryStatement): # if not temp_tree.value == '|': # return False # temp_tree = temp_tree.left ors = getAllOrOperands(logic_tree, []) for i in ors: print_tree(i) if compareTree(i, ref_tree) == True: return True return False def orElim(logic_val, reference): if len(reference) == 0: return False logic_tree = parse_sentence(logic_val) ref_tree = parse_sentence(reference[0]) ors = getAllOrOperands(ref_tree, []) #TODO: Make sure all top level are ors for ref in reference[1:]: ref_tree2 = parse_sentence(ref) for i in ors: if compareTree(ref_tree2.left, i) == True: break else: return False for ref in reference[1:]: ref_tree2 = parse_sentence(ref) if compareTree(ref_tree2.right, logic_tree) == False: return False return True def andIntro(logic_val, reference): if len(reference) == 0: return False logic_tree = parse_sentence(logic_val) refs = [] for i in reference: refs.append(parse_sentence(i)) # TODO: Make sure all top level are ands ands = getAllAndOperands(logic_tree, []) for i in ands: for j in refs: if compareTree(i, j) == True: break else: return False return True def andElim(logic_val, reference): if len(reference) > 1: return False logic_tree = parse_sentence(logic_val) ref_tree = parse_sentence(reference[0]) temp_tree = ref_tree # TODO: Make sure all top level are ands #while isinstance(temp_tree, BinaryStatement): # if not temp_tree.value == '|': # return False # temp_tree = temp_tree.left ands = getAllAndOperands(ref_tree, []) for i in ands: if compareTree(i, logic_tree) == True: return True return False def notIntro(logic_val, reference): if len(reference) > 1: return False logic_tree = parse_sentence(logic_val) ref_tree = parse_sentence(reference[0]) # this is a subproof if not ref_tree.value == "-": # subproof condition return False if not isinstance(ref_tree.right, ContradictionStatement): return False neg = UnaryStatement("~", ref_tree.left) return compareTree(neg, logic_tree) def notElim(logic_val, reference): if len(reference) > 1: return False logic_tree = parse_sentence(logic_val) ref_tree = parse_sentence(reference[0]) if not ref_tree.value == "~": return False if not ref_tree.child.value == "~": return False return compareTree(ref_tree.child.child, logic_tree) def contraIntro(logic_val, reference): if len(reference) > 2: return False if not logic_val == "!": return False # Statements could have been selected out of order ref_tree1 = parse_sentence(reference[0]) ref_tree2 = parse_sentence(reference[1]) neg = UnaryStatement("~", ref_tree1) if compareTree(ref_tree2, neg) == True: return True else: ref_tree1 = parse_sentence(reference[1]) ref_tree2 = parse_sentence(reference[0]) neg = UnaryStatement("~", ref_tree1) if compareTree(ref_tree2, neg) == True: return True return False def contraElim(logic_val, reference): if len(reference) > 1: return False ref_tree = parse_sentence(reference[0]) return compareTree(ref_tree, ContradictionStatement()) def impIntro(logic_val, reference): if len(reference) > 1: return False logic_tree = parse_sentence(logic_val) ref_tree = 
parse_sentence(reference[0]) return compareTree(logic_tree, ref_tree) def impElim(logic_val, reference): if len(reference) > 2: return False ref_tree = parse_sentence(reference[0]) logic_tree = parse_sentence("(" + reference[1] + ") - (" + logic_val + ")") return compareTree(ref_tree, logic_tree) def biIntro(logic_val, reference): if len(reference) > 2: return False logic_tree = parse_sentence(logic_val) if not logic_tree.value == "=": return False ref_tree1 = parse_sentence(reference[0]) ref_tree2 = parse_sentence(reference[1]) if compareTree(ref_tree1.left, ref_tree2.right) == False: return False if compareTree(ref_tree2.left, ref_tree1.right) == False: return False if compareTree(logic_tree.left, ref_tree1.left) == True and \ compareTree(logic_tree.right, ref_tree1.right) == True: return True elif compareTree(logic_tree.left, ref_tree2.left) == True and \ compareTree(logic_tree.right, ref_tree2.right) == True: return True else: return False def biElim(logic_val, reference): if len(reference) > 2: return False ref_tree = parse_sentence(reference[0]) logic_tree = parse_sentence("(" + reference[1] + ") = (" + logic_val + ")") if compareTree(ref_tree, logic_tree) == True: return True logic_tree = parse_sentence("(" + logic_val + ") = (" + reference[1] + ")") if compareTree(ref_tree, logic_tree) == True: return True ref_tree = parse_sentence(reference[1]) logic_tree = parse_sentence("(" + reference[0] + ") = (" + logic_val + ")") if compareTree(ref_tree, logic_tree) == True: return True logic_tree = parse_sentence("(" + logic_val + ") = (" + reference[0] + ")") if compareTree(ref_tree, logic_tree) == True: return True return False
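
# --- Illustrative usage sketch (not part of the original module) -------------
# Each checker above receives the sentence being justified plus the list of
# cited reference sentences and returns True when the rule application holds.
# The connective symbols assumed below are the ones this module itself builds
# and compares: ~ negation, | or, - implication, = biconditional, and ! for
# contradiction; the exact grammar accepted by parse_sentence() is inferred
# from that code, not from separate documentation.
if __name__ == '__main__':
    print(reit("P", ["P"]))              # reiteration of an identical sentence
    print(orIntro("P | Q", ["P"]))       # disjunction introduction
    print(impElim("Q", ["P - Q", "P"]))  # conditional elimination (modus ponens)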
the-stack_0_14205
import logging import pandas as pd from scipy.sparse import csr_matrix from sklearn.feature_extraction.text import TfidfVectorizer from datasets.nlp import NLP from representations.representation import Representation logger = logging.getLogger() class TfIdf(Representation): def __init__(self, args): self.args = args self.vectorizer: TfidfVectorizer = TfidfVectorizer(lowercase=not self.args.no_lower_case, ) self.lower_case = not self.args.no_lower_case self.nlp = NLP() def fit_vectorizer(self, text, stem_first=True): if stem_first: stemmed = self.stem(text) self.vectorizer.fit(stemmed.apply(lambda x: ' '.join(x))) else: self.vectorizer.fit(text) def __call__(self, raw: pd.Series, fit_vectorizer: bool = True, stem_first=True) -> csr_matrix: if stem_first: logger.info('Stemming...') data = self.stem(raw).apply(lambda x: ' '.join(x)) else: data = raw if fit_vectorizer: logger.info('Fitting TF-IDF...') self.vectorizer.fit(data) result = self.vectorizer.transform(data) return result def stem(self, raw: pd.Series): return raw.apply( lambda x: [ self.nlp.stem(word) for word in self.nlp.tokenize(x) if self.nlp.stem(word) is not None ] ) def get_vectorizer(self): return self.vectorizer
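
# --- Illustrative usage sketch (not part of the original module) -------------
# TfIdf is constructed from an argparse-style namespace (only the
# `no_lower_case` flag is read here) and then called on a pandas Series of raw
# documents, returning a sparse scipy matrix.  The guarded demo below shows
# that call pattern; it still requires the project's NLP/stemming dependencies
# imported above to be available.
if __name__ == "__main__":
    from types import SimpleNamespace

    _args = SimpleNamespace(no_lower_case=False)
    _tfidf = TfIdf(_args)
    _docs = pd.Series(["cats are running fast", "a cat ran home"])
    _matrix = _tfidf(_docs, fit_vectorizer=True, stem_first=True)
    print(_matrix.shape)  # (2, vocabulary size after stemming)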
the-stack_0_14206
# # Copyright (c) 2019-2021 Triad National Security, LLC # All rights reserved. # # This file is part of the bueno project. See the LICENSE file at the # top-level directory of this distribution for more information. # ''' Bueno run script for the unstructured mesh physics mini-app, PENNANT ''' import re import io import csv import sys import typing from bueno.public import container from bueno.public import experiment from bueno.public import logger from bueno.public import metadata from bueno.public import utils # pylint: disable=too-few-public-methods class AddArgsAction(experiment.CLIAddArgsAction): ''' Handle custom argument processing ''' def __call__(self, cliconfig: experiment.CLIConfiguration) -> None: ''' New argument definitions ''' cliconfig.argparser.add_argument( '--pinfile', help="pennant input file", default='./experiments/nohsmall/nohsmall.pnt' ) class Experiment: ''' PENNANT benchmark definition ''' def __init__(self, config: experiment.CLIConfiguration) -> None: ''' Experiment configuration. ''' experiment.name(config.args.name) self.config = config # PENNANT input file self.pinfile = config.args.pinfile self.data: typing.Dict[str, list] = { 'commands': list(), 'results': list() } # Emit program config to terminal & collected assets. self.emit_conf() self.add_assets() def emit_conf(self) -> None: ''' Emit configuration to terminal ''' pcd = dict() pcd['Program'] = vars(self.config.args) utils.yamlp(pcd, 'Program') def add_assets(self) -> None: ''' Select additional assets to copy ''' metadata.add_asset(metadata.FileAsset(self.config.args.input)) metadata.add_asset(metadata.FileAsset(self.pinfile)) def post_action(self, **kwargs: typing.Dict[str, str]) -> None: ''' Post experiment iteration action ''' logger.log('# Starting Post Action...') cmd = kwargs.pop('command') # Record command used in iteration. self.data['commands'].append(cmd) # Record timing data from PENNANT terminal output. self.parse_output(kwargs.pop('output')) def parse_output(self, out1: typing.List[str]) -> None: ''' Parse timing results information from PENNANT terminal output. ''' # Search for end of run data. pos = -1 for pos, line in enumerate(out1): if line == 'Run complete\n': print('Found runtime table!') break # No data found, stop test. if pos == -1: logger.log('ERROR: No post-run data found') sys.exit() # Isolate terminal lines containing timing details. timing = out1[pos + 1: pos + 6] # Format end of run data. results = [] for row in timing: items = row.split(',') for item in items: if '*' in item or item == '\n': continue # Skip empty or decorative lines. # Trim whitespace. item = re.sub(r'[ ]*\=[ ]+', ':', item) item = item.strip() # Remove unecessary characters. item = re.sub(r'[()]', '', item) results.append(item.split(':')[1]) # Discard label # Append iteration results to Experiment data self.data['results'].append(results) def run(self, genspec: str) -> None: ''' Experiment iterations definition ''' logger.log('# Starting Runs...') # Generate the iterative run commands. rcmd = self.config.args.runcmds pruns = experiment.runcmds(rcmd[0], rcmd[1], rcmd[2], rcmd[3]) executable = self.config.args.executable appargs = genspec.format(executable) # Execute generated run commands. for prun in pruns: logger.log('') container.prun(prun, appargs, postaction=self.post_action) def report(self) -> None: ''' Generate csv report from run iterations. ''' logger.emlog(F'# {experiment.name()} Report') # Setup table. 
table = utils.Table() sio = io.StringIO(newline=None) dataraw = csv.writer(sio) header = ['Cycle', 'Cstop', 'Time', 'Tstop', 'Hydro Cycle', 'Command'] dataraw.writerow(header) table.addrow(header) # Populate table. for index, entry in enumerate(self.data['results']): entry.append(self.data['commands'][index]) dataraw.writerow(entry) table.addrow(entry) # Write table to csv & display to terminal. csvname = self.config.args.csv_output metadata.add_asset(metadata.StringIOAsset(sio, csvname)) table.emit() logger.log('') def main(argv: typing.List[str]) -> None: ''' Setup and start experiment. ''' # Program description. desc = 'bueno run script for PENNANT experiments.' # Default Configuration. defaults = experiment.CannedCLIConfiguration.Defaults defaults.name = 'pennant' defaults.description = desc defaults.input = './experiments/config.txt' defaults.executable = '~/PENNANT/build/pennant' defaults.runcmds = (2, 2, 'mpirun -n %n', 'nidx + 1') defaults.csv_output = 'data.csv' # Compile and parse configuration. config = experiment.CannedCLIConfiguration(desc, argv, defaults) config.addargs(AddArgsAction) config.parseargs() for genspec in experiment.readgs(config.args.input, config): # Update config after each iteration exp = Experiment(config) exp.run(genspec) exp.report() # vim: ft=python ts=4 sts=4 sw=4 expandtab
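
# --- Illustrative sketch (not part of the original script) -------------------
# parse_output() above reduces each comma-separated "name = value" item in the
# PENNANT end-of-run block to just its value.  The helper below applies the
# same regex cleanup to a single row string; the sample row in the comment is
# hypothetical and only illustrates the expected "name = value" shape, it is
# not captured PENNANT output.
def _clean_row_sketch(row: str) -> typing.List[str]:
    values = []
    for item in row.split(','):
        if '*' in item or item == '\n':
            continue  # skip decorative or empty items
        item = re.sub(r'[ ]*\=[ ]+', ':', item).strip()
        item = re.sub(r'[()]', '', item)
        if ':' in item:
            values.append(item.split(':')[1])
    return values

# _clean_row_sketch("cycle = 100, cstop = 100\n") -> ['100', '100']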
the-stack_0_14208
#!/usr/bin/python3 import socket import threading import time import numpy as np from picamera2 import Picamera2 from picamera2.encoders import H264Encoder from picamera2.outputs import CircularOutput, FileOutput lsize = (320, 240) picam2 = Picamera2() video_config = picam2.video_configuration(main={"size": (1280, 720), "format": "RGB888"}, lores={"size": lsize, "format": "YUV420"}) picam2.configure(video_config) picam2.start_preview() encoder = H264Encoder(1000000, repeat=True) circ = CircularOutput() encoder.output = [circ] picam2.encoder = encoder picam2.start() picam2.start_encoder() w, h = lsize prev = None encoding = False ltime = 0 def server(): global circ, picam2 with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock: sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) sock.bind(("0.0.0.0", 10001)) sock.listen() while tup := sock.accept(): event = threading.Event() conn, addr = tup stream = conn.makefile("wb") filestream = FileOutput(stream) filestream.start() picam2.encoder.output = [circ, filestream] filestream.connectiondead = lambda ex: event.set() event.wait() t = threading.Thread(target=server) t.setDaemon(True) t.start() while True: cur = picam2.capture_buffer("lores") cur = cur[:w * h].reshape(h, w) if prev is not None: # Measure pixels differences between current and # previous frame mse = np.square(np.subtract(cur, prev)).mean() if mse > 7: if not encoding: epoch = int(time.time()) circ.fileoutput = "{}.h264".format(epoch) circ.start() encoding = True print("New Motion", mse) ltime = time.time() else: if encoding and time.time() - ltime > 5.0: circ.stop() encoding = False prev = cur picam2.stop_encoder()
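
# --- Illustrative sketch (not part of the original script) -------------------
# The loop above flags motion when the mean squared difference between
# consecutive low-resolution luma frames exceeds a fixed threshold (7).  The
# same metric, written as a standalone function that could be unit-tested on
# plain numpy arrays without a camera, would look like this (left commented
# out because this script starts the camera at import time and never returns
# from its main loop):
#
#   def motion_detected(prev_frame, cur_frame, threshold=7.0):
#       mse = np.square(np.subtract(cur_frame, prev_frame)).mean()
#       return mse > threshold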
the-stack_0_14209
#!/usr/bin/env python from __future__ import print_function, division import itertools, time, copy import collections, random import os, pickle import numba import numpy as np board_size = 15 estimate_level = 4 t_random = 0#.01# controls how random the bonus for level=0 show_q = False def strategy(state): """ AI's strategy """ """ Information provided to you: state = (board, last_move, playing, board_size) board = (x_stones, o_stones) stones is a set contains positions of one player's stones. e.g. x_stones = {(8,8), (8,9), (8,10), (8,11)} playing = 0|1, the current player's index Your strategy will return a position code for the next stone, e.g. (8,7) """ global board_size board, last_move, playing, board_size = state initialize() #print('estimate_level', estimate_level) other_player = int(not playing) my_stones = board[playing] opponent_stones = board[other_player] # put the first stone in the center if it's the start of the game center = int((board_size-1)/2) if last_move is None: # if it's the first move of the game r = np.random.randint(board_size) c = np.random.randint(board_size) best_move = (r, c) #best_move = (center, center) strategy.zobrist_code = strategy.zobrist_me[best_move] return (best_move[0]+1, best_move[1]+1) else: last_move = (last_move[0]-1, last_move[1]-1) # update zobrist_code with opponent last move strategy.zobrist_code ^= strategy.zobrist_opponent[last_move] # build new state representation state = np.zeros(board_size**2, dtype=np.int32).reshape(board_size, board_size) for i,j in my_stones: state[i-1,j-1] = 1 for i,j in opponent_stones: state[i-1,j-1] = -1 #if strategy.zobrist_code in U_stone.cache: # print("Calculated Move: %.3f" %U_stone.cache[strategy.zobrist_code]) #else: # print("Didn't know this move!") if len(my_stones) == 0: level = 7 else: level = 0 # clear the U cache U_stone.cache = dict() alpha = -1.0 beta = 2.0 empty_spots_left = np.sum(state==0) best_move, best_q = best_action_q(state, strategy.zobrist_code, empty_spots_left, last_move, alpha, beta, 1, level) if show_q: print("best_q = %f" % best_q) # update zobrist_code with my move strategy.zobrist_code ^= strategy.zobrist_me[best_move] # return the best move return (best_move[0]+1, best_move[1]+1) level_max_n = [20, 20, 12, 12, 8, 8, 6, 6, 4, 4, 4, 4, 4, 4, 4] def best_action_q(state, zobrist_code, empty_spots_left, last_move, alpha, beta, player, level): "Return the optimal action for a state" if empty_spots_left == 0: # Board filled up, it's a tie return None, 0.5 #move_interest_values = np.zeros(board_size**2, dtype=np.float32).reshape(board_size,board_size) move_interest_values = best_action_q.move_interest_values move_interest_values.fill(0) # reuse the same array # boost the interests of closer moves by a little bit # note that it might boost a taken spot, but an available spot will at least get 10 interest in find_interesting_moves() boost_dist = 3 r, c = last_move xmin = max(0, r-boost_dist) xmax = min(board_size, r+boost_dist+1) ymin = max(0, c-boost_dist) ymax = min(board_size, c+boost_dist+1) move_interest_values[xmin:xmax, ymin:ymax] = 1.5 verbose = False #if level == 0: # verbose = True n_moves = level_max_n[level] interested_moves = find_interesting_moves(state, empty_spots_left, move_interest_values, player, n_moves, verbose) if len(interested_moves) == 1: current_move = interested_moves[0] current_move = (current_move[0], current_move[1]) q = Q_stone(state, zobrist_code, empty_spots_left, current_move, alpha, beta, player, level) if verbose: print(current_move, q) return 
current_move, q #best_move = (-1,-1) # admit defeat if all moves have 0 win rate best_move = (interested_moves[0,0], interested_moves[0,1]) # continue to play even I'm losing if player == 1: max_q = 0.0 max_bonused_q = 0.0 for current_move in interested_moves: current_move = (current_move[0], current_move[1]) q = Q_stone(state, zobrist_code, empty_spots_left, current_move, alpha, beta, player, level+1) if level == 0 and q > 0: bonus_q = abs(np.random.normal(0, t_random)) / (226-empty_spots_left)**2 if q + bonus_q > max_q: max_q = q + bonus_q best_move = current_move max_bonused_q = bonus_q else: if q > alpha: alpha = q if q > max_q: max_q = q best_move = current_move if verbose: print(current_move, q) if q == 1.0 or beta <= alpha: break best_q = max_q - max_bonused_q elif player == -1: min_q = 1.0 for current_move in interested_moves: current_move = (current_move[0], current_move[1]) q = Q_stone(state, zobrist_code, empty_spots_left, current_move, alpha, beta, player, level+1) if q < beta: beta = q if q < min_q: min_q = q best_move = current_move if q == 0.0 or beta <= alpha: break best_q = min_q return best_move, best_q @numba.jit(nopython=True, nogil=True) def find_interesting_moves(state, empty_spots_left, move_interest_values, player, n_moves, verbose=False): """ Look at state and find the interesing n_move moves. input: ------- state: numpy.array board_size x board_size empty_spots_left: number of empty spots on the board player: 1 or -1, the current player n_moves: int, desired number of interesing moves output: ------- interested_moves: numpy.array final_n_moves x 2 *note : final_n_moves = 1 if limited * else final_n_moves = n_moves + number of length-4 moves *note2: final_n_moves will not exceed empty_spots_left #suggested_n_moves: suggested number of moves to """ force_to_block = False exist_will_win_move = False directions = ((1,1), (1,0), (0,1), (1,-1)) final_single_move = np.zeros(2, dtype=np.int64).reshape(1,2) # for returning the single move for r in range(board_size): for c in range(board_size): if state[r,c] != 0: continue interest_value = 10 # as long as it's a valid point, this is for avoiding the taken spaces my_hard_4 = 0 for dr, dc in directions: my_line_length = 1 # last_move opponent_line_length = 1 # try to extend in the positive direction (max 4 times) ext_r = r ext_c = c skipped_1 = 0 my_blocked = False opponent_blocked = False for i in range(4): ext_r += dr ext_c += dc if ext_r < 0 or ext_r >= board_size or ext_c < 0 or ext_c >= board_size: break elif state[ext_r, ext_c] == player: if my_blocked is True: break else: my_line_length += 1 opponent_blocked = True elif state[ext_r, ext_c] == -player: if opponent_blocked is True: break else: opponent_line_length += 1 my_blocked = True elif skipped_1 is 0: skipped_1 = i + 1 # allow one skip and record the position of the skip else: break # the backward counting starts at the furthest "unskipped" stone forward_my_open = False forward_opponent_open = False if skipped_1 == 0: my_line_length_back = my_line_length opponent_line_length_back = opponent_line_length elif skipped_1 == 1: my_line_length_back = 1 opponent_line_length_back = 1 forward_my_open = True forward_opponent_open = True else: if my_blocked is False: my_line_length_back = skipped_1 opponent_line_length_back = 1 forward_my_open = True else: my_line_length_back = 1 opponent_line_length_back = skipped_1 forward_opponent_open = True my_line_length_no_skip = my_line_length_back opponent_line_length_no_skip = opponent_line_length_back # backward is a little 
complicated, will try to extend my stones first ext_r = r ext_c = c skipped_2 = 0 opponent_blocked = False for i in range(5-my_line_length_no_skip): ext_r -= dr ext_c -= dc if ext_r < 0 or ext_r >= board_size or ext_c < 0 or ext_c >= board_size: break elif state[ext_r, ext_c] == player: my_line_length_back += 1 opponent_blocked = True elif skipped_2 is 0 and state[ext_r, ext_c] == 0: skipped_2 = i + 1 else: break # see if i'm winning if my_line_length_back == 5: final_single_move[0,0] = r final_single_move[0,1] = c return final_single_move #interested_n_moves[0] = move #return interested_n_moves, True # extend my forward line length to check if there is hard 4 if skipped_2 is 0: my_line_length += my_line_length_back - my_line_length_no_skip else: my_line_length += skipped_2 - 1 # notice that here the forward length can exceed 5 after extension, but it should be at max 4 if my_line_length > 4: my_line_length = 4 backward_my_open = True if skipped_2 > 0 else False backward_opponent_open = False # then try to extend the opponent if opponent_blocked is True: if skipped_2 == 1: backward_opponent_open = True else: ext_r = r ext_c = c skipped_2 = 0 for i in range(5-opponent_line_length_no_skip): ext_r -= dr ext_c -= dc if ext_r < 0 or ext_r >= board_size or ext_c < 0 or ext_c >= board_size: break elif state[ext_r, ext_c] == -player: opponent_line_length_back += 1 elif skipped_2 is 0 and state[ext_r, ext_c] == 0: skipped_2 = i + 1 else: break # extend my forward line length to check if there is hard 4 if skipped_2 is 0: opponent_line_length += opponent_line_length_back - opponent_line_length_no_skip else: opponent_line_length += skipped_2 - 1 backward_opponent_open = True # here if opponent_line_length_back == 5, skipped_2 will be 0 and this flag won't be True # but it do not affect our final result, because we have to block this no matter if it's open # notice that here the forward length can exceed 5 after extension, but it should be at max 4 if opponent_line_length > 4: opponent_line_length = 4 # check if we have to block this if opponent_line_length_back == 5: final_single_move[0,0] = r final_single_move[0,1] = c force_to_block = True elif force_to_block is False: # if I will win after this move, I won't consider other moves if forward_my_open is True and my_line_length == 4: my_hard_4 += 1 if backward_my_open is True and my_line_length_back == 4: my_hard_4 += 1 if my_hard_4 >= 2: final_single_move[0,0] = r final_single_move[0,1] = c exist_will_win_move = True if force_to_block is False and exist_will_win_move is False: # compute the interest_value for other moves if forward_my_open is True: interest_value += my_line_length ** 4 if backward_my_open is True: interest_value += my_line_length_back ** 4 if forward_opponent_open is True: interest_value += opponent_line_length ** 4 if backward_opponent_open is True: interest_value += opponent_line_length_back ** 4 # after looking at all directions, record the total interest_value of this move move_interest_values[r, c] += interest_value if interest_value > 256: # one (length_4) ** 4, highly interesting move n_moves += 1 # all moves have been investigated now see if we have to block first if force_to_block is True or exist_will_win_move is True: if verbose is True: print(final_single_move[0,0], final_single_move[0,1], "Only One") return final_single_move else: flattened_interest = move_interest_values.ravel() # The interest value > 250 means at least one length_4 or three length_3 which make it highly interesting #n_high_interest_moves = 
np.sum(flattened_interest > 266) # did it in the loop if n_moves > empty_spots_left: n_moves = empty_spots_left high_interest_idx = np.argsort(flattened_interest)[-n_moves:][::-1] interested_moves = np.empty(n_moves*2, dtype=np.int64).reshape(n_moves, 2) interested_moves[:,0] = high_interest_idx // board_size interested_moves[:,1] = high_interest_idx % board_size if verbose is True: print("There are", n_moves, "interested_moves") for i in range(n_moves): print(interested_moves[i,0],interested_moves[i,1],' : ', flattened_interest[high_interest_idx[i]]) return interested_moves def Q_stone(state, zobrist_code, empty_spots_left, current_move, alpha, beta, player, level): # update the state state[current_move] = player # update the zobrist code for the new state if player == 1: move_code = strategy.zobrist_me[current_move] else: move_code = strategy.zobrist_opponent[current_move] new_zobrist_code = zobrist_code ^ move_code result = U_stone(state, new_zobrist_code, empty_spots_left-1, current_move, alpha, beta, player, level) # revert the changes for the state state[current_move] = 0 return result def U_stone(state, zobrist_code, empty_spots_left, last_move, alpha, beta, player, level): try: return U_stone.cache[zobrist_code] except: pass if i_will_win(state, last_move, player): return 1.0 if player == 1 else 0.0 elif level >= estimate_level: result = estimate_U(state, player) else: best_move, best_q = best_action_q(state, zobrist_code, empty_spots_left, last_move, alpha, beta, -player, level) result = best_q U_stone.cache[zobrist_code] = result return result @numba.jit(nopython=True, nogil=True) def estimate_U(state, player): u = 0.0 my_max_n = 0 opponent_max_n = 0 for i in range(board_size): for j in range(board_size): # horizontal wins -- if j <= board_size - 5: my_blocked, opponent_blocked = False, False my_n, opponent_n = 0, 0 for k in range(5): if state[i, j+k] == -1: my_blocked = True opponent_n += 1 elif state[i, j+k] == 1: opponent_blocked = True my_n += 1 if my_blocked is True and opponent_blocked is True: break if my_blocked is False: u += 3 ** my_n if my_n > my_max_n: my_max_n = my_n if opponent_blocked is False: u -= 3 ** opponent_n if opponent_n > opponent_max_n: opponent_max_n = opponent_n # vertical wins | if i <= board_size - 5: my_blocked, opponent_blocked = False, False my_n, opponent_n = 0, 0 for k in range(5): if state[i+k, j] == -1: my_blocked = True opponent_n += 1 elif state[i+k, j] == 1: opponent_blocked = True my_n += 1 if my_blocked is True and opponent_blocked is True: break if my_blocked is False: u += 3 ** my_n if my_n > my_max_n: my_max_n = my_n if opponent_blocked is False: u -= 3 ** opponent_n if opponent_n > opponent_max_n: opponent_max_n = opponent_n # left oblique wins / if i <= board_size - 5 and j >= 4: my_blocked, opponent_blocked = False, False my_n, opponent_n = 0, 0 for k in range(5): if state[i+k, j-k] == -1: my_blocked = True opponent_n += 1 elif state[i+k, j-k] == 1: opponent_blocked = True my_n += 1 if my_blocked is True and opponent_blocked is True: break if my_blocked is False: u += 3 ** my_n if my_n > my_max_n: my_max_n = my_n if opponent_blocked is False: u -= 3 ** opponent_n if opponent_n > opponent_max_n: opponent_max_n = opponent_n # right oblique wins \ if i <= board_size - 5 and j <= board_size - 5: my_blocked, opponent_blocked = False, False my_n, opponent_n = 0, 0 for k in range(5): if state[i+k, j+k] == -1: my_blocked = True opponent_n += 1 elif state[i+k, j+k] == 1: opponent_blocked = True my_n += 1 if my_blocked is True and 
opponent_blocked is True: break if my_blocked is False: u += 3 ** my_n if my_n > my_max_n: my_max_n = my_n if opponent_blocked is False: u -= 3 ** opponent_n if opponent_n > opponent_max_n: opponent_max_n = opponent_n if player == 1: # next move is opponent longer = 2 * (3 **opponent_max_n) # one of the longest can get 1 longer block = 3 ** my_max_n u -= max(longer, block) else: # next move is me longer = 2 * (3 ** my_max_n) block = 3 ** opponent_max_n u += max(longer, block) if u > 0: result = 1.0 - 0.5 * np.exp(-u**2 * 0.0001) else: result = 0.5 * np.exp(-u**2 * 0.0001) return result @numba.jit(nopython=True,nogil=True) def i_win(state, last_move, player): """ Return true if I just got 5-in-a-row with last_move """ r, c = last_move # try all 4 directions, the other 4 is included directions = [(1,1), (1,0), (0,1), (1,-1)] for dr, dc in directions: line_length = 1 # last_move # try to extend in the positive direction (max 4 times) ext_r = r ext_c = c for _ in range(4): ext_r += dr ext_c += dc if ext_r < 0 or ext_r >= board_size or ext_c < 0 or ext_c >= board_size: break elif state[ext_r, ext_c] == player: line_length += 1 else: break if line_length is 5: return True # 5 in a row # try to extend in the opposite direction ext_r = r ext_c = c for _ in range(5-line_length): ext_r -= dr ext_c -= dc if ext_r < 0 or ext_r >= board_size or ext_c < 0 or ext_c >= board_size: break elif state[ext_r, ext_c] == player: line_length += 1 else: break if line_length is 5: return True # 5 in a row return False @numba.jit(nopython=True,nogil=True) def i_will_win(state, last_move, player): """ Return true if I will win next step if the opponent don't have 4-in-a-row. Winning Conditions: 1. 5 in a row. 2. 4 in a row with both end open. (free 4) 3. 4 in a row with one missing stone x 2 (hard 4 x 2) """ r, c = last_move # try all 4 directions, the other 4 is equivalent directions = [(1,1), (1,0), (0,1), (1,-1)] n_hard_4 = 0 # number of hard 4s found for dr, dc in directions: #print(dr, dc) line_length = 1 # last_move # try to extend in the positive direction (max 4 times) ext_r = r ext_c = c skipped_1 = 0 for i in range(4): ext_r += dr ext_c += dc if ext_r < 0 or ext_r >= board_size or ext_c < 0 or ext_c >= board_size: break elif state[ext_r, ext_c] == player: line_length += 1 elif skipped_1 is 0 and state[ext_r, ext_c] == 0: skipped_1 = i+1 # allow one skip and record the position of the skip else: break if line_length is 5: return True # 5 in a row #print("Forward line_length",line_length) # try to extend in the opposite direction ext_r = r ext_c = c skipped_2 = 0 # the backward counting starts at the furthest "unskipped" stone if skipped_1 is not 0: line_length_back = skipped_1 else: line_length_back = line_length line_length_no_skip = line_length_back for i in range(5-line_length_back): ext_r -= dr ext_c -= dc if ext_r < 0 or ext_r >= board_size or ext_c < 0 or ext_c >= board_size: break elif state[ext_r, ext_c] == player: line_length_back += 1 elif skipped_2 is 0 and state[ext_r, ext_c] == 0: skipped_2 = i + 1 else: break #print("Backward line_length",line_length_back) if line_length_back is 5: return True # 5 in a row if line_length_back == 4 and skipped_2 is not 0: n_hard_4 += 1 # backward hard 4 if n_hard_4 == 2: return True # two hard 4 #print("back n_hard_4 = ", n_hard_4) # extend the forward line to the furthest "unskipped" stone #print("line_length_back", line_length_back) if skipped_2 is 0: line_length += line_length_back - line_length_no_skip else: line_length += skipped_2 - 1 if line_length >= 4 
and skipped_1 is not 0: n_hard_4 += 1 # forward hard 4 if n_hard_4 == 2: return True # two hard 4 or free 4 #print('total n_hard_4', n_hard_4) return False def initialize(): # initialize zobrist for u caching if not hasattr(strategy, 'zobrist_me'): strategy.zobrist_me = np.random.randint(np.iinfo(np.int64).max, size=board_size**2).reshape(board_size,board_size) #if not hasattr(strategy, 'zobrist_opponent'): strategy.zobrist_opponent = np.random.randint(np.iinfo(np.int64).max, size=board_size**2).reshape(board_size,board_size) #if not hasattr(strategy, 'zobrist_code'): strategy.zobrist_code = 0 if not hasattr(U_stone, 'cache'): U_stone.cache = dict() if not hasattr(best_action_q, 'move_interest_values'): best_action_q.move_interest_values = np.zeros(board_size**2, dtype=np.float32).reshape(board_size,board_size) def finish(): del strategy.zobrist_me del strategy.zobrist_opponent del strategy.zobrist_code del U_stone.cache del best_action_q.move_interest_values return def board_show(stones): if isinstance(stones, np.ndarray): stones = {(s1,s2) for s1, s2 in stones} print(' '*4 + ' '.join([chr(97+i) for i in xrange(board_size)])) print (' '*3 + '='*(2*board_size)) for x in xrange(1, board_size+1): row = ['%2s|'%x] for y in xrange(1, board_size+1): if (x-1,y-1) in stones: c = 'x' else: c = '-' row.append(c) print (' '.join(row)) def print_state(state): assert isinstance(state, np.ndarray) print(' '*4 + ' '.join([chr(97+i) for i in xrange(board_size)])) print (' '*3 + '='*(2*board_size)) for x in xrange(1, board_size+1): row = ['%2s|'%x] for y in xrange(1, board_size+1): if state[x-1,y-1] == 1: c = 'o' elif state[x-1,y-1] == -1: c = 'x' else: c = '-' row.append(c) print (' '.join(row)) def check(): global board_size board_size = 15 state = np.zeros(board_size**2, dtype=np.int32).reshape(board_size, board_size) # check if i_win() is working properly state[zip(*[(8,9), (8,11), (8,8), (8,10), (8,12)])] = 1 assert i_win(state, (8,10), 1) == True state.fill(0) state[zip(*[(8,10), (9,11), (8,8), (9,12), (7,9), (10,9), (11,12), (11,13)])] = 1 assert i_win(state, (10,12), 1) == True state.fill(0) state[zip(*[(8,10), (8,12), (8,8), (9,12), (7,9), (10,9), (11,12), (11,13)])] = 1 assert i_win(state, (10,12), 1) == False # check if i_will_win() is working properly # o - x x X x - o state.fill(0) state[zip(*[(8,9), (8,11), (8,8)])] = 1 state[zip(*[(8,6), (8,13)])] = -1 assert i_will_win(state, (8, 10), 1) == True # state.fill(0) state[zip(*[(7,7), (7,8), (9,11)])] = 1 state[zip(*[(6,8), (7,9)])] = -1 print(state) assert i_will_win(state, (8,10), -1) == False ## o - x x X x o #assert i_will_win({(8,9), (8,11), (8,8)}, {(8,6), (8,12)}, (8,10)) == False ## o - x x X o ## x ## ## x ## x #assert i_will_win({(8,9), (8,8), (9,10), (11,10), (12,10)}, {(8,6), (8,11)}, (8,10)) == False ## o - x x X x o ## x ## ## x ## x #assert i_will_win({(8,9), (8,8), (9,10), (11,10), (12,10)}, {(8,6), (8,11)}, (8,10)) == False ## o - x x X x o ## x ## ## x ## x #assert i_will_win({ (8,8), (8,9), (8,11), (9,9), (11,7), (12,6)}, {(8,6), (8,12)}, (8,10)) == True ## | x x x X - x x x - - o #assert i_will_win({(8,1), (8,2), (8,0), (8,9), (8,7), (8,8)}, {(8,10)}, (8,3)) == False ## | x x - x X x x o #assert i_will_win({(8,1), (8,2), (8,4), (8,6), (8,7)}, {(8,8)}, (8,5)) == False ## | x x - x X - x x o #assert i_will_win({(8,1), (8,2), (8,4), (8,7), (8,8)}, {(8,9)}, (8,5)) == True ## | x x x - X - x x x o #assert i_will_win({(8,1), (8,2), (8,3), (8,7), (8,8), (8,9)}, {(8,10)}, (8,5)) == True ## | x - x X x - x o #assert 
i_will_win({(8,1), (8,3), (8,5), (8,7)}, {(8,8)}, (8,4)) == True #assert i_will_win({(8,8), (8,10), (9,9), (11,7), (11,9)}, {(7,7), (7,9), (8,7), (10,8), (11,8)}, (8,9)) == False print("All check passed!") if __name__ == '__main__': import pickle state = pickle.load(open('debug.state','rb')) board, last_move, playing, board_size = state player_stones = board[playing] other = int(not playing) ai_stones = board[other] player_move = (8,9) player_stones.add(player_move) state = (player_stones, ai_stones), player_move, other, board_size strategy(state) #import time #check() #test3() #benchmark() #benchmark2()
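
# Editor's sketch (not part of the original strategy module): a minimal,
# self-contained illustration of the incremental Zobrist update that Q_stone
# performs before calling U_stone -- XOR-ing a move's random code into the
# board hash, then XOR-ing it again to undo the move, so identical positions
# always map to the same U_stone.cache key. Table shape and dtype are
# assumptions chosen to mirror initialize() above.
def _zobrist_update_example(board_size=15):
    import numpy as np
    table = np.random.randint(np.iinfo(np.int64).max,
                              size=board_size ** 2).reshape(board_size, board_size)
    code = 0
    code ^= table[7, 7]      # place a stone at (7, 7)
    code ^= table[7, 7]      # revert the move; the hash returns to its old value
    assert code == 0
    return code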
the-stack_0_14210
# -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from collections import OrderedDict import os import re from typing import Dict, Optional, Sequence, Tuple, Type, Union from google.api_core import client_options as client_options_lib from google.api_core import gapic_v1 from google.api_core import retry as retries from google.auth import credentials as ga_credentials # type: ignore from google.auth.transport import mtls # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore from google.auth.exceptions import MutualTLSChannelError # type: ignore from google.oauth2 import service_account # type: ignore try: OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] except AttributeError: # pragma: NO COVER OptionalRetry = Union[retries.Retry, object] # type: ignore from google.ads.googleads.v9.resources.types import extension_feed_item from google.ads.googleads.v9.services.types import extension_feed_item_service from google.rpc import status_pb2 # type: ignore from .transports.base import ( ExtensionFeedItemServiceTransport, DEFAULT_CLIENT_INFO, ) from .transports.grpc import ExtensionFeedItemServiceGrpcTransport class ExtensionFeedItemServiceClientMeta(type): """Metaclass for the ExtensionFeedItemService client. This provides class-level methods for building and retrieving support objects (e.g. transport) without polluting the client instance objects. """ _transport_registry = ( OrderedDict() ) # type: Dict[str, Type[ExtensionFeedItemServiceTransport]] _transport_registry["grpc"] = ExtensionFeedItemServiceGrpcTransport def get_transport_class( cls, label: str = None, ) -> Type[ExtensionFeedItemServiceTransport]: """Return an appropriate transport class. Args: label: The name of the desired transport. If none is provided, then the first transport in the registry is used. Returns: The transport class to use. """ # If a specific transport is requested, return that one. if label: return cls._transport_registry[label] # No transport is requested; return the default (that is, the first one # in the dictionary). return next(iter(cls._transport_registry.values())) class ExtensionFeedItemServiceClient( metaclass=ExtensionFeedItemServiceClientMeta ): """Service to manage extension feed items.""" @staticmethod def _get_default_mtls_endpoint(api_endpoint): """Convert api endpoint to mTLS endpoint. Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. Args: api_endpoint (Optional[str]): the api endpoint to convert. Returns: str: converted mTLS api endpoint. """ if not api_endpoint: return api_endpoint mtls_endpoint_re = re.compile( r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?" 
) m = mtls_endpoint_re.match(api_endpoint) name, mtls, sandbox, googledomain = m.groups() if mtls or not googledomain: return api_endpoint if sandbox: return api_endpoint.replace( "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" ) return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") DEFAULT_ENDPOINT = "googleads.googleapis.com" DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore DEFAULT_ENDPOINT ) @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): """Creates an instance of this client using the provided credentials info. Args: info (dict): The service account private key info. args: Additional arguments to pass to the constructor. kwargs: Additional arguments to pass to the constructor. Returns: ExtensionFeedItemServiceClient: The constructed client. """ credentials = service_account.Credentials.from_service_account_info( info ) kwargs["credentials"] = credentials return cls(*args, **kwargs) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials file. Args: filename (str): The path to the service account private key json file. args: Additional arguments to pass to the constructor. kwargs: Additional arguments to pass to the constructor. Returns: ExtensionFeedItemServiceClient: The constructed client. """ credentials = service_account.Credentials.from_service_account_file( filename ) kwargs["credentials"] = credentials return cls(*args, **kwargs) from_service_account_json = from_service_account_file @property def transport(self) -> ExtensionFeedItemServiceTransport: """Return the transport used by the client instance. Returns: ExtensionFeedItemServiceTransport: The transport used by the client instance. """ return self._transport def __enter__(self): return self def __exit__(self, type, value, traceback): """Releases underlying transport's resources. .. warning:: ONLY use as a context manager if the transport is NOT shared with other clients! Exiting the with block will CLOSE the transport and may cause errors in other clients! 
""" self.transport.close() @staticmethod def ad_group_path(customer_id: str, ad_group_id: str,) -> str: """Return a fully-qualified ad_group string.""" return "customers/{customer_id}/adGroups/{ad_group_id}".format( customer_id=customer_id, ad_group_id=ad_group_id, ) @staticmethod def parse_ad_group_path(path: str) -> Dict[str, str]: """Parse a ad_group path into its component segments.""" m = re.match( r"^customers/(?P<customer_id>.+?)/adGroups/(?P<ad_group_id>.+?)$", path, ) return m.groupdict() if m else {} @staticmethod def asset_path(customer_id: str, asset_id: str,) -> str: """Return a fully-qualified asset string.""" return "customers/{customer_id}/assets/{asset_id}".format( customer_id=customer_id, asset_id=asset_id, ) @staticmethod def parse_asset_path(path: str) -> Dict[str, str]: """Parse a asset path into its component segments.""" m = re.match( r"^customers/(?P<customer_id>.+?)/assets/(?P<asset_id>.+?)$", path ) return m.groupdict() if m else {} @staticmethod def campaign_path(customer_id: str, campaign_id: str,) -> str: """Return a fully-qualified campaign string.""" return "customers/{customer_id}/campaigns/{campaign_id}".format( customer_id=customer_id, campaign_id=campaign_id, ) @staticmethod def parse_campaign_path(path: str) -> Dict[str, str]: """Parse a campaign path into its component segments.""" m = re.match( r"^customers/(?P<customer_id>.+?)/campaigns/(?P<campaign_id>.+?)$", path, ) return m.groupdict() if m else {} @staticmethod def extension_feed_item_path(customer_id: str, feed_item_id: str,) -> str: """Return a fully-qualified extension_feed_item string.""" return "customers/{customer_id}/extensionFeedItems/{feed_item_id}".format( customer_id=customer_id, feed_item_id=feed_item_id, ) @staticmethod def parse_extension_feed_item_path(path: str) -> Dict[str, str]: """Parse a extension_feed_item path into its component segments.""" m = re.match( r"^customers/(?P<customer_id>.+?)/extensionFeedItems/(?P<feed_item_id>.+?)$", path, ) return m.groupdict() if m else {} @staticmethod def geo_target_constant_path(criterion_id: str,) -> str: """Return a fully-qualified geo_target_constant string.""" return "geoTargetConstants/{criterion_id}".format( criterion_id=criterion_id, ) @staticmethod def parse_geo_target_constant_path(path: str) -> Dict[str, str]: """Parse a geo_target_constant path into its component segments.""" m = re.match(r"^geoTargetConstants/(?P<criterion_id>.+?)$", path) return m.groupdict() if m else {} @staticmethod def common_billing_account_path(billing_account: str,) -> str: """Return a fully-qualified billing_account string.""" return "billingAccounts/{billing_account}".format( billing_account=billing_account, ) @staticmethod def parse_common_billing_account_path(path: str) -> Dict[str, str]: """Parse a billing_account path into its component segments.""" m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path) return m.groupdict() if m else {} @staticmethod def common_folder_path(folder: str,) -> str: """Return a fully-qualified folder string.""" return "folders/{folder}".format(folder=folder,) @staticmethod def parse_common_folder_path(path: str) -> Dict[str, str]: """Parse a folder path into its component segments.""" m = re.match(r"^folders/(?P<folder>.+?)$", path) return m.groupdict() if m else {} @staticmethod def common_organization_path(organization: str,) -> str: """Return a fully-qualified organization string.""" return "organizations/{organization}".format(organization=organization,) @staticmethod def 
parse_common_organization_path(path: str) -> Dict[str, str]: """Parse a organization path into its component segments.""" m = re.match(r"^organizations/(?P<organization>.+?)$", path) return m.groupdict() if m else {} @staticmethod def common_project_path(project: str,) -> str: """Return a fully-qualified project string.""" return "projects/{project}".format(project=project,) @staticmethod def parse_common_project_path(path: str) -> Dict[str, str]: """Parse a project path into its component segments.""" m = re.match(r"^projects/(?P<project>.+?)$", path) return m.groupdict() if m else {} @staticmethod def common_location_path(project: str, location: str,) -> str: """Return a fully-qualified location string.""" return "projects/{project}/locations/{location}".format( project=project, location=location, ) @staticmethod def parse_common_location_path(path: str) -> Dict[str, str]: """Parse a location path into its component segments.""" m = re.match( r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path ) return m.groupdict() if m else {} def __init__( self, *, credentials: Optional[ga_credentials.Credentials] = None, transport: Union[str, ExtensionFeedItemServiceTransport, None] = None, client_options: Optional[client_options_lib.ClientOptions] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: """Instantiate the extension feed item service client. Args: credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. transport (Union[str, ~.ExtensionFeedItemServiceTransport]): The transport to use. If set to None, a transport is chosen automatically. client_options (google.api_core.client_options.ClientOptions): Custom options for the client. It won't take effect if a ``transport`` instance is provided. (1) The ``api_endpoint`` property can be used to override the default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT environment variable can also be used to override the endpoint: "always" (always use the default mTLS endpoint), "never" (always use the default regular endpoint) and "auto" (auto switch to the default mTLS endpoint if client certificate is present, this is the default value). However, the ``api_endpoint`` property takes precedence if provided. (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable is "true", then the ``client_cert_source`` property can be used to provide client certificate for mutual TLS transport. If not provided, the default SSL client certificate will be used if present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not set, no client certificate will be used. client_info (google.api_core.gapic_v1.client_info.ClientInfo): The client info used to send a user-agent string along with API requests. If ``None``, then default info will be used. Generally, you only need to set this if you're developing your own client library. Raises: google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport creation failed for any reason. """ if isinstance(client_options, dict): client_options = client_options_lib.from_dict(client_options) if client_options is None: client_options = client_options_lib.ClientOptions() # Create SSL credentials for mutual TLS if needed. 
if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ( "true", "false", ): raise ValueError( "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" ) use_client_cert = ( os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" ) ssl_credentials = None is_mtls = False if use_client_cert: if client_options.client_cert_source: import grpc # type: ignore cert, key = client_options.client_cert_source() ssl_credentials = grpc.ssl_channel_credentials( certificate_chain=cert, private_key=key ) is_mtls = True else: creds = SslCredentials() is_mtls = creds.is_mtls ssl_credentials = creds.ssl_credentials if is_mtls else None # Figure out which api endpoint to use. if client_options.api_endpoint is not None: api_endpoint = client_options.api_endpoint else: use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") if use_mtls_env == "never": api_endpoint = self.DEFAULT_ENDPOINT elif use_mtls_env == "always": api_endpoint = self.DEFAULT_MTLS_ENDPOINT elif use_mtls_env == "auto": api_endpoint = ( self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT ) else: raise MutualTLSChannelError( "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always" ) # Save or instantiate the transport. # Ordinarily, we provide the transport, but allowing a custom transport # instance provides an extensibility point for unusual situations. if isinstance(transport, ExtensionFeedItemServiceTransport): # transport is a ExtensionFeedItemServiceTransport instance. if credentials: raise ValueError( "When providing a transport instance, " "provide its credentials directly." ) self._transport = transport elif isinstance(transport, str): Transport = type(self).get_transport_class(transport) self._transport = Transport( credentials=credentials, host=self.DEFAULT_ENDPOINT ) else: self._transport = ExtensionFeedItemServiceGrpcTransport( credentials=credentials, host=api_endpoint, ssl_channel_credentials=ssl_credentials, client_info=client_info, ) def get_extension_feed_item( self, request: Union[ extension_feed_item_service.GetExtensionFeedItemRequest, dict ] = None, *, resource_name: str = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> extension_feed_item.ExtensionFeedItem: r"""Returns the requested extension feed item in full detail. List of thrown errors: `AuthenticationError <>`__ `AuthorizationError <>`__ `HeaderError <>`__ `InternalError <>`__ `QuotaError <>`__ `RequestError <>`__ Args: request (Union[google.ads.googleads.v9.services.types.GetExtensionFeedItemRequest, dict]): The request object. Request message for [ExtensionFeedItemService.GetExtensionFeedItem][google.ads.googleads.v9.services.ExtensionFeedItemService.GetExtensionFeedItem]. resource_name (:class:`str`): Required. The resource name of the extension feed item to fetch. This corresponds to the ``resource_name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.ads.googleads.v9.resources.types.ExtensionFeedItem: An extension feed item. """ # Create or coerce a protobuf request object. 
# Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. if request is not None and any([resource_name]): raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a extension_feed_item_service.GetExtensionFeedItemRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance( request, extension_feed_item_service.GetExtensionFeedItemRequest ): request = extension_feed_item_service.GetExtensionFeedItemRequest( request ) # If we have keyword arguments corresponding to fields on the # request, apply these. if resource_name is not None: request.resource_name = resource_name # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[ self._transport.get_extension_feed_item ] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata( (("resource_name", request.resource_name),) ), ) # Send the request. response = rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) # Done; return the response. return response def mutate_extension_feed_items( self, request: Union[ extension_feed_item_service.MutateExtensionFeedItemsRequest, dict ] = None, *, customer_id: str = None, operations: Sequence[ extension_feed_item_service.ExtensionFeedItemOperation ] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> extension_feed_item_service.MutateExtensionFeedItemsResponse: r"""Creates, updates, or removes extension feed items. Operation statuses are returned. List of thrown errors: `AuthenticationError <>`__ `AuthorizationError <>`__ `CollectionSizeError <>`__ `CountryCodeError <>`__ `DatabaseError <>`__ `DateError <>`__ `DistinctError <>`__ `ExtensionFeedItemError <>`__ `FieldError <>`__ `FieldMaskError <>`__ `HeaderError <>`__ `ImageError <>`__ `InternalError <>`__ `LanguageCodeError <>`__ `MutateError <>`__ `NewResourceCreationError <>`__ `OperationAccessDeniedError <>`__ `QuotaError <>`__ `RangeError <>`__ `RequestError <>`__ `SizeLimitError <>`__ `StringLengthError <>`__ `UrlFieldError <>`__ Args: request (Union[google.ads.googleads.v9.services.types.MutateExtensionFeedItemsRequest, dict]): The request object. Request message for [ExtensionFeedItemService.MutateExtensionFeedItems][google.ads.googleads.v9.services.ExtensionFeedItemService.MutateExtensionFeedItems]. customer_id (:class:`str`): Required. The ID of the customer whose extension feed items are being modified. This corresponds to the ``customer_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. operations (:class:`Sequence[google.ads.googleads.v9.services.types.ExtensionFeedItemOperation]`): Required. The list of operations to perform on individual extension feed items. This corresponds to the ``operations`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. 
Returns: google.ads.googleads.v9.services.types.MutateExtensionFeedItemsResponse: Response message for an extension feed item mutate. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. if request is not None and any([customer_id, operations]): raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a extension_feed_item_service.MutateExtensionFeedItemsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance( request, extension_feed_item_service.MutateExtensionFeedItemsRequest ): request = extension_feed_item_service.MutateExtensionFeedItemsRequest( request ) # If we have keyword arguments corresponding to fields on the # request, apply these. if customer_id is not None: request.customer_id = customer_id if operations is not None: request.operations = operations # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[ self._transport.mutate_extension_feed_items ] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata( (("customer_id", request.customer_id),) ), ) # Send the request. response = rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) # Done; return the response. return response __all__ = ("ExtensionFeedItemServiceClient",)
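
# Editor's sketch (not part of the generated client): how the path helper and
# the flattened `resource_name` argument above fit together. The key file path
# and the numeric IDs are placeholders, not real values.
def _example_get_extension_feed_item():
    client = ExtensionFeedItemServiceClient.from_service_account_file(
        "service-account.json")  # hypothetical credentials file
    resource_name = client.extension_feed_item_path(
        customer_id="1234567890", feed_item_id="9876543210")
    return client.get_extension_feed_item(resource_name=resource_name)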
the-stack_0_14211
import json
import os

table_objects = ["bowl", "bottle", "can", "computer keyboard", "keypad",
                 "display", "phone", "jar", "knife", "lamp", "laptop",
                 "microphone", "mug", "remote", "wine bottle"]

with open('taxonomy.json') as f:
    data = json.load(f)

for datapoint in data:
    for obj in table_objects:
        if obj in datapoint['name']:
            os.system('unzip {}.zip'.format(datapoint['synsetId']))
            break
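
# Editor's sketch (not part of the original script): the same unzip step without
# going through a shell, so a synset ID containing unexpected characters cannot
# alter the command line. `subprocess.run` is standard library.
def unzip_synset(synset_id):
    import subprocess
    return subprocess.run(["unzip", "{}.zip".format(synset_id)], check=False)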
the-stack_0_14213
from scripts.logger import Logger
from wows.wowsapi import WowsApi
from wows.wowsdb import Wows_database
import math


class WorldofWarships:
    def __init__(self, key, db_path):
        self.logger = Logger(self.__class__.__name__)
        self.logger.debug('Initializing wows class.')
        self.wowsapi = WowsApi(key)
        self.wowsdb = Wows_database(db_path)

    def update(self):
        # check version
        version_db = self.wowsdb.get_db_version()
        version_api = self.wowsapi.get_api_version()
        # return if version is up to date.
        if version_db == version_api:
            self.logger.debug(f'Returning as database has latest version {version_db}.')
            return
        self.update_warships()
        self.update_shipparams()
        # finally update version
        self.wowsdb.update_version(version_api)

    def update_warships(self):
        """
        Update warships table in database.
        """
        self.logger.debug('Updating warships in database.')
        warships_count = self.wowsapi.get_warships_count()
        pages = math.ceil(warships_count / 100)
        warships_api = self.wowsapi.get_warships(pages)
        warships_db = self.wowsdb.get_warships()
        warships_db_ids = list(map(lambda warship: warship.ship_id, warships_db))
        for warship in warships_api:
            # if warship not found in db, register
            if warship.ship_id not in warships_db_ids:
                self.wowsdb.register_ship(warship)
            else:
                index = warships_db_ids.index(warship.ship_id)
                warship_db = warships_db[index]
                assert warship.ship_id == warship_db.ship_id
                # if warship from api differs from warship in db, update
                if warship != warship_db:
                    self.wowsdb.update_warship(warship)
        self.logger.debug('Warships updated.')

    def update_shipparams(self):
        """
        Update shipparameters table in database.
        """
        self.logger.debug('Updating shipparams in database.')
        ship_ids = self.wowsdb.get_ship_ids()
        for ship_id in ship_ids:
            param = self.wowsapi.get_ship_profile(ship_id[0])
            self.wowsdb.update_shipparam(param)
        self.logger.debug('Ship parameters updated.')
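
# Editor's sketch (not part of the original module): the expected driving code.
# The API key and database path below are placeholders.
if __name__ == '__main__':
    updater = WorldofWarships(key='YOUR_WARGAMING_API_KEY', db_path='wows.db')
    updater.update()  # no-op when the stored DB version already matches the API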
the-stack_0_14214
import graphene
from graphene_django.types import DjangoObjectType

from .models import Category, Ingredient


class CategoryType(DjangoObjectType):
    class Meta:
        model = Category


class IngredientType(DjangoObjectType):
    class Meta:
        model = Ingredient


class Query(object):
    category = graphene.Field(CategoryType, id=graphene.Int(), name=graphene.String())
    all_categories = graphene.List(CategoryType)

    ingredient = graphene.Field(
        IngredientType, id=graphene.Int(), name=graphene.String()
    )
    all_ingredients = graphene.List(IngredientType)

    def resolve_all_categories(self, context):
        return Category.objects.all()

    def resolve_all_ingredients(self, context):
        # We can easily optimize query count in the resolve method
        return Ingredient.objects.select_related("category").all()

    def resolve_category(self, context, id=None, name=None):
        if id is not None:
            return Category.objects.get(pk=id)
        if name is not None:
            return Category.objects.get(name=name)
        return None

    def resolve_ingredient(self, context, id=None, name=None):
        if id is not None:
            return Ingredient.objects.get(pk=id)
        if name is not None:
            return Ingredient.objects.get(name=name)
        return None
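
# Editor's sketch (not part of the original schema module): how this Query mixin
# is typically mounted and queried. The root type name is an assumption, field
# names follow graphene's default camelCase conversion, and executing the query
# requires a configured Django project with the Category/Ingredient models.
def _example_query():
    class RootQuery(Query, graphene.ObjectType):
        pass

    schema = graphene.Schema(query=RootQuery)
    return schema.execute("{ allIngredients { name category { name } } }")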
the-stack_0_14215
"""Helpers.""" import inspect from collections import Iterable, Mapping from typing import Optional, Tuple, List, Iterable as IterableType from aiohttp import web from mimeparse import parse_media_range, _filter_blank from .abc.field import FieldABC from .fields.decorators import Tag from .typings import Callee, MimeTypeComponents, QFParsed from .common import JSONAPI def is_generator(obj): """Return True if ``obj`` is a generator.""" return inspect.isgeneratorfunction(obj) or inspect.isgenerator(obj) def is_iterable_but_not_string(obj): """Return True if ``obj`` is an iterable object that isn't a string.""" return ( (isinstance(obj, Iterable) and not hasattr(obj, "strip")) or is_generator(obj) ) def is_indexable_but_not_string(obj): """Return True if ``obj`` is indexable but isn't a string.""" return not hasattr(obj, "strip") and hasattr(obj, "__getitem__") def is_collection(obj, exclude=()): """Return True if ``obj`` is a collection type.""" return (not isinstance(obj, (Mapping,) + exclude) and is_iterable_but_not_string(obj)) def ensure_collection(value, exclude=()): """Ensure value is collection.""" return value if is_collection(value, exclude=exclude) else (value,) def first(iterable, default=None, key=None): """ Return first element of *iterable*. Return first element of *iterable* that evaluates to ``True``, else return ``None`` or optional *default*. >>> first([0, False, None, [], (), 42]) 42 >>> first([0, False, None, [], ()]) is None True >>> first([0, False, None, [], ()], default='ohai') 'ohai' >>> import re >>> m = first(re.match(regex, 'abc') for regex in ['b.*', 'a(.*)']) >>> m.group(1) 'bc' The optional *key* argument specifies a one-argument predicate function like that used for *filter()*. The *key* argument, if supplied, should be in keyword form. For example, finding the first even number in an iterable: >>> first([1, 1, 3, 4, 5], key=lambda x: x % 2 == 0) 4 Contributed by Hynek Schlawack, author of `the original standalone module`_ .. _the original standalone module: https://github.com/hynek/first """ return next(filter(key, iterable), default) def make_sentinel(name='_MISSING', var_name=None): """ Create sentinel instance. Creates and returns a new **instance** of a new class, suitable for usage as a "sentinel", a kind of singleton often used to indicate a value is missing when ``None`` is a valid input. >>> make_sentinel(var_name='_MISSING') _MISSING The most common use cases here in project are as default values for optional function arguments, partly because of its less-confusing appearance in automatically generated documentation. Sentinels also function well as placeholders in queues and linked lists. .. note:: By design, additional calls to ``make_sentinel`` with the same values will not produce equivalent objects. >>> make_sentinel('TEST') == make_sentinel('TEST') False >>> type(make_sentinel('TEST')) == type(make_sentinel('TEST')) False :arg str name: Name of the Sentinel :arg str var_name: Set this name to the name of the variable in its respective module enable pickleability. 
""" class Sentinel(object): def __init__(self): self.name = name self.var_name = var_name def __repr__(self): if self.var_name: return self.var_name return '%s(%r)' % (self.__class__.__name__, self.name) if var_name: def __reduce__(self): return self.var_name def __nonzero__(self): return False __bool__ = __nonzero__ return Sentinel() def get_router_resource(app: web.Application, resource: str): """Return route of JSON API application for resource.""" return app.router[f"{app[JSONAPI]['routes_namespace']}.{resource}"] def get_processors(obj, tag: Tag, field: FieldABC, default: Optional[Callee] = None): has_processors = getattr(obj, '_has_processors', False) if has_processors: processor_tag = tag, field.key processors = obj.__processors__.get(processor_tag) if processors: for processor_name in processors: processor = getattr(obj, processor_name) processor_kwargs = \ processor.__processing_kwargs__.get(processor_tag) yield processor, processor_kwargs return if not callable(default): return yield default, {} def quality_and_fitness_parsed(mime_type: str, parsed_ranges: List[MimeTypeComponents] ) -> QFParsed: """Find the best match for a mime-type amongst parsed media-ranges. Find the best match for a given mime-type against a list of media_ranges that have already been parsed by parse_media_range(). Returns a tuple of the fitness value and the value of the 'q' quality parameter of the best match, or (-1, 0) if no match was found. Just as for quality_parsed(), 'parsed_ranges' must be a list of parsed media ranges. Cherry-picked from python-mimeparse and improved. """ best_fitness = -1 best_fit_q = 0 (target_type, target_subtype, target_params) = parse_media_range(mime_type) best_matched = None for (type, subtype, params) in parsed_ranges: # check if the type and the subtype match type_match = ( type in (target_type, '*') or target_type == '*' ) subtype_match = ( subtype in (target_subtype, '*') or target_subtype == '*' ) # if they do, assess the "fitness" of this mime_type if type_match and subtype_match: # 100 points if the type matches w/o a wildcard fitness = type == target_type and 100 or 0 # 10 points if the subtype matches w/o a wildcard fitness += subtype == target_subtype and 10 or 0 # 1 bonus point for each matching param besides "q" param_matches = sum([ 1 for (key, value) in target_params.items() if key != 'q' and key in params and value == params[key] ]) fitness += param_matches # finally, add the target's "q" param (between 0 and 1) fitness += float(target_params.get('q', 1)) if fitness > best_fitness: best_fitness = fitness best_fit_q = params['q'] best_matched = (type, subtype, params) return (float(best_fit_q), best_fitness), best_matched def best_match(supported: IterableType[str], header: str) -> Tuple[str, Optional[MimeTypeComponents]]: """Return mime-type with the highest quality ('q') from list of candidates. Takes a list of supported mime-types and finds the best match for all the media-ranges listed in header. The value of header must be a string that conforms to the format of the HTTP Accept: header. The value of 'supported' is a list of mime-types. The list of supported mime-types should be sorted in order of increasing desirability, in case of a situation where there is a tie. Cherry-picked from python-mimeparse and improved. 
>>> best_match(['application/xbel+xml', 'text/xml'], 'text/*;q=0.5,*/*; q=0.1') ('text/xml', ('text', '*', {'q': '0.5'})) """ split_header = _filter_blank(header.split(',')) parsed_header = [parse_media_range(r) for r in split_header] weighted_matches = {} for i, mime_type in enumerate(supported): weight, match = quality_and_fitness_parsed(mime_type, parsed_header) weighted_matches[(weight, i)] = (mime_type, match) best = max(weighted_matches.keys()) return best[0][0] and weighted_matches[best] or ('', None) def get_mime_type_params(mime_type: MimeTypeComponents): return {k: v for k, v in mime_type[2].items() if k != 'q'} MISSING = make_sentinel()
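
# Editor's sketch (not part of the original helpers): content negotiation with
# best_match, mirroring the doctest above but using the JSON:API media type this
# package serves. The supported list below is an assumption for illustration.
def _example_negotiation():
    header = "text/html;q=0.8, application/vnd.api+json"
    mime, parsed = best_match(["application/vnd.api+json", "text/html"], header)
    # mime == "application/vnd.api+json"; parsed is the matched media-range tuple
    return mime, get_mime_type_params(parsed)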
the-stack_0_14216
""" Copyright (c) 2022 Huawei Technologies Co.,Ltd. openGauss is licensed under Mulan PSL v2. You can use this software according to the terms and conditions of the Mulan PSL v2. You may obtain a copy of Mulan PSL v2 at: http://license.coscl.org.cn/MulanPSL2 THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. See the Mulan PSL v2 for more details. """ """ Case Type : 数据库系统 Case Name : LocalDateTime类型setObject并发用例 Description : 1.写配置文件 2.编译java工具 3.建表 4.并发执行java脚本 5.查询结果 6.重复step4-5 50次 Expect : History : """ import unittest import os import datetime import time from yat.test import Node from yat.test import macro from testcase.utils.Logger import Logger from testcase.utils.Common import Common from testcase.utils.CommonSH import CommonSH from testcase.utils.Constant import Constant from testcase.utils.ComThread import ComThread class Jdbcisreadonly(unittest.TestCase): def setUp(self): self.log = Logger() self.db_primary_user_node = Node(node='PrimaryDbUser') self.db_primary_root_node = Node(node='PrimaryRoot') self.log.info("-----------this is setup-----------") self.log.info("Opengauss_Function_JDBC_Set_Get_Object_Case0018 start") self.targetpath = "/home/jdbc_test" self.properties = os.path.join(self.targetpath, "jdbc_case0001.properties") self.sql_path = os.path.join(self.targetpath, "jdbc_set_get_object") self.java_name = "jdbc_set_get_object_case0018" self.tb_name = "jdbc_set_get_object_case0018" self.common = Common() self.constant = Constant() self.commonshpri = CommonSH('PrimaryDbUser') def test_index(self): self.log.info('--------1.写配置文件-------') self.common.scp_file(self.db_primary_root_node, f"{self.java_name}.java", self.targetpath) result = self.db_primary_root_node.sh( f"touch {self.properties}").result() self.log.info(result) config = f'echo "password=' \ f'{self.db_primary_user_node.db_password}"> {self.properties}' self.db_primary_root_node.sh(config) config = f'echo "port={self.db_primary_user_node.db_port}">> ' \ f'{self.properties}' self.db_primary_root_node.sh(config) config = f'echo "hostname={self.db_primary_user_node.db_host}">> ' \ f'{self.properties}' self.db_primary_root_node.sh(config) config = f'echo "user={self.db_primary_user_node.db_user}">> ' \ f'{self.properties}' self.db_primary_root_node.sh(config) config = f'echo "dbname={self.db_primary_user_node.db_name}">> ' \ f'{self.properties}' self.db_primary_root_node.sh(config) config = f'echo "stringtype=unspecified">> {self.properties}' self.db_primary_root_node.sh(config) config = f'cat {self.properties}' result = self.db_primary_root_node.sh(config).result() self.assertTrue("password=" in result and "port=" in result and "hostname=" in result and "user=" in result and "dbname=" in result) self.log.info('--------------2. 
编译java工具------------------') self.db_primary_root_node.scp_put(macro.JDBC_PATH, f"{self.targetpath}/postgresql.jar") cmd = f"javac -encoding utf-8 -cp " \ f"{os.path.join(self.targetpath, 'postgresql.jar')} " \ f"{os.path.join(self.targetpath, f'{self.java_name}.java')}" self.log.info(cmd) result = self.db_primary_root_node.sh(cmd).result() self.log.info(result) self.log.info("---------------3.创建表----------------------") cmd = f"drop table if exists {self.tb_name};" \ f"create table {self.tb_name}(t_time timestamp);" result = self.commonshpri.execut_db_sql(cmd) self.log.info(result) self.assertIn(self.constant.CREATE_TABLE_SUCCESS, result) self.log.info("-------------4.运行java工具---------------------") for index in range(50): self.log.info(f"======round {index}========") today = self.db_primary_root_node.sh( "date '+%Y-%m-%d 00:00:00'").result() yesterday = (datetime.datetime.strptime( today, '%Y-%m-%d 00:00:00') - datetime.timedelta(days=+1) ).strftime('%Y-%m-%d 00:00:00') tomorrow = (datetime.datetime.strptime( today, '%Y-%m-%d 00:00:00') - datetime.timedelta(days=-1) ).strftime('%Y-%m-%d 00:00:00') self.log.info(f"today is {today}, tomorrow is " f"{tomorrow}, yesterday is {yesterday}") cmd = f" java -cp " \ f"{os.path.join(self.targetpath, 'postgresql.jar')}" \ f":{self.targetpath} " \ f"{self.java_name} -F {self.properties}" self.log.info(cmd) insert_thread = [] for i in range(9): insert_thread.append(ComThread( self.common.get_sh_result, args=(self.db_primary_root_node, cmd))) insert_thread[i].setDaemon(True) insert_thread[i].start() time.sleep(2) for i in range(9): insert_thread[i].join(30) result = insert_thread[i].get_result() self.assertNotIn('error', result) cmd = f"select * from {self.tb_name} order by 1 desc;" insert_result = self.commonshpri.execut_db_sql(cmd) self.log.info(insert_result) self.assertIn("(126 rows)", insert_result) self.assertEqual(insert_result.count('infinity'), 36) self.assertEqual(insert_result.count('-infinity'), 18) self.assertEqual(insert_result.count('1970-01-01 00:00:00'), 9) self.assertEqual(insert_result.count('2020-02-29 23:59:59'), 18) result_time = self.db_primary_root_node.sh( "date '+%Y-%m-%d %H:%M:%S'").result() self.log.info(result_time) now = [] now.append((datetime.datetime.strptime( result_time, '%Y-%m-%d %H:%M:%S') - datetime.timedelta(minutes=1) ).strftime('%Y-%m-%d %H:%M')) now.append((datetime.datetime.strptime( result_time, '%Y-%m-%d %H:%M:%S') - datetime.timedelta(minutes=-1) ).strftime('%Y-%m-%d %H:%M')) now.append((datetime.datetime.strptime( result_time, '%Y-%m-%d %H:%M:%S')).strftime('%Y-%m-%d %H:%M')) self.log.info(f"now is {now}") self.assertTrue((insert_result.count(now[0]) + insert_result.count(now[1]) + insert_result.count(now[2])) >= 27) self.assertGreaterEqual(insert_result.count(tomorrow), 9) self.assertGreaterEqual(insert_result.count(yesterday), 9) self.assertGreaterEqual(insert_result.count(today), 9) for line in range(2, 10): self.assertEqual(' ', insert_result.splitlines()[line]) cmd = f"delete from {self.tb_name};" result = self.commonshpri.execut_db_sql(cmd) self.log.info(result) def tearDown(self): self.log.info('------------this is tearDown-------------') self.log.info('------------------清理环境-------------') cmd = f"drop table if exists {self.tb_name};" result = self.commonshpri.execut_db_sql(cmd) self.log.info(result) cmd = f"rm -rf {self.targetpath}" self.log.info(cmd) self.db_primary_root_node.sh(cmd) self.log.info("-Opengauss_Function_JDBC_Set_Get_Object_Case0018 end-")
the-stack_0_14217
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Gradients for operators defined in math_ops.py.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python import pywrap_tensorflow as c_api from tensorflow.python.compat import compat from tensorflow.python.eager import context from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import gen_array_ops from tensorflow.python.ops import gen_math_ops from tensorflow.python.ops import math_ops from tensorflow.python.util import object_identity def _safe_shape_div(x, y): """Divides `x / y` assuming `x, y >= 0`, treating `0 / 0 = 0`.""" return x // math_ops.maximum(y, 1) @ops.RegisterGradient("ArgMax") def _ArgMaxGrad(op, grad): del op, grad return [None, None] @ops.RegisterGradient("ArgMin") def _ArgMinGrad(op, grad): del op, grad return [None, None] # TODO(rmlarsen): Implement gradient. ops.NotDifferentiable("EuclideanNorm") def SmartBroadcastGradientArgs(x, y, grad): """Optimized version of `broadcast_gradient_args` that caches results. This implementation avoids creating `broadcast_gradient_args` ops in the case that the input shapes are fully defined, and provides hints to the calling code that can be used to avoid creating reduction and reshaping ops. Args: x: The left input tensor to a broadcasting binary op. y: The right input tensor to a broadcasting binary op. grad: The incoming gradient tensor for a broadcasting binary op. Returns: A pair of tuples, containing: * A 3-tuple of broadcast information for x, containing: * The shape of x (as a tuple or Tensor). * The reduction indices for x (as a tuple or Tensor). * A boolean, which if True, indicates that x's shape differs from grad's shape (and so x's gradient must be reduced and/or reshaped). * A 3-tuple of broadcast information for y, containing the respective details for y. """ # NOTE: It may be productive to apply these optimizations in the eager case # as well. 
if context.executing_eagerly() or not ( isinstance(x, ops.Tensor) and isinstance(y, ops.Tensor) and isinstance(grad, ops.Tensor)): sx = array_ops.shape(x) sy = array_ops.shape(y) rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy) return (sx, rx, True), (sy, ry, True) # pylint: disable=protected-access x_shape_tuple = x._shape_tuple() y_shape_tuple = y._shape_tuple() grad_shape_tuple = grad._shape_tuple() # pylint: enable=protected-access if (x_shape_tuple is None or None in x_shape_tuple or y_shape_tuple is None or None in y_shape_tuple): sx = array_ops.shape_internal(x, optimize=False) sy = array_ops.shape_internal(y, optimize=False) rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy) return (sx, rx, True), (sy, ry, True) x_needs_reduction = x_shape_tuple != grad_shape_tuple y_needs_reduction = y_shape_tuple != grad_shape_tuple # Get the default graph rather than relying on `x.graph`, `y.graph`, or # `grad.graph`, because these may be eager tensors. g = ops.get_default_graph() try: rx, ry = g._bcast_grad_args_cache[(x_shape_tuple, y_shape_tuple)] # pylint: disable=protected-access return (x_shape_tuple, rx, x_needs_reduction), ( y_shape_tuple, ry, y_needs_reduction) except KeyError: rx, ry = array_ops.broadcast_gradient_args(x_shape_tuple, y_shape_tuple) # TODO(mrry): If this becomes a bottleneck, add a multi-output version of # `TF_TryEvaluateConstant()`. rx_value = tuple(c_api.TF_TryEvaluateConstant_wrapper( rx.graph._c_graph, rx._as_tf_output())) # pylint: disable=protected-access assert rx_value is not None ry_value = tuple(c_api.TF_TryEvaluateConstant_wrapper( ry.graph._c_graph, ry._as_tf_output())) # pylint: disable=protected-access assert ry_value is not None g._bcast_grad_args_cache[(x_shape_tuple, y_shape_tuple)] = ( # pylint: disable=protected-access rx_value, ry_value) return (x_shape_tuple, rx_value, x_needs_reduction), ( y_shape_tuple, ry_value, y_needs_reduction) _empty_tuple = () def _IsScalar(x): return x._shape_tuple() is _empty_tuple # pylint: disable=protected-access @ops.RegisterGradient("Sum") def _SumGrad(op, grad): """Gradient for Sum.""" # Fast path for when reducing to a scalar and ndims is known: adds only # Reshape and Tile ops (and possibly a Shape). input_0_shape = op.inputs[0]._shape_tuple() # pylint: disable=protected-access if input_0_shape is not None: axes = tensor_util.constant_value(op.inputs[1]) if axes is not None: rank = len(input_0_shape) if np.array_equal(axes, np.arange(rank)): # Reduce all dims. if context.executing_eagerly(): ctx = context.context() new_shape = ctx.ones_rank_cache().get(rank) if new_shape is None: new_shape = constant_op.constant([1] * rank, dtype=dtypes.int32) ctx.ones_rank_cache().put(rank, new_shape) else: new_shape = [1] * rank grad = array_ops.reshape(grad, new_shape) # If shape is not fully defined (but rank is), we use Shape. if None not in input_0_shape: input_shape = constant_op.constant(input_0_shape, dtype=dtypes.int32) else: input_shape = array_ops.shape(op.inputs[0]) return [array_ops.tile(grad, input_shape), None] elif None not in input_0_shape and not context.executing_eagerly(): # The shape and reduction indices are statically known, so we use a # graph-level cache to avoid recomputing `reduced_shape()` for each # invocation. graph = ops.get_default_graph() # Canonicalize `axes` to be a tuple of indices. The incoming # value may be a scalar or a vector, and may include negative indices. 
axes = tuple(axes.reshape(-1)) try: output_shape_kept_dims, tile_scaling = graph._reduced_shape_cache[ # pylint: disable=protected-access (input_0_shape, axes)] except KeyError: # Compute and cache `output_shape_kept_dims` and `tile_scaling`. def EvaluateAsTuple(t): value = c_api.TF_TryEvaluateConstant_wrapper( t.graph._c_graph, t._as_tf_output()) # pylint: disable=protected-access assert value is not None return tuple(value) output_shape_kept_dims = EvaluateAsTuple( math_ops.reduced_shape(input_0_shape, axes)) tile_scaling = EvaluateAsTuple( _safe_shape_div(input_0_shape, output_shape_kept_dims)) graph._reduced_shape_cache[(input_0_shape, axes)] = ( # pylint:disable=protected-access output_shape_kept_dims, tile_scaling) grad = array_ops.reshape(grad, output_shape_kept_dims) return [array_ops.tile(grad, tile_scaling), None] input_shape = array_ops.shape(op.inputs[0]) # TODO(apassos) remove this once device placement for eager ops makes more # sense. with ops.colocate_with(input_shape): output_shape_kept_dims = math_ops.reduced_shape(input_shape, op.inputs[1]) tile_scaling = _safe_shape_div(input_shape, output_shape_kept_dims) grad = array_ops.reshape(grad, output_shape_kept_dims) return [array_ops.tile(grad, tile_scaling), None] def _MinOrMaxGrad(op, grad): """Gradient for Min or Max. Amazingly it's precisely the same code.""" input_shape = array_ops.shape(op.inputs[0]) output_shape_kept_dims = math_ops.reduced_shape(input_shape, op.inputs[1]) y = op.outputs[0] y = array_ops.reshape(y, output_shape_kept_dims) grad = array_ops.reshape(grad, output_shape_kept_dims) # Compute the number of selected (maximum or minimum) elements in each # reduction dimension. If there are multiple minimum or maximum elements # then the gradient will be divided between them. indicators = math_ops.cast(math_ops.equal(y, op.inputs[0]), grad.dtype) num_selected = array_ops.reshape( math_ops.reduce_sum(indicators, op.inputs[1]), output_shape_kept_dims) return [math_ops.divide(indicators, num_selected) * grad, None] @ops.RegisterGradient("Max") def _MaxGrad(op, grad): """Gradient for Max.""" return _MinOrMaxGrad(op, grad) @ops.RegisterGradient("Min") def _MinGrad(op, grad): return _MinOrMaxGrad(op, grad) @ops.RegisterGradient("Mean") def _MeanGrad(op, grad): """Gradient for Mean.""" sum_grad = _SumGrad(op, grad)[0] input_shape = op.inputs[0]._shape_tuple() # pylint: disable=protected-access output_shape = op.outputs[0]._shape_tuple() # pylint: disable=protected-access if (input_shape is not None and output_shape is not None and None not in input_shape and None not in output_shape): input_size = np.prod(input_shape) output_size = np.prod(output_shape) factor = input_size // max(output_size, 1) factor = constant_op.constant(factor, dtype=sum_grad.dtype) else: input_shape = array_ops.shape(op.inputs[0]) output_shape = array_ops.shape(op.outputs[0]) factor = _safe_shape_div( math_ops.reduce_prod(input_shape), math_ops.reduce_prod(output_shape)) return math_ops.truediv(sum_grad, math_ops.cast(factor, sum_grad.dtype)), None @ops.RegisterGradient("Prod") def _ProdGrad(op, grad): """Gradient for Prod.""" # The gradient can be expressed by dividing the product by each entry of the # input tensor, but this approach can't deal with zeros in the input. # Here, we avoid this problem by composing the output as a product of two # cumprod operations. 
input_shape = array_ops.shape(op.inputs[0]) # Reshape reduction indices for the case where the parameter is a scalar reduction_indices = array_ops.reshape(op.inputs[1], [-1]) # Expand grad to full input shape output_shape_kept_dims = math_ops.reduced_shape(input_shape, op.inputs[1]) tile_scaling = _safe_shape_div(input_shape, output_shape_kept_dims) grad = array_ops.reshape(grad, output_shape_kept_dims) grad = array_ops.tile(grad, tile_scaling) # Pack all reduced dimensions into a single one, so we can perform the # cumprod ops. If the reduction dims list is empty, it defaults to float32, # so we need to cast here. We put all the shape-related ops on CPU to avoid # copying back and forth, and since listdiff is CPU only. with ops.device("/cpu:0"): rank = array_ops.rank(op.inputs[0]) reduction_indices = (reduction_indices + rank) % rank reduced = math_ops.cast(reduction_indices, dtypes.int32) idx = math_ops.range(0, rank) other, _ = array_ops.setdiff1d(idx, reduced) perm = array_ops.concat([reduced, other], 0) reduced_num = math_ops.reduce_prod(array_ops.gather(input_shape, reduced)) other_num = math_ops.reduce_prod(array_ops.gather(input_shape, other)) permuted = array_ops.transpose(op.inputs[0], perm) permuted_shape = array_ops.shape(permuted) reshaped = array_ops.reshape(permuted, (reduced_num, other_num)) # Calculate product, leaving out the current entry left = math_ops.cumprod(reshaped, axis=0, exclusive=True) right = math_ops.cumprod(reshaped, axis=0, exclusive=True, reverse=True) # For complex inputs, the gradient is in the conjugate direction. y = array_ops.reshape( math_ops.conj(left) * math_ops.conj(right), permuted_shape) # Invert the transpose and reshape operations. # Make sure to set the statically known shape information through a reshape. 
out = grad * array_ops.transpose(y, array_ops.invert_permutation(perm)) return array_ops.reshape(out, input_shape), None @ops.RegisterGradient("SegmentSum") def _SegmentSumGrad(op, grad): """Gradient for SegmentSum.""" return array_ops.gather(grad, op.inputs[1]), None @ops.RegisterGradient("SegmentMean") def _SegmentMeanGrad(op, grad): """Gradient for SegmentMean.""" input_rank = array_ops.rank(op.inputs[0]) ones_shape = array_ops.concat([ array_ops.shape(op.inputs[1]), array_ops.fill(array_ops.expand_dims(input_rank - 1, 0), 1) ], 0) ones = array_ops.fill(ones_shape, constant_op.constant(1, dtype=grad.dtype)) scaled_grad = math_ops.divide(grad, math_ops.segment_sum(ones, op.inputs[1])) return array_ops.gather(scaled_grad, op.inputs[1]), None @ops.RegisterGradient("SparseSegmentSum") def _SparseSegmentSumGrad(op, grad): """Gradient for SparseSegmentSum.""" input_rows = array_ops.shape(op.inputs[0])[0] return (math_ops.unsorted_segment_sum( array_ops.gather(grad, op.inputs[2]), op.inputs[1], input_rows), None, None) @ops.RegisterGradient("SparseSegmentSumWithNumSegments") def _SparseSegmentSumWithNumSegmentsGrad(op, grad): """Gradient for SparseSegmentSumWithNumSegments.""" input_rows = array_ops.shape(op.inputs[0])[0] return (math_ops.unsorted_segment_sum( array_ops.gather(grad, op.inputs[2]), op.inputs[1], input_rows), None, None, None) @ops.RegisterGradient("SparseSegmentMean") def _SparseSegmentMeanGrad(op, grad): """Gradient for SparseSegmentMean.""" dim0 = array_ops.shape(op.inputs[0])[0] return (math_ops.sparse_segment_mean_grad(grad, op.inputs[1], op.inputs[2], dim0), None, None) @ops.RegisterGradient("SparseSegmentMeanWithNumSegments") def _SparseSegmentMeanWithNumSegmentsGrad(op, grad): """Gradient for SparseSegmentMeanWithNumSegments.""" dim0 = array_ops.shape(op.inputs[0])[0] return (math_ops.sparse_segment_mean_grad(grad, op.inputs[1], op.inputs[2], dim0), None, None, None) @ops.RegisterGradient("SparseSegmentSqrtN") def _SparseSegmentSqrtNGrad(op, grad): """Gradient for SparseSegmentSqrtN.""" dim0 = array_ops.shape(op.inputs[0])[0] return (math_ops.sparse_segment_sqrt_n_grad(grad, op.inputs[1], op.inputs[2], dim0), None, None) @ops.RegisterGradient("SparseSegmentSqrtNWithNumSegments") def _SparseSegmentSqrtNWithNumSegmentsGrad(op, grad): """Gradient for SparseSegmentSqrtNWithNumSegments.""" dim0 = array_ops.shape(op.inputs[0])[0] return (math_ops.sparse_segment_sqrt_n_grad(grad, op.inputs[1], op.inputs[2], dim0), None, None, None) def _SegmentMinOrMaxGrad(op, grad): """ Gradient for SegmentMin and SegmentMax. """ zeros = array_ops.zeros_like(op.inputs[0], dtype=op.inputs[0].dtype) # Get the number of selected (minimum or maximum) elements in each segment. gathered_outputs = array_ops.gather(op.outputs[0], op.inputs[1]) is_selected = math_ops.equal(op.inputs[0], gathered_outputs) num_selected = math_ops.segment_sum( math_ops.cast(is_selected, grad.dtype), op.inputs[1]) # Compute the gradient for each segment. The gradient for the ith segment is # divided evenly among the selected elements in that segment. 
weighted_grads = math_ops.divide(grad, num_selected) gathered_grads = array_ops.gather(weighted_grads, op.inputs[1]) return array_ops.where(is_selected, gathered_grads, zeros), None @ops.RegisterGradient("SegmentMin") def _SegmentMinGrad(op, grad): """Gradient for SegmentMin.""" return _SegmentMinOrMaxGrad(op, grad) @ops.RegisterGradient("SegmentMax") def _SegmentMaxGrad(op, grad): """Gradient for SegmentMax.""" return _SegmentMinOrMaxGrad(op, grad) def _GatherDropNegatives(params, ids, zero_clipped_indices=None, is_positive=None): """ Helper function for unsorted segment ops. Gathers params for positive segment ids and gathers 0 for inputs with negative segment id. Also returns the clipped indices and a boolean mask with the same shape as ids where a positive id is masked as true. With this, the latter two can be passed as arguments to this function to reuse them. """ if zero_clipped_indices is None: zero_clipped_indices = math_ops.maximum(ids, array_ops.zeros_like(ids)) gathered = array_ops.gather(params, zero_clipped_indices) if is_positive is None: is_positive = math_ops.greater_equal(ids, 0) # tf.where(condition, x, y) requires condition to have the same shape as x # and y. # todo(philjd): remove this if tf.where supports broadcasting (#9284) for _ in range(gathered.shape.ndims - is_positive.shape.ndims): is_positive = array_ops.expand_dims(is_positive, -1) is_positive = ( is_positive & array_ops.ones_like(gathered, dtype=dtypes.bool)) # replace gathered params of negative indices with 0 zero_slice = array_ops.zeros_like(gathered) return (array_ops.where(is_positive, gathered, zero_slice), zero_clipped_indices, is_positive) def _UnsortedSegmentMinOrMaxGrad(op, grad): """ Gradient for UnsortedSegmentMin and UnsortedSegmentMax. """ # Get the number of selected (minimum or maximum) elements in each segment. gathered_outputs, zero_clipped_indices, is_positive = \ _GatherDropNegatives(op.outputs[0], op.inputs[1]) is_selected = math_ops.equal(op.inputs[0], gathered_outputs) is_selected = math_ops.logical_and(is_selected, is_positive) num_selected = math_ops.unsorted_segment_sum( math_ops.cast(is_selected, grad.dtype), op.inputs[1], op.inputs[2]) # Compute the gradient for each segment. The gradient for the ith segment is # divided evenly among the selected elements in that segment. weighted_grads = math_ops.divide(grad, num_selected) gathered_grads, _, _ = _GatherDropNegatives(weighted_grads, None, zero_clipped_indices, is_positive) zeros = array_ops.zeros_like(gathered_grads) return array_ops.where(is_selected, gathered_grads, zeros), None, None @ops.RegisterGradient("UnsortedSegmentSum") def _UnsortedSegmentSumGrad(op, grad): """Gradient for UnsortedSegmentSum.""" return _GatherDropNegatives(grad, op.inputs[1])[0], None, None @ops.RegisterGradient("UnsortedSegmentMax") def _UnsortedSegmentMaxGrad(op, grad): """ Gradient for UnsortedSegmentMax. """ return _UnsortedSegmentMinOrMaxGrad(op, grad) @ops.RegisterGradient("UnsortedSegmentMin") def _UnsortedSegmentMinGrad(op, grad): """ Gradient for UnsortedSegmentMin. """ return _UnsortedSegmentMinOrMaxGrad(op, grad) @ops.RegisterGradient("UnsortedSegmentProd") def _UnsortedSegmentProdGrad(op, grad): """ Gradient for UnsortedSegmentProd. The gradient can be expressed for each segment by dividing the segment's product by each element of the segment input tensor, but this approach can't deal with zeros in the input. Unlike reduce_prod we can't use cumsum here as individual segments may have a different number of elements. 
Therefore we consider three cases: 1) A segment input contains no zeros and we can safely divide by the input tensor. 2) A segment contains exactly one zero. Then the gradient of each input of the segment is zero except for the 0-input, there the gradient is the product of the remaining segment entries. 3) A segment contains at least two zeros. The gradient is zero for all segment inputs. """ # Note that unsorted_segment_sum will filter out the negative indices, # so we don't need to do a logical_and with is_positive here is_zero = math_ops.equal(op.inputs[0], 0) num_zeros = gen_math_ops.unsorted_segment_sum( math_ops.cast(is_zero, dtype=dtypes.int32), op.inputs[1], op.inputs[2]) # handle case 3 and set the gradient to 0 for segments with more than one # 0 as input grad = array_ops.where( math_ops.greater(num_zeros, 1), array_ops.zeros_like(grad), grad) # replace all zeros with ones and compute the unsorted_segment_prod non_zero_data = array_ops.where(is_zero, array_ops.ones_like(op.inputs[0]), op.inputs[0]) non_zero_prod = gen_math_ops.unsorted_segment_prod(non_zero_data, op.inputs[1], op.inputs[2]) # clip the indices for gather to be positive zero_clipped_indices = math_ops.maximum(op.inputs[1], array_ops.zeros_like(op.inputs[1])) gathered_prod = array_ops.gather(op.outputs[0], zero_clipped_indices) gathered_non_zero_prod = array_ops.gather(non_zero_prod, zero_clipped_indices) prod_divided_by_el = gathered_prod / op.inputs[0] # May contain nan/inf. # Now fetch the individual results for segments containing 0 and those that # don't. is_zero will also fetch results for entries with negative index # but the following gather_drop_negatives sets the corresponding entry in # grad to 0 for these partial_derivative = array_ops.where(is_zero, gathered_non_zero_prod, prod_divided_by_el) gathered_grad = _GatherDropNegatives(grad, op.inputs[1], zero_clipped_indices)[0] return gathered_grad * partial_derivative, None, None @ops.RegisterGradient("Abs") def _AbsGrad(op, grad): x = op.inputs[0] return grad * math_ops.sign(x) @ops.RegisterGradient("Neg") def _NegGrad(_, grad): """Returns -grad.""" return -grad @ops.RegisterGradient("Inv") def _InvGrad(op, grad): """Returns -grad * (1 / x^2).""" y = op.outputs[0] # y = 1 / x return gen_math_ops.reciprocal_grad(y, grad) @ops.RegisterGradient("Reciprocal") def _ReciprocalGrad(op, grad): """Returns -grad * (1 / x^2).""" y = op.outputs[0] # y = 1 / x return gen_math_ops.reciprocal_grad(y, grad) @ops.RegisterGradient("InvGrad") def _InvGradGrad(op, grad): b = op.inputs[1] # op.output[0]: y = -b * conj(a)^2 with ops.control_dependencies([grad]): ca = math_ops.conj(op.inputs[0]) cg = math_ops.conj(grad) return cg * -2.0 * b * ca, gen_math_ops.reciprocal_grad(ca, grad) @ops.RegisterGradient("ReciprocalGrad") def _ReciprocalGradGrad(op, grad): b = op.inputs[1] # op.output[0]: y = -b * conj(a)^2 with ops.control_dependencies([grad]): ca = math_ops.conj(op.inputs[0]) cg = math_ops.conj(grad) return cg * -2.0 * b * ca, gen_math_ops.reciprocal_grad(ca, grad) @ops.RegisterGradient("Square") def _SquareGrad(op, grad): x = op.inputs[0] # Added control dependencies to prevent 2*x from being computed too early. 
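  # Added note: d/dx x**2 = 2*x, so the return below evaluates
  # grad * 2 * conj(x); the conjugate follows TensorFlow's convention for
  # complex gradients.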
with ops.control_dependencies([grad]): x = math_ops.conj(x) y = constant_op.constant(2.0, dtype=x.dtype) return math_ops.multiply(grad, math_ops.multiply(x, y)) @ops.RegisterGradient("Sqrt") def _SqrtGrad(op, grad): y = op.outputs[0] # y = x^(1/2) return gen_math_ops.sqrt_grad(y, grad) @ops.RegisterGradient("SqrtGrad") def _SqrtGradGrad(op, grad): a = op.inputs[0] y = op.outputs[0] # y = 0.5 * b / conj(a) with ops.control_dependencies([grad]): if compat.forward_compatible(2019, 9, 14): ga = gen_math_ops.xdivy(grad, a) return -gen_math_ops.mul_no_nan(y, math_ops.conj(ga)), 0.5 * ga else: ga = grad / a return -math_ops.conj(ga) * y, 0.5 * ga @ops.RegisterGradient("Rsqrt") def _RsqrtGrad(op, grad): """Returns -0.5 * grad * conj(y)^3.""" y = op.outputs[0] # y = x^(-1/2) return gen_math_ops.rsqrt_grad(y, grad) @ops.RegisterGradient("RsqrtGrad") def _RsqrtGradGrad(op, grad): """Returns backprop gradient for f(a,b) = -0.5 * b * conj(a)^3.""" a = op.inputs[0] # a = x^{-1/2} b = op.inputs[1] # backprop gradient for a with ops.control_dependencies([grad]): ca = math_ops.conj(a) cg = math_ops.conj(grad) grad_a = -1.5 * cg * b * math_ops.square(ca) grad_b = gen_math_ops.rsqrt_grad(ca, grad) return grad_a, grad_b @ops.RegisterGradient("Exp") def _ExpGrad(op, grad): """Returns grad * exp(x).""" y = op.outputs[0] # y = e^x with ops.control_dependencies([grad]): y = math_ops.conj(y) if compat.forward_compatible(2019, 9, 14): return math_ops.mul_no_nan(y, grad) else: return grad * y @ops.RegisterGradient("Expm1") def _Expm1Grad(op, grad): """Returns grad * exp(x).""" x = op.inputs[0] with ops.control_dependencies([grad]): x = math_ops.conj(x) y = math_ops.exp(x) if compat.forward_compatible(2019, 9, 14): return math_ops.mul_no_nan(y, grad) else: return grad * y @ops.RegisterGradient("Log") def _LogGrad(op, grad): """Returns grad * (1/x).""" x = op.inputs[0] with ops.control_dependencies([grad]): x = math_ops.conj(x) if compat.forward_compatible(2019, 9, 14): return gen_math_ops.xdivy(grad, x) else: return grad * math_ops.reciprocal(x) @ops.RegisterGradient("Log1p") def _Log1pGrad(op, grad): """Returns grad * (1/(1 + x)).""" x = op.inputs[0] with ops.control_dependencies([grad]): x = math_ops.conj(x) if compat.forward_compatible(2019, 9, 14): return gen_math_ops.xdivy(grad, 1 + x) else: return grad * math_ops.reciprocal(1 + x) @ops.RegisterGradient("Xlogy") def _XLogyGrad(op, grad): """Returns gradient of xlogy(x, y) with respect to x and y.""" x = op.inputs[0] y = op.inputs[1] sx = array_ops.shape(x) sy = array_ops.shape(y) rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy) with ops.control_dependencies([grad]): not_zero_x = math_ops.cast( math_ops.not_equal(x, math_ops.cast(0., dtype=x.dtype)), dtype=x.dtype) partial_x = gen_math_ops.xlogy(not_zero_x, y) partial_y = gen_math_ops.xdivy(x, y) return (array_ops.reshape(math_ops.reduce_sum(partial_x * grad, rx), sx), array_ops.reshape(math_ops.reduce_sum(partial_y * grad, ry), sy)) @ops.RegisterGradient("Xdivy") def _XDivyGrad(op, grad): """Returns gradient of xdivy(x, y) with respect to x and y.""" x = op.inputs[0] y = op.inputs[1] sx = array_ops.shape(x) sy = array_ops.shape(y) rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy) with ops.control_dependencies([grad]): not_zero_x = math_ops.cast( math_ops.not_equal(x, math_ops.cast(0., dtype=x.dtype)), dtype=x.dtype) partial_x = gen_math_ops.xdivy(not_zero_x, y) partial_y = gen_math_ops.xdivy(math_ops.negative(x), y**2) return (array_ops.reshape(math_ops.reduce_sum(partial_x * grad, rx), sx), 
array_ops.reshape(math_ops.reduce_sum(partial_y * grad, ry), sy)) @ops.RegisterGradient("Sinh") def _SinhGrad(op, grad): """Returns grad * cosh(x).""" x = op.inputs[0] with ops.control_dependencies([grad]): x = math_ops.conj(x) return grad * math_ops.cosh(x) @ops.RegisterGradient("Cosh") def _CoshGrad(op, grad): """Returns grad * sinh(x).""" x = op.inputs[0] with ops.control_dependencies([grad]): x = math_ops.conj(x) return grad * math_ops.sinh(x) @ops.RegisterGradient("Tanh") def _TanhGrad(op, grad): """Returns grad * (1 - tanh(x) * tanh(x)).""" y = op.outputs[0] # y = tanh(x) with ops.control_dependencies([grad]): y = math_ops.conj(y) return gen_math_ops.tanh_grad(y, grad) @ops.RegisterGradient("Asinh") def _AsinhGrad(op, grad): """Returns grad * 1/cosh(y).""" y = op.outputs[0] with ops.control_dependencies([grad]): y = math_ops.conj(y) return grad / math_ops.cosh(y) @ops.RegisterGradient("Acosh") def _AcoshGrad(op, grad): """Returns grad * 1/sinh(y).""" y = op.outputs[0] with ops.control_dependencies([grad]): y = math_ops.conj(y) if compat.forward_compatible(2019, 9, 14): return math_ops.xdivy(grad, math_ops.sinh(y)) else: return grad / math_ops.sinh(y) @ops.RegisterGradient("Atanh") def _AtanhGrad(op, grad): """Returns grad * 1/ (1 - x^2).""" x = op.inputs[0] with ops.control_dependencies([grad]): x = math_ops.conj(x) x2 = math_ops.square(x) one = constant_op.constant(1, dtype=grad.dtype) inv = math_ops.reciprocal(math_ops.subtract(one, x2)) return grad * inv @ops.RegisterGradient("TanhGrad") def _TanhGradGrad(op, grad): with ops.control_dependencies([grad]): a = math_ops.conj(op.inputs[0]) b = math_ops.conj(op.inputs[1]) return grad * -2.0 * b * a, gen_math_ops.tanh_grad(a, grad) @ops.RegisterGradient("Erf") def _ErfGrad(op, grad): """Returns grad * 2/sqrt(pi) * exp(-x**2).""" x = op.inputs[0] two_over_root_pi = constant_op.constant(2 / np.sqrt(np.pi), dtype=grad.dtype) with ops.control_dependencies([grad]): x = math_ops.conj(x) return grad * two_over_root_pi * math_ops.exp(-math_ops.square(x)) @ops.RegisterGradient("Erfc") def _ErfcGrad(op, grad): """Returns -grad * 2/sqrt(pi) * exp(-x**2).""" x = op.inputs[0] minus_two_over_root_pi = constant_op.constant( -2 / np.sqrt(np.pi), dtype=grad.dtype) with ops.control_dependencies([grad]): x = math_ops.conj(x) return grad * minus_two_over_root_pi * math_ops.exp(-math_ops.square(x)) @ops.RegisterGradient("Lgamma") def _LgammaGrad(op, grad): """Returns grad * digamma(x).""" x = op.inputs[0] with ops.control_dependencies([grad]): x = math_ops.conj(x) if compat.forward_compatible(2019, 9, 14): return math_ops.mul_no_nan(math_ops.digamma(x), grad) else: return grad * math_ops.digamma(x) @ops.RegisterGradient("Digamma") def _DigammaGrad(op, grad): """Compute gradient of the digamma function with respect to its argument.""" x = op.inputs[0] with ops.control_dependencies([grad]): x = math_ops.conj(x) partial_x = math_ops.polygamma(array_ops.constant(1, dtype=x.dtype), x) if compat.forward_compatible(2019, 9, 14): return math_ops.mul_no_nan(partial_x, grad) else: return grad * partial_x @ops.RegisterGradient("BesselI0e") def _BesselI0eGrad(op, grad): """Compute gradient of bessel_i0e(x) with respect to its argument.""" x = op.inputs[0] y = op.outputs[0] with ops.control_dependencies([grad]): partial_x = (math_ops.bessel_i1e(x) - math_ops.sign(x) * y) if compat.forward_compatible(2019, 9, 14): return math_ops.mul_no_nan(partial_x, grad) else: return grad * partial_x @ops.RegisterGradient("BesselI1e") def _BesselI1eGrad(op, grad): """Compute gradient 
of bessel_i1e(x) with respect to its argument.""" x = op.inputs[0] y = op.outputs[0] with ops.control_dependencies([grad]): # For x = 0, the correct gradient is 0.5. # However, the main branch gives NaN because of the division by x, so # we impute the gradient manually. # An alternative solution is to express the gradient via bessel_i0e and # bessel_i2e, but the latter is not yet implemented in Eigen. eps = np.finfo(x.dtype.as_numpy_dtype).eps zeros = array_ops.zeros_like(x) x_is_not_tiny = math_ops.abs(x) > eps safe_x = array_ops.where(x_is_not_tiny, x, eps + zeros) dy_dx = math_ops.bessel_i0e(safe_x) - y * ( math_ops.sign(safe_x) + math_ops.reciprocal(safe_x)) dy_dx = array_ops.where(x_is_not_tiny, dy_dx, 0.5 + zeros) if compat.forward_compatible(2019, 9, 14): return math_ops.mul_no_nan(dy_dx, grad) else: return grad * dy_dx @ops.RegisterGradient("Igamma") def _IgammaGrad(op, grad): """Returns gradient of igamma(a, x) with respect to a and x.""" a = op.inputs[0] x = op.inputs[1] sa = array_ops.shape(a) sx = array_ops.shape(x) ra, rx = gen_array_ops.broadcast_gradient_args(sa, sx) with ops.control_dependencies([grad]): partial_a = gen_math_ops.igamma_grad_a(a, x) # Perform operations in log space before summing, because Gamma(a) # and Gamma'(a) can grow large. partial_x = math_ops.exp(-x + (a - 1) * math_ops.log(x) - math_ops.lgamma(a)) if compat.forward_compatible(2019, 9, 14): return (array_ops.reshape( math_ops.reduce_sum(math_ops.mul_no_nan(partial_a, grad), ra), sa), array_ops.reshape( math_ops.reduce_sum(math_ops.mul_no_nan(partial_x, grad), rx), sx)) else: return (array_ops.reshape(math_ops.reduce_sum(partial_a * grad, ra), sa), array_ops.reshape(math_ops.reduce_sum(partial_x * grad, rx), sx)) @ops.RegisterGradient("Igammac") def _IgammacGrad(op, grad): """Returns gradient of igammac(a, x) = 1 - igamma(a, x) w.r.t. a and x.""" igamma_grad_a, igamma_grad_x = _IgammaGrad(op, grad) return (-igamma_grad_a, -igamma_grad_x) @ops.RegisterGradient("Betainc") def _BetaincGrad(op, grad): """Returns gradient of betainc(a, b, x) with respect to x.""" # TODO(ebrevdo): Perhaps add the derivative w.r.t. a, b a, b, x = op.inputs # two cases: x is a scalar and a/b are same-shaped tensors, or vice # versa; so its sufficient to check against shape(a). sa = array_ops.shape(a) sx = array_ops.shape(x) _, rx = gen_array_ops.broadcast_gradient_args(sa, sx) # Perform operations in log space before summing, because terms # can grow large. 
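  # Added note: the quantity computed below is the analytic partial derivative
  #   d/dx betainc(a, b, x) = x**(a - 1) * (1 - x)**(b - 1) / B(a, b),
  # evaluated as exp() of its logarithm to avoid overflow in the gamma terms.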
log_beta = ( gen_math_ops.lgamma(a) + gen_math_ops.lgamma(b) - gen_math_ops.lgamma(a + b)) partial_x = math_ops.exp((b - 1) * math_ops.log(1 - x) + (a - 1) * math_ops.log(x) - log_beta) # TODO(b/36815900): Mark None return values as NotImplemented if compat.forward_compatible(2019, 9, 14): return ( None, # da None, # db array_ops.reshape( math_ops.reduce_sum(math_ops.mul_no_nan(partial_x, grad), rx), sx)) else: return ( None, # da None, # db array_ops.reshape(math_ops.reduce_sum(partial_x * grad, rx), sx)) @ops.RegisterGradient("Zeta") def _ZetaGrad(op, grad): """Returns gradient of zeta(x, q) with respect to x and q.""" # TODO(tillahoffmann): Add derivative with respect to x x = op.inputs[0] q = op.inputs[1] # Broadcast gradients sx = array_ops.shape(x) sq = array_ops.shape(q) unused_rx, rq = gen_array_ops.broadcast_gradient_args(sx, sq) # Evaluate gradient with ops.control_dependencies([grad]): x = math_ops.conj(x) q = math_ops.conj(q) partial_q = -x * math_ops.zeta(x + 1, q) # TODO(b/36815900): Mark None return values as NotImplemented if compat.forward_compatible(2019, 9, 14): return (None, array_ops.reshape( math_ops.reduce_sum(math_ops.mul_no_nan(partial_q, grad), rq), sq)) else: return (None, array_ops.reshape(math_ops.reduce_sum(partial_q * grad, rq), sq)) @ops.RegisterGradient("Polygamma") def _PolygammaGrad(op, grad): """Returns gradient of psi(n, x) with respect to n and x.""" # TODO(tillahoffmann): Add derivative with respect to n n = op.inputs[0] x = op.inputs[1] # Broadcast gradients sn = array_ops.shape(n) sx = array_ops.shape(x) unused_rn, rx = gen_array_ops.broadcast_gradient_args(sn, sx) # Evaluate gradient with ops.control_dependencies([grad]): n = math_ops.conj(n) x = math_ops.conj(x) partial_x = math_ops.polygamma(n + 1, x) # TODO(b/36815900): Mark None return values as NotImplemented if compat.forward_compatible(2019, 9, 14): return (None, array_ops.reshape( math_ops.reduce_sum(math_ops.mul_no_nan(partial_x, grad), rx), sx)) else: return (None, array_ops.reshape(math_ops.reduce_sum(partial_x * grad, rx), sx)) @ops.RegisterGradient("Sigmoid") def _SigmoidGrad(op, grad): """Returns grad * sigmoid(x) * (1 - sigmoid(x)).""" y = op.outputs[0] # y = sigmoid(x) with ops.control_dependencies([grad]): y = math_ops.conj(y) return gen_math_ops.sigmoid_grad(y, grad) @ops.RegisterGradient("SigmoidGrad") def _SigmoidGradGrad(op, grad): with ops.control_dependencies([grad]): a = math_ops.conj(op.inputs[0]) b = math_ops.conj(op.inputs[1]) gb = grad * b return gb - 2.0 * gb * a, gen_math_ops.sigmoid_grad(a, grad) @ops.RegisterGradient("Sign") def _SignGrad(op, _): """Returns 0.""" x = op.inputs[0] return array_ops.zeros(array_ops.shape(x), dtype=x.dtype) @ops.RegisterGradient("Sin") def _SinGrad(op, grad): """Returns grad * cos(x).""" x = op.inputs[0] with ops.control_dependencies([grad]): x = math_ops.conj(x) return grad * math_ops.cos(x) @ops.RegisterGradient("Cos") def _CosGrad(op, grad): """Returns grad * -sin(x).""" x = op.inputs[0] with ops.control_dependencies([grad]): x = math_ops.conj(x) return -grad * math_ops.sin(x) @ops.RegisterGradient("Tan") def _TanGrad(op, grad): """Returns grad * 1/sec^2(x).""" x = op.inputs[0] with ops.control_dependencies([grad]): x = math_ops.conj(x) secx = math_ops.reciprocal(math_ops.cos(x)) secx2 = math_ops.square(secx) if compat.forward_compatible(2019, 9, 14): return math_ops.mul_no_nan(secx2, grad) else: return secx2 * grad @ops.RegisterGradient("Asin") def _AsinGrad(op, grad): """Returns grad * 1/sqrt(1-x^2).""" x = op.inputs[0] with 
ops.control_dependencies([grad]): x = math_ops.conj(x) x2 = math_ops.square(x) one = constant_op.constant(1, dtype=grad.dtype) den = math_ops.sqrt(math_ops.subtract(one, x2)) if compat.forward_compatible(2019, 9, 14): return math_ops.xdivy(grad, den) else: inv = math_ops.reciprocal(den) return grad * inv @ops.RegisterGradient("Acos") def _AcosGrad(op, grad): """Returns grad * -1/sqrt(1-x^2).""" x = op.inputs[0] with ops.control_dependencies([grad]): x = math_ops.conj(x) x2 = math_ops.square(x) one = constant_op.constant(1, dtype=grad.dtype) den = math_ops.sqrt(math_ops.subtract(one, x2)) if compat.forward_compatible(2019, 9, 14): return -math_ops.xdivy(grad, den) else: inv = math_ops.reciprocal(den) return -grad * inv @ops.RegisterGradient("Atan") def _AtanGrad(op, grad): """Returns grad * 1/ (1 + x^2).""" x = op.inputs[0] with ops.control_dependencies([grad]): x = math_ops.conj(x) x2 = math_ops.square(x) one = constant_op.constant(1, dtype=grad.dtype) inv = math_ops.reciprocal(math_ops.add(one, x2)) return grad * inv @ops.RegisterGradient("Atan2") def _Atan2Grad(op, grad): """Returns grad * x / (x^2 + y^2), grad * -y / (x^2 + y^2).""" y = op.inputs[0] x = op.inputs[1] with ops.control_dependencies([grad]): if compat.forward_compatible(2019, 9, 14): grad_inv = math_ops.xdivy(grad, (math_ops.square(x) + math_ops.square(y))) else: grad_inv = grad / (math_ops.square(x) + math_ops.square(y)) return x * grad_inv, -y * grad_inv @ops.RegisterGradient("AddN") def _AddNGrad(op, grad): """Copies the gradient to all inputs.""" # Not broadcasting. return [grad] * len(op.inputs) def _ShapesFullySpecifiedAndEqual(x, y, grad): # pylint: disable=protected-access x_shape = x._shape_tuple() y_shape = y._shape_tuple() grad_shape = grad._shape_tuple() # pylint: enable=protected-access return (x_shape == y_shape and x_shape == grad_shape and x_shape is not None and None not in x_shape) @ops.RegisterGradient("Add") @ops.RegisterGradient("AddV2") def _AddGrad(op, grad): """Gradient for Add.""" y = op.inputs[1] skip_input_indices = None try: skip_input_indices = op.skip_input_indices if skip_input_indices is not None and 1 in skip_input_indices and _IsScalar( y): return grad, None except AttributeError: # No gradient skipping, so do the full gradient computation pass x = op.inputs[0] if (isinstance(grad, ops.Tensor) and _ShapesFullySpecifiedAndEqual(x, y, grad)): return grad, grad (sx, rx, must_reduce_x), (sy, ry, must_reduce_y) = ( SmartBroadcastGradientArgs(x, y, grad)) if skip_input_indices is not None and 0 in skip_input_indices: gx = None elif not must_reduce_x: gx = grad else: gx = array_ops.reshape(math_ops.reduce_sum(grad, rx), sx) if skip_input_indices is not None and 1 in skip_input_indices: gy = None elif not must_reduce_y: gy = grad else: gy = array_ops.reshape(math_ops.reduce_sum(grad, ry), sy) return (gx, gy) @ops.RegisterGradient("Sub") def _SubGrad(op, grad): """Gradient for Sub.""" y = op.inputs[1] skip_input_indices = None try: skip_input_indices = op.skip_input_indices if skip_input_indices is not None and 1 in skip_input_indices and _IsScalar( y): return grad, None except AttributeError: # No gradient skipping, so do the full gradient computation pass x = op.inputs[0] if (isinstance(grad, ops.Tensor) and _ShapesFullySpecifiedAndEqual(x, y, grad)): return grad, -grad (sx, rx, must_reduce_x), (sy, ry, must_reduce_y) = ( SmartBroadcastGradientArgs(x, y, grad)) if skip_input_indices is not None and 0 in skip_input_indices: gx = None elif not must_reduce_x: gx = grad else: gx = 
array_ops.reshape(math_ops.reduce_sum(grad, rx), sx) if skip_input_indices is not None and 1 in skip_input_indices: gy = None elif not must_reduce_y: gy = -grad else: gy = array_ops.reshape(math_ops.reduce_sum(-grad, ry), sy) return (gx, gy) @ops.RegisterGradient("Mul") def _MulGrad(op, grad): """The gradient of scalar multiplication.""" y = op.inputs[1] skip_input_indices = None try: skip_input_indices = op.skip_input_indices if skip_input_indices is not None and 1 in skip_input_indices and _IsScalar( y): return gen_math_ops.mul(grad, math_ops.conj(y)), None except AttributeError: # No gradient skipping, so do the full gradient computation pass x = op.inputs[0] if (isinstance(grad, ops.Tensor) and _ShapesFullySpecifiedAndEqual(x, y, grad) and grad.dtype in (dtypes.int32, dtypes.float32)): return gen_math_ops.mul(grad, y), gen_math_ops.mul(grad, x) assert x.dtype.base_dtype == y.dtype.base_dtype, (x.dtype, " vs. ", y.dtype) (sx, rx, must_reduce_x), (sy, ry, must_reduce_y) = ( SmartBroadcastGradientArgs(x, y, grad)) x = math_ops.conj(x) y = math_ops.conj(y) if skip_input_indices is not None and 0 in skip_input_indices: gx = None elif not must_reduce_x: gx = gen_math_ops.mul(grad, y) else: gx = array_ops.reshape( math_ops.reduce_sum(gen_math_ops.mul(grad, y), rx), sx) if skip_input_indices is not None and 1 in skip_input_indices: gy = None elif not must_reduce_y: gy = gen_math_ops.mul(x, grad) else: gy = array_ops.reshape( math_ops.reduce_sum(gen_math_ops.mul(x, grad), ry), sy) return (gx, gy) @ops.RegisterGradient("MulNoNan") def _MulNoNanGrad(op, grad): """The gradient of scalar multiplication with NaN-suppression.""" x = op.inputs[0] y = op.inputs[1] if (isinstance(grad, ops.Tensor) and _ShapesFullySpecifiedAndEqual(x, y, grad)): return gen_math_ops.mul_no_nan(grad, y), gen_math_ops.mul_no_nan(x, grad) assert x.dtype.base_dtype == y.dtype.base_dtype, (x.dtype, " vs. 
", y.dtype) sx = array_ops.shape(x) sy = array_ops.shape(y) rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy) return (array_ops.reshape( math_ops.reduce_sum(gen_math_ops.mul_no_nan(grad, y), rx), sx), array_ops.reshape( math_ops.reduce_sum(gen_math_ops.mul_no_nan(x, grad), ry), sy)) @ops.RegisterGradient("Div") def _DivGrad(op, grad): """The gradient for the Div operator.""" x = op.inputs[0] y = op.inputs[1] sx = array_ops.shape(x) sy = array_ops.shape(y) rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy) x = math_ops.conj(x) y = math_ops.conj(y) if compat.forward_compatible(2019, 9, 14): return (array_ops.reshape( math_ops.reduce_sum(math_ops.xdivy(grad, y), rx), sx), array_ops.reshape( math_ops.reduce_sum( math_ops.mul_no_nan( math_ops.divide(math_ops.divide(-x, y), y), grad), ry), sy)) else: return (array_ops.reshape( math_ops.reduce_sum(math_ops.divide(grad, y), rx), sx), array_ops.reshape( math_ops.reduce_sum( grad * math_ops.divide(math_ops.divide(-x, y), y), ry), sy)) @ops.RegisterGradient("FloorDiv") def _FloorDivGrad(_, unused_grad): """The gradient for the FloorDiv operator.""" return None, None @ops.RegisterGradient("FloorMod") def _FloorModGrad(op, grad): """Returns grad * (1, -floor(x/y)).""" x = math_ops.conj(op.inputs[0]) y = math_ops.conj(op.inputs[1]) sx = array_ops.shape(x) sy = array_ops.shape(y) rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy) floor_xy = math_ops.floor_div(x, y) gx = array_ops.reshape(math_ops.reduce_sum(grad, rx), sx) gy = array_ops.reshape( math_ops.reduce_sum(grad * math_ops.negative(floor_xy), ry), sy) return gx, gy @ops.RegisterGradient("TruncateDiv") def _TruncateDivGrad(_, unused_grad): return None, None @ops.RegisterGradient("RealDiv") def _RealDivGrad(op, grad): """RealDiv op gradient.""" x = op.inputs[0] y = op.inputs[1] sx = array_ops.shape(x) sy = array_ops.shape(y) rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy) x = math_ops.conj(x) y = math_ops.conj(y) if compat.forward_compatible(2019, 9, 14): return (array_ops.reshape( math_ops.reduce_sum(math_ops.xdivy(grad, y), rx), sx), array_ops.reshape( math_ops.reduce_sum( math_ops.mul_no_nan( math_ops.realdiv(math_ops.realdiv(-x, y), y), grad), ry), sy)) else: return (array_ops.reshape( math_ops.reduce_sum(math_ops.realdiv(grad, y), rx), sx), array_ops.reshape( math_ops.reduce_sum( grad * math_ops.realdiv(math_ops.realdiv(-x, y), y), ry), sy)) @ops.RegisterGradient("DivNoNan") def _DivNoNanGrad(op, grad): """DivNoNan op gradient.""" x = op.inputs[0] y = op.inputs[1] sx = array_ops.shape(x) sy = array_ops.shape(y) rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy) x = math_ops.conj(x) y = math_ops.conj(y) if compat.forward_compatible(2019, 9, 14): return (array_ops.reshape( math_ops.reduce_sum(math_ops.div_no_nan(grad, y), rx), sx), array_ops.reshape( math_ops.reduce_sum( math_ops.mul_no_nan( math_ops.div_no_nan(math_ops.div_no_nan(-x, y), y), grad), ry), sy)) else: return (array_ops.reshape( math_ops.reduce_sum(math_ops.div_no_nan(grad, y), rx), sx), array_ops.reshape( math_ops.reduce_sum( grad * math_ops.div_no_nan(math_ops.div_no_nan(-x, y), y), ry), sy)) @ops.RegisterGradient("Pow") def _PowGrad(op, grad): """Returns grad * (y*x^(y-1), z*log(x)).""" x = op.inputs[0] y = op.inputs[1] use_mul_no_nan = compat.forward_compatible(2019, 9, 14) skip_input_indices = None try: skip_input_indices = op.skip_input_indices # TODO(mrry): If `y` is a constant, we can combine `tf.sub()` and the # constant `1` into a single constant op. 
if skip_input_indices is not None and 1 in skip_input_indices and _IsScalar( y): x = math_ops.conj(x) y = math_ops.conj(y) if use_mul_no_nan: return gen_math_ops.mul_no_nan(y * math_ops.pow(x, y - 1), grad), None else: return grad * y * math_ops.pow(x, y - 1), None except AttributeError: # No gradient skipping, so do the full gradient computation pass (sx, rx, must_reduce_x), (sy, ry, must_reduce_y) = ( SmartBroadcastGradientArgs(x, y, grad)) x = math_ops.conj(x) y = math_ops.conj(y) if skip_input_indices is None or 0 not in skip_input_indices: if use_mul_no_nan: gx = gen_math_ops.mul_no_nan(y * math_ops.pow(x, y - 1), grad) else: gx = grad * y * math_ops.pow(x, y - 1) if must_reduce_x: gx = array_ops.reshape(math_ops.reduce_sum(gx, rx), sx) else: gx = None if skip_input_indices is None or 1 not in skip_input_indices: z = math_ops.conj(op.outputs[0]) # Avoid false singularity at x = 0 if x.dtype.is_complex: # real(x) < 0 is fine for the complex case mask = math_ops.not_equal(x, 0) else: # There's no sensible real value to return if x < 0, so return 0 mask = x > 0 safe_x = array_ops.where(mask, x, array_ops.ones_like(x)) log_x = array_ops.where(mask, math_ops.log(safe_x), array_ops.zeros_like(x)) if use_mul_no_nan: gy = gen_math_ops.mul_no_nan(z * log_x, grad) else: gy = grad * z * log_x if must_reduce_y: gy = array_ops.reshape(math_ops.reduce_sum(gy, ry), sy) else: gy = None return gx, gy def _MaximumMinimumGradInputOnly(op, grad, selector_op): x = op.inputs[0] y = op.inputs[1] zeros = array_ops.zeros_like(grad) xmask = selector_op(x, y) xgrad = array_ops.where(xmask, grad, zeros) ygrad = None # Return None for ygrad since the config allows that. return (xgrad, ygrad) def _MaximumMinimumGrad(op, grad, selector_op): """Factor out the code for the gradient of Maximum or Minimum.""" y = op.inputs[1] skip_input_indices = None try: skip_input_indices = op.skip_input_indices if skip_input_indices is not None and 1 in skip_input_indices and _IsScalar( y): # When we want to get gradients for the first input only, and the second # input tensor is a scalar, we can do a much simpler calculation return _MaximumMinimumGradInputOnly(op, grad, selector_op) except AttributeError: # No gradient skipping, so do the full gradient computation pass x = op.inputs[0] gdtype = grad.dtype sx = array_ops.shape(x) sy = array_ops.shape(y) gradshape = array_ops.shape(grad) zeros = array_ops.zeros(gradshape, gdtype) xmask = selector_op(x, y) rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy) if skip_input_indices is not None and 0 in skip_input_indices: gx = None else: xgrad = array_ops.where(xmask, grad, zeros) gx = array_ops.reshape(math_ops.reduce_sum(xgrad, rx), sx) if skip_input_indices is not None and 1 in skip_input_indices: gy = None else: ygrad = array_ops.where(xmask, zeros, grad) gy = array_ops.reshape(math_ops.reduce_sum(ygrad, ry), sy) return (gx, gy) @ops.RegisterGradient("Maximum") def _MaximumGrad(op, grad): """Returns grad*(x > y, x <= y) with type of grad.""" return _MaximumMinimumGrad(op, grad, math_ops.greater_equal) @ops.RegisterGradient("Minimum") def _MinimumGrad(op, grad): """Returns grad*(x < y, x >= y) with type of grad.""" return _MaximumMinimumGrad(op, grad, math_ops.less_equal) @ops.RegisterGradient("SquaredDifference") def _SquaredDifferenceGrad(op, grad): """Returns the gradient for (x-y)^2.""" x = op.inputs[0] y = op.inputs[1] skip_input_indices = None try: skip_input_indices = op.skip_input_indices except AttributeError: # No gradient skipping, so do the full gradient computation 
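    # Added note: the full computation below uses d/dx (x - y)**2 = 2 * (x - y)
    # and d/dy (x - y)**2 = -2 * (x - y), reducing over broadcast axes where
    # the input shapes differ.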
pass with ops.control_dependencies([grad]): # The parens ensure that if grad is IndexedSlices, it'll get multiplied by # Tensor (not a number like 2.0) which causes it to convert to Tensor. x_grad = math_ops.scalar_mul(2.0, grad) * (x - y) if (isinstance(grad, ops.Tensor) and _ShapesFullySpecifiedAndEqual(x, y, grad)): return x_grad, -x_grad (sx, rx, must_reduce_x), (sy, ry, must_reduce_y) = ( SmartBroadcastGradientArgs(x, y, grad)) if skip_input_indices is not None and 0 in skip_input_indices: gx = None elif must_reduce_x: gx = array_ops.reshape(math_ops.reduce_sum(x_grad, rx), sx) else: gx = x_grad if skip_input_indices is not None and 1 in skip_input_indices: gy = None elif must_reduce_y: gy = -array_ops.reshape(math_ops.reduce_sum(x_grad, ry), sy) else: gy = -x_grad return (gx, gy) # Logical operations have no gradients. ops.NotDifferentiable("Less") ops.NotDifferentiable("LessEqual") ops.NotDifferentiable("Greater") ops.NotDifferentiable("GreaterEqual") ops.NotDifferentiable("Equal") ops.NotDifferentiable("ApproximateEqual") ops.NotDifferentiable("NotEqual") ops.NotDifferentiable("LogicalAnd") ops.NotDifferentiable("LogicalOr") ops.NotDifferentiable("LogicalNot") @ops.RegisterGradient("Select") def _SelectGrad(op, grad): c = op.inputs[0] x = op.inputs[1] zeros = array_ops.zeros_like(x) return (None, array_ops.where(c, grad, zeros), array_ops.where( c, zeros, grad)) @ops.RegisterGradient("SelectV2") def _SelectGradV2(op, grad): c = op.inputs[0] x = op.inputs[1] y = op.inputs[2] zeros = array_ops.zeros([], dtype=grad.dtype.base_dtype) gx = array_ops.where_v2(c, grad, zeros) x_shape = array_ops.shape(x) output_shape = array_ops.shape(op.outputs[0]) # Reduce away broadcasted leading dims. reduce_x, _ = gen_array_ops.broadcast_gradient_args(x_shape, output_shape) gx = math_ops.reduce_sum(gx, keepdims=True, axis=reduce_x) gx = array_ops.reshape(gx, x_shape) gy = array_ops.where_v2(c, zeros, grad) y_shape = array_ops.shape(y) # Reduce away broadcasted leading dims. 
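  # Added shape example (illustrative): if y has shape [3] and the output has
  # shape [2, 3], broadcast_gradient_args reports axis 0, so gy is summed over
  # that axis (keepdims=True) and reshaped back to [3].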
reduce_y, _ = gen_array_ops.broadcast_gradient_args(y_shape, output_shape) gy = math_ops.reduce_sum(gy, keepdims=True, axis=reduce_y) gy = array_ops.reshape(gy, y_shape) return (None, gx, gy) def _MatMulGradAgainstFirstOnly(op, grad): """Gradient for MatMul, only for the first input.""" t_a = op.get_attr("transpose_a") t_b = op.get_attr("transpose_b") b = math_ops.conj(op.inputs[1]) if not t_a and not t_b: grad_a = gen_math_ops.mat_mul(grad, b, transpose_b=True) elif not t_a and t_b: grad_a = gen_math_ops.mat_mul(grad, b) elif t_a and not t_b: grad_a = gen_math_ops.mat_mul(b, grad, transpose_b=True) elif t_a and t_b: grad_a = gen_math_ops.mat_mul(b, grad, transpose_a=True, transpose_b=True) return grad_a, None def _MatMulGradAgainstSecondOnly(op, grad): """Gradient for MatMul, only for the second input.""" t_a = op.get_attr("transpose_a") t_b = op.get_attr("transpose_b") a = math_ops.conj(op.inputs[0]) if not t_a and not t_b: grad_b = gen_math_ops.mat_mul(a, grad, transpose_a=True) elif not t_a and t_b: grad_b = gen_math_ops.mat_mul(grad, a, transpose_a=True) elif t_a and not t_b: grad_b = gen_math_ops.mat_mul(a, grad) elif t_a and t_b: grad_b = gen_math_ops.mat_mul(grad, a, transpose_a=True, transpose_b=True) return None, grad_b @ops.RegisterGradient("MatMul") def _MatMulGrad(op, grad): """Gradient for MatMul.""" try: skip_input_indices = op.skip_input_indices if skip_input_indices is not None: if 1 in skip_input_indices: return _MatMulGradAgainstFirstOnly(op, grad) elif 0 in skip_input_indices: return _MatMulGradAgainstSecondOnly(op, grad) except AttributeError: # No gradient skipping, so do the full gradient computation pass t_a = op.get_attr("transpose_a") t_b = op.get_attr("transpose_b") a = math_ops.conj(op.inputs[0]) b = math_ops.conj(op.inputs[1]) if not t_a and not t_b: grad_a = gen_math_ops.mat_mul(grad, b, transpose_b=True) grad_b = gen_math_ops.mat_mul(a, grad, transpose_a=True) elif not t_a and t_b: grad_a = gen_math_ops.mat_mul(grad, b) grad_b = gen_math_ops.mat_mul(grad, a, transpose_a=True) elif t_a and not t_b: grad_a = gen_math_ops.mat_mul(b, grad, transpose_b=True) grad_b = gen_math_ops.mat_mul(a, grad) elif t_a and t_b: grad_a = gen_math_ops.mat_mul(b, grad, transpose_a=True, transpose_b=True) grad_b = gen_math_ops.mat_mul(grad, a, transpose_a=True, transpose_b=True) return grad_a, grad_b @ops.RegisterGradient("SparseMatMul") def _SparseMatMulGrad(op, grad): """Gradient for SparseMatMul.""" t_a = op.get_attr("transpose_a") t_b = op.get_attr("transpose_b") is_sparse = object_identity.ObjectIdentityDictionary() is_sparse[op.inputs[0]] = op.get_attr("a_is_sparse") is_sparse[op.inputs[1]] = op.get_attr("b_is_sparse") # Use heuristic to figure out if grad might be sparse is_sparse[grad] = not context.executing_eagerly() and ( grad.op.type == "ReluGrad") def _SparseMatMul(t1, t2, out_dtype, transpose_a=False, transpose_b=False): """Helper function to create SparseMatMul op.""" assert t1 in is_sparse and t2 in is_sparse t1_sparse = is_sparse[t1] t2_sparse = is_sparse[t2] if transpose_b: t2 = array_ops.transpose(t2) transpose_b = False prod = math_ops.matmul( t1, t2, transpose_a=transpose_a, transpose_b=transpose_b, a_is_sparse=t1_sparse, b_is_sparse=t2_sparse) if prod.dtype != out_dtype: prod = math_ops.cast(prod, out_dtype) return prod dtype_a = op.inputs[0].dtype dtype_b = op.inputs[1].dtype if not t_a and not t_b: return (_SparseMatMul(grad, op.inputs[1], dtype_a, transpose_b=True), _SparseMatMul(op.inputs[0], grad, dtype_b, transpose_a=True)) elif not t_a and t_b: return 
(_SparseMatMul(grad, op.inputs[1], dtype_a), _SparseMatMul(grad, op.inputs[0], dtype_b, transpose_a=True)) elif t_a and not t_b: return (_SparseMatMul(op.inputs[1], grad, dtype_a, transpose_b=True), _SparseMatMul(op.inputs[0], grad, dtype_b)) elif t_a and t_b: return (_SparseMatMul( op.inputs[1], grad, dtype_a, transpose_a=True, transpose_b=True), _SparseMatMul( grad, op.inputs[0], dtype_b, transpose_a=True, transpose_b=True)) @ops.RegisterGradient("Floor") def _FloorGrad(_, unused_grad): return [None] @ops.RegisterGradient("Ceil") def _CeilGrad(_, unused_grad): return [None] @ops.RegisterGradient("Round") def _RoundGrad(_, unused_grad): return [None] @ops.RegisterGradient("Rint") def _RintGrad(_, unused_grad): # the gradient of Rint is zero return [None] @ops.RegisterGradient("BatchMatMul") def _BatchMatMul(op, grad): """Returns the gradient of x and y given the gradient of x * y.""" x = op.inputs[0] y = op.inputs[1] adj_x = op.get_attr("adj_x") adj_y = op.get_attr("adj_y") if not adj_x: if not adj_y: grad_x = math_ops.matmul(grad, y, adjoint_a=False, adjoint_b=True) grad_y = math_ops.matmul(x, grad, adjoint_a=True, adjoint_b=False) else: grad_x = math_ops.matmul(grad, y, adjoint_a=False, adjoint_b=False) grad_y = math_ops.matmul(grad, x, adjoint_a=True, adjoint_b=False) else: if not adj_y: grad_x = math_ops.matmul(y, grad, adjoint_a=False, adjoint_b=True) grad_y = math_ops.matmul(x, grad, adjoint_a=False, adjoint_b=False) else: grad_x = math_ops.matmul(y, grad, adjoint_a=True, adjoint_b=True) grad_y = math_ops.matmul(grad, x, adjoint_a=True, adjoint_b=True) return grad_x, grad_y @ops.RegisterGradient("BatchMatMulV2") def _BatchMatMulV2(op, grad): """Returns the gradient of x and y given the gradient of x * y.""" x = op.inputs[0] y = op.inputs[1] adj_x = op.get_attr("adj_x") adj_y = op.get_attr("adj_y") if not adj_x: if not adj_y: grad_x = math_ops.matmul(grad, y, adjoint_a=False, adjoint_b=True) grad_y = math_ops.matmul(x, grad, adjoint_a=True, adjoint_b=False) else: grad_x = math_ops.matmul(grad, y, adjoint_a=False, adjoint_b=False) grad_y = math_ops.matmul(grad, x, adjoint_a=True, adjoint_b=False) else: if not adj_y: grad_x = math_ops.matmul(y, grad, adjoint_a=False, adjoint_b=True) grad_y = math_ops.matmul(x, grad, adjoint_a=False, adjoint_b=False) else: grad_x = math_ops.matmul(y, grad, adjoint_a=True, adjoint_b=True) grad_y = math_ops.matmul(grad, x, adjoint_a=True, adjoint_b=True) # Reduce along the broadcasted batch dimensions, if broadcasting is required. 
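  # Added shape example (illustrative): with x of shape [5, 1, 2, 3] and y of
  # shape [4, 3, 6], the batch dims [5, 1] and [4] broadcast to [5, 4]; grad_x
  # and grad_y are therefore summed over their broadcast batch axes below and
  # reshaped back to the original input shapes.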
shape_x_static = x.get_shape() shape_y_static = y.get_shape() if not (shape_x_static.is_fully_defined() and shape_y_static.is_fully_defined() and shape_x_static == shape_y_static): sx = array_ops.shape(x) sy = array_ops.shape(y) rx, ry = gen_array_ops.broadcast_gradient_args(sx[:-2], sy[:-2]) grad_x = array_ops.reshape(math_ops.reduce_sum(grad_x, rx), sx) grad_y = array_ops.reshape(math_ops.reduce_sum(grad_y, ry), sy) return grad_x, grad_y ops.NotDifferentiable("Range") ops.NotDifferentiable("LinSpace") @ops.RegisterGradient("Complex") def _ComplexGrad(op, grad): """Returns the real and imaginary components of 'grad', respectively.""" x = op.inputs[0] y = op.inputs[1] sx = array_ops.shape(x) sy = array_ops.shape(y) rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy) return (array_ops.reshape(math_ops.reduce_sum(math_ops.real(grad), rx), sx), array_ops.reshape(math_ops.reduce_sum(math_ops.imag(grad), ry), sy)) @ops.RegisterGradient("Real") def _RealGrad(_, grad): """Returns 'grad' as the real part and set the imaginary part 0.""" zero = constant_op.constant(0, dtype=grad.dtype) return math_ops.complex(grad, zero) @ops.RegisterGradient("Imag") def _ImagGrad(_, grad): """Returns 'grad' as the imaginary part and set the real part 0.""" zero = constant_op.constant(0, dtype=grad.dtype) return math_ops.complex(zero, grad) @ops.RegisterGradient("Angle") def _AngleGrad(op, grad): """Returns -grad / (Im(x) + iRe(x))""" x = op.inputs[0] with ops.control_dependencies([grad]): re = math_ops.real(x) im = math_ops.imag(x) z = math_ops.reciprocal(math_ops.complex(im, re)) zero = constant_op.constant(0, dtype=grad.dtype) complex_grad = math_ops.complex(grad, zero) return -complex_grad * z @ops.RegisterGradient("Conj") def _ConjGrad(_, grad): """Returns the complex conjugate of grad.""" return math_ops.conj(grad) @ops.RegisterGradient("ComplexAbs") def _ComplexAbsGrad(op, grad): """Returns the gradient of ComplexAbs.""" return math_ops.div_no_nan( math_ops.complex( grad, array_ops.zeros_like(grad)) * op.inputs[0], math_ops.complex( op.outputs[0], array_ops.zeros_like(op.outputs[0]))) @ops.RegisterGradient("Cast") def _CastGrad(op, grad): t = [ dtypes.float16, dtypes.float32, dtypes.float64, dtypes.bfloat16, dtypes.complex64, dtypes.complex128 ] src_type = op.inputs[0].dtype.base_dtype dst_type = grad.dtype.base_dtype if src_type in t and dst_type in t: return math_ops.cast(grad, src_type) else: return None @ops.RegisterGradient("Cross") def _CrossGrad(op, grad): u = op.inputs[0] v = op.inputs[1] return (math_ops.cross(v, grad), math_ops.cross(grad, u)) @ops.RegisterGradient("Cumsum") def _CumsumGrad(op, grad): axis = op.inputs[1] exclusive = op.get_attr("exclusive") reverse = op.get_attr("reverse") return [ math_ops.cumsum(grad, axis, exclusive=exclusive, reverse=not reverse), None ] @ops.RegisterGradient("Cumprod") def _CumprodGrad(op, grad): x = op.inputs[0] axis = op.inputs[1] exclusive = op.get_attr("exclusive") reverse = op.get_attr("reverse") # TODO This fails when x contains 0 and should be fixed prod = math_ops.cumprod(x, axis, exclusive=exclusive, reverse=reverse) out = math_ops.cumsum( prod * grad, axis, exclusive=exclusive, reverse=not reverse) return [out / x, None] @ops.RegisterGradient("CumulativeLogsumexp") def _CumulativeLogsumexpGrad(op, grad): x = op.inputs[0] axis = op.inputs[1] cumulative_logsumexp = op.outputs[0] exclusive = op.get_attr("exclusive") reverse = op.get_attr("reverse") # Split the incoming gradient into positive and negative part # in order to take logs. 
This is required for stable results. log_grad_positive = array_ops.where_v2( math_ops.greater(grad, 0), math_ops.log(grad), grad.dtype.min) log_grad_negative = array_ops.where_v2( math_ops.less(grad, 0), math_ops.log(-grad), grad.dtype.min) output_pos = math_ops.exp( math_ops.cumulative_logsumexp( log_grad_positive - cumulative_logsumexp, axis=axis, reverse=not reverse, exclusive=exclusive) + x) output_neg = math_ops.exp( math_ops.cumulative_logsumexp( log_grad_negative - cumulative_logsumexp, axis=axis, reverse=not reverse, exclusive=exclusive) + x) return [output_pos - output_neg, None] @ops.RegisterGradient("NextAfter") def _NextAfterGrad(op, grad): """Returns gradient of nextafter(x1, x2) with respect to x1 and x2.""" x1 = op.inputs[0] x2 = op.inputs[1] s_x1 = array_ops.shape(x1) s_x2 = array_ops.shape(x2) r_x1, r_x2 = gen_array_ops.broadcast_gradient_args(s_x1, s_x2) with ops.control_dependencies([grad]): partial_x1 = array_ops.ones(s_x1, dtype=x1.dtype) partial_x2 = array_ops.zeros(s_x2, dtype=x2.dtype) return (array_ops.reshape( math_ops.reduce_sum(partial_x1 * grad, r_x1), s_x1), array_ops.reshape( math_ops.reduce_sum(partial_x2 * grad, r_x2), s_x2))
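# ------------------------------------------------------------------------------
# Added sketch (not part of the original module): a minimal way to sanity-check
# one of the gradients registered above against a central finite-difference
# estimate. It assumes a TF 2.x runtime with eager execution; the helper name
# `_finite_difference_check_square` is made up for illustration and is not
# referenced anywhere else in this file.
def _finite_difference_check_square():
  import numpy as np  # local imports keep the module's import section untouched
  import tensorflow as tf

  x = tf.constant([0.5, 1.0, 2.0], dtype=tf.float64)
  with tf.GradientTape() as tape:
    tape.watch(x)
    y = tf.square(x)  # forward op whose gradient is registered by _SquareGrad
  analytic = tape.gradient(y, x)  # expected to equal 2 * x elementwise

  eps = 1e-6  # central differences, elementwise because square is elementwise
  xv = x.numpy()
  numeric = (np.square(xv + eps) - np.square(xv - eps)) / (2 * eps)
  np.testing.assert_allclose(analytic.numpy(), numeric, rtol=1e-6)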
the-stack_0_14218
from __future__ import print_function, absolute_import, division #makes KratosMultiphysics backward compatible with python 2.6 and 2.7 # importing the Kratos Library import KratosMultiphysics import KratosMultiphysics.ShallowWaterApplication as Shallow # Check that KratosMultiphysics was imported in the main script KratosMultiphysics.CheckForPreviousImport() ## Import base class file from shallow_water_base_solver import ShallowWaterBaseSolver def CreateSolver(model, custom_settings): return EulerianPrimitiveVarSolver(model, custom_settings) class EulerianPrimitiveVarSolver(ShallowWaterBaseSolver): def __init__(self, model, custom_settings): super(EulerianPrimitiveVarSolver, self).__init__(model, custom_settings) # Set the element and condition names for the replace settings self.element_name = "EulerPrimVarElement" self.condition_name = "Condition" self.min_buffer_size = 2 def AddDofs(self): KratosMultiphysics.VariableUtils().AddDof(KratosMultiphysics.VELOCITY_X, self.main_model_part) KratosMultiphysics.VariableUtils().AddDof(KratosMultiphysics.VELOCITY_Y, self.main_model_part) KratosMultiphysics.VariableUtils().AddDof(Shallow.HEIGHT, self.main_model_part) self.print_on_rank_zero("::[EulerianPrimitiveVarSolver]::", "Shallow water solver DOFs added correctly.") def SolveSolutionStep(self): if self._TimeBufferIsInitialized: # If a node and it's neighbours are dry, set ACTIVE flag to false self.ShallowVariableUtils.SetDryWetState() # Solve equations on mesh is_converged = self.solver.SolveSolutionStep() # Compute free surface self.ShallowVariableUtils.ComputeFreeSurfaceElevation() # If water height is negative or close to zero, reset values self.ShallowVariableUtils.CheckDryPrimitiveVariables() return is_converged
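# ------------------------------------------------------------------------------
# Added usage sketch (not part of the original solver). The settings keys below
# are illustrative only; the actual schema is defined by ShallowWaterBaseSolver
# and the project parameters file of the application:
#
#   model = KratosMultiphysics.Model()
#   settings = KratosMultiphysics.Parameters("""{
#       "model_part_name" : "main_model_part",
#       "echo_level"      : 1
#   }""")
#   solver = CreateSolver(model, settings)
#   solver.AddDofs()
#   ... (model part import and solver initialization omitted) ...
#   is_converged = solver.SolveSolutionStep()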
the-stack_0_14219
#!/usr/bin/env python3 # Copyright (c) 2014-2018 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Run regression test suite. This module calls down into individual test cases via subprocess. It will forward all unrecognized arguments onto the individual test scripts. For a description of arguments recognized by test scripts, see `test/functional/test_framework/test_framework.py:BitcoinTestFramework.main`. """ import argparse from collections import deque import configparser import datetime import os import time import shutil import signal import sys import subprocess import tempfile import re import logging # Formatting. Default colors to empty strings. BOLD, GREEN, RED, GREY = ("", ""), ("", ""), ("", ""), ("", "") try: # Make sure python thinks it can write unicode to its stdout "\u2713".encode("utf_8").decode(sys.stdout.encoding) TICK = "✓ " CROSS = "✖ " CIRCLE = "○ " except UnicodeDecodeError: TICK = "P " CROSS = "x " CIRCLE = "o " if os.name != 'nt' or sys.getwindowsversion() >= (10, 0, 14393): if os.name == 'nt': import ctypes kernel32 = ctypes.windll.kernel32 ENABLE_VIRTUAL_TERMINAL_PROCESSING = 4 STD_OUTPUT_HANDLE = -11 STD_ERROR_HANDLE = -12 # Enable ascii color control to stdout stdout = kernel32.GetStdHandle(STD_OUTPUT_HANDLE) stdout_mode = ctypes.c_int32() kernel32.GetConsoleMode(stdout, ctypes.byref(stdout_mode)) kernel32.SetConsoleMode(stdout, stdout_mode.value | ENABLE_VIRTUAL_TERMINAL_PROCESSING) # Enable ascii color control to stderr stderr = kernel32.GetStdHandle(STD_ERROR_HANDLE) stderr_mode = ctypes.c_int32() kernel32.GetConsoleMode(stderr, ctypes.byref(stderr_mode)) kernel32.SetConsoleMode(stderr, stderr_mode.value | ENABLE_VIRTUAL_TERMINAL_PROCESSING) # primitive formatting on supported # terminal via ANSI escape sequences: BOLD = ('\033[0m', '\033[1m') GREEN = ('\033[0m', '\033[0;32m') RED = ('\033[0m', '\033[0;31m') GREY = ('\033[0m', '\033[1;30m') TEST_EXIT_PASSED = 0 TEST_EXIT_SKIPPED = 77 BASE_SCRIPTS = [ # Scripts that are run by the travis build process. 
# Longest test should go first, to favor running tests in parallel 'feature_fee_estimation.py', 'wallet_hd.py', 'wallet_backup.py', # vv Tests less than 5m vv 'mining_getblocktemplate_longpoll.py', 'feature_maxuploadtarget.py', 'feature_block.py', 'rpc_fundrawtransaction.py', 'p2p_compactblocks.py', 'feature_segwit.py', # vv Tests less than 2m vv 'wallet_basic.py', 'wallet_labels.py', 'p2p_segwit.py', 'p2p_timeouts.py', 'wallet_dump.py', 'wallet_listtransactions.py', # vv Tests less than 60s vv 'p2p_sendheaders.py', 'wallet_zapwallettxes.py', 'wallet_importmulti.py', 'mempool_limit.py', 'rpc_txoutproof.py', 'wallet_listreceivedby.py', 'wallet_abandonconflict.py', 'feature_csv_activation.py', 'rpc_rawtransaction.py', 'wallet_address_types.py', 'feature_bip68_sequence.py', 'p2p_feefilter.py', 'feature_reindex.py', # vv Tests less than 30s vv 'wallet_keypool_topup.py', 'interface_zmq.py', 'interface_bitcoin_cli.py', 'mempool_resurrect.py', 'wallet_txn_doublespend.py --mineblock', 'tool_wallet.py', 'wallet_txn_clone.py', 'wallet_txn_clone.py --segwit', 'rpc_getchaintips.py', 'rpc_misc.py', 'interface_rest.py', 'mempool_spend_coinbase.py', 'mempool_reorg.py', 'mempool_persist.py', 'wallet_multiwallet.py', 'wallet_multiwallet.py --usecli', 'wallet_createwallet.py', 'wallet_createwallet.py --usecli', 'interface_http.py', 'interface_rpc.py', 'rpc_psbt.py', 'rpc_users.py', 'feature_proxy.py', 'rpc_signrawtransaction.py', 'wallet_groups.py', 'p2p_disconnect_ban.py', 'rpc_decodescript.py', 'rpc_blockchain.py', 'rpc_deprecated.py', 'wallet_disable.py', 'rpc_net.py', 'wallet_keypool.py', 'p2p_mempool.py', 'p2p_blocksonly.py', 'mining_prioritisetransaction.py', 'p2p_invalid_locator.py', 'p2p_invalid_block.py', 'p2p_invalid_messages.py', 'p2p_invalid_tx.py', 'feature_assumevalid.py', 'example_test.py', 'wallet_txn_doublespend.py', 'wallet_txn_clone.py --mineblock', 'feature_notifications.py', 'rpc_invalidateblock.py', 'feature_rbf.py', 'mempool_packages.py', 'rpc_createmultisig.py', 'feature_versionbits_warning.py', 'rpc_preciousblock.py', 'wallet_importprunedfunds.py', 'p2p_leak_tx.py', 'rpc_signmessage.py', 'wallet_balance.py', 'feature_nulldummy.py', 'mempool_accept.py', 'wallet_import_rescan.py', 'wallet_import_with_label.py', 'rpc_bind.py --ipv4', 'rpc_bind.py --ipv6', 'rpc_bind.py --nonloopback', 'mining_basic.py', 'wallet_bumpfee.py', 'rpc_named_arguments.py', 'wallet_listsinceblock.py', 'p2p_leak.py', 'wallet_encryption.py', 'wallet_scriptaddress2.py', 'feature_dersig.py', 'feature_cltv.py', 'rpc_uptime.py', 'wallet_resendwallettransactions.py', 'wallet_fallbackfee.py', 'feature_minchainwork.py', 'rpc_getblockstats.py', 'wallet_create_tx.py', 'p2p_fingerprint.py', 'feature_uacomment.py', 'wallet_coinbase_category.py', 'feature_filelock.py', 'p2p_unrequested_blocks.py', 'feature_includeconf.py', 'rpc_deriveaddresses.py', 'rpc_deriveaddresses.py --usecli', 'rpc_scantxoutset.py', 'feature_logging.py', # 'p2p_node_network_limited.py', # incompatible with Omni 'feature_blocksdir.py', 'feature_config_args.py', 'rpc_help.py', 'feature_help.py', 'feature_shutdown.py', 'omni_reorg.py', 'omni_clientexpiry.py', 'omni_stov1.py', 'omni_freeze.py', 'omni_graceperiod.py', 'omni_createtoken.py', 'omni_freedexspec.py', 'omni_dexversionsspec.py', 'omni_basicspec.py', 'omni_reorgspec.py', 'omni_sendallspec.py', 'omni_crowdsalespec.py', 'omni_smartandmanagedspec.py', 'omni_stospec.py', # Don't append tests at the end to avoid merge conflicts # Put them in a random line within the section that fits their approximate 
run-time ] EXTENDED_SCRIPTS = [ # These tests are not run by the travis build process. # Longest test should go first, to favor running tests in parallel 'feature_pruning.py', 'feature_dbcrash.py', ] # Place EXTENDED_SCRIPTS first since it has the 3 longest running tests ALL_SCRIPTS = EXTENDED_SCRIPTS + BASE_SCRIPTS NON_SCRIPTS = [ # These are python files that live in the functional tests directory, but are not test scripts. "combine_logs.py", "create_cache.py", "test_runner.py", ] def main(): # Parse arguments and pass through unrecognised args parser = argparse.ArgumentParser(add_help=False, usage='%(prog)s [test_runner.py options] [script options] [scripts]', description=__doc__, epilog=''' Help text and arguments for individual test script:''', formatter_class=argparse.RawTextHelpFormatter) parser.add_argument('--combinedlogslen', '-c', type=int, default=0, metavar='n', help='On failure, print a log (of length n lines) to the console, combined from the test framework and all test nodes.') parser.add_argument('--coverage', action='store_true', help='generate a basic coverage report for the RPC interface') parser.add_argument('--ci', action='store_true', help='Run checks and code that are usually only enabled in a continuous integration environment') parser.add_argument('--exclude', '-x', help='specify a comma-separated-list of scripts to exclude.') parser.add_argument('--extended', action='store_true', help='run the extended test suite in addition to the basic tests') parser.add_argument('--help', '-h', '-?', action='store_true', help='print help text and exit') parser.add_argument('--jobs', '-j', type=int, default=4, help='how many test scripts to run in parallel. Default=4.') parser.add_argument('--keepcache', '-k', action='store_true', help='the default behavior is to flush the cache directory on startup. --keepcache retains the cache from the previous testrun.') parser.add_argument('--quiet', '-q', action='store_true', help='only print dots, results summary and failure logs') parser.add_argument('--tmpdirprefix', '-t', default=tempfile.gettempdir(), help="Root directory for datadirs") parser.add_argument('--failfast', action='store_true', help='stop execution after the first test failure') args, unknown_args = parser.parse_known_args() # args to be passed on always start with two dashes; tests are the remaining unknown args tests = [arg for arg in unknown_args if arg[:2] != "--"] passon_args = [arg for arg in unknown_args if arg[:2] == "--"] # Read config generated by configure. config = configparser.ConfigParser() configfile = os.path.abspath(os.path.dirname(__file__)) + "/../config.ini" config.read_file(open(configfile, encoding="utf8")) passon_args.append("--configfile=%s" % configfile) # Set up logging logging_level = logging.INFO if args.quiet else logging.DEBUG logging.basicConfig(format='%(message)s', level=logging_level) # Create base test directory tmpdir = "%s/test_runner_Ł_🏃_%s" % (args.tmpdirprefix, datetime.datetime.now().strftime("%Y%m%d_%H%M%S")) os.makedirs(tmpdir) logging.debug("Temporary test directory at %s" % tmpdir) enable_bitcoind = config["components"].getboolean("ENABLE_BITCOIND") if not enable_bitcoind: print("No functional tests to run.") print("Rerun ./configure with --with-daemon and then make") sys.exit(0) # Build list of tests test_list = [] if tests: # Individual tests have been specified. Run specified tests that exist # in the ALL_SCRIPTS list. Accept the name with or without .py extension. 
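        # Added example (illustrative): `test_runner.py wallet_hd rpc_misc.py`
        # normalizes here to ['wallet_hd.py', 'rpc_misc.py'] before being
        # checked against ALL_SCRIPTS.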
tests = [test + ".py" if ".py" not in test else test for test in tests] for test in tests: if test in ALL_SCRIPTS: test_list.append(test) else: print("{}WARNING!{} Test '{}' not found in full test list.".format(BOLD[1], BOLD[0], test)) elif args.extended: # Include extended tests test_list += ALL_SCRIPTS else: # Run base tests only test_list += BASE_SCRIPTS # Remove the test cases that the user has explicitly asked to exclude. if args.exclude: exclude_tests = [test.split('.py')[0] for test in args.exclude.split(',')] for exclude_test in exclude_tests: # Remove <test_name>.py and <test_name>.py --arg from the test list exclude_list = [test for test in test_list if test.split('.py')[0] == exclude_test] for exclude_item in exclude_list: test_list.remove(exclude_item) if not exclude_list: print("{}WARNING!{} Test '{}' not found in current test list.".format(BOLD[1], BOLD[0], exclude_test)) if not test_list: print("No valid test scripts specified. Check that your test is in one " "of the test lists in test_runner.py, or run test_runner.py with no arguments to run all tests") sys.exit(0) if args.help: # Print help for test_runner.py, then print help of the first script (with args removed) and exit. parser.print_help() subprocess.check_call([sys.executable, os.path.join(config["environment"]["SRCDIR"], 'test', 'functional', test_list[0].split()[0]), '-h']) sys.exit(0) check_script_list(src_dir=config["environment"]["SRCDIR"], fail_on_warn=args.ci) check_script_prefixes() if not args.keepcache: shutil.rmtree("%s/test/cache" % config["environment"]["BUILDDIR"], ignore_errors=True) run_tests( test_list=test_list, src_dir=config["environment"]["SRCDIR"], build_dir=config["environment"]["BUILDDIR"], tmpdir=tmpdir, jobs=args.jobs, enable_coverage=args.coverage, args=passon_args, combined_logs_len=args.combinedlogslen, failfast=args.failfast, runs_ci=args.ci, ) def run_tests(*, test_list, src_dir, build_dir, tmpdir, jobs=1, enable_coverage=False, args=None, combined_logs_len=0, failfast=False, runs_ci): args = args or [] # Warn if bitcoind is already running (unix only) try: if subprocess.check_output(["pidof", "omnilited"]) is not None: print("%sWARNING!%s There is already a omnilited process running on this system. Tests may fail unexpectedly due to resource contention!" % (BOLD[1], BOLD[0])) except (OSError, subprocess.SubprocessError): pass # Warn if there is a cache directory cache_dir = "%s/test/cache" % build_dir if os.path.isdir(cache_dir): print("%sWARNING!%s There is a cache directory here: %s. If tests fail unexpectedly, try deleting the cache directory." 
% (BOLD[1], BOLD[0], cache_dir)) tests_dir = src_dir + '/test/functional/' flags = ['--cachedir={}'.format(cache_dir)] + args if enable_coverage: coverage = RPCCoverage() flags.append(coverage.flag) logging.debug("Initializing coverage directory at %s" % coverage.dir) else: coverage = None if len(test_list) > 1 and jobs > 1: # Populate cache try: subprocess.check_output([sys.executable, tests_dir + 'create_cache.py'] + flags + ["--tmpdir=%s/cache" % tmpdir]) except subprocess.CalledProcessError as e: sys.stdout.buffer.write(e.output) raise #Run Tests job_queue = TestHandler( num_tests_parallel=jobs, tests_dir=tests_dir, tmpdir=tmpdir, test_list=test_list, flags=flags, timeout_duration=40 * 60 if runs_ci else float('inf'), # in seconds ) start_time = time.time() test_results = [] max_len_name = len(max(test_list, key=len)) test_count = len(test_list) for i in range(test_count): test_result, testdir, stdout, stderr = job_queue.get_next() test_results.append(test_result) done_str = "{}/{} - {}{}{}".format(i + 1, test_count, BOLD[1], test_result.name, BOLD[0]) if test_result.status == "Passed": logging.debug("%s passed, Duration: %s s" % (done_str, test_result.time)) elif test_result.status == "Skipped": logging.debug("%s skipped" % (done_str)) else: print("%s failed, Duration: %s s\n" % (done_str, test_result.time)) print(BOLD[1] + 'stdout:\n' + BOLD[0] + stdout + '\n') print(BOLD[1] + 'stderr:\n' + BOLD[0] + stderr + '\n') if combined_logs_len and os.path.isdir(testdir): # Print the final `combinedlogslen` lines of the combined logs print('{}Combine the logs and print the last {} lines ...{}'.format(BOLD[1], combined_logs_len, BOLD[0])) print('\n============') print('{}Combined log for {}:{}'.format(BOLD[1], testdir, BOLD[0])) print('============\n') combined_logs_args = [sys.executable, os.path.join(tests_dir, 'combine_logs.py'), testdir] if BOLD[0]: combined_logs_args += ['--color'] combined_logs, _ = subprocess.Popen(combined_logs_args, universal_newlines=True, stdout=subprocess.PIPE).communicate() print("\n".join(deque(combined_logs.splitlines(), combined_logs_len))) if failfast: logging.debug("Early exiting after test failure") break print_results(test_results, max_len_name, (int(time.time() - start_time))) if coverage: coverage.report_rpc_coverage() logging.debug("Cleaning up coverage data") coverage.cleanup() # Clear up the temp directory if all subdirectories are gone if not os.listdir(tmpdir): os.rmdir(tmpdir) all_passed = all(map(lambda test_result: test_result.was_successful, test_results)) # This will be a no-op unless failfast is True in which case there may be dangling # processes which need to be killed. job_queue.kill_and_join() sys.exit(not all_passed) def print_results(test_results, max_len_name, runtime): results = "\n" + BOLD[1] + "%s | %s | %s\n\n" % ("TEST".ljust(max_len_name), "STATUS ", "DURATION") + BOLD[0] test_results.sort(key=TestResult.sort_key) all_passed = True time_sum = 0 for test_result in test_results: all_passed = all_passed and test_result.was_successful time_sum += test_result.time test_result.padding = max_len_name results += str(test_result) status = TICK + "Passed" if all_passed else CROSS + "Failed" if not all_passed: results += RED[1] results += BOLD[1] + "\n%s | %s | %s s (accumulated) \n" % ("ALL".ljust(max_len_name), status.ljust(9), time_sum) + BOLD[0] if not all_passed: results += RED[0] results += "Runtime: %s s\n" % (runtime) print(results) class TestHandler: """ Trigger the test scripts passed in via the list. 
""" def __init__(self, *, num_tests_parallel, tests_dir, tmpdir, test_list, flags, timeout_duration): assert num_tests_parallel >= 1 self.num_jobs = num_tests_parallel self.tests_dir = tests_dir self.tmpdir = tmpdir self.timeout_duration = timeout_duration self.test_list = test_list self.flags = flags self.num_running = 0 self.jobs = [] def get_next(self): while self.num_running < self.num_jobs and self.test_list: # Add tests self.num_running += 1 test = self.test_list.pop(0) portseed = len(self.test_list) portseed_arg = ["--portseed={}".format(portseed)] log_stdout = tempfile.SpooledTemporaryFile(max_size=2**16) log_stderr = tempfile.SpooledTemporaryFile(max_size=2**16) test_argv = test.split() testdir = "{}/{}_{}".format(self.tmpdir, re.sub(".py$", "", test_argv[0]), portseed) tmpdir_arg = ["--tmpdir={}".format(testdir)] self.jobs.append((test, time.time(), subprocess.Popen([sys.executable, self.tests_dir + test_argv[0]] + test_argv[1:] + self.flags + portseed_arg + tmpdir_arg, universal_newlines=True, stdout=log_stdout, stderr=log_stderr), testdir, log_stdout, log_stderr)) if not self.jobs: raise IndexError('pop from empty list') dot_count = 0 while True: # Return first proc that finishes time.sleep(.5) for job in self.jobs: (name, start_time, proc, testdir, log_out, log_err) = job if int(time.time() - start_time) > self.timeout_duration: # In travis, timeout individual tests (to stop tests hanging and not providing useful output). proc.send_signal(signal.SIGINT) if proc.poll() is not None: log_out.seek(0), log_err.seek(0) [stdout, stderr] = [log_file.read().decode('utf-8') for log_file in (log_out, log_err)] log_out.close(), log_err.close() if proc.returncode == TEST_EXIT_PASSED and stderr == "": status = "Passed" elif proc.returncode == TEST_EXIT_SKIPPED: status = "Skipped" else: status = "Failed" self.num_running -= 1 self.jobs.remove(job) clearline = '\r' + (' ' * dot_count) + '\r' print(clearline, end='', flush=True) dot_count = 0 return TestResult(name, status, int(time.time() - start_time)), testdir, stdout, stderr print('.', end='', flush=True) dot_count += 1 def kill_and_join(self): """Send SIGKILL to all jobs and block until all have ended.""" procs = [i[2] for i in self.jobs] for proc in procs: proc.kill() for proc in procs: proc.wait() class TestResult(): def __init__(self, name, status, time): self.name = name self.status = status self.time = time self.padding = 0 def sort_key(self): if self.status == "Passed": return 0, self.name.lower() elif self.status == "Failed": return 2, self.name.lower() elif self.status == "Skipped": return 1, self.name.lower() def __repr__(self): if self.status == "Passed": color = GREEN glyph = TICK elif self.status == "Failed": color = RED glyph = CROSS elif self.status == "Skipped": color = GREY glyph = CIRCLE return color[1] + "%s | %s%s | %s s\n" % (self.name.ljust(self.padding), glyph, self.status.ljust(7), self.time) + color[0] @property def was_successful(self): return self.status != "Failed" def check_script_prefixes(): """Check that test scripts start with one of the allowed name prefixes.""" good_prefixes_re = re.compile("(example|feature|interface|mempool|mining|omni|p2p|rpc|wallet|tool)_") bad_script_names = [script for script in ALL_SCRIPTS if good_prefixes_re.match(script) is None] if bad_script_names: print("%sERROR:%s %d tests not meeting naming conventions:" % (BOLD[1], BOLD[0], len(bad_script_names))) print(" %s" % ("\n ".join(sorted(bad_script_names)))) raise AssertionError("Some tests are not following naming convention!") def 
check_script_list(*, src_dir, fail_on_warn): """Check scripts directory. Check that there are no scripts in the functional tests directory which are not being run by pull-tester.py.""" script_dir = src_dir + '/test/functional/' python_files = set([test_file for test_file in os.listdir(script_dir) if test_file.endswith(".py")]) missed_tests = list(python_files - set(map(lambda x: x.split()[0], ALL_SCRIPTS + NON_SCRIPTS))) if len(missed_tests) != 0: print("%sWARNING!%s The following scripts are not being run: %s. Check the test lists in test_runner.py." % (BOLD[1], BOLD[0], str(missed_tests))) if fail_on_warn: # On travis this warning is an error to prevent merging incomplete commits into master sys.exit(1) class RPCCoverage(): """ Coverage reporting utilities for test_runner. Coverage calculation works by having each test script subprocess write coverage files into a particular directory. These files contain the RPC commands invoked during testing, as well as a complete listing of RPC commands per `litecoin-cli help` (`rpc_interface.txt`). After all tests complete, the commands run are combined and diff'd against the complete list to calculate uncovered RPC commands. See also: test/functional/test_framework/coverage.py """ def __init__(self): self.dir = tempfile.mkdtemp(prefix="coverage") self.flag = '--coveragedir=%s' % self.dir def report_rpc_coverage(self): """ Print out RPC commands that were unexercised by tests. """ uncovered = self._get_uncovered_rpc_commands() if uncovered: print("Uncovered RPC commands:") print("".join((" - %s\n" % command) for command in sorted(uncovered))) else: print("All RPC commands covered.") def cleanup(self): return shutil.rmtree(self.dir) def _get_uncovered_rpc_commands(self): """ Return a set of currently untested RPC commands. """ # This is shared from `test/functional/test-framework/coverage.py` reference_filename = 'rpc_interface.txt' coverage_file_prefix = 'coverage.' coverage_ref_filename = os.path.join(self.dir, reference_filename) coverage_filenames = set() all_cmds = set() covered_cmds = set() if not os.path.isfile(coverage_ref_filename): raise RuntimeError("No coverage reference found") with open(coverage_ref_filename, 'r', encoding="utf8") as coverage_ref_file: all_cmds.update([line.strip() for line in coverage_ref_file.readlines()]) for root, _, files in os.walk(self.dir): for filename in files: if filename.startswith(coverage_file_prefix): coverage_filenames.add(os.path.join(root, filename)) for filename in coverage_filenames: with open(filename, 'r', encoding="utf8") as coverage_file: covered_cmds.update([line.strip() for line in coverage_file.readlines()]) return all_cmds - covered_cmds if __name__ == '__main__': main()
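

# Hedged sketch: a tiny, self-contained illustration of how TestResult.sort_key
# orders results (passed first, then skipped, then failed, alphabetically within
# each group). The test names below are placeholders, not real scripts.
def _demo_result_ordering():
    demo = [
        TestResult("wallet_basic.py", "Failed", 12),
        TestResult("rpc_help.py", "Passed", 3),
        TestResult("feature_pruning.py", "Skipped", 0),
    ]
    demo.sort(key=TestResult.sort_key)
    # Expected order: rpc_help.py (Passed), feature_pruning.py (Skipped),
    # wallet_basic.py (Failed)
    return [(r.name, r.status) for r in demo]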
the-stack_0_14220
from nonebot import on_request, get_driver
from nonebot.adapters.onebot.v11.bot import Bot
from nonebot.adapters.onebot.v11.event import GroupRequestEvent, FriendRequestEvent
from nonebot.adapters.onebot.v11.message import Message

try:
    master = get_driver().config.master
except Exception:
    master = []

add_req = on_request()


@add_req.handle()
async def group_add(bot: Bot, event: GroupRequestEvent):
    '''Group join request'''
    if event.sub_type == "add":
        if str(event.comment) == 'ATRI -My Dear Moments-':
            await bot.set_group_add_request(flag=event.flag, sub_type='add', approve=True)
    elif event.sub_type == "invite":
        if event.user_id in master:
            await bot.set_group_add_request(flag=event.flag, sub_type='invite', approve=True)
        else:
            await bot.set_group_add_request(flag=event.flag, sub_type='invite', approve=False)


add_friend_req = on_request()


@add_friend_req.handle()
async def friend_add(bot: Bot, event: FriendRequestEvent):
    '''Friend add request'''
    pass
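
# Hedged configuration sketch: `master` above is assumed to be read from the
# bot's NoneBot2 .env config, matching the `config.master` attribute accessed
# at import time; the QQ id below is a placeholder, not a real account.
#
#   # .env.prod
#   MASTER=[123456789]
#
# With that set, group invitations from ids in `master` are approved by the
# handler above and all other invitations are declined.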
the-stack_0_14221
from functools import partial import threading import os from kivy.app import App from kivy.clock import Clock from kivy.lang import Builder from kivy.properties import ObjectProperty, StringProperty, OptionProperty from kivy.core.window import Window from kivy.uix.button import Button from kivy.utils import platform from kivy.uix.widget import Widget from kivy.core.window import Window from kivy.clock import Clock from kivy.utils import platform from electrum_ltc.base_wizard import BaseWizard from electrum_ltc.util import is_valid_email from . import EventsDialog from ...i18n import _ from .password_dialog import PasswordDialog # global Variables is_test = (platform == "linux") test_seed = "time taxi field recycle tiny license olive virus report rare steel portion achieve" test_seed = "grape impose jazz bind spatial mind jelly tourist tank today holiday stomach" test_xpub = "xpub661MyMwAqRbcEbvVtRRSjqxVnaWVUMewVzMiURAKyYratih4TtBpMypzzefmv8zUNebmNVzB3PojdC5sV2P9bDgMoo9B3SARw1MXUUfU1GL" Builder.load_string(''' #:import Window kivy.core.window.Window #:import _ electrum_ltc_gui.kivy.i18n._ <WizardTextInput@TextInput> border: 4, 4, 4, 4 font_size: '15sp' padding: '15dp', '15dp' background_color: (1, 1, 1, 1) if self.focus else (0.454, 0.698, 0.909, 1) foreground_color: (0.31, 0.31, 0.31, 1) if self.focus else (0.835, 0.909, 0.972, 1) hint_text_color: self.foreground_color background_active: 'atlas://gui/kivy/theming/light/create_act_text_active' background_normal: 'atlas://gui/kivy/theming/light/create_act_text_active' size_hint_y: None height: '48sp' <WizardButton@Button>: root: None size_hint: 1, None height: '48sp' on_press: if self.root: self.root.dispatch('on_press', self) on_release: if self.root: self.root.dispatch('on_release', self) <BigLabel@Label> color: .854, .925, .984, 1 size_hint: 1, None text_size: self.width, None height: self.texture_size[1] bold: True <-WizardDialog> text_color: .854, .925, .984, 1 value: '' #auto_dismiss: False size_hint: None, None canvas.before: Color: rgba: .239, .588, .882, 1 Rectangle: size: Window.size crcontent: crcontent # add electrum icon BoxLayout: orientation: 'vertical' if self.width < self.height else 'horizontal' padding: min(dp(27), self.width/32), min(dp(27), self.height/32),\ min(dp(27), self.width/32), min(dp(27), self.height/32) spacing: '10dp' GridLayout: id: grid_logo cols: 1 pos_hint: {'center_y': .5} size_hint: 1, None height: self.minimum_height Label: color: root.text_color text: 'ELECTRUM' size_hint: 1, None height: self.texture_size[1] if self.opacity else 0 font_size: '33sp' font_name: 'gui/kivy/data/fonts/tron/Tr2n.ttf' GridLayout: cols: 1 id: crcontent spacing: '1dp' Widget: size_hint: 1, 0.3 GridLayout: rows: 1 spacing: '12dp' size_hint: 1, None height: self.minimum_height WizardButton: id: back text: _('Back') root: root WizardButton: id: next text: _('Next') root: root disabled: root.value == '' <WizardMultisigDialog> value: 'next' Widget size_hint: 1, 1 Label: color: root.text_color size_hint: 1, None text_size: self.width, None height: self.texture_size[1] text: _("Choose the number of signatures needed to unlock funds in your wallet") Widget size_hint: 1, 1 GridLayout: orientation: 'vertical' cols: 2 spacing: '14dp' size_hint: 1, 1 height: self.minimum_height Label: color: root.text_color text: _('From {} cosigners').format(n.value) Slider: id: n range: 2, 5 step: 1 value: 2 Label: color: root.text_color text: _('Require {} signatures').format(m.value) Slider: id: m range: 1, n.value step: 1 value: 2 
<WizardChoiceDialog> message : '' Widget: size_hint: 1, 1 Label: color: root.text_color size_hint: 1, None text_size: self.width, None height: self.texture_size[1] text: root.message Widget size_hint: 1, 1 GridLayout: row_default_height: '48dp' orientation: 'vertical' id: choices cols: 1 spacing: '14dp' size_hint: 1, None <WizardConfirmDialog> message : '' Widget: size_hint: 1, 1 Label: color: root.text_color size_hint: 1, None text_size: self.width, None height: self.texture_size[1] text: root.message Widget size_hint: 1, 1 <WizardTOSDialog> message : '' size_hint: 1, 1 ScrollView: size_hint: 1, 1 TextInput: color: root.text_color size_hint: 1, None text_size: self.width, None height: self.minimum_height text: root.message disabled: True <WizardEmailDialog> Label: color: root.text_color size_hint: 1, None text_size: self.width, None height: self.texture_size[1] text: 'Please enter your email address' WizardTextInput: id: email on_text: Clock.schedule_once(root.on_text) multiline: False on_text_validate: Clock.schedule_once(root.on_enter) <WizardKnownOTPDialog> message : '' message2: '' Widget: size_hint: 1, 1 Label: color: root.text_color size_hint: 1, None text_size: self.width, None height: self.texture_size[1] text: root.message Widget size_hint: 1, 1 WizardTextInput: id: otp on_text: Clock.schedule_once(root.on_text) multiline: False on_text_validate: Clock.schedule_once(root.on_enter) Widget size_hint: 1, 1 Label: color: root.text_color size_hint: 1, None text_size: self.width, None height: self.texture_size[1] text: root.message2 Widget size_hint: 1, 1 height: '48sp' BoxLayout: orientation: 'horizontal' WizardButton: id: cb text: _('Request new secret') on_release: root.request_new_secret() size_hint: 1, None WizardButton: id: abort text: _('Abort creation') on_release: root.abort_wallet_creation() size_hint: 1, None <WizardNewOTPDialog> message : '' message2 : '' Label: color: root.text_color size_hint: 1, None text_size: self.width, None height: self.texture_size[1] text: root.message QRCodeWidget: id: qr size_hint: 1, 1 Label: color: root.text_color size_hint: 1, None text_size: self.width, None height: self.texture_size[1] text: root.message2 WizardTextInput: id: otp on_text: Clock.schedule_once(root.on_text) multiline: False on_text_validate: Clock.schedule_once(root.on_enter) <MButton@Button>: size_hint: 1, None height: '33dp' on_release: self.parent.update_amount(self.text) <WordButton@Button>: size_hint: None, None padding: '5dp', '5dp' text_size: None, self.height width: self.texture_size[0] height: '30dp' on_release: self.parent.new_word(self.text) <SeedButton@Button>: height: dp(100) border: 4, 4, 4, 4 halign: 'justify' valign: 'top' font_size: '18dp' text_size: self.width - dp(24), self.height - dp(12) color: .1, .1, .1, 1 background_normal: 'atlas://gui/kivy/theming/light/white_bg_round_top' background_down: self.background_normal size_hint_y: None <SeedLabel@Label>: font_size: '12sp' text_size: self.width, None size_hint: 1, None height: self.texture_size[1] halign: 'justify' valign: 'middle' border: 4, 4, 4, 4 <RestoreSeedDialog> message: '' word: '' BigLabel: text: "ENTER YOUR SEED PHRASE" GridLayout cols: 1 padding: 0, '12dp' orientation: 'vertical' spacing: '12dp' size_hint: 1, None height: self.minimum_height SeedButton: id: text_input_seed text: '' on_text: Clock.schedule_once(root.on_text) on_release: root.options_dialog() SeedLabel: text: root.message BoxLayout: id: suggestions height: '35dp' size_hint: 1, None new_word: root.on_word BoxLayout: id: line1 
update_amount: root.update_text size_hint: 1, None height: '30dp' MButton: text: 'Q' MButton: text: 'W' MButton: text: 'E' MButton: text: 'R' MButton: text: 'T' MButton: text: 'Y' MButton: text: 'U' MButton: text: 'I' MButton: text: 'O' MButton: text: 'P' BoxLayout: id: line2 update_amount: root.update_text size_hint: 1, None height: '30dp' Widget: size_hint: 0.5, None height: '33dp' MButton: text: 'A' MButton: text: 'S' MButton: text: 'D' MButton: text: 'F' MButton: text: 'G' MButton: text: 'H' MButton: text: 'J' MButton: text: 'K' MButton: text: 'L' Widget: size_hint: 0.5, None height: '33dp' BoxLayout: id: line3 update_amount: root.update_text size_hint: 1, None height: '30dp' Widget: size_hint: 1, None MButton: text: 'Z' MButton: text: 'X' MButton: text: 'C' MButton: text: 'V' MButton: text: 'B' MButton: text: 'N' MButton: text: 'M' MButton: text: ' ' MButton: text: '<' <AddXpubDialog> title: '' message: '' BigLabel: text: root.title GridLayout cols: 1 padding: 0, '12dp' orientation: 'vertical' spacing: '12dp' size_hint: 1, None height: self.minimum_height SeedButton: id: text_input text: '' on_text: Clock.schedule_once(root.check_text) SeedLabel: text: root.message GridLayout rows: 1 spacing: '12dp' size_hint: 1, None height: self.minimum_height IconButton: id: scan height: '48sp' on_release: root.scan_xpub() icon: 'atlas://gui/kivy/theming/light/camera' size_hint: 1, None WizardButton: text: _('Paste') on_release: root.do_paste() WizardButton: text: _('Clear') on_release: root.do_clear() <ShowXpubDialog> xpub: '' message: _('Here is your master public key. Share it with your cosigners.') BigLabel: text: "MASTER PUBLIC KEY" GridLayout cols: 1 padding: 0, '12dp' orientation: 'vertical' spacing: '12dp' size_hint: 1, None height: self.minimum_height SeedButton: id: text_input text: root.xpub SeedLabel: text: root.message GridLayout rows: 1 spacing: '12dp' size_hint: 1, None height: self.minimum_height WizardButton: text: _('QR code') on_release: root.do_qr() WizardButton: text: _('Copy') on_release: root.do_copy() WizardButton: text: _('Share') on_release: root.do_share() <ShowSeedDialog> spacing: '12dp' value: 'next' BigLabel: text: "PLEASE WRITE DOWN YOUR SEED PHRASE" GridLayout: id: grid cols: 1 pos_hint: {'center_y': .5} size_hint_y: None height: self.minimum_height orientation: 'vertical' spacing: '12dp' SeedButton: text: root.seed_text on_release: root.options_dialog() SeedLabel: text: root.message <LineDialog> BigLabel: text: root.title SeedLabel: text: root.message TextInput: id: passphrase_input multiline: False size_hint: 1, None height: '27dp' SeedLabel: text: root.warning ''') class WizardDialog(EventsDialog): ''' Abstract dialog to be used as the base for all Create Account Dialogs ''' crcontent = ObjectProperty(None) def __init__(self, wizard, **kwargs): super(WizardDialog, self).__init__() self.wizard = wizard self.ids.back.disabled = not wizard.can_go_back() self.app = App.get_running_app() self.run_next = kwargs['run_next'] _trigger_size_dialog = Clock.create_trigger(self._size_dialog) Window.bind(size=_trigger_size_dialog, rotation=_trigger_size_dialog) _trigger_size_dialog() self._on_release = False def _size_dialog(self, dt): app = App.get_running_app() if app.ui_mode[0] == 'p': self.size = Window.size else: #tablet if app.orientation[0] == 'p': #portrait self.size = Window.size[0]/1.67, Window.size[1]/1.4 else: self.size = Window.size[0]/2.5, Window.size[1] def add_widget(self, widget, index=0): if not self.crcontent: super(WizardDialog, self).add_widget(widget) 
else: self.crcontent.add_widget(widget, index=index) def on_dismiss(self): app = App.get_running_app() if app.wallet is None and not self._on_release: app.stop() def get_params(self, button): return (None,) def on_release(self, button): self._on_release = True self.close() if not button: self.parent.dispatch('on_wizard_complete', None) return if button is self.ids.back: self.wizard.go_back() return params = self.get_params(button) self.run_next(*params) class WizardMultisigDialog(WizardDialog): def get_params(self, button): m = self.ids.m.value n = self.ids.n.value return m, n class WizardOTPDialogBase(WizardDialog): def get_otp(self): otp = self.ids.otp.text if len(otp) != 6: return try: return int(otp) except: return def on_text(self, dt): self.ids.next.disabled = self.get_otp() is None def on_enter(self, dt): # press next next = self.ids.next if not next.disabled: next.dispatch('on_release') class WizardKnownOTPDialog(WizardOTPDialogBase): def __init__(self, wizard, **kwargs): WizardOTPDialogBase.__init__(self, wizard, **kwargs) self.message = _("This wallet is already registered with TrustedCoin. To finalize wallet creation, please enter your Google Authenticator Code.") self.message2 =_("If you have lost your Google Authenticator account, you can request a new secret. You will need to retype your seed.") self.request_new = False def get_params(self, button): return (self.get_otp(), self.request_new) def request_new_secret(self): self.request_new = True self.on_release(True) def abort_wallet_creation(self): self._on_release = True os.unlink(self.wizard.storage.path) self.wizard.terminate() self.dismiss() class WizardNewOTPDialog(WizardOTPDialogBase): def __init__(self, wizard, **kwargs): WizardOTPDialogBase.__init__(self, wizard, **kwargs) otp_secret = kwargs['otp_secret'] uri = "otpauth://totp/%s?secret=%s"%('trustedcoin.com', otp_secret) self.message = "Please scan the following QR code in Google Authenticator. 
You may also use the secret key: %s"%otp_secret self.message2 = _('Then, enter your Google Authenticator code:') self.ids.qr.set_data(uri) def get_params(self, button): return (self.get_otp(), False) class WizardTOSDialog(WizardDialog): def __init__(self, wizard, **kwargs): WizardDialog.__init__(self, wizard, **kwargs) self.ids.next.text = 'Accept' self.ids.next.disabled = False self.message = kwargs['tos'] self.message2 = _('Enter your email address:') class WizardEmailDialog(WizardDialog): def get_params(self, button): return (self.ids.email.text,) def on_text(self, dt): self.ids.next.disabled = not is_valid_email(self.ids.email.text) def on_enter(self, dt): # press next next = self.ids.next if not next.disabled: next.dispatch('on_release') class WizardConfirmDialog(WizardDialog): def __init__(self, wizard, **kwargs): super(WizardConfirmDialog, self).__init__(wizard, **kwargs) self.message = kwargs.get('message', '') self.value = 'ok' def on_parent(self, instance, value): if value: app = App.get_running_app() self._back = _back = partial(app.dispatch, 'on_back') def get_params(self, button): return (True,) class WizardChoiceDialog(WizardDialog): def __init__(self, wizard, **kwargs): super(WizardChoiceDialog, self).__init__(wizard, **kwargs) self.message = kwargs.get('message', '') choices = kwargs.get('choices', []) layout = self.ids.choices layout.bind(minimum_height=layout.setter('height')) for action, text in choices: l = WizardButton(text=text) l.action = action l.height = '48dp' l.root = self layout.add_widget(l) def on_parent(self, instance, value): if value: app = App.get_running_app() self._back = _back = partial(app.dispatch, 'on_back') def get_params(self, button): return (button.action,) class LineDialog(WizardDialog): title = StringProperty('') message = StringProperty('') warning = StringProperty('') def __init__(self, wizard, **kwargs): WizardDialog.__init__(self, wizard, **kwargs) self.ids.next.disabled = False def get_params(self, b): return (self.ids.passphrase_input.text,) class ShowSeedDialog(WizardDialog): seed_text = StringProperty('') message = _("If you forget your PIN or lose your device, your seed phrase will be the only way to recover your funds.") ext = False def __init__(self, wizard, **kwargs): super(ShowSeedDialog, self).__init__(wizard, **kwargs) self.seed_text = kwargs['seed_text'] def on_parent(self, instance, value): if value: app = App.get_running_app() self._back = _back = partial(self.ids.back.dispatch, 'on_release') def options_dialog(self): from .seed_options import SeedOptionsDialog def callback(status): self.ext = status d = SeedOptionsDialog(self.ext, callback) d.open() def get_params(self, b): return (self.ext,) class WordButton(Button): pass class WizardButton(Button): pass class RestoreSeedDialog(WizardDialog): def __init__(self, wizard, **kwargs): super(RestoreSeedDialog, self).__init__(wizard, **kwargs) self._test = kwargs['test'] from electrum_ltc.mnemonic import Mnemonic from electrum_ltc.old_mnemonic import words as old_wordlist self.words = set(Mnemonic('en').wordlist).union(set(old_wordlist)) self.ids.text_input_seed.text = test_seed if is_test else '' self.message = _('Please type your seed phrase using the virtual keyboard.') self.title = _('Enter Seed') self.ext = False def options_dialog(self): from .seed_options import SeedOptionsDialog def callback(status): self.ext = status d = SeedOptionsDialog(self.ext, callback) d.open() def get_suggestions(self, prefix): for w in self.words: if w.startswith(prefix): yield w def on_text(self, 
dt): self.ids.next.disabled = not bool(self._test(self.get_text())) text = self.ids.text_input_seed.text if not text: last_word = '' elif text[-1] == ' ': last_word = '' else: last_word = text.split(' ')[-1] enable_space = False self.ids.suggestions.clear_widgets() suggestions = [x for x in self.get_suggestions(last_word)] if last_word in suggestions: b = WordButton(text=last_word) self.ids.suggestions.add_widget(b) enable_space = True for w in suggestions: if w != last_word and len(suggestions) < 10: b = WordButton(text=w) self.ids.suggestions.add_widget(b) i = len(last_word) p = set() for x in suggestions: if len(x)>i: p.add(x[i]) for line in [self.ids.line1, self.ids.line2, self.ids.line3]: for c in line.children: if isinstance(c, Button): if c.text in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ': c.disabled = (c.text.lower() not in p) and bool(last_word) elif c.text == ' ': c.disabled = not enable_space def on_word(self, w): text = self.get_text() words = text.split(' ') words[-1] = w text = ' '.join(words) self.ids.text_input_seed.text = text + ' ' self.ids.suggestions.clear_widgets() def get_text(self): ti = self.ids.text_input_seed return ' '.join(ti.text.strip().split()) def update_text(self, c): c = c.lower() text = self.ids.text_input_seed.text if c == '<': text = text[:-1] else: text += c self.ids.text_input_seed.text = text def on_parent(self, instance, value): if value: tis = self.ids.text_input_seed tis.focus = True #tis._keyboard.bind(on_key_down=self.on_key_down) self._back = _back = partial(self.ids.back.dispatch, 'on_release') app = App.get_running_app() def on_key_down(self, keyboard, keycode, key, modifiers): if keycode[0] in (13, 271): self.on_enter() return True def on_enter(self): #self._remove_keyboard() # press next next = self.ids.next if not next.disabled: next.dispatch('on_release') def _remove_keyboard(self): tis = self.ids.text_input_seed if tis._keyboard: tis._keyboard.unbind(on_key_down=self.on_key_down) tis.focus = False def get_params(self, b): return (self.get_text(), False, self.ext) class ConfirmSeedDialog(RestoreSeedDialog): def get_params(self, b): return (self.get_text(),) def options_dialog(self): pass class ShowXpubDialog(WizardDialog): def __init__(self, wizard, **kwargs): WizardDialog.__init__(self, wizard, **kwargs) self.xpub = kwargs['xpub'] self.ids.next.disabled = False def do_copy(self): self.app._clipboard.copy(self.xpub) def do_share(self): self.app.do_share(self.xpub, _("Master Public Key")) def do_qr(self): from .qr_dialog import QRDialog popup = QRDialog(_("Master Public Key"), self.xpub, True) popup.open() class AddXpubDialog(WizardDialog): def __init__(self, wizard, **kwargs): WizardDialog.__init__(self, wizard, **kwargs) self.is_valid = kwargs['is_valid'] self.title = kwargs['title'] self.message = kwargs['message'] self.allow_multi = kwargs.get('allow_multi', False) def check_text(self, dt): self.ids.next.disabled = not bool(self.is_valid(self.get_text())) def get_text(self): ti = self.ids.text_input return ti.text.strip() def get_params(self, button): return (self.get_text(),) def scan_xpub(self): def on_complete(text): if self.allow_multi: self.ids.text_input.text += text + '\n' else: self.ids.text_input.text = text self.app.scan_qr(on_complete) def do_paste(self): self.ids.text_input.text = test_xpub if is_test else self.app._clipboard.paste() def do_clear(self): self.ids.text_input.text = '' class InstallWizard(BaseWizard, Widget): ''' events:: `on_wizard_complete` Fired when the wizard is done creating/ restoring wallet/s. 
''' __events__ = ('on_wizard_complete', ) def on_wizard_complete(self, wallet): """overriden by main_window""" pass def waiting_dialog(self, task, msg, on_finished=None): '''Perform a blocking task in the background by running the passed method in a thread. ''' def target(): # run your threaded function try: task() except Exception as err: self.show_error(str(err)) # on completion hide message Clock.schedule_once(lambda dt: app.info_bubble.hide(now=True), -1) if on_finished: Clock.schedule_once(lambda dt: on_finished(), -1) app = App.get_running_app() app.show_info_bubble( text=msg, icon='atlas://gui/kivy/theming/light/important', pos=Window.center, width='200sp', arrow_pos=None, modal=True) t = threading.Thread(target = target) t.start() def terminate(self, **kwargs): self.dispatch('on_wizard_complete', self.wallet) def choice_dialog(self, **kwargs): choices = kwargs['choices'] if len(choices) > 1: WizardChoiceDialog(self, **kwargs).open() else: f = kwargs['run_next'] f(choices[0][0]) def multisig_dialog(self, **kwargs): WizardMultisigDialog(self, **kwargs).open() def show_seed_dialog(self, **kwargs): ShowSeedDialog(self, **kwargs).open() def line_dialog(self, **kwargs): LineDialog(self, **kwargs).open() def confirm_seed_dialog(self, **kwargs): kwargs['title'] = _('Confirm Seed') kwargs['message'] = _('Please retype your seed phrase, to confirm that you properly saved it') ConfirmSeedDialog(self, **kwargs).open() def restore_seed_dialog(self, **kwargs): RestoreSeedDialog(self, **kwargs).open() def confirm_dialog(self, **kwargs): WizardConfirmDialog(self, **kwargs).open() def tos_dialog(self, **kwargs): WizardTOSDialog(self, **kwargs).open() def email_dialog(self, **kwargs): WizardEmailDialog(self, **kwargs).open() def otp_dialog(self, **kwargs): if kwargs['otp_secret']: WizardNewOTPDialog(self, **kwargs).open() else: WizardKnownOTPDialog(self, **kwargs).open() def add_xpub_dialog(self, **kwargs): kwargs['message'] += ' ' + _('Use the camera button to scan a QR code.') AddXpubDialog(self, **kwargs).open() def add_cosigner_dialog(self, **kwargs): kwargs['title'] = _("Add Cosigner") + " %d"%kwargs['index'] kwargs['message'] = _('Please paste your cosigners master public key, or scan it using the camera button.') AddXpubDialog(self, **kwargs).open() def show_xpub_dialog(self, **kwargs): ShowXpubDialog(self, **kwargs).open() def show_message(self, msg): self.show_error(msg) def show_error(self, msg): app = App.get_running_app() Clock.schedule_once(lambda dt: app.show_error(msg)) def request_password(self, run_next, force_disable_encrypt_cb=False): def on_success(old_pin, pin): assert old_pin is None run_next(pin, False) def on_failure(): self.show_error(_('PIN mismatch')) self.run('request_password', run_next) popup = PasswordDialog() app = App.get_running_app() popup.init(app, None, _('Choose PIN code'), on_success, on_failure, is_change=2) popup.open() def action_dialog(self, action, run_next): f = getattr(self, action) f()
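
# Hedged usage sketch (illustrative only): a main window is assumed to consume
# the wizard's completion event roughly like the binding below; `on_wallet_ready`
# is a placeholder name, not an Electrum-LTC API. Only the event name
# `on_wizard_complete` comes from the class above.
#
#   wizard.bind(on_wizard_complete=lambda wiz, wallet: on_wallet_ready(wallet))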
the-stack_0_14223
from urllib.request import urlopen, Request from urllib.parse import urlparse from bs4 import BeautifulSoup import re import socket import smtplib import dns.resolver import csv import multiprocessing TIMEOUT = 120 in_path = "alchemist_accelerator.csv" out_path = "alchemist_accelerator_result.csv" headers = {"User-Agent": "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; Trident/5.0)"} def getinternalLinks(bsObj, includeUrl): includeUrl = urlparse(includeUrl).scheme + "://" + urlparse(includeUrl).netloc internalLinks = [] for link in bsObj.findAll("a", href=re.compile("^(/|.*" + includeUrl + ")")): if link.attrs['href'] is not None: if link.attrs['href'].startswith("/"): internalLinks.append(includeUrl + link.attrs['href']) else: internalLinks.append(link.attrs['href']) return internalLinks def getAllInternalLinks(siteUrl): try: req = Request(siteUrl, headers=headers) html = urlopen(req, timeout=20) domain = urlparse(siteUrl).scheme + "://" + urlparse(siteUrl).netloc bsObj = BeautifulSoup(html, "html.parser") internalLinks = getinternalLinks(bsObj, domain) for link in internalLinks: if link not in allIntLinks: allIntLinks.add(link) print(link) except: pass def verify_email(email): records = dns.resolver.query(email.split('@')[-1], 'MX') mxRecord = records[0].exchange mxRecord = str(mxRecord) host = socket.gethostname() server = smtplib.SMTP() server.set_debuglevel(0) server.connect(mxRecord) server.helo(host) server.mail('[email protected]') code, message = server.rcpt(str(email)) server.quit() if code == 250: return True else: return False def extractEmails(allIntLinks, return_dict): for intLink in allIntLinks: try: req = Request(intLink, headers=headers) html = urlopen(req, timeout=20).read().decode("utf-8") regex = r"([a-zA-Z0-9_.+-]+@[a-pr-zA-PRZ0-9-]+\.[a-zA-Z0-9-.]+)" for email in re.findall(regex, html): email = email.lower() if email not in allEmails: if not (email.endswith(('.', '.png', '.jpg', '.JPG', '.jpeg', '.gif', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '.x', '.webm', '.webp', '.svg', "example.com", "email.com", "yourdomain.com", "yourself.com", "domain.com")) or "sentry" in email): # if verify_email(email): # takes a long time allEmails.add(email) except: pass return_dict[0] = "\n".join(list(allEmails)) manager = multiprocessing.Manager() return_dict = manager.dict() with open(in_path, "r") as f: fieldnames = ['portfolio', 'website', 'year', 'summary'] reader = csv.DictReader(f, fieldnames=fieldnames) next(reader) with open(out_path, "w") as f1: fieldnames1 = ['portfolio', 'website', 'year', 'emails', 'summary'] writer = csv.DictWriter(f1, fieldnames=fieldnames1) writer.writeheader() idx = 1 for row in reader: print(str(idx) + ". " + row['portfolio']) allIntLinks = set() allEmails = set() return_dict[0] = "" print(row['website']) if len(row['website']): allIntLinks.add(row['website']) getAllInternalLinks(row['website']) p = multiprocessing.Process(target=extractEmails, args=(allIntLinks, return_dict)) p.start() p.join(TIMEOUT) if p.is_alive(): print("Time out!") p.terminate() emails = return_dict.values()[0] row['emails'] = emails row['portfolio'] = row['portfolio'].strip() writer.writerow(row) f1.flush() print(emails) idx += 1
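

# Hedged sketch: the same e-mail regex used in extractEmails, applied to an
# in-memory string so the extraction step can be checked without any network
# access. The sample addresses are made up.
def _demo_extract_emails():
    regex = r"([a-zA-Z0-9_.+-]+@[a-pr-zA-PRZ0-9-]+\.[a-zA-Z0-9-.]+)"
    sample = "Contact press@example.com or sales@example.org for details."
    return re.findall(regex, sample)  # ['press@example.com', 'sales@example.org']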
the-stack_0_14224
#!/usr/bin/env python3 import subprocess import os import tempfile import shutil import codecs import sys import time import json def makeDirs(path) : os.makedirs(path, exist_ok = True) def writeFile(fileName, content) : file = codecs.open(fileName, "w", "utf-8") file.write(str(content)) file.close() def executeCommand(*args) : process = subprocess.Popen( args, stdin = subprocess.PIPE, stdout = subprocess.PIPE, stderr = subprocess.PIPE, universal_newlines=True ) cliStdout, cliStderr = process.communicate(input = None) returncode = process.poll() return (cliStdout + "\n" + cliStderr).strip() def decodeJson(content) : try : return json.loads(content) except Exception as e: return None def die(message) : print(message + "\n") sys.exit(1) def getNodePort(nodeIndex) : return 20000 + nodeIndex def getNodeRpcPort(nodeIndex) : return 30000 + nodeIndex def getNodeListenAddress(nodeIndex) : return '127.0.0.1:%d' % (getNodePort(nodeIndex)) def getNodeAlias(nodeIndex) : return 'node%d' % (nodeIndex) def checkError(result) : if result['error'] : print('executeCli error: %s, command: %s' % ( result['output'], result['command'] )) # command utility functions def getUnspent(node, address) : if address == None : return -1 json = '["' + address + '"]' result = node.executeCli('listunspent', 0, 999999, json) if result['error'] : print('getUnspent error: ' + result['output']) return -1 amount = 0 for item in result['json'] : amount += item['amount'] return amount def getBlockCount(node) : result = node.executeCli('getblockcount') if result['error'] : print('getBlockCount error: ' + result['output']) return None return result['json'] class Node : def __init__(self, app, nodeIndex) : self._app = app self._nodeIndex = nodeIndex self._nodePath = os.path.join(self._app.getRootPath(), 'node%d' % (nodeIndex)) self._rpcUser = 'rpcuser%d' % (nodeIndex) self._iqcashd = self._app.getIqcashd() self._iqcashCli = self._app.getIqcashCli() self._daemonProcess = None def createDataDir(self, nodeCount, masterNodePrivateKey = None) : makeDirs(self._nodePath) writeFile( os.path.join(self._nodePath, 'iqcash.conf'), self._generateIqcashConf(nodeCount, masterNodePrivateKey) ) def startNode(self) : self._daemonProcess = subprocess.Popen([ self._iqcashd, '-datadir=' + self._nodePath, '-daemon' ]) def stopNode(self) : self.executeCli('stop') time.sleep(0.1) if self._daemonProcess != None : self._daemonProcess.kill() time.sleep(2) def executeCli(self, *args) : normalizedArgs = [] for arg in args : normalizedArgs.append(str(arg)) output = executeCommand(self._iqcashCli, '-datadir=' + self._nodePath, *normalizedArgs) command = ' '.join(normalizedArgs) if output.find('error') >= 0 : return { 'error' : True, 'output' : output, 'json' : None, 'command' : command, } else : json = decodeJson(output) if json == None : json = output return { 'error' : False, 'output' : output, 'json' : json, 'command' : command, } def waitNodeStarting(self, timeoutSeconds = 15) : startTime = time.time() while time.time() - startTime < timeoutSeconds : if getBlockCount(self) != None : return True time.sleep(1) print('waitNodeStarting failed') return False def _generateIqcashConf(self, nodeCount, masterNodePrivateKey) : result = "" result += "regtest=1\n" result += "server=1\n" result += "debug=1\n" result += "debug=net\n" result += "debug=iqcash\n" result += "rpcuser=%s\n" % (self._rpcUser) result += "rpcpassword=%s\n" % (self._rpcPassword) result += "port=%d\n" % (getNodePort(self._nodeIndex)) result += "rpcport=%d\n" % (getNodeRpcPort(self._nodeIndex)) result 
+= "listenonion=0\n" result += "txindex=1\n" result += "externalip=%s\n" % getNodeListenAddress(self._nodeIndex) result += "budgetvotemode=suggest\n" for i in range(nodeCount) : if i == self._nodeIndex : continue result += "addnode=%s\n" % getNodeListenAddress(i) if masterNodePrivateKey != None : result += "masternode=1\n" result += "masternodeprivkey=%s\n" % (masterNodePrivateKey) result += "masternodeaddr=%s\n" % getNodeListenAddress(i) return result def writeMasterNodeConfig(self, config) : writeFile( os.path.join(self._nodePath, 'regtest', 'masternode.conf'), config ) def isMasterNodeSynced(self) : json = self.executeCli('mnsync', 'status')['json'] if json == None : return False if json['RequestedMasternodeAssets'] > 100 : return True return False def dataDirExist(self) : return os.path.exists(self._nodePath) class Application : def __init__(self) : self._nodeCount = 4 self._nodeList = [] self._budgetCycle = 864 self._removeFolderAfterExit = not True def run(self) : self._setup() try : self._doRun() finally : self._cleanup() def _setup(self) : self._rootPath = self._makeRootPath() makeDirs(self._rootPath) print('Root path: %s' % (self._rootPath)) self._iqcashd = os.getenv('IQCASHD', None) if not self._iqcashd : die('Undefined IQCASHD') self._iqcashCli = os.getenv('IQCASHCLI', None) if not self._iqcashCli : die('Undefined IQCASH') print('iqcashd: %s' % (self._iqcashd)) def _cleanup(self) : self._stopAllNodes() if self._removeFolderAfterExit : shutil.rmtree(self._rootPath) def _doRun(self) : self._createNodes() node = self._nodeList[0] self._mineBlocks(node, 200) address = node.executeCli('getnewaddress')['json'] address = node.executeCli('getnewaddress')['json'] print('Before budget: ' + str(getUnspent(node, address))) blockCount = getBlockCount(node) superBlock = blockCount - blockCount % self._budgetCycle + self._budgetCycle wtx = node.executeCli('preparebudget', 'ppp1', 'http://test1.com', 5, superBlock, address, 100)['json'] print('preparebudget: ' + wtx) self._mineBlocks(node, 100) hash = node.executeCli('submitbudget', 'ppp1', 'http://test1.com', 5, superBlock, address, 100, wtx)['json'] print('submitbudget: ' + hash) self._mineBlocks(node, 100) result = node.executeCli('getbudgetinfo') for i in range(1, self._nodeCount) : result = self._nodeList[i].executeCli('mnbudgetvote', 'local', hash, 'yes') print(result['output']) for i in range(self._nodeCount) : masterNode = self._nodeList[i] blockCount = getBlockCount(node) blocksToMine = self._budgetCycle - blockCount % self._budgetCycle - 1 if blocksToMine == 0 : blocksToMine = self._budgetCycle blocksToMine = blocksToMine - 1 self._mineBlocks(masterNode, blocksToMine) previousBlockCount = getBlockCount(masterNode) if previousBlockCount == None : continue self._mineBlocks(masterNode, 1) self._mineBlocks(masterNode, 1) self._mineBlocks(masterNode, 1) newBlockCount = getBlockCount(masterNode) print( 'During super block: previousBlockCount=%d newBlockCount=%d expect=%d' % (previousBlockCount, newBlockCount, previousBlockCount + 3) ) self._mineBlocks(masterNode, 100) #print(node.executeCli('getbudgetinfo')['output']) print('After budget: ' + str(getUnspent(node, address))) def _mineBlocks(self, node, count) : node.executeCli('setgenerate', 'true', count) self._syncAllNodes() def _createNodes(self) : nodesExist = True for i in range(0, self._nodeCount) : if not Node(self, i).dataDirExist() : nodesExist = False break if nodesExist : print("All nodes data dirs exist, resuming") for i in range(0, self._nodeCount) : node = Node(self, i) 
self._nodeList.append(node) else : controllingNode = self._createControllingNode() for i in range(1, self._nodeCount) : node = Node(self, i) self._nodeList.append(node) key = controllingNode['masterNodePrivateKeyList'][i - 1] node.createDataDir(self._nodeCount, key) for node in reversed(self._nodeList) : node.startNode() node.waitNodeStarting() for node in self._nodeList : self._mineBlocks(node, 200) time.sleep(3) self._syncMasterNodes() for i in range(1, self._nodeCount) : result = self._nodeList[0].executeCli('startmasternode', 'alias', 'false', getNodeAlias(i)) print(result['output']) def _createControllingNode(self) : node = Node(self, 0) self._nodeList.append(node) node.createDataDir(self._nodeCount) node.startNode() node.waitNodeStarting() self._mineBlocks(node, 200) masterNodePrivateKeyList = [] masterNodeConfig = '' for i in range(1, self._nodeCount) : key = node.executeCli('masternode', 'genkey')['json'] masterNodePrivateKeyList.append(key) nodeName = getNodeAlias(i) # Intended to generate address twice address = node.executeCli('getnewaddress')['json'] address = node.executeCli('getnewaddress')['json'] result = node.executeCli('sendtoaddress', address, 10000) #print(getUnspent(node, address)) checkError(result) tx = result['json'] outputs = node.executeCli('masternode', 'outputs') outputsList = outputs['json'] txIndex = 0 for o in outputsList : if o['txhash'] == tx : txIndex = o['outputidx'] break masterNodeConfig += "%s %s %s %s %s\n" % (nodeName, getNodeListenAddress(i), key, tx, str(txIndex)) self._mineBlocks(node, 100) node.writeMasterNodeConfig(masterNodeConfig) node.stopNode() print('Created controlling node') return { 'node' : node, 'masterNodePrivateKeyList' : masterNodePrivateKeyList, } def _stopAllNodes(self) : for node in self._nodeList : node.stopNode() def _syncAllNodes(self, timeoutSeconds = 60) : if not self._syncBlocks(timeoutSeconds) : return False if not self._syncMemPools(timeoutSeconds) : return False return True def _syncBlocks(self, timeoutSeconds) : startTime = time.time() printError = True while time.time() - startTime < timeoutSeconds : tips = [] for node in self._nodeList : result = node.executeCli('getbestblockhash') tips.append(result['json']) if tips[-1] == None : if printError : print('getbestblockhash error: %s' % (result['output'])) printError = False if tips == [ tips[0] ]*len(tips): return True time.sleep(1) print(tips) print('_syncBlocks failed') return False def _syncMemPools(self, timeoutSeconds) : startTime = time.time() while time.time() - startTime < timeoutSeconds : pool = set(self._nodeList[0].executeCli('getrawmempool')['json']) matchedCount = 1 for i in range(1, len(self._nodeList)): if set(self._nodeList[i].executeCli('getrawmempool')['json']) == pool : matchedCount = matchedCount + 1 if matchedCount == len(self._nodeList): return True time.sleep(1) print('_syncMemPools failed') return False def _syncMasterNodes(self, timeoutSeconds = 60) : return # it never works, don't waste time to try startTime = time.time() while timeoutSeconds < 0 or time.time() - startTime < timeoutSeconds : allSynced = True for i in range(1, len(self._nodeList)): if not self._nodeList[i].isMasterNodeSynced() : #print('MN %d status %s' % (i, self._nodeList[i].executeCli('mnsync', 'status')['output'])) allSynced = False break if allSynced : return True time.sleep(1) print('_syncMasterNodes failed') return False def _makeRootPath(self) : return '/tmp/testbudget/' return tempfile.mkdtemp( suffix = None, prefix = 'testbudget_', dir = None ) def getRootPath(self) : 
return self._rootPath def getIqcashd(self) : return self._iqcashd def getIqcashCli(self) : return self._iqcashCli if __name__ == '__main__': Application().run()
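
# Hedged worked example of the super-block arithmetic used in _doRun above,
# with the regtest budget cycle of 864 blocks set in __init__. For example,
# at a chain height of 200:
#
#   superBlock = blockCount - blockCount % 864 + 864
#              = 200 - 200 + 864
#              = 864
#
# i.e. the proposal is always scheduled for the next multiple of the budget
# cycle strictly after the current chain height.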
the-stack_0_14225
# -*- coding: utf-8 -*-

# Definition of item pipelines
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
from w3lib.html import remove_tags, remove_tags_with_content

from scrapy.exceptions import DropItem


class RemoveTagsPipeline(object):
    """Remove formatting tags (span, a, ...) and embedded scripts from the
    extracted paragraphs, then join them into a single text block."""
    def process_item(self, item, spider):
        ps = [remove_tags(remove_tags_with_content(p, ('script', ))).strip().replace(u'\xa0', u' ')
              for p in item['text']]
        item['text'] = '\n'.join(ps)

        # additional stripping for description
        if item['description']:
            item['description'] = item['description'].strip()
        return item


class DropIfEmptyFieldPipeline(object):
    """Drop items whose 'text' field is empty."""
    def process_item(self, item, spider):
        if not item['text']:
            raise DropItem("item has an empty 'text' field")
        else:
            return item
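

# Hedged configuration sketch: these pipelines only run once enabled in the
# project's settings.py. The module path and priority values below are
# assumptions about the project layout, not something defined in this file.
#
#   ITEM_PIPELINES = {
#       'myproject.pipelines.DropIfEmptyFieldPipeline': 300,
#       'myproject.pipelines.RemoveTagsPipeline': 400,
#   }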
the-stack_0_14226
import logging
import os
import time

import h5py
import numpy as np
from scipy.sparse import coo_matrix, csr_matrix

from implicit.datasets import _download

log = logging.getLogger("implicit")


URL = 'https://github.com/benfred/recommender_data/releases/download/v1.0/reddit.hdf5'


def get_reddit():
    """ Returns the reddit dataset, downloading locally if necessary.

    This dataset was released here:
    https://www.reddit.com/r/redditdev/comments/dtg4j/want_to_help_reddit_build_a_recommender_a_public/
    and contains 23M up/down votes from 44K users on 3.4M links.

    Returns a CSR matrix of (item, user) ratings. """
    filename = os.path.join(_download.LOCAL_CACHE_DIR, "reddit.hdf5")
    if not os.path.isfile(filename):
        log.info("Downloading dataset to '%s'", filename)
        _download.download_file(URL, filename)
    else:
        log.info("Using cached dataset at '%s'", filename)

    with h5py.File(filename, 'r') as f:
        m = f.get('item_user_ratings')
        return csr_matrix((m.get('data'), m.get('indices'), m.get('indptr')))


def generate_dataset(filename, outputfilename):
    """ Generates an hdf5 reddit dataset file from the raw datafiles found at:
    https://www.reddit.com/r/redditdev/comments/dtg4j/want_to_help_reddit_build_a_recommender_a_public/

    You shouldn't have to run this yourself, and can instead just download the
    output using the 'get_reddit' function.
    """
    data = _read_dataframe(filename)
    _hfd5_from_dataframe(data, outputfilename)


def _read_dataframe(filename):
    """ Reads the original dataset TSV as a pandas dataframe """
    # delay importing this to avoid another dependency
    import pandas

    # read in triples of user/item/rating from the input dataset
    start = time.time()
    log.debug("reading data from %s", filename)
    data = pandas.read_table(filename, usecols=[0, 1, 3], names=['user', 'item', 'rating'])

    # map each item and user to a unique numeric value
    data['user'] = data['user'].astype("category")
    data['item'] = data['item'].astype("category")

    log.debug("read data file in %s", time.time() - start)
    return data


def _hfd5_from_dataframe(data, outputfilename):
    # store as a CSR matrix of (item, user) ratings
    ratings = coo_matrix((data['rating'].astype(np.float32),
                          (data['item'].cat.codes.copy(),
                           data['user'].cat.codes.copy()))).tocsr()
    print(repr(ratings))
    print(repr(ratings.indices))
    print(repr(ratings.indptr))

    with h5py.File(outputfilename, "w") as f:
        g = f.create_group('item_user_ratings')
        g.create_dataset("data", data=ratings.data)
        g.create_dataset("indptr", data=ratings.indptr)
        g.create_dataset("indices", data=ratings.indices)
        # Note: not saving itemid strings or userid strings here:
        # they are just salted hashes, and only lead to bloat/slowness for no benefit.
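

# Hedged usage sketch: load the cached matrix and inspect its shape. Nothing
# here is hard-coded beyond the get_reddit() helper defined above; the printed
# numbers are whatever the downloaded file contains.
def _demo_load_reddit():
    item_user = get_reddit()  # CSR matrix of shape (items, users)
    n_items, n_users = item_user.shape
    print("items: %d, users: %d, ratings: %d" % (n_items, n_users, item_user.nnz))
    return item_user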
the-stack_0_14227
import numpy as np from scipy import ndimage import tifffile as tiff import matplotlib.pyplot as plt import pandas as pd from enum import Enum from skimage.transform import resize # Worldview-3 - Panchromatic (3349, 3338): 400nm - 800nm # Worldview-3 RGB (3350, 3338) # Worldview-3 - 8 Multispectral bands (838, 835): # Coastal: 400 - 450 nm (0, QGIS: 1, WV-3-Band-no:2) Red: 630 - 690 nm (4, QGIS: 5, WV-3-Band-no:6) # Blue: 450 - 510 nm (1, QGIS: 2, WV-3-Band-no:3) Red Edge: 705 - 745 nm (5, QGIS: 6, WV-3-Band-no:7) # Green: 510 - 580 nm (2, QGIS: 3, WV-3-Band-no:4) Near-IR1: 770 - 895 nm (6, QGIS: 7, WV-3-Band-no:8) # Yellow: 585 - 625 nm (3, QGIS: 4, WV-3-Band-no:5) Near-IR2: 860 - 1040 nm (7, QGIS: 8, WV-3-Band-no:9) # NIR - Near Infra Red: 750nm - 1400nm # MIR - Mid Infra Red: 3000nm - 8000nm # Worldview-3 - 8 SWIR bands (134, 133): # SWIR-1: 1195 - 1225 nm SWIR-5: 2145 - 2185 nm # SWIR-2: 1550 - 1590 nm SWIR-6: 2185 - 2225 nm # SWIR-3: 1640 - 1680 nm SWIR-7: 2235 - 2285 nm # SWIR-4: 1710 - 1750 nm SWIR-8: 2295 - 2365 nm class WV3ms(Enum): COASTAL = 0 BLUE = 1 GREEN = 2 YELLOW = 3 RED = 4 REDEDGE = 5 NEARIR1 = 6 NEARIR2 = 7 class WV3swir(Enum): SWIR_1 = 0 SWIR_2 = 1 SWIR_3 = 2 SWIR_4 = 3 SWIR_5 = 4 SWIR_6 = 5 SWIR_7 = 6 SWIR_8 = 7 CCCI_THRESHOLD_U = 0.5 CCCI_THRESHOLD_L = -4 FAUX_CCCI_THRESHOLD = 0.11 # CCCI_SWIR_THRESHOLD = 1.03 CCCI_SWIR_THRESHOLD = .94 NDWI_THRESHOLD = 0.07 NDVI_THRESHOLD = 0.07 def stretch_8bit(bands, lower_percent=2, higher_percent=98, depth=3): # contrast enhancement as per QGIS Stretch to MinMax # note that input image range is 0 .. 1 out = np.zeros_like(bands).astype(np.float32) for i in range(depth): a = 0 b = 1 if depth == 1: c = np.percentile(bands[:, :], lower_percent) d = np.percentile(bands[:, :], higher_percent) t = a + (bands[:, :] - c) * (b - a) / (d - c) else: c = np.percentile(bands[:, :, i], lower_percent) d = np.percentile(bands[:, :, i], higher_percent) t = a + (bands[:, :, i] - c) * (b - a) / (d - c) t[t < a] = a t[t > b] = b if depth == 1: out[:, :] = t else: out[:, :, i] = t return out.astype(np.float32) def EVI_index(msdata): # Enhanced Vegetation Index NIR2 = msdata[WV3ms.NEARIR2.value, :, :].astype(np.float32) R = msdata[WV3ms.RED.value, :, :].astype(np.float32) CB = msdata[WV3ms.COASTAL.value, :, :].astype(np.float32) # EVI = 2.5 * (NIR2 - R)/(NIR2 + 6.0*R - 7.5*CB + 1.0) a = 2.5 * (NIR2 - R) b = NIR2 + 6.0*R - 7.5*CB + 1.0 with np.errstate(divide='ignore', invalid='ignore'): EVI = np.true_divide(a, b) EVI[EVI == np.inf] = 0 EVI = np.nan_to_num(EVI) return EVI def SAVI_index(msdata): # Soil Adjusted Vegetation Index NIR1 = msdata[WV3ms.NEARIR1.value, :, :].astype(np.float32) R = msdata[WV3ms.RED.value, :, :].astype(np.float32) # The value of L varies by the amount or cover of green vegetation: in very high vegetation regions, # L=0; and in areas with no green vegetation, L=1. Generally, an L=0.5 works well in most situations # and is the default value used. When L=0, then SAVI = NDVI. 
L = 0.5 # SAVI = (1 + L) * (NIR1 - R)/(NIR1 + R + L) a = (1 + L) * (NIR1 - R) b = NIR1 + R + L with np.errstate(divide='ignore', invalid='ignore'): SAVI = np.true_divide(a, b) SAVI[SAVI == np.inf] = 0 SAVI = np.nan_to_num(SAVI) return SAVI def faux_CCCI_index(msdata, rgbdata): RE = resize(msdata[WV3ms.REDEDGE.value, :, :], (rgbdata.shape[0], rgbdata.shape[1]), mode='constant', preserve_range=False) NIR2 = resize(msdata[WV3ms.NEARIR2.value, :, :], (rgbdata.shape[0], rgbdata.shape[1]), mode='constant', preserve_range=False) R = rgbdata[:, :, 0] # resize: note that with the default preserve_range=False the input image is # converted according to the conventions of img_as_float (values in [0, 1]) # from the original 11 bits range [0, 2047]. preserve_range=True should be used. # faux_CCCI_index only works preserve_range=False - reason unknown # Canopy Chlorophyll Content Index # CCCI = ((NIR2 - RE) / (NIR2 + RE)) / ((NIR2 - R) / (NIR2 + R)) a = NIR2 - RE b = NIR2 + RE # c = NIR2 - R # d = NIR2 + R c = R * (-1) d = R with np.errstate(divide='ignore', invalid='ignore'): e = np.true_divide(a, b) e[e == np.inf] = 0 e = np.nan_to_num(e) f = np.true_divide(c, d) f[f == np.inf] = 0 f = np.nan_to_num(f) CCCI = np.true_divide(e, f) CCCI[CCCI == np.inf] = 0 CCCI = np.nan_to_num(CCCI) return CCCI def CCCI_NIR2_index(msdata): # Canopy Chlorophyll Content Index # uses NIR2 rather than SWIR_1 RE = msdata[WV3ms.REDEDGE.value, :, :].astype(np.float32) NIR2 = msdata[WV3ms.NEARIR2.value, :, :].astype(np.float32) R = msdata[WV3ms.RED.value, :, :].astype(np.float32) # CCCI = ((NIR2 - RE)/ NIR2 + RE)) / ((NIR2 - R)/(NIR2 + R)) a = NIR2 - RE b = NIR2 + RE c = NIR2 - R d = NIR2 + R with np.errstate(divide='ignore', invalid='ignore'): e = np.true_divide(a, b) e[e == np.inf] = 0 e = np.nan_to_num(e) f = np.true_divide(c, d) f[f == np.inf] = 0 f = np.nan_to_num(f) CCCI = np.true_divide(e, f) CCCI[CCCI == np.inf] = 0 CCCI = np.nan_to_num(CCCI) return CCCI def CCCI_SWIR_index(msdata, swirdata): # Canopy Chlorophyll Content Index # uses SWIR_1 RE = msdata[WV3ms.REDEDGE.value, :, :].astype(np.float32) SWIR1 = resize(swirdata[WV3swir.SWIR_1.value, :, :], (msdata.shape[1], msdata.shape[2]), mode='constant', preserve_range=True).astype(np.float32) R = msdata[WV3ms.RED.value, :, :].astype(np.float32) # CCCI = ((SWIR1 - RE)/ SWIR1 + RE)) / ((SWIR1 - R)/(SWIR1 + R)) a = SWIR1 - RE b = SWIR1 + RE c = SWIR1 - R d = SWIR1 + R with np.errstate(divide='ignore', invalid='ignore'): e = np.true_divide(a, b) e[e == np.inf] = 0 e = np.nan_to_num(e) f = np.true_divide(c, d) f[f == np.inf] = 0 f = np.nan_to_num(f) CCCI = np.true_divide(e, f) CCCI[CCCI == np.inf] = 0 CCCI = np.nan_to_num(CCCI) return CCCI def NDWI_index(msdata): # Normalized Difference Water Index # Uses McFeeter's NDWI based on MODIS band 2 and band 4 G = msdata[WV3ms.GREEN.value, :, :].astype(np.float32) NIR1 = msdata[WV3ms.NEARIR1.value, :, :].astype(np.float32) # NDWI = (G - NIR1)/(G + NIR1) a = G - NIR1 b = G + NIR1 with np.errstate(divide='ignore', invalid='ignore'): NDWI = np.true_divide(a, b) NDWI[NDWI == np.inf] = 0 NDWI = np.nan_to_num(NDWI) return NDWI def NDVI_index(msdata): # Normalized Difference Vegetation Index R = msdata[WV3ms.RED.value, :, :].astype(np.float32) NIR1 = msdata[WV3ms.NEARIR1.value, :, :].astype(np.float32) # NDVI = (NIR1 - R)/(NIR1 + R ) a = NIR1 - R b = NIR1 + R with np.errstate(divide='ignore', invalid='ignore'): NDVI = np.true_divide(a, b) NDVI[NDVI == np.inf] = 0 NDVI = np.nan_to_num(NDVI) return NDVI def display(IM_ID): # read rgb and m 
bands # tifffile RGB = ndarray shape (3, 3350, 3338) i.e. (colour, row, col) # [0] = red, [1] = green, [2] = blue, 16 bit depth rgb = tiff.imread('three_band/{}.tif'.format(IM_ID)) # change shape to regular (3350, 3338, 3) i.e. (row, col, colour) rgb = np.rollaxis(rgb, 0, 3) # tifffile M = ndarray shape (8, 838, 835) i.e. (spectrum, row, col) m = tiff.imread('sixteen_band/{}_M.tif'.format(IM_ID)) # tiffile panchrom = ndarray shape (3349, 3338) i.e. (row, col) panchrom = tiff.imread('sixteen_band/{}_P.tif'.format(IM_ID)) # tiffile SWIR = ndarray shape (8, 134, 133) i.e. (spectrum, row, col) swir = tiff.imread('sixteen_band/{}_A.tif'.format(IM_ID)) # get our indices myFauxCCCI = faux_CCCI_index(m, rgb) myCCCI = CCCI_NIR2_index(m) mySwirCCCI = CCCI_SWIR_index(m, swir) myNDWI = NDWI_index(m) myNDVI = NDVI_index(m) myEVI = EVI_index(m) mySAVI = SAVI_index(m) # you can look on histogram and pick your favorite threshold value # ccci_binary = (myCCCI < CCCI_THRESHOLD).astype(np.float32) ccci_binary_1 = (myCCCI < CCCI_THRESHOLD_U) ccci_binary_2 = (myCCCI > CCCI_THRESHOLD_L) ccci_binary_3 = np.logical_and(ccci_binary_1, ccci_binary_2) ccci_binary_4 = np.logical_not(ccci_binary_3) ccci_binary_5 = ndimage.binary_opening(ccci_binary_4) ccci_binary = ndimage.binary_closing(ccci_binary_5).astype(np.float32) ndwi_binary = (myNDWI > NDWI_THRESHOLD).astype(np.float32) ndvi_binary = (myNDWI > NDVI_THRESHOLD).astype(np.float32) faux_ccci_binary = (myFauxCCCI > FAUX_CCCI_THRESHOLD).astype(np.float32) ccci_swir_binary = (mySwirCCCI > CCCI_SWIR_THRESHOLD).astype(np.float32) fig, axes = plt.subplots(ncols=5, nrows=2, figsize=(18, 9)) ax = axes.ravel() ax[0].imshow(ccci_binary, cmap='binary_r') ax[0].set_title('CCCI NIR 2 Mask') ax[0].axis('off') ax[1].imshow(ndwi_binary, cmap='binary_r') ax[1].set_title('NDWI Mask') ax[1].axis('off') ax[2].imshow(ndvi_binary, cmap='binary_r') ax[2].set_title('NDVI Mask') ax[2].axis('off') ax[3].imshow(faux_ccci_binary, cmap='binary_r') ax[3].set_title('Faux CCCI Mask') ax[3].axis('off') ax[4].imshow(ccci_swir_binary, cmap='binary_r') ax[4].set_title('CCCI SWIR 1 Mask') ax[4].axis('off') hist, bins = np.histogram(myCCCI, range=(-2, 2), bins=50) width = 0.7 * (bins[1] - bins[0]) center = (bins[:-1] + bins[1:]) / 2 ax[5].set_title('CCCI NIR 2 Histogram') ax[5].bar(center, hist, align='center', width=width) hist, bins = np.histogram(myNDWI, bins=50) width = 0.7 * (bins[1] - bins[0]) center = (bins[:-1] + bins[1:]) / 2 ax[6].set_title('NDWI Histogram') ax[6].bar(center, hist, align='center', width=width) hist, bins = np.histogram(myNDVI, bins=50) width = 0.7 * (bins[1] - bins[0]) center = (bins[:-1] + bins[1:]) / 2 ax[7].set_title('NDVI Histogram') ax[7].bar(center, hist, align='center', width=width) hist, bins = np.histogram(myFauxCCCI, range=(-.4, .4), bins=50) width = 0.7 * (bins[1] - bins[0]) center = (bins[:-1] + bins[1:]) / 2 ax[8].set_title('Faux CCCI Histogram') ax[8].bar(center, hist, align='center', width=width) hist, bins = np.histogram(mySwirCCCI, range=(.4, 1.2), bins=50) width = 0.7 * (bins[1] - bins[0]) center = (bins[:-1] + bins[1:]) / 2 ax[9].set_title('CCCI SWIR 1 Histogram') ax[9].bar(center, hist, align='center', width=width) plt.tight_layout() plt.show() # fig, axes = plt.subplots(ncols=2, nrows=1, figsize=(18, 10)) # ax = axes.ravel() # ax[0].imshow(stretch_8bit(rgb)) # ax[0].set_title('RGB {}'.format(IM_ID)) # ax[0].axis('off') # ax[1].imshow(stretch_8bit(panchrom, depth=1), cmap='gray') # ax[1].set_title('Panchromatic {}'.format(IM_ID)) # ax[1].axis('off') # 
plt.tight_layout() # plt.show() fig, axes = plt.subplots(ncols=3, nrows=2, figsize=(18, 10)) ax = axes.ravel() ax[0].imshow(myCCCI, vmin=-.5, vmax=.5) ax[0].set_title('CCCI NIR 2') ax[0].axis('off') ax[1].imshow(myNDWI, vmin=-.3, vmax=.3) ax[1].set_title('NDWI') ax[1].axis('off') ax[2].imshow(myNDVI) ax[2].set_title('NDVI') ax[2].axis('off') ax[3].imshow(myEVI, vmin=-.5, vmax=.5) ax[3].set_title('EVI') ax[3].axis('off') ax[4].imshow(mySAVI) ax[4].set_title('SAVI') ax[4].axis('off') ax[5].imshow(mySwirCCCI, vmin=0.6, vmax=1.2) ax[5].set_title('CCCI SWIR 1') ax[5].axis('off') plt.tight_layout() plt.show() # -----Main------ data = pd.read_csv('train_wkt_v4.csv') data = data[data.MultipolygonWKT != 'MULTIPOLYGON EMPTY'] # display('6150_3_4') # use training data images for waterway for IMG_ID in data[data.ClassType == 7].ImageId: display(IMG_ID) # test images # take some pictures from test waterway_test = ['6080_4_3', '6080_4_0', '6080_1_3', '6080_1_1', '6150_3_4', '6050_2_1'] for IMG_ID in waterway_test: display(IMG_ID)
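# Illustrative sketch (not part of the original script): every index function
# above repeats the same "safe divide" pattern -- suppress divide/invalid
# warnings, replace inf with 0 and NaN with 0. The helper and the tiny
# synthetic NIR/Red arrays below are made up purely to show that pattern in
# isolation; only numpy is assumed.
import numpy as np

def safe_ndvi(nir, red):
    # (NIR - R) / (NIR + R), with 0 wherever the denominator is 0
    a = nir.astype(np.float32) - red.astype(np.float32)
    b = nir.astype(np.float32) + red.astype(np.float32)
    with np.errstate(divide='ignore', invalid='ignore'):
        ndvi = np.true_divide(a, b)
        ndvi[ndvi == np.inf] = 0
        ndvi = np.nan_to_num(ndvi)
    return ndvi

if __name__ == '__main__':
    nir = np.array([[0.8, 0.0], [0.6, 0.2]])
    red = np.array([[0.1, 0.0], [0.3, 0.2]])
    print(safe_ndvi(nir, red))  # the 0/0 pixel becomes 0 instead of NaN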
the-stack_0_14228
"""Base classes for all estimators.""" # Author: Gael Varoquaux <[email protected]> # License: BSD 3 clause import copy import warnings from collections import defaultdict import platform import inspect import re import numpy as np from . import __version__ from .utils import _IS_32BIT _DEFAULT_TAGS = { 'non_deterministic': False, 'requires_positive_X': False, 'requires_positive_y': False, 'X_types': ['2darray'], 'poor_score': False, 'no_validation': False, 'multioutput': False, "allow_nan": False, 'stateless': False, 'multilabel': False, '_skip_test': False, 'multioutput_only': False, 'binary_only': False, 'requires_fit': True} def clone(estimator, safe=True): """Constructs a new estimator with the same parameters. Clone does a deep copy of the model in an estimator without actually copying attached data. It yields a new estimator with the same parameters that has not been fit on any data. Parameters ---------- estimator : estimator object, or list, tuple or set of objects The estimator or group of estimators to be cloned safe : boolean, optional If safe is false, clone will fall back to a deep copy on objects that are not estimators. """ estimator_type = type(estimator) # XXX: not handling dictionaries if estimator_type in (list, tuple, set, frozenset): return estimator_type([clone(e, safe=safe) for e in estimator]) elif not hasattr(estimator, 'get_params') or isinstance(estimator, type): if not safe: return copy.deepcopy(estimator) else: raise TypeError("Cannot clone object '%s' (type %s): " "it does not seem to be a scikit-learn estimator " "as it does not implement a 'get_params' methods." % (repr(estimator), type(estimator))) klass = estimator.__class__ new_object_params = estimator.get_params(deep=False) for name, param in new_object_params.items(): new_object_params[name] = clone(param, safe=False) new_object = klass(**new_object_params) params_set = new_object.get_params(deep=False) # quick sanity check of the parameters of the clone for name in new_object_params: param1 = new_object_params[name] param2 = params_set[name] if param1 is not param2: raise RuntimeError('Cannot clone object %s, as the constructor ' 'either does not set or modifies parameter %s' % (estimator, name)) return new_object def _pprint(params, offset=0, printer=repr): """Pretty print the dictionary 'params' Parameters ---------- params : dict The dictionary to pretty print offset : int The offset in characters to add at the begin of each line. printer : callable The function to convert entries to strings, typically the builtin str or repr """ # Do a multi-line justified repr: options = np.get_printoptions() np.set_printoptions(precision=5, threshold=64, edgeitems=2) params_list = list() this_line_length = offset line_sep = ',\n' + (1 + offset // 2) * ' ' for i, (k, v) in enumerate(sorted(params.items())): if type(v) is float: # use str for representing floating point numbers # this way we get consistent representation across # architectures and versions. this_repr = '%s=%s' % (k, str(v)) else: # use repr of the rest this_repr = '%s=%s' % (k, printer(v)) if len(this_repr) > 500: this_repr = this_repr[:300] + '...' 
+ this_repr[-100:] if i > 0: if (this_line_length + len(this_repr) >= 75 or '\n' in this_repr): params_list.append(line_sep) this_line_length = len(line_sep) else: params_list.append(', ') this_line_length += 2 params_list.append(this_repr) this_line_length += len(this_repr) np.set_printoptions(**options) lines = ''.join(params_list) # Strip trailing space to avoid nightmare in doctests lines = '\n'.join(l.rstrip(' ') for l in lines.split('\n')) return lines class BaseEstimator: """Base class for all estimators in scikit-learn Notes ----- All estimators should specify all the parameters that can be set at the class level in their ``__init__`` as explicit keyword arguments (no ``*args`` or ``**kwargs``). """ @classmethod def _get_param_names(cls): """Get parameter names for the estimator""" # fetch the constructor or the original constructor before # deprecation wrapping if any init = getattr(cls.__init__, 'deprecated_original', cls.__init__) if init is object.__init__: # No explicit constructor to introspect return [] # introspect the constructor arguments to find the model parameters # to represent init_signature = inspect.signature(init) # Consider the constructor parameters excluding 'self' parameters = [p for p in init_signature.parameters.values() if p.name != 'self' and p.kind != p.VAR_KEYWORD] for p in parameters: if p.kind == p.VAR_POSITIONAL: raise RuntimeError("scikit-learn estimators should always " "specify their parameters in the signature" " of their __init__ (no varargs)." " %s with constructor %s doesn't " " follow this convention." % (cls, init_signature)) # Extract and sort argument names excluding 'self' return sorted([p.name for p in parameters]) def get_params(self, deep=True): """ Get parameters for this estimator. Parameters ---------- deep : bool, default=True If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns ------- params : mapping of string to any Parameter names mapped to their values. """ out = dict() for key in self._get_param_names(): try: value = getattr(self, key) except AttributeError: warnings.warn('From version 0.24, get_params will raise an ' 'AttributeError if a parameter cannot be ' 'retrieved as an instance attribute. Previously ' 'it would return None.', FutureWarning) value = None if deep and hasattr(value, 'get_params'): deep_items = value.get_params().items() out.update((key + '__' + k, val) for k, val in deep_items) out[key] = value return out def set_params(self, **params): """ Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form ``<component>__<parameter>`` so that it's possible to update each component of a nested object. Parameters ---------- **params : dict Estimator parameters. Returns ------- self : object Estimator instance. """ if not params: # Simple optimization to gain speed (inspect is slow) return self valid_params = self.get_params(deep=True) nested_params = defaultdict(dict) # grouped by prefix for key, value in params.items(): key, delim, sub_key = key.partition('__') if key not in valid_params: raise ValueError('Invalid parameter %s for estimator %s. ' 'Check the list of available parameters ' 'with `estimator.get_params().keys()`.' 
% (key, self)) if delim: nested_params[key][sub_key] = value else: setattr(self, key, value) valid_params[key] = value for key, sub_params in nested_params.items(): valid_params[key].set_params(**sub_params) return self def __repr__(self, N_CHAR_MAX=700): # N_CHAR_MAX is the (approximate) maximum number of non-blank # characters to render. We pass it as an optional parameter to ease # the tests. from .utils._pprint import _EstimatorPrettyPrinter N_MAX_ELEMENTS_TO_SHOW = 30 # number of elements to show in sequences # use ellipsis for sequences with a lot of elements pp = _EstimatorPrettyPrinter( compact=True, indent=1, indent_at_name=True, n_max_elements_to_show=N_MAX_ELEMENTS_TO_SHOW) repr_ = pp.pformat(self) # Use bruteforce ellipsis when there are a lot of non-blank characters n_nonblank = len(''.join(repr_.split())) if n_nonblank > N_CHAR_MAX: lim = N_CHAR_MAX // 2 # apprx number of chars to keep on both ends regex = r'^(\s*\S){%d}' % lim # The regex '^(\s*\S){%d}' % n # matches from the start of the string until the nth non-blank # character: # - ^ matches the start of string # - (pattern){n} matches n repetitions of pattern # - \s*\S matches a non-blank char following zero or more blanks left_lim = re.match(regex, repr_).end() right_lim = re.match(regex, repr_[::-1]).end() if '\n' in repr_[left_lim:-right_lim]: # The left side and right side aren't on the same line. # To avoid weird cuts, e.g.: # categoric...ore', # we need to start the right side with an appropriate newline # character so that it renders properly as: # categoric... # handle_unknown='ignore', # so we add [^\n]*\n which matches until the next \n regex += r'[^\n]*\n' right_lim = re.match(regex, repr_[::-1]).end() ellipsis = '...' if left_lim + len(ellipsis) < len(repr_) - right_lim: # Only add ellipsis if it results in a shorter repr repr_ = repr_[:left_lim] + '...' + repr_[-right_lim:] return repr_ def __getstate__(self): try: state = super().__getstate__() except AttributeError: state = self.__dict__.copy() if type(self).__module__.startswith('sklearn.'): return dict(state.items(), _sklearn_version=__version__) else: return state def __setstate__(self, state): if type(self).__module__.startswith('sklearn.'): pickle_version = state.pop("_sklearn_version", "pre-0.18") if pickle_version != __version__: warnings.warn( "Trying to unpickle estimator {0} from version {1} when " "using version {2}. This might lead to breaking code or " "invalid results. Use at your own risk.".format( self.__class__.__name__, pickle_version, __version__), UserWarning) try: super().__setstate__(state) except AttributeError: self.__dict__.update(state) def _more_tags(self): return _DEFAULT_TAGS def _get_tags(self): collected_tags = {} for base_class in reversed(inspect.getmro(self.__class__)): if hasattr(base_class, '_more_tags'): # need the if because mixins might not have _more_tags # but might do redundant work in estimators # (i.e. calling more tags on BaseEstimator multiple times) more_tags = base_class._more_tags(self) collected_tags.update(more_tags) return collected_tags class ClassifierMixin: """Mixin class for all classifiers in scikit-learn.""" _estimator_type = "classifier" def score(self, X, y, sample_weight=None): """ Return the mean accuracy on the given test data and labels. In multi-label classification, this is the subset accuracy which is a harsh metric since you require for each sample that each label set be correctly predicted. Parameters ---------- X : array-like of shape (n_samples, n_features) Test samples. 
y : array-like of shape (n_samples,) or (n_samples, n_outputs) True labels for X. sample_weight : array-like of shape (n_samples,), default=None Sample weights. Returns ------- score : float Mean accuracy of self.predict(X) wrt. y. """ from .metrics import accuracy_score return accuracy_score(y, self.predict(X), sample_weight=sample_weight) class RegressorMixin: """Mixin class for all regression estimators in scikit-learn.""" _estimator_type = "regressor" def score(self, X, y, sample_weight=None): """Returns the coefficient of determination R^2 of the prediction. The coefficient R^2 is defined as (1 - u/v), where u is the residual sum of squares ((y_true - y_pred) ** 2).sum() and v is the total sum of squares ((y_true - y_true.mean()) ** 2).sum(). The best possible score is 1.0 and it can be negative (because the model can be arbitrarily worse). A constant model that always predicts the expected value of y, disregarding the input features, would get a R^2 score of 0.0. Parameters ---------- X : array-like of shape (n_samples, n_features) Test samples. For some estimators this may be a precomputed kernel matrix instead, shape = (n_samples, n_samples_fitted], where n_samples_fitted is the number of samples used in the fitting for the estimator. y : array-like of shape (n_samples,) or (n_samples, n_outputs) True values for X. sample_weight : array-like of shape (n_samples,), default=None Sample weights. Returns ------- score : float R^2 of self.predict(X) wrt. y. Notes ----- The R2 score used when calling ``score`` on a regressor will use ``multioutput='uniform_average'`` from version 0.23 to keep consistent with :func:`~sklearn.metrics.r2_score`. This will influence the ``score`` method of all the multioutput regressors (except for :class:`~sklearn.multioutput.MultiOutputRegressor`). To specify the default value manually and avoid the warning, please either call :func:`~sklearn.metrics.r2_score` directly or make a custom scorer with :func:`~sklearn.metrics.make_scorer` (the built-in scorer ``'r2'`` uses ``multioutput='uniform_average'``). """ from .metrics import r2_score from .metrics._regression import _check_reg_targets y_pred = self.predict(X) # XXX: Remove the check in 0.23 y_type, _, _, _ = _check_reg_targets(y, y_pred, None) if y_type == 'continuous-multioutput': warnings.warn("The default value of multioutput (not exposed in " "score method) will change from 'variance_weighted' " "to 'uniform_average' in 0.23 to keep consistent " "with 'metrics.r2_score'. To specify the default " "value manually and avoid the warning, please " "either call 'metrics.r2_score' directly or make a " "custom scorer with 'metrics.make_scorer' (the " "built-in scorer 'r2' uses " "multioutput='uniform_average').", FutureWarning) return r2_score(y, y_pred, sample_weight=sample_weight, multioutput='variance_weighted') class ClusterMixin: """Mixin class for all cluster estimators in scikit-learn.""" _estimator_type = "clusterer" def fit_predict(self, X, y=None): """ Perform clustering on X and returns cluster labels. Parameters ---------- X : ndarray, shape (n_samples, n_features) Input data. y : Ignored Not used, present for API consistency by convention. Returns ------- labels : ndarray, shape (n_samples,) Cluster labels. 
""" # non-optimized default implementation; override when a better # method is possible for a given clustering algorithm self.fit(X) return self.labels_ class BiclusterMixin: """Mixin class for all bicluster estimators in scikit-learn""" @property def biclusters_(self): """Convenient way to get row and column indicators together. Returns the ``rows_`` and ``columns_`` members. """ return self.rows_, self.columns_ def get_indices(self, i): """Row and column indices of the i'th bicluster. Only works if ``rows_`` and ``columns_`` attributes exist. Parameters ---------- i : int The index of the cluster. Returns ------- row_ind : np.array, dtype=np.intp Indices of rows in the dataset that belong to the bicluster. col_ind : np.array, dtype=np.intp Indices of columns in the dataset that belong to the bicluster. """ rows = self.rows_[i] columns = self.columns_[i] return np.nonzero(rows)[0], np.nonzero(columns)[0] def get_shape(self, i): """Shape of the i'th bicluster. Parameters ---------- i : int The index of the cluster. Returns ------- shape : (int, int) Number of rows and columns (resp.) in the bicluster. """ indices = self.get_indices(i) return tuple(len(i) for i in indices) def get_submatrix(self, i, data): """Returns the submatrix corresponding to bicluster `i`. Parameters ---------- i : int The index of the cluster. data : array The data. Returns ------- submatrix : array The submatrix corresponding to bicluster i. Notes ----- Works with sparse matrices. Only works if ``rows_`` and ``columns_`` attributes exist. """ from .utils.validation import check_array data = check_array(data, accept_sparse='csr') row_ind, col_ind = self.get_indices(i) return data[row_ind[:, np.newaxis], col_ind] class TransformerMixin: """Mixin class for all transformers in scikit-learn.""" def fit_transform(self, X, y=None, **fit_params): """ Fit to data, then transform it. Fits transformer to X and y with optional parameters fit_params and returns a transformed version of X. Parameters ---------- X : numpy array of shape [n_samples, n_features] Training set. y : numpy array of shape [n_samples] Target values. **fit_params : dict Additional fit parameters. Returns ------- X_new : numpy array of shape [n_samples, n_features_new] Transformed array. """ # non-optimized default implementation; override when a better # method is possible for a given clustering algorithm if y is None: # fit method of arity 1 (unsupervised transformation) return self.fit(X, **fit_params).transform(X) else: # fit method of arity 2 (supervised transformation) return self.fit(X, y, **fit_params).transform(X) class DensityMixin: """Mixin class for all density estimators in scikit-learn.""" _estimator_type = "DensityEstimator" def score(self, X, y=None): """Returns the score of the model on the data X Parameters ---------- X : array-like of shape (n_samples, n_features) Returns ------- score : float """ pass class OutlierMixin: """Mixin class for all outlier detection estimators in scikit-learn.""" _estimator_type = "outlier_detector" def fit_predict(self, X, y=None): """Perform fit on X and returns labels for X. Returns -1 for outliers and 1 for inliers. Parameters ---------- X : ndarray, shape (n_samples, n_features) Input data. y : Ignored Not used, present for API consistency by convention. Returns ------- y : ndarray, shape (n_samples,) 1 for inliers, -1 for outliers. 
""" # override for transductive outlier detectors like LocalOulierFactor return self.fit(X).predict(X) class MetaEstimatorMixin: _required_parameters = ["estimator"] """Mixin class for all meta estimators in scikit-learn.""" class MultiOutputMixin: """Mixin to mark estimators that support multioutput.""" def _more_tags(self): return {'multioutput': True} class _UnstableArchMixin: """Mark estimators that are non-determinstic on 32bit or PowerPC""" def _more_tags(self): return {'non_deterministic': ( _IS_32BIT or platform.machine().startswith(('ppc', 'powerpc')))} def is_classifier(estimator): """Returns True if the given estimator is (probably) a classifier. Parameters ---------- estimator : object Estimator object to test. Returns ------- out : bool True if estimator is a classifier and False otherwise. """ return getattr(estimator, "_estimator_type", None) == "classifier" def is_regressor(estimator): """Returns True if the given estimator is (probably) a regressor. Parameters ---------- estimator : object Estimator object to test. Returns ------- out : bool True if estimator is a regressor and False otherwise. """ return getattr(estimator, "_estimator_type", None) == "regressor" def is_outlier_detector(estimator): """Returns True if the given estimator is (probably) an outlier detector. Parameters ---------- estimator : object Estimator object to test. Returns ------- out : bool True if estimator is an outlier detector and False otherwise. """ return getattr(estimator, "_estimator_type", None) == "outlier_detector"
the-stack_0_14231
import numpy as np
import pandas as pd

WARMRATE = 0.5


class NSB:

    def apply(self, data):
        self.df = data

    def iterrt(self):
        df = self.df
        # First we'll calculate the hourly RT for NSB: nrt
        df['nrt'] = 0.0
        rt = 22
        newtonK = 0.036
        for i, row in df.iterrows():
            rtsp = row.nrtsp
            delta = float(rtsp - rt)
            # print("D: %f" % delta)
            if delta > 0:
                # warm up by delta; jumps larger than 1 degree are capped at WARMRATE per hour
                if delta > 1:
                    delta = WARMRATE
            elif delta < 0:
                # newton's law of cooling for T=+1
                rt1 = row.OAT + (rt - row.OAT) * np.exp(-newtonK)
                CDRATE = rt1 - rt
                # print("CD: %f" % CDRATE)
                if delta < CDRATE:
                    delta = CDRATE
            rt = rt + delta
            df.nrt.at[i] = rt
            # print("%d rtsp=%f rt=%f delta=%f oat=%f" % (i.hour, rtsp, rt, delta, row.OAT))

    def calchds(self):
        df = self.df
        # Now calculate the sum of delta-T for BAU vs NSB (heating only)
        df['bauhd'] = df.rt - df.OAT    # hourly bau delta T
        df['nsbhd'] = df.nrt - df.OAT   # hourly nsb delta T
        # Eliminate negative delta T
        df.loc[df['bauhd'] < 0, 'bauhd'] = 0
        df.loc[df['nsbhd'] < 0, 'nsbhd'] = 0
        # project savings
        self.heatDTsaved = (df.bauhd.sum() - df.nsbhd.sum())
        self.heatfracsaved = self.heatDTsaved / df.bauhd.sum()
        print("Heating savings: %.2f%%" % (100 * self.heatfracsaved))
        # df.nsbhd.describe()
        # df.bauhd.describe()

    @staticmethod
    def saveDT(row):
        RTN = row.nrt
        RTB = row.rt
        OAT = row.OAT
        # Computes the fractional reduction in delta T for NSB as compared to BAU
        DTN = RTN - OAT
        DTB = RTB - OAT
        if DTB <= 0:
            return 0  # There are no savings possible if the BAU DT indicates no heating.
        if DTN <= 0:
            DTN = 0   # Best we can do is turn off the heat and save 100%
        return (DTB - DTN) / DTB

    def calcCoolAndSS(self):
        # Categorize periods and calculate new heating load for each period
        df = self.df
        h = df[(df.hload != 0)].dropna()
        # Set default values for cooling
        h['cooldown'] = h.nrt > h.nrtsp
        h.loc[h.cooldown, 'savefrac'] = 1
        h.loc[h.cooldown, 'nhload'] = 0
        # compute savings fraction and hload for the Steady State periods
        # I think there's an error here.
        h['ss'] = h.nrt == h.nrtsp
        # h['savefrac'] =
        h.loc[h.ss, 'savefrac'] = h[h.ss].apply(self.saveDT, axis=1)
        h.loc[h.ss, 'nhload'] = h[h.ss]['hload'] * (1 - h[h.ss]['savefrac'])
        # now get ready for the warmup rows
        h['warmup'] = h.nrt < h.nrtsp
        self.h = h

    def calcWarmups(self):
        h = self.h
        # calculate the expected total NSB heating load, and the number of
        # hours over which it should be distributed
        hw = pd.DataFrame(h[h.warmup])
        # Count the whole and fractional hours where warmup occurs
        hw['whrs'] = hw.nrtsp - hw.nrt      # This is correct where DT<=1
        hw.loc[hw.whrs > 1, 'whrs'] = 1     # Because whrs is 1 where DT>1
        whrs = hw.whrs.sum()
        # Total Warmup heating is expected NSB total, less expected NSB at SS.
        expectedHnsb = h.hload.sum() * (1 - self.heatfracsaved)
        expectedHss = h[h.ss].hload.sum()
        expectHwarmup = expectedHnsb - expectedHss
        # Now distribute the warmup load
        HwarmPerWhr = expectHwarmup / whrs
        h.loc[hw.index, 'nhload'] = (hw.whrs * HwarmPerWhr)
        # There is an error here: h.nhload.sum() / h.hload.sum()
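# Illustrative sketch (not part of the NSB class): iterrt() limits how fast the
# room can cool toward the outdoor temperature using Newton's law of cooling
# with k = 0.036 per hour. The standalone helper below reproduces one hourly
# cooling step with made-up temperatures, so the setback-rate cap can be
# checked in isolation; only numpy is assumed.
import numpy as np

def hourly_cooling_step(rt, oat, k=0.036):
    # room temperature one hour later, with no heat input
    return oat + (rt - oat) * np.exp(-k)

if __name__ == '__main__':
    rt, oat = 22.0, 5.0
    rt_next = hourly_cooling_step(rt, oat)
    print(rt_next, rt_next - rt)  # about 21.4 and -0.6, i.e. roughly 0.6 C of cooling per hour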
the-stack_0_14232
import demistomock as demisto from CommonServerPython import * from CommonServerUserPython import * import re from base64 import b64decode from flask import Flask, Response, request from netaddr import IPAddress, IPSet from typing import Callable, Any, cast, Dict, Tuple from math import ceil import dateparser ''' GLOBAL VARIABLES ''' INTEGRATION_NAME: str = 'Export Indicators Service' PAGE_SIZE: int = 200 APP: Flask = Flask('demisto-export_iocs') CTX_VALUES_KEY: str = 'dmst_export_iocs_values' CTX_MIMETYPE_KEY: str = 'dmst_export_iocs_mimetype' FORMAT_CSV: str = 'csv' FORMAT_TEXT: str = 'text' FORMAT_JSON_SEQ: str = 'json-seq' FORMAT_JSON: str = 'json' FORMAT_ARG_MWG = 'mwg' FORMAT_ARG_PANOSURL = 'panosurl' FORMAT_ARG_BLUECOAT = 'bluecoat' FORMAT_ARG_PROXYSG = 'proxysg' FORMAT_MWG: str = 'McAfee Web Gateway' FORMAT_PROXYSG: str = "Symantec ProxySG" FORMAT_PANOSURL: str = "PAN-OS URL" FORMAT_XSOAR_JSON: str = 'XSOAR json' FORMAT_ARG_XSOAR_JSON: str = 'xsoar-json' FORMAT_XSOAR_JSON_SEQ: str = 'XSOAR json-seq' FORAMT_ARG_XSOAR_JSON_SEQ: str = 'xsoar-seq' FORMAT_XSOAR_CSV: str = 'XSOAR csv' FORMAT_ARG_XSOAR_CSV: str = 'xsoar-csv' MWG_TYPE_OPTIONS = ["string", "applcontrol", "dimension", "category", "ip", "mediatype", "number", "regex"] CTX_FORMAT_ERR_MSG: str = 'Please provide a valid format from: text, json, json-seq, csv, mgw, panosurl and proxysg' CTX_LIMIT_ERR_MSG: str = 'Please provide a valid integer for List Size' CTX_OFFSET_ERR_MSG: str = 'Please provide a valid integer for Starting Index' CTX_MWG_TYPE_ERR_MSG: str = 'The McAFee Web Gateway type can only be one of the following: string,' \ ' applcontrol, dimension, category, ip, mediatype, number, regex' CTX_COLLAPSE_ERR_MSG: str = 'The Collapse parameter can only get the following: 0 - Dont Collapse, ' \ '1 - Collapse to Ranges, 2 - Collapse to CIDRS' CTX_MISSING_REFRESH_ERR_MSG: str = 'Refresh Rate must be "number date_range_unit", examples: (2 hours, 4 minutes, ' \ '6 months, 1 day, etc.)' CTX_NO_URLS_IN_PROXYSG_FORMAT = 'ProxySG format only outputs URLs - no URLs found in the current query' MIMETYPE_JSON_SEQ: str = 'application/json-seq' MIMETYPE_JSON: str = 'application/json' MIMETYPE_CSV: str = 'text/csv' MIMETYPE_TEXT: str = 'text/plain' DONT_COLLAPSE = "Don't Collapse" COLLAPSE_TO_CIDR = "To CIDRs" COLLAPSE_TO_RANGES = "To Ranges" SORT_ASCENDING = 'asc' SORT_DESCENDING = 'desc' _PROTOCOL_REMOVAL = re.compile(r'^(?:[a-z]+:)*//') _PORT_REMOVAL = re.compile(r'^([a-z0-9\-\.]+)(?:\:[0-9]+)*') _INVALID_TOKEN_REMOVAL = re.compile(r'(?:[^\./+=\?&]+\*[^\./+=\?&]*)|(?:[^\./+=\?&]*\*[^\./+=\?&]+)') _BROAD_PATTERN = re.compile(r'^(?:\*\.)+[a-zA-Z]+(?::[0-9]+)?$') '''Request Arguments Class''' class RequestArguments: def __init__(self, query: str, out_format: str = FORMAT_TEXT, limit: int = 10000, offset: int = 0, mwg_type: str = 'string', strip_port: bool = False, drop_invalids: bool = False, category_default: str = 'bc_category', category_attribute: str = '', collapse_ips: str = DONT_COLLAPSE, csv_text: bool = False, sort_field: str = '', sort_order: str = ''): self.query = query self.out_format = out_format self.limit = limit self.offset = offset self.mwg_type = mwg_type self.strip_port = strip_port self.drop_invalids = drop_invalids self.category_default = category_default self.category_attribute = [] # type:List self.collapse_ips = collapse_ips self.csv_text = csv_text self.sort_field = sort_field self.sort_order = sort_order if category_attribute is not None: category_attribute_list = category_attribute.split(',') if 
len(category_attribute_list) != 1 or '' not in category_attribute_list: self.category_attribute = category_attribute_list def is_request_change(self, last_update_data: Dict): if self.limit != last_update_data.get('last_limit'): return True elif self.offset != last_update_data.get('last_offset'): return True elif self.out_format != last_update_data.get('last_format'): return True elif self.mwg_type != last_update_data.get('mwg_type'): return True elif self.drop_invalids != last_update_data.get('drop_invalids'): return True elif self.strip_port != last_update_data.get('strip_port'): return True elif self.category_default != last_update_data.get('category_default'): return True elif self.category_attribute != last_update_data.get('category_attribute'): return True elif self.collapse_ips != last_update_data.get('collapse_ips'): return True elif self.csv_text != last_update_data.get('csv_text'): return True elif self.sort_field != last_update_data.get('sort_field'): return True elif self.sort_order != last_update_data.get('sort_order'): return True return False ''' HELPER FUNCTIONS ''' def list_to_str(inp_list: list, delimiter: str = ',', map_func: Callable = str) -> str: """ Transforms a list to an str, with a custom delimiter between each list item """ str_res = "" if inp_list: if isinstance(inp_list, list): str_res = delimiter.join(map(map_func, inp_list)) else: raise AttributeError('Invalid inp_list provided to list_to_str') return str_res def sort_iocs(request_args: RequestArguments, iocs: list) -> list: """ Sorts the IoCs according to the sort field and order. Returns: Sorted List of IoCs, if sorting arguments are defined. """ try: if request_args.sort_field: if request_args.sort_order == SORT_ASCENDING: return sorted(iocs, key=lambda ioc: ioc[request_args.sort_field], reverse=False) elif request_args.sort_order == SORT_DESCENDING: return sorted(iocs, key=lambda ioc: ioc[request_args.sort_field], reverse=True) except KeyError: demisto.debug('ExportIndicators - Could not sort IoCs, please verify that you entered the correct field name.\n' f'Field used: {request_args.sort_field}') except Exception as e: demisto.debug(f'ExportIndicators - Could not sort IoCs due to an unknown error.\n{e}') return iocs def refresh_outbound_context(request_args: RequestArguments, on_demand: bool = False) -> str: """ Refresh the values and format using an indicator_query to call demisto.searchIndicators Update integration cache only in case of running on demand Returns: List(IoCs in output format) """ now = datetime.now() # poll indicators into list from demisto iocs = find_indicators_with_limit(request_args.query, request_args.limit, request_args.offset) iocs = sort_iocs(request_args, iocs) out_dict, actual_indicator_amount = create_values_for_returned_dict(iocs, request_args) # if in CSV format - the "indicator" header if request_args.out_format in [FORMAT_CSV, FORMAT_XSOAR_CSV]: actual_indicator_amount = actual_indicator_amount - 1 # re-polling in case formatting or ip collapse caused a lack in results while actual_indicator_amount < request_args.limit: # from where to start the new poll and how many results should be fetched new_offset = len(iocs) + request_args.offset + actual_indicator_amount - 1 new_limit = request_args.limit - actual_indicator_amount # poll additional indicators into list from demisto new_iocs = find_indicators_with_limit(request_args.query, new_limit, new_offset) # in case no additional indicators exist - exit if len(new_iocs) == 0: break # add the new results to the existing results 
iocs += new_iocs iocs = sort_iocs(request_args, iocs) # reformat the output out_dict, actual_indicator_amount = create_values_for_returned_dict(iocs, request_args) if request_args.out_format == FORMAT_CSV: actual_indicator_amount = actual_indicator_amount - 1 if request_args.out_format == FORMAT_JSON: out_dict[CTX_MIMETYPE_KEY] = MIMETYPE_JSON elif request_args.out_format in [FORMAT_CSV, FORMAT_XSOAR_CSV]: if request_args.csv_text: out_dict[CTX_MIMETYPE_KEY] = MIMETYPE_TEXT else: out_dict[CTX_MIMETYPE_KEY] = MIMETYPE_CSV elif request_args.out_format in [FORMAT_JSON_SEQ, FORMAT_XSOAR_JSON_SEQ]: out_dict[CTX_MIMETYPE_KEY] = MIMETYPE_JSON_SEQ else: out_dict[CTX_MIMETYPE_KEY] = MIMETYPE_TEXT if on_demand: set_integration_context({ "last_output": out_dict, 'last_run': date_to_timestamp(now), 'last_limit': request_args.limit, 'last_offset': request_args.offset, 'last_format': request_args.out_format, 'last_query': request_args.query, 'current_iocs': iocs, 'mwg_type': request_args.mwg_type, 'drop_invalids': request_args.drop_invalids, 'strip_port': request_args.strip_port, 'category_default': request_args.category_default, 'category_attribute': request_args.category_attribute, 'collapse_ips': request_args.collapse_ips, 'csv_text': request_args.csv_text, 'sort_field': request_args.sort_field, 'sort_order': request_args.sort_order, }) return out_dict[CTX_VALUES_KEY] def find_indicators_with_limit(indicator_query: str, limit: int, offset: int) -> list: """ Finds indicators using demisto.searchIndicators """ # calculate the starting page (each page holds 200 entries) if offset: next_page = int(offset / PAGE_SIZE) # set the offset from the starting page offset_in_page = offset - (PAGE_SIZE * next_page) else: next_page = 0 offset_in_page = 0 iocs, _ = find_indicators_with_limit_loop(indicator_query, limit, next_page=next_page) # if offset in page is bigger than the amount of results returned return empty list if len(iocs) <= offset_in_page: return [] return iocs[offset_in_page:limit + offset_in_page] def find_indicators_with_limit_loop(indicator_query: str, limit: int, total_fetched: int = 0, next_page: int = 0, last_found_len: int = PAGE_SIZE): """ Finds indicators using while loop with demisto.searchIndicators, and returns result and last page """ iocs: List[dict] = [] if not last_found_len: last_found_len = total_fetched search_indicators = IndicatorsSearcher(page=next_page) while last_found_len == PAGE_SIZE and limit and total_fetched < limit: fetched_iocs = search_indicators.search_indicators_by_version(query=indicator_query, size=PAGE_SIZE).get('iocs') iocs.extend(fetched_iocs) last_found_len = len(fetched_iocs) total_fetched += last_found_len return iocs, search_indicators.page def ip_groups_to_cidrs(ip_range_groups: list): """Collapse ip groups list to CIDRs Args: ip_range_groups (list): a list of lists containing connected IPs Returns: list. a list of CIDRs. """ ip_ranges = [] # type:List for cidr in ip_range_groups: # handle single ips if len(cidr) == 1: # CIDR with a single IP appears with "/32" suffix so handle them differently ip_ranges.append(str(cidr[0])) continue ip_ranges.append(str(cidr)) return ip_ranges def ip_groups_to_ranges(ip_range_groups: list): """Collapse ip groups list to ranges. Args: ip_range_groups (list): a list of lists containing connected IPs Returns: list. a list of Ranges. 
""" ip_ranges = [] # type:List for group in ip_range_groups: # handle single ips if len(group) == 1: ip_ranges.append(str(group[0])) continue ip_ranges.append(str(group)) return ip_ranges def ips_to_ranges(ips: list, collapse_ips: str): """Collapse IPs to Ranges or CIDRs. Args: ips (list): a list of IP strings. collapse_ips (str): Whether to collapse to Ranges or CIDRs. Returns: list. a list to Ranges or CIDRs. """ if collapse_ips == COLLAPSE_TO_RANGES: ips_range_groups = IPSet(ips).iter_ipranges() return ip_groups_to_ranges(ips_range_groups) else: cidrs = IPSet(ips).iter_cidrs() return ip_groups_to_cidrs(cidrs) def panos_url_formatting(iocs: list, drop_invalids: bool, strip_port: bool): formatted_indicators = [] # type:List for indicator_data in iocs: # only format URLs and Domains indicator = indicator_data.get('value') if not indicator: continue if indicator_data.get('indicator_type') in ['URL', 'Domain', 'DomainGlob']: indicator = indicator.lower() # remove initial protocol - http/https/ftp/ftps etc indicator = _PROTOCOL_REMOVAL.sub('', indicator) indicator_with_port = indicator # remove port from indicator - from demisto.com:369/rest/of/path -> demisto.com/rest/of/path indicator = _PORT_REMOVAL.sub(r'\g<1>', indicator) # check if removing the port changed something about the indicator if indicator != indicator_with_port and not strip_port: # if port was in the indicator and strip_port param not set - ignore the indicator continue with_invalid_tokens_indicator = indicator # remove invalid tokens from indicator indicator = _INVALID_TOKEN_REMOVAL.sub('*', indicator) # check if the indicator held invalid tokens if with_invalid_tokens_indicator != indicator: # invalid tokens in indicator- if drop_invalids is set - ignore the indicator if drop_invalids: continue # check if after removing the tokens the indicator is too broad if so - ignore # example of too broad terms: "*.paloalto", "*.*.paloalto", "*.paloalto:60" hostname = indicator if '/' in hostname: hostname, _ = hostname.split('/', 1) if _BROAD_PATTERN.match(hostname) is not None: continue # for PAN-OS "*.domain.com" does not match "domain.com" - we should provide both if indicator.startswith('*.'): formatted_indicators.append(indicator[2:]) formatted_indicators.append(indicator) return {CTX_VALUES_KEY: list_to_str(formatted_indicators, '\n')}, len(formatted_indicators) def create_json_out_format(iocs: list): formatted_indicators = [] # type:List for indicator_data in iocs: if indicator_data.get("value"): json_format_indicator = json_format_single_indicator(indicator_data) formatted_indicators.append(json_format_indicator) return {CTX_VALUES_KEY: json.dumps(formatted_indicators)} def json_format_single_indicator(indicator: dict): json_format_indicator = { "indicator": indicator.get("value") } indicator.pop("value", None) json_format_indicator["value"] = indicator return json_format_indicator def add_indicator_to_category(indicator, category, category_dict): if category in category_dict.keys(): category_dict[category].append(indicator) else: category_dict[category] = [indicator] return category_dict def create_proxysg_out_format(iocs: list, category_attribute: list, category_default: str = 'bc_category'): formatted_indicators = '' category_dict = {} # type:Dict num_of_returned_indicators = 0 for indicator in iocs: if indicator.get('indicator_type') in ['URL', 'Domain', 'DomainGlob'] and indicator.get('value'): indicator_proxysg_category = indicator.get('proxysgcategory') # if a ProxySG Category is set and it is in the 
category_attribute list or that the attribute list is empty # than list add the indicator to it's category list if indicator_proxysg_category is not None and \ (indicator_proxysg_category in category_attribute or len(category_attribute) == 0): category_dict = add_indicator_to_category(indicator.get('value'), indicator_proxysg_category, category_dict) else: # if ProxySG Category is not set or does not exist in the category_attribute list category_dict = add_indicator_to_category(indicator.get('value'), category_default, category_dict) for category, indicator_list in category_dict.items(): sub_output_string = f"define category {category}\n" sub_output_string += list_to_str(indicator_list, '\n') sub_output_string += "\nend\n" formatted_indicators += sub_output_string num_of_returned_indicators = num_of_returned_indicators + len(indicator_list) if len(formatted_indicators) == 0: raise Exception(CTX_NO_URLS_IN_PROXYSG_FORMAT) return {CTX_VALUES_KEY: formatted_indicators}, num_of_returned_indicators def create_mwg_out_format(iocs: list, mwg_type: str) -> dict: formatted_indicators = [] # type:List for indicator in iocs: if not indicator.get('value'): continue value = "\"" + indicator.get('value') + "\"" sources = indicator.get('sourceBrands') if sources: sources_string = "\"" + ','.join(sources) + "\"" else: sources_string = "\"from CORTEX XSOAR\"" formatted_indicators.append(value + " " + sources_string) string_formatted_indicators = list_to_str(formatted_indicators, '\n') if isinstance(mwg_type, list): mwg_type = mwg_type[0] string_formatted_indicators = "type=" + mwg_type + "\n" + string_formatted_indicators return {CTX_VALUES_KEY: string_formatted_indicators} def create_values_for_returned_dict(iocs: list, request_args: RequestArguments) -> Tuple[dict, int]: """ Create a dictionary for output values using the selected format (json, json-seq, text, csv, McAfee Web Gateway, Symantec ProxySG, panosurl) """ if request_args.out_format == FORMAT_PANOSURL: return panos_url_formatting(iocs, request_args.drop_invalids, request_args.strip_port) if request_args.out_format == FORMAT_PROXYSG: return create_proxysg_out_format(iocs, request_args.category_attribute, request_args.category_default) if request_args.out_format == FORMAT_MWG: return create_mwg_out_format(iocs, request_args.mwg_type), len(iocs) if request_args.out_format == FORMAT_JSON: return create_json_out_format(iocs), len(iocs) if request_args.out_format == FORMAT_XSOAR_JSON: iocs_list = [ioc for ioc in iocs] return {CTX_VALUES_KEY: json.dumps(iocs_list)}, len(iocs) else: ipv4_formatted_indicators = [] ipv6_formatted_indicators = [] formatted_indicators = [] if request_args.out_format == FORMAT_XSOAR_CSV and len(iocs) > 0: # add csv keys as first item headers = list(iocs[0].keys()) formatted_indicators.append(list_to_str(headers)) elif request_args.out_format == FORMAT_CSV and len(iocs) > 0: formatted_indicators.append('indicator') for ioc in iocs: value = ioc.get('value') type = ioc.get('indicator_type') if value: if request_args.out_format in [FORMAT_TEXT, FORMAT_CSV]: if type == 'IP' and request_args.collapse_ips != DONT_COLLAPSE: ipv4_formatted_indicators.append(IPAddress(value)) elif type == 'IPv6' and request_args.collapse_ips != DONT_COLLAPSE: ipv6_formatted_indicators.append(IPAddress(value)) else: formatted_indicators.append(value) elif request_args.out_format == FORMAT_XSOAR_JSON_SEQ: formatted_indicators.append(json.dumps(ioc)) elif request_args.out_format == FORMAT_JSON_SEQ: json_format_indicator = 
json_format_single_indicator(ioc) formatted_indicators.append(json.dumps(json_format_indicator)) elif request_args.out_format == FORMAT_XSOAR_CSV: # wrap csv values with " to escape them values = list(ioc.values()) formatted_indicators.append(list_to_str(values, map_func=lambda val: f'"{val}"')) if len(ipv4_formatted_indicators) > 0: ipv4_formatted_indicators = ips_to_ranges(ipv4_formatted_indicators, request_args.collapse_ips) formatted_indicators.extend(ipv4_formatted_indicators) if len(ipv6_formatted_indicators) > 0: ipv6_formatted_indicators = ips_to_ranges(ipv6_formatted_indicators, request_args.collapse_ips) formatted_indicators.extend(ipv6_formatted_indicators) return {CTX_VALUES_KEY: list_to_str(formatted_indicators, '\n')}, len(formatted_indicators) def get_outbound_mimetype() -> str: """Returns the mimetype of the export_iocs""" ctx = get_integration_context().get('last_output', {}) return ctx.get(CTX_MIMETYPE_KEY, 'text/plain') def get_outbound_ioc_values(on_demand, request_args: RequestArguments, last_update_data=None, cache_refresh_rate=None) -> str: """ Get the ioc list to return in the list """ if last_update_data is None: last_update_data = {} last_update = last_update_data.get('last_run') last_query = last_update_data.get('last_query') current_iocs = last_update_data.get('current_iocs') # on_demand ignores cache if on_demand: if request_args.is_request_change(last_update_data): values_str = get_ioc_values_str_from_context(request_args=request_args, iocs=current_iocs) else: values_str = get_ioc_values_str_from_context(request_args=request_args) else: if last_update: # takes the cache_refresh_rate amount of time back since run time. cache_time, _ = parse_date_range(cache_refresh_rate, to_timestamp=True) if last_update <= cache_time or request_args.is_request_change(last_update_data) or \ request_args.query != last_query: values_str = refresh_outbound_context(request_args=request_args) else: values_str = get_ioc_values_str_from_context(request_args=request_args) else: values_str = refresh_outbound_context(request_args) return values_str def get_ioc_values_str_from_context(request_args: RequestArguments, iocs=None) -> str: """ Extracts output values from cache """ if iocs: if request_args.offset > len(iocs): return '' iocs = iocs[request_args.offset: request_args.limit + request_args.offset] returned_dict, _ = create_values_for_returned_dict(iocs, request_args=request_args) current_cache = get_integration_context() current_cache['last_output'] = returned_dict set_integration_context(current_cache) else: returned_dict = get_integration_context().get('last_output', {}) return returned_dict.get(CTX_VALUES_KEY, '') def try_parse_integer(int_to_parse: Any, err_msg: str) -> int: """ Tries to parse an integer, and if fails will throw DemistoException with given err_msg """ try: res = int(int_to_parse) except (TypeError, ValueError): raise DemistoException(err_msg) return res def validate_basic_authentication(headers: dict, username: str, password: str) -> bool: """ Checks whether the authentication is valid. 
:param headers: The headers of the http request :param username: The integration's username :param password: The integration's password :return: Boolean which indicates whether the authentication is valid or not """ credentials: str = headers.get('Authorization', '') if not credentials or 'Basic ' not in credentials: return False encoded_credentials: str = credentials.split('Basic ')[1] credentials: str = b64decode(encoded_credentials).decode('utf-8') if ':' not in credentials: return False credentials_list = credentials.split(':') if len(credentials_list) != 2: return False user, pwd = credentials_list return user == username and pwd == password ''' ROUTE FUNCTIONS ''' def get_request_args(params): limit = try_parse_integer(request.args.get('n', params.get('list_size', 10000)), CTX_LIMIT_ERR_MSG) offset = try_parse_integer(request.args.get('s', 0), CTX_OFFSET_ERR_MSG) out_format = request.args.get('v', params.get('format', 'text')) query = request.args.get('q', params.get('indicators_query')) mwg_type = request.args.get('t', params.get('mwg_type', "string")) strip_port = request.args.get('sp', params.get('strip_port', False)) drop_invalids = request.args.get('di', params.get('drop_invalids', False)) category_default = request.args.get('cd', params.get('category_default', 'bc_category')) category_attribute = request.args.get('ca', params.get('category_attribute', '')) collapse_ips = request.args.get('tr', params.get('collapse_ips', DONT_COLLAPSE)) csv_text = request.args.get('tx', params.get('csv_text', False)) sort_field = request.args.get('sf', params.get('sort_field')) sort_order = request.args.get('so', params.get('sort_order')) # handle flags if strip_port is not None and strip_port == '': strip_port = True if drop_invalids is not None and drop_invalids == '': drop_invalids = True if csv_text is not None and csv_text == '': csv_text = True if collapse_ips is not None and collapse_ips not in [DONT_COLLAPSE, COLLAPSE_TO_CIDR, COLLAPSE_TO_RANGES]: collapse_ips = try_parse_integer(collapse_ips, CTX_COLLAPSE_ERR_MSG) if collapse_ips == 0: collapse_ips = DONT_COLLAPSE elif collapse_ips == 1: collapse_ips = COLLAPSE_TO_RANGES elif collapse_ips == 2: collapse_ips = COLLAPSE_TO_CIDR # prevent given empty params if len(query) == 0: query = params.get('indicators_query') if len(out_format) == 0: out_format = params.get('format', 'text') if out_format not in [FORMAT_PROXYSG, FORMAT_PANOSURL, FORMAT_TEXT, FORMAT_JSON, FORMAT_CSV, FORMAT_JSON_SEQ, FORMAT_MWG, FORMAT_ARG_BLUECOAT, FORMAT_ARG_MWG, FORMAT_ARG_PANOSURL, FORMAT_ARG_PROXYSG, FORMAT_ARG_PANOSURL, FORMAT_XSOAR_JSON, FORMAT_ARG_XSOAR_JSON, FORMAT_XSOAR_JSON_SEQ, FORAMT_ARG_XSOAR_JSON_SEQ, FORMAT_XSOAR_CSV, FORMAT_ARG_XSOAR_CSV]: raise DemistoException(CTX_FORMAT_ERR_MSG) elif out_format in [FORMAT_ARG_PROXYSG, FORMAT_ARG_BLUECOAT]: out_format = FORMAT_PROXYSG elif out_format == FORMAT_ARG_MWG: out_format = FORMAT_MWG elif out_format == FORMAT_ARG_PANOSURL: out_format = FORMAT_PANOSURL elif out_format == FORMAT_ARG_XSOAR_JSON: out_format = FORMAT_XSOAR_JSON elif out_format == FORAMT_ARG_XSOAR_JSON_SEQ: out_format = FORMAT_XSOAR_JSON_SEQ elif out_format == FORMAT_ARG_XSOAR_CSV: out_format = FORMAT_XSOAR_CSV if out_format == FORMAT_MWG: if mwg_type not in MWG_TYPE_OPTIONS: raise DemistoException(CTX_MWG_TYPE_ERR_MSG) return RequestArguments(query, out_format, limit, offset, mwg_type, strip_port, drop_invalids, category_default, category_attribute, collapse_ips, csv_text, sort_field, sort_order) @APP.route('/', methods=['GET']) def 
route_list_values() -> Response: """ Main handler for values saved in the integration context """ try: params = demisto.params() credentials = params.get('credentials') if params.get('credentials') else {} username: str = credentials.get('identifier', '') password: str = credentials.get('password', '') if username and password: headers: dict = cast(Dict[Any, Any], request.headers) if not validate_basic_authentication(headers, username, password): err_msg: str = 'Basic authentication failed. Make sure you are using the right credentials.' demisto.debug(err_msg) return Response(err_msg, status=401, mimetype='text/plain', headers=[ ('WWW-Authenticate', 'Basic realm="Login Required"'), ]) request_args = get_request_args(params) created = datetime.now(timezone.utc) cache_refresh_rate = params.get('cache_refresh_rate') values = get_outbound_ioc_values( on_demand=params.get('on_demand'), last_update_data=get_integration_context(), cache_refresh_rate=cache_refresh_rate, request_args=request_args ) query_time = (datetime.now(timezone.utc) - created).total_seconds() if not get_integration_context() and params.get('on_demand'): values = 'You are running in On-Demand mode - please run !eis-update command to initialize the ' \ 'export process' elif not values: values = "No Results Found For the Query" # if the case there are strings to add to the EDL, add them if the output type is text if request_args.out_format == FORMAT_TEXT: append_str = params.get("append_string") prepend_str = params.get("prepend_string") if append_str: append_str = append_str.replace("\\n", "\n") values = f"{values}{append_str}" if prepend_str: prepend_str = prepend_str.replace("\\n", "\n") values = f"{prepend_str}\n{values}" mimetype = get_outbound_mimetype() list_size = 0 if values.strip(): list_size = values.count('\n') + 1 # add 1 as last line doesn't have a \n max_age = ceil((datetime.now() - dateparser.parse(cache_refresh_rate)).total_seconds()) # type: ignore[operator] demisto.debug(f'Returning exported indicators list of size: [{list_size}], created: [{created}], ' f'query time seconds: [{query_time}], max age: [{max_age}]') resp = Response(values, status=200, mimetype=mimetype, headers=[ ('X-ExportIndicators-Created', created.isoformat()), ('X-ExportIndicators-Query-Time-Secs', "{:.3f}".format(query_time)), ('X-ExportIndicators-Size', str(list_size)) ]) resp.cache_control.max_age = max_age resp.cache_control[ 'stale-if-error'] = '600' # number of seconds we are willing to serve stale content when there is an error return resp except Exception: return Response(traceback.format_exc(), status=400, mimetype='text/plain') ''' COMMAND FUNCTIONS ''' def test_module(args, params): """ Validates: 1. Valid port. 2. Valid cache_refresh_rate """ get_params_port(params) on_demand = params.get('on_demand', None) if not on_demand: try_parse_integer(params.get('list_size'), CTX_LIMIT_ERR_MSG) # validate export_iocs Size was set query = params.get('indicators_query') # validate indicators_query isn't empty if not query: raise ValueError('"Indicator Query" is required. Provide a valid query.') cache_refresh_rate = params.get('cache_refresh_rate', '') if not cache_refresh_rate: raise ValueError(CTX_MISSING_REFRESH_ERR_MSG) # validate cache_refresh_rate value range_split = cache_refresh_rate.split(' ') if len(range_split) != 2: raise ValueError(CTX_MISSING_REFRESH_ERR_MSG) try_parse_integer(range_split[0], 'Invalid time value for the Refresh Rate. 
Must be a valid integer.') if not range_split[1] in ['minute', 'minutes', 'hour', 'hours', 'day', 'days', 'month', 'months', 'year', 'years']: raise ValueError( 'Invalid time unit for the Refresh Rate. Must be minutes, hours, days, months, or years.') parse_date_range(cache_refresh_rate, to_timestamp=True) run_long_running(params, is_test=True) return 'ok' def update_outbound_command(args, params): """ Updates the export_iocs values and format on demand """ on_demand = params.get('on_demand') if not on_demand: raise DemistoException( '"Update exported IOCs On Demand" is off. If you want to update manually please toggle it on.') limit = try_parse_integer(args.get('list_size', params.get('list_size')), CTX_LIMIT_ERR_MSG) print_indicators = args.get('print_indicators') query = args.get('query') # in case no query is entered take the query in the integration params if not query: query = params.get('indicators_query') out_format = args.get('format') offset = try_parse_integer(args.get('offset', 0), CTX_OFFSET_ERR_MSG) mwg_type = args.get('mwg_type') strip_port = args.get('strip_port') == 'True' drop_invalids = args.get('drop_invalids') == 'True' category_attribute = args.get('category_attribute') category_default = args.get('category_default') collapse_ips = args.get('collapse_ips') csv_text = args.get('csv_text') == 'True' sort_field = args.get('sort_field') sort_order = args.get('sort_order') request_args = RequestArguments(query, out_format, limit, offset, mwg_type, strip_port, drop_invalids, category_default, category_attribute, collapse_ips, csv_text, sort_field, sort_order) indicators = refresh_outbound_context(request_args, on_demand=on_demand) if indicators: hr = tableToMarkdown('List was updated successfully with the following values', indicators, ['Indicators']) if print_indicators == 'true' else 'List was updated successfully' else: hr = "No Results Found For the Query" return CommandResults(readable_output=hr, raw_response=indicators) def main(): """ Main """ params = demisto.params() credentials = params.get('credentials') if params.get('credentials') else {} username: str = credentials.get('identifier', '') password: str = credentials.get('password', '') if (username and not password) or (password and not username): err_msg: str = 'If using credentials, both username and password should be provided.' demisto.debug(err_msg) raise DemistoException(err_msg) command = demisto.command() demisto.debug('Command being called is {}'.format(command)) commands = { 'test-module': test_module, 'eis-update': update_outbound_command } try: if command == 'long-running-execution': run_long_running(params) elif command in commands: return_results(commands[command](demisto.args(), params)) else: raise NotImplementedError(f'Command "{command}" is not implemented.') except Exception as e: demisto.error(traceback.format_exc()) err_msg = f'Error in {INTEGRATION_NAME} Integration [{e}]' return_error(err_msg) from NGINXApiModule import * # noqa: E402 if __name__ in ['__main__', '__builtin__', 'builtins']: main()
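# Illustrative sketch (not part of the integration): ips_to_ranges() above
# relies on netaddr's IPSet to merge contiguous addresses either into ranges
# or into CIDR blocks. The address list below is made up; the snippet assumes
# the netaddr package is installed.
from netaddr import IPAddress, IPSet

ips = [IPAddress(ip) for ip in ('10.0.0.1', '10.0.0.2', '10.0.0.3', '10.0.0.4', '192.168.1.7')]

print([str(r) for r in IPSet(ips).iter_ipranges()])
# e.g. ['10.0.0.1-10.0.0.4', '192.168.1.7-192.168.1.7']

print([str(c) for c in IPSet(ips).iter_cidrs()])
# e.g. ['10.0.0.1/32', '10.0.0.2/31', '10.0.0.4/32', '192.168.1.7/32']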
the-stack_0_14234
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os
import sys

sys.path = [
    os.path.abspath('../..'),
    os.path.abspath('../../bin')
] + sys.path

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '2.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.coverage',
    'sphinx.ext.ifconfig',
    'sphinx.ext.graphviz',
    'sphinx.ext.viewcode',
    'openstackdocstheme'
]

# General information about the project
openstackdocs_repo_name = u'openstack/python-monascaclient'
project = u'Monasca Client Dev Docs'
openstackdocs_use_storyboard = True
copyright = u'2014-present, OpenStack Foundation'
author = u'OpenStack Foundation'
openstackdocs_auto_name = False

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
source_suffix = '.rst'

# The encoding of source files.
source_encoding = 'utf-8'

# The master toctree document.
master_doc = 'index'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = [
    'common',
    'doc',
    'documentation',
    'etc',
    'java'
]

# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = True

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'native'

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'openstackdocs'

# If false, no index is generated.
html_use_index = True

# If false, no module index is generated.
html_use_modindex = True

# Output file base name for HTML help builder.
htmlhelp_basename = 'python-monascaclientdoc'

# -- Options for LaTeX output ---------------------------------------------

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'python-monascaclient.tex',
     u'python-monascaclient Documentation',
     u'Openstack Foundation \\textless{}[email protected]\\textgreater{}',
     'manual'),
]

# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'python-monascaclient',
     u'python-monascaclient Documentation',
     [author], 1)
]

# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'python-monascaclient',
     u'python-monascaclient Documentation',
     author, 'python-monascaclient',
     'Rest-API to collect logs from your cloud.',
     'Miscellaneous'),
]
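# Illustrative sketch (not part of conf.py): one way to exercise this
# configuration programmatically instead of invoking the sphinx-build CLI.
# The source/output directories below are assumptions for illustration;
# adjust them to wherever this conf.py actually lives.
from sphinx.cmd.build import build_main

exit_code = build_main(['-b', 'html', 'doc/source', 'doc/build/html'])
print('sphinx-build finished with exit code', exit_code)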
the-stack_0_14235
from django import forms
from django.utils.translation import gettext_lazy as _
from i18nfield.forms import I18nModelForm

from pretalx.common.mixins.forms import ReadOnlyFlag
from pretalx.mail.context import get_context_explanation
from pretalx.mail.models import MailTemplate, QueuedMail
from pretalx.person.models import User


class MailTemplateForm(ReadOnlyFlag, I18nModelForm):
    def __init__(self, *args, event=None, **kwargs):
        self.event = event
        if event:
            kwargs['locales'] = event.locales
        super().__init__(*args, **kwargs)

    def clean_text(self):
        text = self.cleaned_data['text']
        if self.instance and self.instance.id:
            _is_template_with_submission_context = self.instance in [
                t
                for t in self.instance.event.fixed_templates
                if t != self.event.update_template
            ]
            if _is_template_with_submission_context:
                context = {item['name']: 'test' for item in get_context_explanation()}
                try:
                    for language, local_text in text.data.items():
                        local_text.format(**context)
                except KeyError as e:
                    msg = _('Unknown template key: "{key}", locale: {locale}').format(
                        key=e.args[0], locale=language
                    )
                    raise forms.ValidationError(msg)
        return text

    class Meta:
        model = MailTemplate
        fields = ['subject', 'text', 'reply_to', 'bcc']


class MailDetailForm(ReadOnlyFlag, forms.ModelForm):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if not self.instance or not self.instance.to_users.all().count():
            self.fields.pop('to_users')
        else:
            self.fields['to_users'].queryset = self.instance.to_users.all()
            self.fields['to_users'].required = False

    def clean(self, *args, **kwargs):
        cleaned_data = super().clean(*args, **kwargs)
        if not cleaned_data['to'] and not cleaned_data.get('to_users'):
            self.add_error('to', forms.ValidationError(_('An email needs to have at least one recipient.')))
        return cleaned_data

    def save(self, *args, **kwargs):
        obj = super().save(*args, **kwargs)
        if self.has_changed() and 'to' in self.changed_data:
            addresses = list(set(a.strip().lower() for a in (obj.to or '').split(',') if a.strip()))
            for address in addresses:
                user = User.objects.filter(email__iexact=address).first()
                if user:
                    addresses.remove(address)
                    obj.to_users.add(user)
            addresses = ','.join(addresses) if addresses else ''
            obj.to = addresses
            obj.save()
        return obj

    class Meta:
        model = QueuedMail
        fields = ['to', 'to_users', 'reply_to', 'cc', 'bcc', 'subject', 'text']
        widgets = {'to_users': forms.CheckboxSelectMultiple}


class WriteMailForm(forms.ModelForm):
    recipients = forms.MultipleChoiceField(
        label=_('Recipient groups'),
        choices=(
            (
                'submitted',
                _(
                    'Everyone with submission(s) that have not been accepted/rejected yet'
                ),
            ),
            (
                'accepted',
                _('All accepted speakers (who have not confirmed their talk yet)'),
            ),
            ('confirmed', _('All confirmed speakers')),
            ('rejected', _('All rejected speakers')),
            ('reviewers', _('All reviewers in your team')),
        ),
        widget=forms.CheckboxSelectMultiple,
        required=False,
    )
    tracks = forms.MultipleChoiceField(label=_('All submissions in these tracks'), required=False)
    submission_types = forms.MultipleChoiceField(label=_('All submissions of these types'), required=False)
    submissions = forms.MultipleChoiceField(required=False)
    additional_recipients = forms.CharField(
        label=_('Recipients'),
        required=False,
        help_text=_('One email address or several addresses separated by commas.'),
    )
    reply_to = forms.CharField(required=False)

    def __init__(self, event, **kwargs):
        super().__init__(**kwargs)
        self.fields['submissions'].choices = [
            (sub.code, sub.title) for sub in event.submissions.all()
        ]
        if event.settings.use_tracks and event.tracks.all().exists():
            self.fields['tracks'].choices = [
                (track.pk, track.name) for track in event.tracks.all()
            ]
        else:
            del self.fields['tracks']
        self.fields['submission_types'].choices = [
            (submission_type.pk, submission_type.name)
            for submission_type in event.submission_types.all()
        ]
        self.fields['text'].help_text = _(
            'Please note: Placeholders will not be substituted, this is an upcoming feature. '
            'Leave no placeholders in this field.'
        )

    class Meta:
        model = QueuedMail
        fields = ['cc', 'bcc', 'subject', 'text']
the-stack_0_14236
# -*- coding: utf-8 -*- import hashlib, json, os, sys, socket, traceback, time, struct, collections from datetime import datetime, timedelta from struct import calcsize from google.protobuf.json_format import MessageToJson from threading import RLock from futu.common.conn_mng import * from futu.common.sys_config import * from futu.common.pbjson import json2pb ProtoInfo = collections.namedtuple('ProtoInfo', ['proto_id', 'serial_no']) def get_message_head_len(): return calcsize(MESSAGE_HEAD_FMT) def check_date_str_format(s, default_time="00:00:00"): """Check the format of date string""" try: str_fmt = s if ":" not in s: str_fmt = '{} {}'.format(s, default_time) dt_obj = datetime.strptime(str_fmt, "%Y-%m-%d %H:%M:%S") return RET_OK, dt_obj except ValueError: error_str = ERROR_STR_PREFIX + "wrong time or time format" return RET_ERROR, error_str def normalize_date_format(date_str, default_time="00:00:00"): """normalize the format of data""" ret_code, ret_data = check_date_str_format(date_str, default_time) if ret_code != RET_OK: return ret_code, ret_data return RET_OK, ret_data.strftime("%Y-%m-%d %H:%M:%S") def normalize_start_end_date(start, end, delta_days=0, default_time_start="00:00:00", default_time_end="23:59:59", prefer_end_now=True): """ :param start: :param end: :param delta_days: :param default_time_start: :param default_time_end: :param prefer_end_now: 为True时,当start和end都为None时,end设为当前时间,为False则start设为当前时间 :return: """ if start is not None and is_str(start) is False: error_str = ERROR_STR_PREFIX + "the type of start param is wrong" return RET_ERROR, error_str, None, None if end is not None and is_str(end) is False: error_str = ERROR_STR_PREFIX + "the type of end param is wrong" return RET_ERROR, error_str, None, None dt_start = None dt_end = None delta = timedelta(days=delta_days) hour_end, min_end, sec_end = [int(x) for x in default_time_end.split(':')] hour_start, min_start, sec_start = [int(x) for x in default_time_start.split(':')] if start: ret_code, ret_data = check_date_str_format(start, default_time_start) if ret_code != RET_OK: return ret_code, ret_data, start, end dt_start = ret_data if end: ret_code, ret_data = check_date_str_format(end, default_time_end) if ret_code != RET_OK: return ret_code, ret_data, start, end dt_end = ret_data if end and not start: dt_tmp = dt_end - delta dt_start = datetime(year=dt_tmp.year, month=dt_tmp.month, day=dt_tmp.day, hour=hour_start, minute=min_start, second=sec_start) if start and not end: dt_tmp = dt_start + delta dt_end = datetime(year=dt_tmp.year, month=dt_tmp.month, day=dt_tmp.day, hour=hour_end, minute=min_end, second=sec_end) if not start and not end: if prefer_end_now: dt_now = datetime.now() dt_end = datetime(year=dt_now.year, month=dt_now.month, day=dt_now.day, hour=hour_end, minute=min_end, second=sec_end) dt_tmp = dt_end - delta dt_start = datetime(year=dt_tmp.year, month=dt_tmp.month, day=dt_tmp.day, hour=hour_start, minute=min_start, second=sec_start) else: dt_now = datetime.now() dt_start = datetime(year=dt_now.year, month=dt_now.month, day=dt_now.day, hour=hour_start, minute=min_start, second=sec_start) dt_tmp = dt_start + delta dt_end = datetime(year=dt_tmp.year, month=dt_tmp.month, day=dt_tmp.day, hour=hour_end, minute=min_end, second=sec_end) start = dt_start.strftime("%Y-%m-%d %H:%M:%S") end = dt_end.strftime("%Y-%m-%d %H:%M:%S") return RET_OK, '', start, end def extract_pls_rsp(rsp_str): """Extract the response of PLS""" try: rsp = json.loads(rsp_str) except ValueError: traceback.print_exc() err = sys.exc_info()[1] 
err_str = ERROR_STR_PREFIX + str(err) return RET_ERROR, err_str, None error_code = int(rsp['retType']) if error_code != 1: error_str = ERROR_STR_PREFIX + rsp['retMsg'] return RET_ERROR, error_str, None return RET_OK, "", rsp def split_stock_str(stock_str_param): """split the stock string""" stock_str = str(stock_str_param) split_loc = stock_str.find(".") '''do not use the built-in split function in python. The built-in function cannot handle some stock strings correctly. for instance, US..DJI, where the dot . itself is a part of original code''' if 0 <= split_loc < len( stock_str) - 1 and stock_str[0:split_loc] in MKT_MAP: market_str = stock_str[0:split_loc] market_code = MKT_MAP[market_str] partial_stock_str = stock_str[split_loc + 1:] return RET_OK, (market_code, partial_stock_str) else: error_str = ERROR_STR_PREFIX + "format of %s is wrong. (US.AAPL, HK.00700, SZ.000001)" % stock_str return RET_ERROR, error_str def merge_qot_mkt_stock_str(qot_mkt, partial_stock_str): """ Merge the string of stocks :param market: market code :param partial_stock_str: original stock code string. i.e. "AAPL","00700", "000001" :return: unified representation of a stock code. i.e. "US.AAPL", "HK.00700", "SZ.000001" """ market_str = QUOTE.REV_MKT_MAP[qot_mkt] stock_str = '.'.join([market_str, partial_stock_str]) return stock_str def merge_trd_mkt_stock_str(trd_mkt, partial_stock_str): """ Merge the string of stocks :param market: market code :param partial_stock_str: original stock code string. i.e. "AAPL","00700", "000001" :return: unified representation of a stock code. i.e. "US.AAPL", "HK.00700", "SZ.000001" """ mkt_qot = Market.NONE mkt = TRADE.REV_TRD_MKT_MAP[trd_mkt] if trd_mkt in TRADE.REV_TRD_MKT_MAP else TrdMarket.NONE if mkt == TrdMarket.HK: mkt_qot = Market.HK elif mkt == TrdMarket.US: mkt_qot = Market.US elif mkt == TrdMarket.HKCC or mkt == TrdMarket.CN: if partial_stock_str.startswith('6') or partial_stock_str.startswith('9'): mkt_qot = Market.SH else: mkt_qot = Market.SZ else: raise Exception("merge_trd_mkt_stock_str: unknown trd_mkt.") return merge_qot_mkt_stock_str(MKT_MAP[mkt_qot], partial_stock_str) def str2binary(s): """ Transfer string to binary :param s: string content to be transformed to binary :return: binary """ return s.encode('utf-8') def is_str(obj): if sys.version_info.major == 3: return isinstance(obj, str) or isinstance(obj, bytes) else: return isinstance(obj, basestring) def price_to_str_int1000(price): return str(int(round(float(price) * 1000, 0))) if str(price) is not '' else '' # 1000*int price to float val def int1000_price_to_float(price): return round(float(price) / 1000.0, 3) if str(price) is not '' else float(0) # 10^9 int price to float val def int10_9_price_to_float(price): return round(float(price) / float(10**9), 3) if str(price) is not '' else float(0) # list 参数除重及规整 def unique_and_normalize_list(lst): ret = [] if not lst: return ret tmp = lst if isinstance(lst, list) else [lst] [ret.append(x) for x in tmp if x not in ret] return ret def md5_transform(raw_str): h1 = hashlib.md5() h1.update(raw_str.encode(encoding='utf-8')) return h1.hexdigest() g_unique_id = int(time.time() % 10000) g_unique_lock = RLock() def get_unique_id32(): global g_unique_id with g_unique_lock: g_unique_id += 1 if g_unique_id >= 4294967295: g_unique_id = int(time.time() % 10000) ret_id = g_unique_id return ret_id class ProtobufMap(dict): created_protobuf_map = {} def __init__(self): """ InitConnect = 1001 # 初始化连接 """ from futu.common.pb.InitConnect_pb2 import Response 
ProtobufMap.created_protobuf_map[ProtoId.InitConnect] = Response() """ GetGlobalState = 1002 # 获取全局状态 """ from futu.common.pb.GetGlobalState_pb2 import Response ProtobufMap.created_protobuf_map[ProtoId.GetGlobalState] = Response() """ Notify = 1003 # 通知推送 """ from futu.common.pb.Notify_pb2 import Response ProtobufMap.created_protobuf_map[ProtoId.Notify] = Response() """ KeepAlive = 1004 # 通知推送 """ from futu.common.pb.KeepAlive_pb2 import Response ProtobufMap.created_protobuf_map[ProtoId.KeepAlive] = Response() """ GetUserInfo = 1005 # 获取全局状态 """ from futu.common.pb.GetUserInfo_pb2 import Response ProtobufMap.created_protobuf_map[ProtoId.GetUserInfo] = Response() """ GetUserInfo = 1006 # 获取用户信息 """ from futu.common.pb.Verification_pb2 import Response ProtobufMap.created_protobuf_map[ProtoId.Verification] = Response() """ Trd_GetAccList = 2001 # 获取业务账户列表 """ from futu.common.pb.Trd_GetAccList_pb2 import Response ProtobufMap.created_protobuf_map[ProtoId.Trd_GetAccList] = Response() """ Trd_UnlockTrade = 2005 # 解锁或锁定交易 """ from futu.common.pb.Trd_UnlockTrade_pb2 import Response ProtobufMap.created_protobuf_map[ProtoId.Trd_UnlockTrade] = Response() """ Trd_SubAccPush = 2008 # 订阅业务账户的交易推送数据 """ from futu.common.pb.Trd_SubAccPush_pb2 import Response ProtobufMap.created_protobuf_map[ProtoId.Trd_SubAccPush] = Response() """ Trd_GetFunds = 2101 # 获取账户资金 """ from futu.common.pb.Trd_GetFunds_pb2 import Response ProtobufMap.created_protobuf_map[ProtoId.Trd_GetFunds] = Response() """ Trd_GetPositionList = 2102 # 获取账户持仓 """ from futu.common.pb.Trd_GetPositionList_pb2 import Response ProtobufMap.created_protobuf_map[ProtoId.Trd_GetPositionList] = Response() """ Trd_GetOrderList = 2201 # 获取订单列表 """ from futu.common.pb.Trd_GetOrderList_pb2 import Response ProtobufMap.created_protobuf_map[ProtoId.Trd_GetOrderList] = Response() """ Trd_PlaceOrder = 2202 # 下单 """ from futu.common.pb.Trd_PlaceOrder_pb2 import Response ProtobufMap.created_protobuf_map[ProtoId.Trd_PlaceOrder] = Response() """ Trd_ModifyOrder = 2205 # 修改订单 """ from futu.common.pb.Trd_ModifyOrder_pb2 import Response ProtobufMap.created_protobuf_map[ProtoId.Trd_ModifyOrder] = Response() """ Trd_UpdateOrder = 2208 # 订单状态变动通知(推送) """ from futu.common.pb.Trd_UpdateOrder_pb2 import Response ProtobufMap.created_protobuf_map[ProtoId.Trd_UpdateOrder] = Response() """ Trd_GetOrderFillList = 2211 # 获取成交列表 """ from futu.common.pb.Trd_GetOrderFillList_pb2 import Response ProtobufMap.created_protobuf_map[ProtoId.Trd_GetOrderFillList] = Response() """ Trd_UpdateOrderFill = 2218 # 成交通知(推送) """ from futu.common.pb.Trd_UpdateOrderFill_pb2 import Response ProtobufMap.created_protobuf_map[ProtoId.Trd_UpdateOrderFill] = Response() """ Trd_GetHistoryOrderList = 2221 # 获取历史订单列表 """ from futu.common.pb.Trd_GetHistoryOrderList_pb2 import Response ProtobufMap.created_protobuf_map[ProtoId.Trd_GetHistoryOrderList] = Response() """ Trd_GetHistoryOrderFillList = 2222 # 获取历史成交列表 """ from futu.common.pb.Trd_GetHistoryOrderFillList_pb2 import Response ProtobufMap.created_protobuf_map[ProtoId.Trd_GetHistoryOrderFillList] = Response() """ Qot_Sub = 3001 # 订阅或者反订阅 """ from futu.common.pb.Qot_Sub_pb2 import Response ProtobufMap.created_protobuf_map[ProtoId.Qot_Sub] = Response() """ Qot_RegQotPush = 3002 # 注册推送 """ from futu.common.pb.Qot_RegQotPush_pb2 import Response ProtobufMap.created_protobuf_map[ProtoId.Qot_RegQotPush] = Response() """ Qot_GetSubInfo = 3003 # 获取订阅信息 """ from futu.common.pb.Qot_GetSubInfo_pb2 import Response 
ProtobufMap.created_protobuf_map[ProtoId.Qot_GetSubInfo] = Response() """ Qot_GetBasicQot = 3004 # 获取股票基本行情 """ from futu.common.pb.Qot_GetBasicQot_pb2 import Response ProtobufMap.created_protobuf_map[ProtoId.Qot_GetBasicQot] = Response() """ Qot_UpdateBasicQot = 3005 # 推送股票基本行情 """ from futu.common.pb.Qot_UpdateBasicQot_pb2 import Response ProtobufMap.created_protobuf_map[ProtoId.Qot_UpdateBasicQot] = Response() """ Qot_GetKL = 3006 # 获取K线 """ from futu.common.pb.Qot_GetKL_pb2 import Response ProtobufMap.created_protobuf_map[ProtoId.Qot_GetKL] = Response() """ Qot_UpdateKL = 3007 # 推送K线 """ from futu.common.pb.Qot_UpdateKL_pb2 import Response ProtobufMap.created_protobuf_map[ProtoId.Qot_UpdateKL] = Response() """ Qot_GetRT = 3008 # 获取分时 """ from futu.common.pb.Qot_GetRT_pb2 import Response ProtobufMap.created_protobuf_map[ProtoId.Qot_GetRT] = Response() """ Qot_UpdateRT = 3009 # 推送分时 """ from futu.common.pb.Qot_UpdateRT_pb2 import Response ProtobufMap.created_protobuf_map[ProtoId.Qot_UpdateRT] = Response() """ Qot_GetTicker = 3010 # 获取逐笔 """ from futu.common.pb.Qot_GetTicker_pb2 import Response ProtobufMap.created_protobuf_map[ProtoId.Qot_GetTicker] = Response() """ Qot_UpdateTicker = 3011 # 推送逐笔 """ from futu.common.pb.Qot_UpdateTicker_pb2 import Response ProtobufMap.created_protobuf_map[ProtoId.Qot_UpdateTicker] = Response() """ Qot_GetOrderBook = 3012 # 获取买卖盘 """ from futu.common.pb.Qot_GetOrderBook_pb2 import Response ProtobufMap.created_protobuf_map[ProtoId.Qot_GetOrderBook] = Response() """ Qot_UpdateOrderBook = 3013 # 推送买卖盘 """ from futu.common.pb.Qot_UpdateOrderBook_pb2 import Response ProtobufMap.created_protobuf_map[ProtoId.Qot_UpdateOrderBook] = Response() """ Qot_GetBroker = 3014 # 获取经纪队列 """ from futu.common.pb.Qot_GetBroker_pb2 import Response ProtobufMap.created_protobuf_map[ProtoId.Qot_GetBroker] = Response() """ Qot_UpdateBroker = 3015 # 推送经纪队列 """ from futu.common.pb.Qot_UpdateBroker_pb2 import Response ProtobufMap.created_protobuf_map[ProtoId.Qot_UpdateBroker] = Response() """ Qot_GetHistoryKL = 3100 # 获取历史K线 """ from futu.common.pb.Qot_GetHistoryKL_pb2 import Response ProtobufMap.created_protobuf_map[ProtoId.Qot_GetHistoryKL] = Response() """ Qot_GetHistoryKLPoints = 3101 # 获取多只股票历史单点K线 """ from futu.common.pb.Qot_GetHistoryKLPoints_pb2 import Response ProtobufMap.created_protobuf_map[ProtoId.Qot_GetHistoryKLPoints] = Response() """ Qot_GetRehab = 3102 # 获取复权信息 """ from futu.common.pb.Qot_GetRehab_pb2 import Response ProtobufMap.created_protobuf_map[ProtoId.Qot_GetRehab] = Response() """ Qot_GetTradeDate = 3200 # 获取市场交易日 """ from futu.common.pb.Qot_GetTradeDate_pb2 import Response ProtobufMap.created_protobuf_map[ProtoId.Qot_GetTradeDate] = Response() """ Qot_GetSuspend = 3201 # 获取股票停牌信息 """ from futu.common.pb.Qot_GetSuspend_pb2 import Response ProtobufMap.created_protobuf_map[ProtoId.Qot_GetSuspend] = Response() """ Qot_GetStaticInfo = 3202 # 获取股票列表 """ from futu.common.pb.Qot_GetStaticInfo_pb2 import Response ProtobufMap.created_protobuf_map[ProtoId.Qot_GetStaticInfo] = Response() """ Qot_GetSecuritySnapshot = 3203 # 获取股票快照 """ from futu.common.pb.Qot_GetSecuritySnapshot_pb2 import Response ProtobufMap.created_protobuf_map[ProtoId.Qot_GetSecuritySnapshot] = Response() """ Qot_GetPlateSet = 3204 # 获取板块集合下的板块 """ from futu.common.pb.Qot_GetPlateSet_pb2 import Response ProtobufMap.created_protobuf_map[ProtoId.Qot_GetPlateSet] = Response() """ Qot_GetPlateSecurity = 3205 # 获取板块下的股票 """ from futu.common.pb.Qot_GetPlateSecurity_pb2 import Response 
ProtobufMap.created_protobuf_map[ProtoId.Qot_GetPlateSecurity] = Response() """ Trd_GetMaxTrdQtys = 2111 查询最大买卖数量 """ from futu.common.pb.Trd_GetMaxTrdQtys_pb2 import Response ProtobufMap.created_protobuf_map[ProtoId.Trd_GetAccTradingInfo] = Response() """ Qot_GetReference = 3206 获取正股相关股票,暂时只有窝轮""" from futu.common.pb.Qot_GetReference_pb2 import Response ProtobufMap.created_protobuf_map[ProtoId.Qot_GetReference] = Response() """ Qot_GetOwnerPlate = 3207 获取股票所属板块""" from futu.common.pb.Qot_GetOwnerPlate_pb2 import Response ProtobufMap.created_protobuf_map[ProtoId.Qot_GetOwnerPlate] = Response() """ Qot_GetOwnerPlate = 3208 获取高管持股变动""" from futu.common.pb.Qot_GetHoldingChangeList_pb2 import Response ProtobufMap.created_protobuf_map[ProtoId.Qot_GetHoldingChangeList] = Response() from futu.common.pb.Qot_RequestHistoryKL_pb2 import Response ProtobufMap.created_protobuf_map[ProtoId.Qot_RequestHistoryKL] = Response() from futu.common.pb.Qot_GetOptionChain_pb2 import Response ProtobufMap.created_protobuf_map[ProtoId.Qot_GetOptionChain] = Response() """ Qot_GetOrderDetail = 3016 获取委托明细 """ from futu.common.pb.Qot_GetOrderDetail_pb2 import Response ProtobufMap.created_protobuf_map[ProtoId.Qot_GetOrderDetail] = Response() """ Qot_UpdateOrderDetail = 3017 推送委托明细 """ from futu.common.pb.Qot_UpdateOrderDetail_pb2 import Response ProtobufMap.created_protobuf_map[ProtoId.Qot_UpdateOrderDetail] = Response() """ Qot_GetWarrantData = 3210 获取涡轮 """ from futu.common.pb.Qot_GetWarrant_pb2 import Response as GetWarrantPBResponse ProtobufMap.created_protobuf_map[ProtoId.Qot_GetWarrantData] = GetWarrantPBResponse() """ Qot_GetOrderDetail = 3104 已使用过的额度 """ from futu.common.pb.Qot_RequestHistoryKLQuota_pb2 import Response ProtobufMap.created_protobuf_map[ProtoId.Qot_RequestHistoryKLQuota] = Response() """获取除权信息""" from futu.common.pb.Qot_RequestRehab_pb2 import Response ProtobufMap.created_protobuf_map[ProtoId.Qot_RequestRehab] = Response() def __getitem__(self, key): return ProtobufMap.created_protobuf_map[key] if key in ProtobufMap.created_protobuf_map else None pb_map = ProtobufMap() def binary2str(b, proto_id, proto_fmt_type): """ Transfer binary to string :param b: binary content to be transformed to string :return: string """ if proto_fmt_type == ProtoFMT.Json: return b.decode('utf-8') elif proto_fmt_type == ProtoFMT.Protobuf: rsp = pb_map[proto_id] if IS_PY2: rsp.ParseFromString(str(b)) else: rsp.ParseFromString(b) return MessageToJson(rsp) else: raise Exception("binary2str: unknown proto format.") def binary2pb(b, proto_id, proto_fmt_type): """ Transfer binary to pb message :param b: binary content to be transformed to pb message :return: pb message """ rsp = pb_map[proto_id] if rsp is None: return None if proto_fmt_type == ProtoFMT.Json: return json2pb(type(rsp), b.decode('utf-8')) elif proto_fmt_type == ProtoFMT.Protobuf: rsp = type(rsp)() # logger.debug((proto_id)) if IS_PY2: rsp.ParseFromString(str(b)) else: rsp.ParseFromString(b) return rsp else: raise Exception("binary2str: unknown proto format.") def pack_pb_req(pb_req, proto_id, conn_id, serial_no=0): proto_fmt = SysConfig.get_proto_fmt() serial_no = serial_no if serial_no else get_unique_id32() is_encrypt = FutuConnMng.is_conn_encrypt(conn_id) if proto_fmt == ProtoFMT.Json: req_json = MessageToJson(pb_req) ret, msg, req = _joint_head(proto_id, proto_fmt, len(req_json), req_json.encode(), conn_id, serial_no, is_encrypt) return ret, msg, req elif proto_fmt == ProtoFMT.Protobuf: ret, msg, req = _joint_head(proto_id, proto_fmt, pb_req.ByteSize(), 
pb_req, conn_id, serial_no, is_encrypt) return ret, msg, req else: error_str = ERROR_STR_PREFIX + 'unknown protocol format, %d' % proto_fmt return RET_ERROR, error_str, None def _joint_head(proto_id, proto_fmt_type, body_len, str_body, conn_id, serial_no, is_encrypt): # sha20 = b'00000000000000000000' reserve8 = b'\x00\x00\x00\x00\x00\x00\x00\x00' if proto_fmt_type == ProtoFMT.Protobuf: str_body = str_body.SerializeToString() if type(str_body) is not bytes: str_body = bytes_utf8(str_body) sha20 = hashlib.sha1(str_body).digest() # init connect 需要用rsa加密 try: if proto_id == ProtoId.InitConnect: if SysConfig.INIT_RSA_FILE != '': str_body = RsaCrypt.encrypt(str_body) body_len = len(str_body) else: if is_encrypt: ret, msg, str_body = FutuConnMng.encrypt_conn_data(conn_id, str_body) body_len = len(str_body) if ret != RET_OK: return ret, msg, str_body except Exception as e: return RET_ERROR, str(e), None fmt = "%s%ds" % (MESSAGE_HEAD_FMT, body_len) bin_head = struct.pack(fmt, b'F', b'T', proto_id, proto_fmt_type, API_PROTO_VER, serial_no, body_len, sha20, reserve8, str_body) return RET_OK, "", bin_head def parse_head(head_bytes): head_dict = {} head_dict['head_1'], head_dict['head_2'], head_dict['proto_id'], \ head_dict['proto_fmt_type'], head_dict['proto_ver'], \ head_dict['serial_no'], head_dict['body_len'], head_dict['sha20'], \ head_dict['reserve8'], = struct.unpack(MESSAGE_HEAD_FMT, head_bytes) return head_dict def parse_proto_info(head_bytes): unpacked = struct.unpack(MESSAGE_HEAD_FMT, head_bytes) return ProtoInfo(unpacked[2], unpacked[5]) def decrypt_rsp_body(rsp_body, head_dict, conn_id, is_encrypt): ret_code = RET_OK msg = '' sha20 = head_dict['sha20'] proto_id = head_dict['proto_id'] if is_encrypt: try: if proto_id == ProtoId.InitConnect: rsp_body = RsaCrypt.decrypt(rsp_body) else: ret_code, msg, decrypt_data = FutuConnMng.decrypt_conn_data(conn_id, rsp_body) rsp_body = decrypt_data except Exception as e: msg = sys.exc_info()[1] ret_code = RET_ERROR # check sha20 if ret_code == RET_OK: sha20_check = hashlib.sha1(rsp_body).digest() if sha20_check != sha20: ret_code = RET_ERROR msg = "proto id:{} conn_id:{} check sha error!".format(proto_id, conn_id) return ret_code, msg, rsp_body def make_from_namedtuple(t, **kwargs): """ t是namedtuple,复制一份t,但其中部分字段更新为kwargs的值 :param t: :param kwargs: :return: """ d = t._asdict() d.update(kwargs) cls = type(t) return cls(**d)
the-stack_0_14237
#!/usr/bin/env python
# Lint as: python3
from absl.testing import absltest

from grr_response_core.lib.rdfvalues import osquery as rdf_osquery
from grr_response_server.gui.api_plugins import osquery as api_osquery


class UtilsTest(absltest.TestCase):
  """Test for osquery utils."""

  def testListToCSVBytes(self):
    output_bytes = api_osquery._LineToCsvBytes(["a", "b", "c", "d"])
    output_text = output_bytes.decode("utf-8")

    self.assertEqual("a,b,c,d\r\n", output_text)

  def testSomeTextToCsvBytes(self):
    table = rdf_osquery.OsqueryTable()
    table.header.columns.append(rdf_osquery.OsqueryColumn(name="A"))
    table.header.columns.append(rdf_osquery.OsqueryColumn(name="B"))
    table.rows.append(rdf_osquery.OsqueryRow(values=["1-A", "1-B"]))
    table.rows.append(rdf_osquery.OsqueryRow(values=["2-A", "2-B"]))
    result = rdf_osquery.OsqueryResult()
    result.table = table

    output_bytes = api_osquery._ParseToCsvBytes([result])
    output_text = list(map(lambda b: b.decode("utf-8"), output_bytes))

    self.assertListEqual(["A,B\r\n", "1-A,1-B\r\n", "2-A,2-B\r\n"], output_text)

  def testTextWithCommasToCsvBytes(self):
    table = rdf_osquery.OsqueryTable()
    table.header.columns.append(rdf_osquery.OsqueryColumn(name="c,o,l,u,m,n"))
    table.rows.append(rdf_osquery.OsqueryRow(values=["c,e,l,l"]))
    result = rdf_osquery.OsqueryResult()
    result.table = table

    output_bytes = api_osquery._ParseToCsvBytes([result])
    output_text = list(map(lambda b: b.decode("utf-8"), output_bytes))

    self.assertListEqual(["\"c,o,l,u,m,n\"\r\n", "\"c,e,l,l\"\r\n"], output_text)


if __name__ == "__main__":
  absltest.main()
the-stack_0_14239
# -*- coding: utf-8 -*-
"""
Package setup
"""
import sys
import os
import io

import setuptools
from setuptools import setup


def read(fname):
    with io.open(
        os.path.join(os.path.dirname(__file__), fname), encoding="utf-8"
    ) as _in:
        return _in.read()


if __name__ == "__main__":
    import versioneer

    with open("README.md", "r") as fh:
        long_description = fh.read()

    setup(
        name="kitchen",
        version=versioneer.get_version(),
        cmdclass=versioneer.get_cmdclass(),
        description="Manipulate counts matrix files and cook scRNA-seq data from command line",
        long_description=long_description,
        author="Cody Heiser",
        author_email="[email protected]",
        url="https://github.com/codyheiser/kitchen",
        install_requires=read("requirements.txt").splitlines(),
        packages=setuptools.find_packages(),
        classifiers=[
            "Programming Language :: Python :: 3",
            "Operating System :: OS Independent",
            "Intended Audience :: Developers",
            "Intended Audience :: Science/Research",
            "License :: OSI Approved :: MIT License",
            "Topic :: Scientific/Engineering",
        ],
        python_requires=">=3.6",
        entry_points={
            "console_scripts": ["kitchen = kitchen.kitchen:main"]
        },
    )
the-stack_0_14240
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""This example adds a text ad with ad parameters.

To get ad groups, run get_ad_groups.py. To get keywords, run add_keywords.py.

The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""

from googleads import adwords


AD_GROUP_ID = 'INSERT_AD_GROUP_ID_HERE'
CRITERION_ID = 'INSERT_KEYWORD_CRITERION_ID_HERE'


def main(client, ad_group_id, criterion_id):
  # Initialize appropriate service.
  ad_group_ad_service = client.GetService('AdGroupAdService',
                                          version='v201601')
  ad_param_service = client.GetService('AdParamService', version='v201601')

  # Construct operations for adding text ad object and add to an ad group.
  operations = [{
      'operator': 'ADD',
      'operand': {
          'xsi_type': 'AdGroupAd',
          'adGroupId': ad_group_id,
          'ad': {
              'xsi_type': 'TextAd',
              'finalUrls': ['http://www.example.com'],
              'displayUrl': 'example.com',
              'description1': 'Low-gravity fun for {param1:cheap}.',
              'description2': 'Only {param2:a few} seats left!',
              'headline': 'Luxury Mars Cruises'
          },
          'status': 'ENABLED'
      }
  }]
  ads = ad_group_ad_service.mutate(operations)['value']

  # Display results.
  for ad in ads:
    print ('Text ad with id \'%s\' was successfully added to an ad group with '
           'id \'%s\'.' % (ad['adGroupId'], ad['ad']['id']))

  # Construct operations for setting ad parameters.
  operations = [
      {
          'operator': 'SET',
          'operand': {
              'adGroupId': ad_group_id,
              'criterionId': criterion_id,
              'insertionText': u'£100',
              'paramIndex': '1'
          }
      },
      {
          'operator': 'SET',
          'operand': {
              'adGroupId': ad_group_id,
              'criterionId': criterion_id,
              'insertionText': '50',
              'paramIndex': '2'
          }
      }
  ]
  ad_params = ad_param_service.mutate(operations)

  # Display results.
  for ad_param in ad_params:
    print ('Ad parameter with text \'%s\' was successfully set for criterion '
           'with id \'%s\' and ad group id \'%s\'.' %
           (ad_param['insertionText'], ad_param['criterionId'],
            ad_param['adGroupId']))


if __name__ == '__main__':
  # Initialize client object.
  adwords_client = adwords.AdWordsClient.LoadFromStorage()

  main(adwords_client, AD_GROUP_ID, CRITERION_ID)
the-stack_0_14242
from ..ch09.adaptable_heap_priority_queue import AdaptableHeapPriorityQueue


def shortest_path_lengths(g, src):
    """Compute shortest-path distances from src to reachable vertices of g.

    Graph g can be undirected or directed, but must be weighted such that
    e.element() returns a numeric weight for each edge e.

    Return dictionary mapping each reachable vertex to its distance from src.
    """
    d = {}                                 # d[v] is upper bound from s to v
    cloud = {}                             # map reachable v to its d[v] value
    pq = AdaptableHeapPriorityQueue()      # vertex v will have key d[v]
    pqlocator = {}                         # map from vertex to its pq locator

    # for each vertex v of the graph, add an entry to the priority queue, with
    # the source having distance 0 and all others having infinite distance
    for v in g.vertices():
        if v is src:
            d[v] = 0
        else:
            d[v] = float('inf')            # syntax for positive infinity
        pqlocator[v] = pq.add(d[v], v)     # save locator for future updates

    while not pq.is_empty():
        key, u = pq.remove_min()
        cloud[u] = key                     # its correct d[u] value
        del pqlocator[u]                   # u is no longer in pq
        for e in g.incident_edges(u):      # outgoing edges (u,v)
            v = e.opposite(u)
            if v not in cloud:
                # perform relaxation step on edge (u,v)
                wgt = e.element()
                if d[u] + wgt < d[v]:                   # better path to v?
                    d[v] = d[u] + wgt                   # update the distance
                    pq.update(pqlocator[v], d[v], v)    # update the pq entry

    return cloud                           # only includes reachable vertices


def shortest_path_tree(g, s, d):
    """Reconstruct shortest-path tree rooted at vertex s, given distance map d.

    Return tree as a map from each reachable vertex v (other than s) to the
    edge e=(u,v) that is used to reach v from its parent u in the tree.
    """
    tree = {}
    for v in d:
        if v is not s:
            for e in g.incident_edges(v, False):
                u = e.opposite(v)
                wgt = e.element()
                if d[v] == d[u] + wgt:
                    tree[v] = e            # edge e is used to reach v
    return tree
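# ---------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of the original module). It
# assumes an adjacency-map Graph class with insert_vertex()/insert_edge(),
# such as the ch14 Graph from the same codebase; the import path, vertex
# labels, and weights below are assumptions, not taken from this file.
#
#   from .graph import Graph
#
#   g = Graph()
#   a, b, c = g.insert_vertex('A'), g.insert_vertex('B'), g.insert_vertex('C')
#   g.insert_edge(a, b, 7)
#   g.insert_edge(b, c, 2)
#   g.insert_edge(a, c, 12)
#
#   d = shortest_path_lengths(g, a)      # e.g. {a: 0, b: 7, c: 9}
#   tree = shortest_path_tree(g, a, d)   # maps b and c to the edges used
# ---------------------------------------------------------------------------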
the-stack_0_14243
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time   : 5/15/20 4:49 PM
# @File   : grover.py

# qubit number=4
# total number=12
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np

#thatsNoCode
from cirq.contrib.svg import SVGCircuit
# Symbols for the rotation angles in the QAOA circuit.


def make_circuit(n: int, input_qubit):
    c = cirq.Circuit()  # circuit begin

    c.append(cirq.H.on(input_qubit[0]))                        # number=1
    c.append(cirq.H.on(input_qubit[1]))                        # number=2
    c.append(cirq.CNOT.on(input_qubit[3], input_qubit[0]))     # number=6
    c.append(cirq.Z.on(input_qubit[3]))                        # number=7
    c.append(cirq.H.on(input_qubit[0]))                        # number=9
    c.append(cirq.CZ.on(input_qubit[3], input_qubit[0]))       # number=10
    c.append(cirq.H.on(input_qubit[0]))                        # number=11
    c.append(cirq.H.on(input_qubit[2]))                        # number=3
    c.append(cirq.H.on(input_qubit[3]))                        # number=4
    # circuit end

    c.append(cirq.measure(*input_qubit, key='result'))

    return c


def bitstring(bits):
    return ''.join(str(int(b)) for b in bits)


if __name__ == '__main__':
    qubit_count = 4

    input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
    circuit = make_circuit(qubit_count, input_qubits)
    circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')

    circuit_sample_count = 2000
    simulator = cirq.Simulator()
    result = simulator.run(circuit, repetitions=circuit_sample_count)

    frequencies = result.histogram(key='result', fold_func=bitstring)
    writefile = open("../data/startCirq145.csv", "w+")
    print(format(frequencies), file=writefile)
    print("results end", file=writefile)

    print(circuit.__len__(), file=writefile)
    print(circuit, file=writefile)

    writefile.close()
the-stack_0_14245
""" Parser and serializer for file formats supported by compare-locales library: https://hg.mozilla.org/l10n/compare-locales/ """ from __future__ import absolute_import import logging from collections import OrderedDict from compare_locales import ( parser, serializer, ) from pontoon.sync import SyncError from pontoon.sync.exceptions import ParseError from pontoon.sync.formats.base import ParsedResource from pontoon.sync.utils import create_parent_directory from pontoon.sync.vcs.models import VCSTranslation log = logging.getLogger(__name__) class CompareLocalesEntity(VCSTranslation): """ Represents an entity in a file handled by compare-locales. """ def __init__(self, key, string, comment, order): self.key = key self.source_string = string self.source_string_plural = '' self.strings = {None: self.source_string} if self.source_string is not None else {} self.comments = comment.val.split('\n') if comment else [] self.order = order self.fuzzy = False self.source = [] class CompareLocalesResource(ParsedResource): def __init__(self, path, source_resource=None): self.path = path self.entities = OrderedDict() # Preserve entity order. self.source_resource = source_resource try: self.parser = parser.getParser(self.path) except UserWarning as err: raise ParseError(err) self.parsed_objects = [] # A monolingual l10n file might not contain all entities, but the code # expects ParsedResource to contain representations of all of them. So # when parsing the l10n resource, we first create empty entity for each # source resource entity. if source_resource: for key, entity in source_resource.entities.items(): self.entities[key] = CompareLocalesEntity( entity.key, None, None, None, ) try: self.parser.readFile(self.path) except IOError as err: # If the file doesn't exist, but we have a source resource, # we can keep going, we'll just not have any translations. if source_resource: return else: raise ParseError(err) self.parsed_objects = list(self.parser.walk()) order = 0 for entity in self.parsed_objects: if isinstance(entity, parser.Entity): self.entities[entity.key] = CompareLocalesEntity( entity.key, entity.unwrap(), entity.pre_comment, order, ) order += 1 @property def translations(self): return sorted(self.entities.values(), key=lambda e: e.order) def save(self, locale): if not self.source_resource: raise SyncError( 'Cannot save resource {0}: No source resource given.' .format(self.path) ) # A dictionary of new translations new_l10n = { key: entity.strings[None] if entity.strings else None for key, entity in self.entities.items() } # Create parent folders if necessary create_parent_directory(self.path) with open(self.path, 'w') as output_file: log.debug('Saving file: %s', self.path) output_file.write( serializer.serialize( self.path, self.source_resource.parsed_objects, self.parsed_objects, new_l10n, ) ) def parse(path, source_path=None, locale=None): if source_path is not None: source_resource = CompareLocalesResource(source_path) else: source_resource = None return CompareLocalesResource(path, source_resource)
the-stack_0_14246
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
File: source/inputters/voc.py
"""
import hanlp

# Default word tokens
PAD_token = 0  # Used for padding short sentences
# SOS_token = 1  # Start-of-sentence token
# EOS_token = 2  # End-of-sentence token
# UNK_token = 3  # unknown token
#
# tokenizer = None
#
# def getTokenizer(tokenizer):

# Load the HanLP segmenter once at module level; it is used by Voc.add_sentence().
tokenizer = hanlp.load("PKU_NAME_MERGED_SIX_MONTHS_CONVSEG")


class Voc:
    """
    Voc vocabulary class.

    @param name: name of the vocabulary
    """

    def __init__(self, name):
        self.name = name
        self.trimmed = False
        self.word2index = {}
        self.word2count = {}
        self.index2word = {PAD_token: "PAD"}
        self.num_words = 4  # Count SOS, EOS, PAD, UNK

    def add_sentence(self, sentence):
        self.add_word(tokenizer(sentence))

    def add_word(self, word_list):
        for word in word_list:
            if word not in self.word2index:
                self.word2index[word] = self.num_words
                self.word2count[word] = 1
                self.index2word[self.num_words] = word
                self.num_words += 1
            else:
                self.word2count[word] += 1

    # Remove words below a certain count threshold
    def trim(self, min_count):
        if self.trimmed:
            return
        self.trimmed = True

        keep_words = []
        for k, v in self.word2count.items():
            if v >= min_count:
                keep_words.append(k)

        print('keep_words {} / {} = {:.4f}'.format(
            len(keep_words), len(self.word2index), len(keep_words) / len(self.word2index)
        ))

        # Reinitialize dictionaries
        self.word2index = {}
        self.word2count = {}
        self.index2word = {PAD_token: "PAD"}
        self.num_words = 4  # Count default tokens

        for word in keep_words:
            # Wrap in a list so add_word() iterates over words, not characters.
            self.add_word([word])

    def load(self, file_path):
        """
        Load the vocabulary from a file.

        @param file_path: path of the file
        @return:
        """
        with open(file_path, 'r', encoding='utf-8') as f:
            for line in f:
                attr = line.split()
                index = int(attr[0])
                word = attr[1]
                self.word2index[word] = index
                self.word2count[word] = 1
                self.index2word[index] = word
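# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original file); the vocabulary
# name, example sentence, and threshold below are made-up values:
#
#   voc = Voc("chat")
#   voc.add_sentence("今天天气怎么样")   # segmented by the HanLP tokenizer
#   voc.trim(min_count=2)               # drop rare words
#   print(voc.num_words, voc.word2index)
# ---------------------------------------------------------------------------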
the-stack_0_14247
from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals from builtins import range from builtins import super import mock import string import unittest import random from pprint import pprint from bhive import Hive from bhive.exceptions import ( InsufficientAuthorityError, MissingKeyError, InvalidWifError, WalletLocked ) from bhiveapi import exceptions from bhive.amount import Amount from bhive.witness import Witness from bhive.account import Account from bhive.instance import set_shared_hive_instance, shared_hive_instance from bhive.blockchain import Blockchain from bhive.block import Block from bhive.memo import Memo from bhive.transactionbuilder import TransactionBuilder from bhivebase.operations import Transfer from bhivegraphenebase.account import PasswordKey, PrivateKey, PublicKey from bhive.utils import parse_time, formatTimedelta from bhiveapi.rpcutils import NumRetriesReached from bhive.nodelist import NodeList # Py3 compatibility import sys core_unit = "STX" class Testcases(unittest.TestCase): @classmethod def setUpClass(cls): nodelist = NodeList() # hv = shared_hive_instance() # hv.config.refreshBackup() # nodes = nodelist.get_testnet() cls.nodes = nodelist.get_nodes() cls.bts = Hive( node=cls.nodes, nobroadcast=True, num_retries=10, expiration=120, ) # from getpass import getpass # self.bts.wallet.unlock(getpass()) cls.bts.set_default_account("bhive") # Test account "bhive" cls.active_key = "5Jt2wTfhUt5GkZHV1HYVfkEaJ6XnY8D2iA4qjtK9nnGXAhThM3w" cls.posting_key = "5Jh1Gtu2j4Yi16TfhoDmg8Qj3ULcgRi7A49JXdfUUTVPkaFaRKz" cls.memo_key = "5KPbCuocX26aMxN9CDPdUex4wCbfw9NoT5P7UhcqgDwxXa47bit" # Test account "bhive1" cls.active_key1 = "5Jo9SinzpdAiCDLDJVwuN7K5JcusKmzFnHpEAtPoBHaC1B5RDUd" cls.posting_key1 = "5JGNhDXuDLusTR3nbmpWAw4dcmE8WfSM8odzqcQ6mDhJHP8YkQo" cls.memo_key1 = "5KA2ddfAffjfRFoe1UhQjJtKnGsBn9xcsdPQTfMt1fQuErDAkWr" cls.active_private_key_of_bhive4 = '5JkZZEUWrDsu3pYF7aknSo7BLJx7VfxB3SaRtQaHhsPouDYjxzi' cls.active_private_key_of_bhive5 = '5Hvbm9VjRbd1B3ft8Lm81csaqQudwFwPGdiRKrCmTKcomFS3Z9J' def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) raise unittest.SkipTest() hv = self.bts hv.nobroadcast = True hv.wallet.wipe(True) hv.wallet.create("123") hv.wallet.unlock("123") hv.wallet.addPrivateKey(self.active_key1) hv.wallet.addPrivateKey(self.memo_key1) hv.wallet.addPrivateKey(self.posting_key1) hv.wallet.addPrivateKey(self.active_key) hv.wallet.addPrivateKey(self.memo_key) hv.wallet.addPrivateKey(self.posting_key) hv.wallet.addPrivateKey(self.active_private_key_of_bhive4) hv.wallet.addPrivateKey(self.active_private_key_of_bhive5) @classmethod def tearDownClass(cls): hv = shared_hive_instance() hv.config.recover_with_latest_backup() def test_wallet_keys(self): hv = self.bts hv.wallet.unlock("123") priv_key = hv.wallet.getPrivateKeyForPublicKey(str(PrivateKey(self.posting_key, prefix=hv.prefix).pubkey)) self.assertEqual(str(priv_key), self.posting_key) priv_key = hv.wallet.getKeyForAccount("bhive", "active") self.assertEqual(str(priv_key), self.active_key) priv_key = hv.wallet.getKeyForAccount("bhive1", "posting") self.assertEqual(str(priv_key), self.posting_key1) priv_key = hv.wallet.getPrivateKeyForPublicKey(str(PrivateKey(self.active_private_key_of_bhive4, prefix=hv.prefix).pubkey)) self.assertEqual(str(priv_key), self.active_private_key_of_bhive4) priv_key = hv.wallet.getKeyForAccount("bhive4", "active") self.assertEqual(str(priv_key), 
self.active_private_key_of_bhive4) priv_key = hv.wallet.getPrivateKeyForPublicKey(str(PrivateKey(self.active_private_key_of_bhive5, prefix=hv.prefix).pubkey)) self.assertEqual(str(priv_key), self.active_private_key_of_bhive5) priv_key = hv.wallet.getKeyForAccount("bhive5", "active") self.assertEqual(str(priv_key), self.active_private_key_of_bhive5) def test_transfer(self): bts = self.bts bts.nobroadcast = False bts.wallet.unlock("123") # bts.wallet.addPrivateKey(self.active_key) # bts.prefix ="STX" acc = Account("bhive", hive_instance=bts) tx = acc.transfer( "bhive1", 1.33, "HBD", memo="Foobar") self.assertEqual( tx["operations"][0][0], "transfer" ) self.assertEqual(len(tx['signatures']), 1) op = tx["operations"][0][1] self.assertIn("memo", op) self.assertEqual(op["from"], "bhive") self.assertEqual(op["to"], "bhive1") amount = Amount(op["amount"], hive_instance=bts) self.assertEqual(float(amount), 1.33) bts.nobroadcast = True def test_transfer_memo(self): bts = self.bts bts.nobroadcast = False bts.wallet.unlock("123") acc = Account("bhive", hive_instance=bts) tx = acc.transfer( "bhive1", 1.33, "HBD", memo="#Foobar") self.assertEqual( tx["operations"][0][0], "transfer" ) op = tx["operations"][0][1] self.assertIn("memo", op) self.assertIn("#", op["memo"]) m = Memo(from_account=op["from"], to_account=op["to"], hive_instance=bts) memo = m.decrypt(op["memo"]) self.assertEqual(memo, "Foobar") self.assertEqual(op["from"], "bhive") self.assertEqual(op["to"], "bhive1") amount = Amount(op["amount"], hive_instance=bts) self.assertEqual(float(amount), 1.33) bts.nobroadcast = True def test_transfer_1of1(self): hive = self.bts hive.nobroadcast = False tx = TransactionBuilder(use_condenser_api=True, hive_instance=hive) tx.appendOps(Transfer(**{"from": 'bhive', "to": 'bhive1', "amount": Amount("0.01 HIVE", hive_instance=hive), "memo": '1 of 1 transaction'})) self.assertEqual( tx["operations"][0]["type"], "transfer_operation" ) tx.appendWif(self.active_key) tx.sign() tx.sign() self.assertEqual(len(tx['signatures']), 1) tx.broadcast() hive.nobroadcast = True def test_transfer_2of2_simple(self): # Send a 2 of 2 transaction from elf which needs bhive4's cosign to send funds hive = self.bts hive.nobroadcast = False tx = TransactionBuilder(use_condenser_api=True, hive_instance=hive) tx.appendOps(Transfer(**{"from": 'bhive5', "to": 'bhive1', "amount": Amount("0.01 HIVE", hive_instance=hive), "memo": '2 of 2 simple transaction'})) tx.appendWif(self.active_private_key_of_bhive5) tx.sign() tx.clearWifs() tx.appendWif(self.active_private_key_of_bhive4) tx.sign(reconstruct_tx=False) self.assertEqual(len(tx['signatures']), 2) tx.broadcast() hive.nobroadcast = True def test_transfer_2of2_wallet(self): # Send a 2 of 2 transaction from bhive5 which needs bhive4's cosign to send # priv key of bhive5 and bhive4 are stored in the wallet # appendSigner fetches both keys and signs automatically with both keys. 
hive = self.bts hive.nobroadcast = False hive.wallet.unlock("123") tx = TransactionBuilder(use_condenser_api=True, hive_instance=hive) tx.appendOps(Transfer(**{"from": 'bhive5', "to": 'bhive1', "amount": Amount("0.01 HIVE", hive_instance=hive), "memo": '2 of 2 serialized/deserialized transaction'})) tx.appendSigner("bhive5", "active") tx.sign() self.assertEqual(len(tx['signatures']), 2) tx.broadcast() hive.nobroadcast = True def test_transfer_2of2_serialized_deserialized(self): # Send a 2 of 2 transaction from bhive5 which needs bhive4's cosign to send # funds but sign the transaction with bhive5's key and then serialize the transaction # and deserialize the transaction. After that, sign with bhive4's key. hive = self.bts hive.nobroadcast = False hive.wallet.unlock("123") # hive.wallet.removeAccount("bhive4") hive.wallet.removePrivateKeyFromPublicKey(str(PublicKey(self.active_private_key_of_bhive4, prefix=core_unit))) tx = TransactionBuilder(use_condenser_api=True, hive_instance=hive) tx.appendOps(Transfer(**{"from": 'bhive5', "to": 'bhive1', "amount": Amount("0.01 HIVE", hive_instance=hive), "memo": '2 of 2 serialized/deserialized transaction'})) tx.appendSigner("bhive5", "active") tx.addSigningInformation("bhive5", "active") tx.sign() tx.clearWifs() self.assertEqual(len(tx['signatures']), 1) # hive.wallet.removeAccount("bhive5") hive.wallet.removePrivateKeyFromPublicKey(str(PublicKey(self.active_private_key_of_bhive5, prefix=core_unit))) tx_json = tx.json() del tx new_tx = TransactionBuilder(tx=tx_json, hive_instance=hive) self.assertEqual(len(new_tx['signatures']), 1) hive.wallet.addPrivateKey(self.active_private_key_of_bhive4) new_tx.appendMissingSignatures() new_tx.sign(reconstruct_tx=False) self.assertEqual(len(new_tx['signatures']), 2) new_tx.broadcast() hive.nobroadcast = True def test_transfer_2of2_offline(self): # Send a 2 of 2 transaction from bhive5 which needs bhive4's cosign to send # funds but sign the transaction with bhive5's key and then serialize the transaction # and deserialize the transaction. After that, sign with bhive4's key. hive = self.bts hive.nobroadcast = False hive.wallet.unlock("123") # hive.wallet.removeAccount("bhive4") hive.wallet.removePrivateKeyFromPublicKey(str(PublicKey(self.active_private_key_of_bhive4, prefix=core_unit))) tx = TransactionBuilder(use_condenser_api=True, hive_instance=hive) tx.appendOps(Transfer(**{"from": 'bhive5', "to": 'bhive', "amount": Amount("0.01 HIVE", hive_instance=hive), "memo": '2 of 2 serialized/deserialized transaction'})) tx.appendSigner("bhive5", "active") tx.addSigningInformation("bhive5", "active") tx.sign() tx.clearWifs() self.assertEqual(len(tx['signatures']), 1) # hive.wallet.removeAccount("bhive5") hive.wallet.removePrivateKeyFromPublicKey(str(PublicKey(self.active_private_key_of_bhive5, prefix=core_unit))) hive.wallet.addPrivateKey(self.active_private_key_of_bhive4) tx.appendMissingSignatures() tx.sign(reconstruct_tx=False) self.assertEqual(len(tx['signatures']), 2) tx.broadcast() hive.nobroadcast = True hive.wallet.addPrivateKey(self.active_private_key_of_bhive5) def test_transfer_2of2_wif(self): nodelist = NodeList() # Send a 2 of 2 transaction from elf which needs bhive4's cosign to send # funds but sign the transaction with elf's key and then serialize the transaction # and deserialize the transaction. After that, sign with bhive4's key. 
hive = Hive( node=self.nodes, num_retries=10, keys=[self.active_private_key_of_bhive5], expiration=360, ) tx = TransactionBuilder(use_condenser_api=True, hive_instance=hive) tx.appendOps(Transfer(**{"from": 'bhive5', "to": 'bhive', "amount": Amount("0.01 HIVE", hive_instance=hive), "memo": '2 of 2 serialized/deserialized transaction'})) tx.appendSigner("bhive5", "active") tx.addSigningInformation("bhive5", "active") tx.sign() tx.clearWifs() self.assertEqual(len(tx['signatures']), 1) tx_json = tx.json() del hive del tx hive = Hive( node=self.nodes, num_retries=10, keys=[self.active_private_key_of_bhive4], expiration=360, ) new_tx = TransactionBuilder(tx=tx_json, hive_instance=hive) new_tx.appendMissingSignatures() new_tx.sign(reconstruct_tx=False) self.assertEqual(len(new_tx['signatures']), 2) new_tx.broadcast() def test_verifyAuthority(self): hv = self.bts hv.wallet.unlock("123") tx = TransactionBuilder(use_condenser_api=True, hive_instance=hv) tx.appendOps(Transfer(**{"from": "bhive", "to": "bhive1", "amount": Amount("1.300 HBD", hive_instance=hv), "memo": "Foobar"})) account = Account("bhive", hive_instance=hv) tx.appendSigner(account, "active") self.assertTrue(len(tx.wifs) > 0) tx.sign() tx.verify_authority() self.assertTrue(len(tx["signatures"]) > 0) def test_create_account(self): bts = self.bts name = ''.join(random.choice(string.ascii_lowercase) for _ in range(12)) key1 = PrivateKey() key2 = PrivateKey() key3 = PrivateKey() key4 = PrivateKey() key5 = PrivateKey() tx = bts.create_account( name, creator="bhive", owner_key=format(key1.pubkey, core_unit), active_key=format(key2.pubkey, core_unit), posting_key=format(key3.pubkey, core_unit), memo_key=format(key4.pubkey, core_unit), additional_owner_keys=[format(key5.pubkey, core_unit)], additional_active_keys=[format(key5.pubkey, core_unit)], additional_owner_accounts=["bhive1"], # 1.2.0 additional_active_accounts=["bhive1"], storekeys=False ) self.assertEqual( tx["operations"][0][0], "account_create" ) op = tx["operations"][0][1] role = "active" self.assertIn( format(key5.pubkey, core_unit), [x[0] for x in op[role]["key_auths"]]) self.assertIn( format(key5.pubkey, core_unit), [x[0] for x in op[role]["key_auths"]]) self.assertIn( "bhive1", [x[0] for x in op[role]["account_auths"]]) role = "owner" self.assertIn( format(key5.pubkey, core_unit), [x[0] for x in op[role]["key_auths"]]) self.assertIn( format(key5.pubkey, core_unit), [x[0] for x in op[role]["key_auths"]]) self.assertIn( "bhive1", [x[0] for x in op[role]["account_auths"]]) self.assertEqual( op["creator"], "bhive") def test_connect(self): nodelist = NodeList() self.bts.connect(node=self.nodes) bts = self.bts self.assertEqual(bts.prefix, "STX") def test_set_default_account(self): self.bts.set_default_account("bhive") def test_info(self): info = self.bts.info() for key in ['current_witness', 'head_block_id', 'head_block_number', 'id', 'last_irreversible_block_num', 'current_witness', 'total_pow', 'time']: self.assertTrue(key in info) def test_finalizeOps(self): bts = self.bts tx1 = bts.new_tx() tx2 = bts.new_tx() acc = Account("bhive", hive_instance=bts) acc.transfer("bhive1", 1, "HIVE", append_to=tx1) acc.transfer("bhive1", 2, "HIVE", append_to=tx2) acc.transfer("bhive1", 3, "HIVE", append_to=tx1) tx1 = tx1.json() tx2 = tx2.json() ops1 = tx1["operations"] ops2 = tx2["operations"] self.assertEqual(len(ops1), 2) self.assertEqual(len(ops2), 1) def test_weight_threshold(self): bts = self.bts auth = {'account_auths': [['test', 1]], 'extensions': [], 'key_auths': [ 
['STX55VCzsb47NZwWe5F3qyQKedX9iHBHMVVFSc96PDvV7wuj7W86n', 1], ['STX7GM9YXcsoAJAgKbqW2oVj7bnNXFNL4pk9NugqKWPmuhoEDbkDv', 1]], 'weight_threshold': 3} # threshold fine bts._test_weights_treshold(auth) auth = {'account_auths': [['test', 1]], 'extensions': [], 'key_auths': [ ['STX55VCzsb47NZwWe5F3qyQKedX9iHBHMVVFSc96PDvV7wuj7W86n', 1], ['STX7GM9YXcsoAJAgKbqW2oVj7bnNXFNL4pk9NugqKWPmuhoEDbkDv', 1]], 'weight_threshold': 4} # too high with self.assertRaises(ValueError): bts._test_weights_treshold(auth) def test_allow(self): bts = self.bts self.assertIn(bts.prefix, "STX") acc = Account("bhive", hive_instance=bts) self.assertIn(acc.hive.prefix, "STX") tx = acc.allow( "STX55VCzsb47NZwWe5F3qyQKedX9iHBHMVVFSc96PDvV7wuj7W86n", account="bhive", weight=1, threshold=1, permission="active", ) self.assertEqual( (tx["operations"][0][0]), "account_update" ) op = tx["operations"][0][1] self.assertIn("active", op) self.assertIn( ["STX55VCzsb47NZwWe5F3qyQKedX9iHBHMVVFSc96PDvV7wuj7W86n", '1'], op["active"]["key_auths"]) self.assertEqual(op["active"]["weight_threshold"], 1) def test_disallow(self): bts = self.bts acc = Account("bhive", hive_instance=bts) if sys.version > '3': _assertRaisesRegex = self.assertRaisesRegex else: _assertRaisesRegex = self.assertRaisesRegexp with _assertRaisesRegex(ValueError, ".*Changes nothing.*"): acc.disallow( "STX55VCzsb47NZwWe5F3qyQKedX9iHBHMVVFSc96PDvV7wuj7W86n", weight=1, threshold=1, permission="active" ) with _assertRaisesRegex(ValueError, ".*Changes nothing!.*"): acc.disallow( "STX6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", weight=1, threshold=1, permission="active" ) def test_update_memo_key(self): bts = self.bts bts.wallet.unlock("123") self.assertEqual(bts.prefix, "STX") acc = Account("bhive", hive_instance=bts) tx = acc.update_memo_key("STX55VCzsb47NZwWe5F3qyQKedX9iHBHMVVFSc96PDvV7wuj7W86n") self.assertEqual( (tx["operations"][0][0]), "account_update" ) op = tx["operations"][0][1] self.assertEqual( op["memo_key"], "STX55VCzsb47NZwWe5F3qyQKedX9iHBHMVVFSc96PDvV7wuj7W86n") def test_approvewitness(self): bts = self.bts w = Account("bhive", hive_instance=bts) tx = w.approvewitness("bhive1") self.assertEqual( (tx["operations"][0][0]), "account_witness_vote" ) op = tx["operations"][0][1] self.assertIn( "bhive1", op["witness"]) def test_appendWif(self): nodelist = NodeList() hv = Hive(node=self.nodes, nobroadcast=True, expiration=120, num_retries=10) tx = TransactionBuilder(use_condenser_api=True, hive_instance=hv) tx.appendOps(Transfer(**{"from": "bhive", "to": "bhive1", "amount": Amount("1 HIVE", hive_instance=hv), "memo": ""})) with self.assertRaises( MissingKeyError ): tx.sign() with self.assertRaises( InvalidWifError ): tx.appendWif("abcdefg") tx.appendWif(self.active_key) tx.sign() self.assertTrue(len(tx["signatures"]) > 0) def test_appendSigner(self): nodelist = NodeList() hv = Hive(node=self.nodes, keys=[self.active_key], nobroadcast=True, expiration=120, num_retries=10) tx = TransactionBuilder(use_condenser_api=True, hive_instance=hv) tx.appendOps(Transfer(**{"from": "bhive", "to": "bhive1", "amount": Amount("1 HIVE", hive_instance=hv), "memo": ""})) account = Account("bhive", hive_instance=hv) with self.assertRaises( AssertionError ): tx.appendSigner(account, "abcdefg") tx.appendSigner(account, "active") self.assertTrue(len(tx.wifs) > 0) tx.sign() self.assertTrue(len(tx["signatures"]) > 0) def test_verifyAuthorityException(self): nodelist = NodeList() hv = Hive(node=self.nodes, keys=[self.posting_key], nobroadcast=True, expiration=120, num_retries=10) tx = 
TransactionBuilder(use_condenser_api=True, hive_instance=hv) tx.appendOps(Transfer(**{"from": "bhive", "to": "bhive1", "amount": Amount("1 HIVE", hive_instance=hv), "memo": ""})) account = Account("bhive2", hive_instance=hv) tx.appendSigner(account, "active") tx.appendWif(self.posting_key) self.assertTrue(len(tx.wifs) > 0) tx.sign() with self.assertRaises( exceptions.MissingRequiredActiveAuthority ): tx.verify_authority() self.assertTrue(len(tx["signatures"]) > 0) def test_Transfer_broadcast(self): nodelist = NodeList() hv = Hive(node=self.nodes, keys=[self.active_key], nobroadcast=True, expiration=120, num_retries=10) tx = TransactionBuilder(use_condenser_api=True, expiration=10, hive_instance=hv) tx.appendOps(Transfer(**{"from": "bhive", "to": "bhive1", "amount": Amount("1 HIVE", hive_instance=hv), "memo": ""})) tx.appendSigner("bhive", "active") tx.sign() tx.broadcast() def test_TransactionConstructor(self): hv = self.bts opTransfer = Transfer(**{"from": "bhive", "to": "bhive1", "amount": Amount("1 HIVE", hive_instance=hv), "memo": ""}) tx1 = TransactionBuilder(use_condenser_api=True, hive_instance=hv) tx1.appendOps(opTransfer) tx = TransactionBuilder(tx1, hive_instance=hv) self.assertFalse(tx.is_empty()) self.assertTrue(len(tx.list_operations()) == 1) self.assertTrue(repr(tx) is not None) self.assertTrue(str(tx) is not None) account = Account("bhive", hive_instance=hv) tx.appendSigner(account, "active") self.assertTrue(len(tx.wifs) > 0) tx.sign() self.assertTrue(len(tx["signatures"]) > 0) def test_follow_active_key(self): nodelist = NodeList() hv = Hive(node=self.nodes, keys=[self.active_key], nobroadcast=True, expiration=120, num_retries=10) account = Account("bhive", hive_instance=hv) account.follow("bhive1") def test_follow_posting_key(self): nodelist = NodeList() hv = Hive(node=self.nodes, keys=[self.posting_key], nobroadcast=True, expiration=120, num_retries=10) account = Account("bhive", hive_instance=hv) account.follow("bhive1")
the-stack_0_14248
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from airflow.contrib.hooks.wasb_hook import WasbHook
from airflow.operators.sensors import BaseSensorOperator
from airflow.utils.decorators import apply_defaults


class WasbBlobSensor(BaseSensorOperator):
    """
    Waits for a blob to arrive on Azure Blob Storage.

    :param container_name: Name of the container.
    :type container_name: str
    :param blob_name: Name of the blob.
    :type blob_name: str
    :param wasb_conn_id: Reference to the wasb connection.
    :type wasb_conn_id: str
    :param check_options: Optional keyword arguments that
        `WasbHook.check_for_blob()` takes.
    :type check_options: dict
    """

    template_fields = ('container_name', 'blob_name')

    @apply_defaults
    def __init__(self, container_name, blob_name, wasb_conn_id='wasb_default',
                 check_options=None, *args, **kwargs):
        super(WasbBlobSensor, self).__init__(*args, **kwargs)
        if check_options is None:
            check_options = {}
        self.wasb_conn_id = wasb_conn_id
        self.container_name = container_name
        self.blob_name = blob_name
        self.check_options = check_options

    def poke(self, context):
        self.logger.info(
            'Poking for blob: {self.blob_name}\n'
            'in wasb://{self.container_name}'.format(**locals())
        )
        hook = WasbHook(wasb_conn_id=self.wasb_conn_id)
        return hook.check_for_blob(self.container_name, self.blob_name,
                                   **self.check_options)


class WasbPrefixSensor(BaseSensorOperator):
    """
    Waits for blobs matching a prefix to arrive on Azure Blob Storage.

    :param container_name: Name of the container.
    :type container_name: str
    :param prefix: Prefix of the blob.
    :type prefix: str
    :param wasb_conn_id: Reference to the wasb connection.
    :type wasb_conn_id: str
    :param check_options: Optional keyword arguments that
        `WasbHook.check_for_prefix()` takes.
    :type check_options: dict
    """

    template_fields = ('container_name', 'prefix')

    @apply_defaults
    def __init__(self, container_name, prefix, wasb_conn_id='wasb_default',
                 check_options=None, *args, **kwargs):
        super(WasbPrefixSensor, self).__init__(*args, **kwargs)
        if check_options is None:
            check_options = {}
        self.wasb_conn_id = wasb_conn_id
        self.container_name = container_name
        self.prefix = prefix
        self.check_options = check_options

    def poke(self, context):
        self.logger.info(
            'Poking for prefix: {self.prefix}\n'
            'in wasb://{self.container_name}'.format(**locals())
        )
        hook = WasbHook(wasb_conn_id=self.wasb_conn_id)
        return hook.check_for_prefix(self.container_name, self.prefix,
                                     **self.check_options)
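# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original file). The DAG id,
# dates, connection id, container, and blob names below are placeholders:
#
#   from datetime import datetime
#   from airflow import DAG
#
#   with DAG('wasb_example', start_date=datetime(2018, 1, 1),
#            schedule_interval='@daily') as dag:
#       wait_for_blob = WasbBlobSensor(
#           task_id='wait_for_blob',
#           container_name='my-container',
#           blob_name='data/input.csv',
#           wasb_conn_id='wasb_default',
#       )
# ---------------------------------------------------------------------------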
the-stack_0_14249
from typing import Dict, Optional, Tuple

from kale.types.blockchain_format.program import Program, INFINITE_COST
from kale.types.condition_opcodes import ConditionOpcode
from kale.types.spend_bundle import SpendBundle
from kale.util.condition_tools import conditions_dict_for_solution
from kale.wallet.cc_wallet import cc_utils
from kale.wallet.trade_record import TradeRecord
from kale.wallet.trading.trade_status import TradeStatus


def trade_status_ui_string(status: TradeStatus):
    if status is TradeStatus.PENDING_CONFIRM:
        return "Pending Confirmation"
    elif status is TradeStatus.CANCELED:
        return "Canceled"
    elif status is TradeStatus.CONFIRMED:
        return "Confirmed"
    elif status is TradeStatus.PENDING_CANCEL:
        return "Pending Cancellation"
    elif status is TradeStatus.FAILED:
        return "Failed"
    elif status is TradeStatus.PENDING_ACCEPT:
        return "Pending"


def trade_record_to_dict(record: TradeRecord) -> Dict:
    """Convenience function to return only part of trade record we care about and show correct status to the ui"""
    result = {}
    result["trade_id"] = record.trade_id.hex()
    result["sent"] = record.sent
    result["my_offer"] = record.my_offer
    result["created_at_time"] = record.created_at_time
    result["accepted_at_time"] = record.accepted_at_time
    result["confirmed_at_index"] = record.confirmed_at_index
    result["status"] = trade_status_ui_string(TradeStatus(record.status))
    success, offer_dict, error = get_discrepancies_for_spend_bundle(record.spend_bundle)
    if success is False or offer_dict is None:
        raise ValueError(error)
    result["offer_dict"] = offer_dict
    return result


# Returns the relative difference in value between the amount outputted by a puzzle and solution and a coin's amount
def get_output_discrepancy_for_puzzle_and_solution(coin, puzzle, solution):
    discrepancy = coin.amount - get_output_amount_for_puzzle_and_solution(puzzle, solution)
    return discrepancy


# Returns the amount of value outputted by a puzzle and solution
def get_output_amount_for_puzzle_and_solution(puzzle: Program, solution: Program) -> int:
    error, conditions, cost = conditions_dict_for_solution(puzzle, solution, INFINITE_COST)
    total = 0
    if conditions:
        for _ in conditions.get(ConditionOpcode.CREATE_COIN, []):
            total += Program.to(_.vars[1]).as_int()
    return total


def get_discrepancies_for_spend_bundle(
    trade_offer: SpendBundle,
) -> Tuple[bool, Optional[Dict], Optional[Exception]]:
    try:
        cc_discrepancies: Dict[str, int] = dict()
        for coinsol in trade_offer.coin_spends:
            puzzle: Program = Program.from_bytes(bytes(coinsol.puzzle_reveal))
            solution: Program = Program.from_bytes(bytes(coinsol.solution))

            # work out the deficits between coin amount and expected output for each
            r = cc_utils.uncurry_cc(puzzle)
            if r:
                # Calculate output amounts
                mod_hash, genesis_checker, inner_puzzle = r
                innersol = solution.first()

                total = get_output_amount_for_puzzle_and_solution(inner_puzzle, innersol)
                colour = bytes(genesis_checker).hex()
                if colour in cc_discrepancies:
                    cc_discrepancies[colour] += coinsol.coin.amount - total
                else:
                    cc_discrepancies[colour] = coinsol.coin.amount - total
            else:
                coin_amount = coinsol.coin.amount
                out_amount = get_output_amount_for_puzzle_and_solution(puzzle, solution)
                diff = coin_amount - out_amount
                if "kale" in cc_discrepancies:
                    cc_discrepancies["kale"] = cc_discrepancies["kale"] + diff
                else:
                    cc_discrepancies["kale"] = diff

        return True, cc_discrepancies, None
    except Exception as e:
        return False, None, e
the-stack_0_14250
import xarray as xr from matplotlib import pyplot as plt from matplotlib import dates as mdates def _get_labels(da, label): # Add legend label if label is None: try: # Set the label for each line so that they can # be returned by Legend.get_legend_handles_labels() label = da.attrs['label'] except KeyError: pass if not isinstance(label, list): label = [label] return label def plot(data, ax=None, labels=None, legend=True, title='', xaxis='on', xlabel=None, ylabel=None, **kwargs): # Make sure we can iterate over the data arrays, # not the data within them if isinstance(data, xr.DataArray): data = [data] if labels is None: labels = [None]*len(data) # Get a set of axes in which to plot if ax is None: ax = plt.axes() # Plot each data array lines = [] for da, label in zip(data, labels): da_lines = da.plot(ax=ax) da_labels = _get_labels(da, label) for da_line, da_label in zip(da_lines, da_labels): da_line.set_label(da_label) lines.append(*da_lines) # Annotate axes ax.set_title(title) if xaxis == 'on': ax.set_xlabel(xlabel) locator = mdates.AutoDateLocator() formatter = mdates.ConciseDateFormatter(locator) ax.xaxis.set_major_locator(locator) ax.xaxis.set_major_formatter(formatter) for tick in ax.get_xticklabels(): tick.set_rotation(45) else: ax.set_xticklabels([]) ax.set_xlabel('') if ylabel is None: try: ax.set_ylabel('{0}\n({1})'.format(data[0].attrs['ylabel'], data[0].attrs['units']) ) except KeyError: pass else: ax.set_ylabel(ylabel) # Add a legend if legend: # Create the legend outside the right-most axes leg = ax.legend(bbox_to_anchor=(1.05, 1), borderaxespad=0.0, frameon=False, handlelength=0, handletextpad=0, loc='upper left') # Color the text the same as the lines for line, text in zip(lines, leg.get_texts()): text.set_color(line.get_color()) return ax
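
# A self-contained sketch of calling the helper above: build a 1-D DataArray with a
# datetime coordinate plus the 'label', 'ylabel', and 'units' attributes the function
# looks for, then hand it to plot(). The variable names and attribute values are
# illustrative assumptions.
if __name__ == "__main__":
    import numpy as np
    import pandas as pd

    time = pd.date_range("2020-01-01", periods=48, freq="H")
    da = xr.DataArray(
        np.random.rand(48),
        coords={"time": time},
        dims="time",
        attrs={"label": "B_x", "ylabel": "Magnetic field", "units": "nT"},
    )

    ax = plot([da], title="Example time series")
    plt.show()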
the-stack_0_14251
#!/usr/bin/python from __future__ import (absolute_import, division, print_function) # Copyright 2019 Fortinet, Inc. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. __metaclass__ = type ANSIBLE_METADATA = {'status': ['preview'], 'supported_by': 'community', 'metadata_version': '1.1'} DOCUMENTATION = ''' --- module: fortios_wireless_controller_hotspot20_anqp_network_auth_type short_description: Configure network authentication type in Fortinet's FortiOS and FortiGate. description: - This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the user to set and modify wireless_controller_hotspot20 feature and anqp_network_auth_type category. Examples include all parameters and values need to be adjusted to datasources before usage. Tested with FOS v6.0.5 version_added: "2.9" author: - Miguel Angel Munoz (@mamunozgonzalez) - Nicolas Thomas (@thomnico) notes: - Requires fortiosapi library developed by Fortinet - Run as a local_action in your playbook requirements: - fortiosapi>=0.9.8 options: host: description: - FortiOS or FortiGate IP address. type: str required: false username: description: - FortiOS or FortiGate username. type: str required: false password: description: - FortiOS or FortiGate password. type: str default: "" vdom: description: - Virtual domain, among those defined previously. A vdom is a virtual instance of the FortiGate that can be configured and used as a different unit. type: str default: root https: description: - Indicates if the requests towards FortiGate must use HTTPS protocol. type: bool default: true ssl_verify: description: - Ensures FortiGate certificate must be verified by a proper CA. type: bool default: true state: description: - Indicates whether to create or remove the object. type: str required: true choices: - present - absent wireless_controller_hotspot20_anqp_network_auth_type: description: - Configure network authentication type. default: null type: dict suboptions: auth_type: description: - Network authentication type. type: str choices: - acceptance-of-terms - online-enrollment - http-redirection - dns-redirection name: description: - Authentication type name. required: true type: str url: description: - Redirect URL. type: str ''' EXAMPLES = ''' - hosts: localhost vars: host: "192.168.122.40" username: "admin" password: "" vdom: "root" ssl_verify: "False" tasks: - name: Configure network authentication type. 
fortios_wireless_controller_hotspot20_anqp_network_auth_type: host: "{{ host }}" username: "{{ username }}" password: "{{ password }}" vdom: "{{ vdom }}" https: "False" state: "present" wireless_controller_hotspot20_anqp_network_auth_type: auth_type: "acceptance-of-terms" name: "default_name_4" url: "myurl.com" ''' RETURN = ''' build: description: Build number of the fortigate image returned: always type: str sample: '1547' http_method: description: Last method used to provision the content into FortiGate returned: always type: str sample: 'PUT' http_status: description: Last result given by FortiGate on last operation applied returned: always type: str sample: "200" mkey: description: Master key (id) used in the last call to FortiGate returned: success type: str sample: "id" name: description: Name of the table used to fulfill the request returned: always type: str sample: "urlfilter" path: description: Path of the table used to fulfill the request returned: always type: str sample: "webfilter" revision: description: Internal revision number returned: always type: str sample: "17.0.2.10658" serial: description: Serial number of the unit returned: always type: str sample: "FGVMEVYYQT3AB5352" status: description: Indication of the operation's result returned: always type: str sample: "success" vdom: description: Virtual domain used returned: always type: str sample: "root" version: description: Version of the FortiGate returned: always type: str sample: "v5.6.3" ''' from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.connection import Connection from ansible.module_utils.network.fortios.fortios import FortiOSHandler from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG def login(data, fos): host = data['host'] username = data['username'] password = data['password'] ssl_verify = data['ssl_verify'] fos.debug('on') if 'https' in data and not data['https']: fos.https('off') else: fos.https('on') fos.login(host, username, password, verify=ssl_verify) def filter_wireless_controller_hotspot20_anqp_network_auth_type_data(json): option_list = ['auth_type', 'name', 'url'] dictionary = {} for attribute in option_list: if attribute in json and json[attribute] is not None: dictionary[attribute] = json[attribute] return dictionary def underscore_to_hyphen(data): if isinstance(data, list): for elem in data: elem = underscore_to_hyphen(elem) elif isinstance(data, dict): new_data = {} for k, v in data.items(): new_data[k.replace('_', '-')] = underscore_to_hyphen(v) data = new_data return data def wireless_controller_hotspot20_anqp_network_auth_type(data, fos): vdom = data['vdom'] state = data['state'] wireless_controller_hotspot20_anqp_network_auth_type_data = data['wireless_controller_hotspot20_anqp_network_auth_type'] filtered_data = \ underscore_to_hyphen(filter_wireless_controller_hotspot20_anqp_network_auth_type_data(wireless_controller_hotspot20_anqp_network_auth_type_data)) if state == "present": return fos.set('wireless-controller.hotspot20', 'anqp-network-auth-type', data=filtered_data, vdom=vdom) elif state == "absent": return fos.delete('wireless-controller.hotspot20', 'anqp-network-auth-type', mkey=filtered_data['name'], vdom=vdom) def is_successful_status(status): return status['status'] == "success" or \ status['http_method'] == "DELETE" and status['http_status'] == 404 def fortios_wireless_controller_hotspot20(data, fos): if data['wireless_controller_hotspot20_anqp_network_auth_type']: resp = 
wireless_controller_hotspot20_anqp_network_auth_type(data, fos) return not is_successful_status(resp), \ resp['status'] == "success", \ resp def main(): fields = { "host": {"required": False, "type": "str"}, "username": {"required": False, "type": "str"}, "password": {"required": False, "type": "str", "default": "", "no_log": True}, "vdom": {"required": False, "type": "str", "default": "root"}, "https": {"required": False, "type": "bool", "default": True}, "ssl_verify": {"required": False, "type": "bool", "default": True}, "state": {"required": True, "type": "str", "choices": ["present", "absent"]}, "wireless_controller_hotspot20_anqp_network_auth_type": { "required": False, "type": "dict", "default": None, "options": { "auth_type": {"required": False, "type": "str", "choices": ["acceptance-of-terms", "online-enrollment", "http-redirection", "dns-redirection"]}, "name": {"required": True, "type": "str"}, "url": {"required": False, "type": "str"} } } } module = AnsibleModule(argument_spec=fields, supports_check_mode=False) # legacy_mode refers to using fortiosapi instead of HTTPAPI legacy_mode = 'host' in module.params and module.params['host'] is not None and \ 'username' in module.params and module.params['username'] is not None and \ 'password' in module.params and module.params['password'] is not None if not legacy_mode: if module._socket_path: connection = Connection(module._socket_path) fos = FortiOSHandler(connection) is_error, has_changed, result = fortios_wireless_controller_hotspot20(module.params, fos) else: module.fail_json(**FAIL_SOCKET_MSG) else: try: from fortiosapi import FortiOSAPI except ImportError: module.fail_json(msg="fortiosapi module is required") fos = FortiOSAPI() login(module.params, fos) is_error, has_changed, result = fortios_wireless_controller_hotspot20(module.params, fos) fos.logout() if not is_error: module.exit_json(changed=has_changed, meta=result) else: module.fail_json(msg="Error in repo", meta=result) if __name__ == '__main__': main()
the-stack_0_14252
# -*- coding: utf-8 -*-
# @Time : 2022/3/31 13:52
# @Author : ZhaoXiangPeng
# @File : create_setting.py

import os
import shutil


class CreateSetting:
    def create(self):
        if os.path.exists('setting.py'):
            confirm = input("The setting file already exists. Overwrite it? (y/n). ")
            if confirm != "y":
                print("Overwrite cancelled, exiting.")
                return

        template_file_path = os.path.abspath(
            os.path.join(__file__, "../../../templates/project_template/setting.py")
        )
        shutil.copy(template_file_path, "./", follow_symlinks=False)
        print("Setting file generated successfully; if an old setting file (settings.py) exists, please delete it.")
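
# A minimal usage sketch, assuming the script is run from the project directory that
# should receive the generated setting.py (this entry point is hypothetical, not part
# of the original module).
if __name__ == '__main__':
    CreateSetting().create()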
the-stack_0_14255
# coding=utf-8
"""
Sample input used by test() below:
2 2
1 4 3 4
1 2 0 2
"""


def solver(n, init, m, nums):
    ret = []
    for i in nums:
        gap = 2**i
        init = reverseNum(init, gap)  # reverse every consecutive block of length gap
        cur = computeReversePair(init)
        ret.append(cur)
    return ret


def reverseNum(init, gap):
    ret = []
    for i in range(0, len(init), gap):
        c = init[i:i+gap][::-1]
        ret.extend(c)
    # print(ret, gap)
    return ret


def computeReversePair(init):
    # count inversion pairs (i < j with init[i] > init[j]) by brute force
    count = 0
    for i in range(len(init)-1):
        for j in range(i+1, len(init)):
            if init[i] > init[j]:
                count += 1
    return count


def test():
    n = 2
    init = [2,1,4,3]
    m = 4
    nums = [1,2,0,2]
    ret = solver(n, init, m, nums)
    print(ret)


if __name__ == '__main__':
    test()
the-stack_0_14257
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). from textwrap import dedent from typing import Dict from twitter.common.collections import OrderedSet from pants.option.config import Config from pants.testutil.test_base import TestBase from pants.util.contextutil import temporary_file class ConfigTest(TestBase): def _setup_config(self, config1_content: str, config2_content: str, *, suffix: str) -> Config: with temporary_file(binary_mode=False, suffix=suffix) as config1, \ temporary_file(binary_mode=False, suffix=suffix) as config2: config1.write(config1_content) config1.close() config2.write(config2_content) config2.close() parsed_config = Config.load( config_paths=[config1.name, config2.name], seed_values={"buildroot": self.build_root} ) assert [config1.name, config2.name] == parsed_config.sources() return parsed_config def setUp(self) -> None: ini1_content = dedent( """ [DEFAULT] name: foo answer: 42 scale: 1.2 path: /a/b/%(answer)s embed: %(path)s::foo disclaimer: Let it be known that. [a] list: [1, 2, 3, %(answer)s] list2: +[7, 8, 9] [b] preempt: True [b.nested] dict: { 'a': 1, 'b': %(answer)s, 'c': ['%(answer)s', '%(answer)s'], } [b.nested.nested-again] movie: inception """ ) ini2_content = dedent( """ [a] fast: True [b] preempt: False [c.child] no_values_in_parent: True [defined_section] """ ) self.config = self._setup_config(ini1_content, ini2_content, suffix=".ini") self.default_seed_values = Config._determine_seed_values( seed_values={"buildroot": self.build_root}, ) self.default_file1_values = { "name": "foo", "answer": "42", "scale": "1.2", "path": "/a/b/42", "embed": "/a/b/42::foo", "disclaimer": "\nLet it be known\nthat.", } self.expected_file1_options = { "a": { "list": "[1, 2, 3, 42]", "list2": "+[7, 8, 9]", }, "b": { "preempt": "True", }, "b.nested": { "dict": "{\n'a': 1,\n'b': 42,\n'c': ['42', '42'],\n}" }, "b.nested.nested-again": { "movie": "inception", }, } self.expected_file2_options: Dict[str, Dict[str, str]] = { "a": { "fast": "True", }, "b": { "preempt": "False", }, "c.child": { "no_values_in_parent": "True", }, "defined_section": {}, } self.expected_combined_values: Dict[str, Dict[str, str]] = { **self.expected_file1_options, **self.expected_file2_options, "a": { **self.expected_file2_options["a"], **self.expected_file1_options["a"], }, } def test_sections(self) -> None: expected_sections = list( OrderedSet([*self.expected_file2_options.keys(), *self.expected_file1_options.keys()]) ) assert self.config.sections() == expected_sections for section in expected_sections: assert self.config.has_section(section) is True # We should only look at explicitly defined sections. For example, if `cache.java` is defined # but `cache` is not, then `cache` should not be included in the sections. 
assert self.config.has_section('c') is False def test_has_option(self) -> None: # Check has all DEFAULT values for default_option in (*self.default_seed_values.keys(), *self.default_file1_values.keys()): assert self.config.has_option(section="DEFAULT", option=default_option) is True # Check every explicitly defined section has its options + the seed defaults for section, options in self.expected_combined_values.items(): for option in (*options, *self.default_seed_values): assert self.config.has_option(section=section, option=option) is True # Check every section for file1 also has file1's DEFAULT values for section in self.expected_file1_options: for option in self.default_file1_values: assert self.config.has_option(section=section, option=option) is True # Check that file1's DEFAULT values don't apply to sections only defined in file2 sections_only_in_file2 = set(self.expected_file2_options.keys()) - set( self.expected_file1_options.keys() ) for section in sections_only_in_file2: for option in self.default_file1_values: assert self.config.has_option(section=section, option=option) is False # Check that non-existent options are False nonexistent_options = { "DEFAULT": "fake", "a": "fake", "b": "fast", } for section, option in nonexistent_options.items(): assert self.config.has_option(section=section, option=option) is False def test_list_all_options(self) -> None: # This is used in `options_bootstrapper.py` to validate that every option is recognized. file1_config = self.config.configs()[1] file2_config = self.config.configs()[0] for section, options in self.expected_file1_options.items(): assert file1_config.values.options(section=section) == [ *options.keys(), *self.default_seed_values.keys(), *self.default_file1_values.keys(), ] for section, options in self.expected_file2_options.items(): assert file2_config.values.options(section=section) == [ *options.keys(), *self.default_seed_values.keys()] def test_default_values(self) -> None: # This is used in `options_bootstrapper.py` to ignore default values when validating options. file1_config = self.config.configs()[1] file2_config = self.config.configs()[0] # NB: string interpolation should only happen when calling _ConfigValues.get_value(). The # values for _ConfigValues.defaults() are not yet interpolated. 
default_file1_values_unexpanded = { **self.default_file1_values, "path": "/a/b/%(answer)s", "embed": "%(path)s::foo", } assert file1_config.values.defaults() == { **self.default_seed_values, **default_file1_values_unexpanded, } assert file2_config.values.defaults() == self.default_seed_values def test_get(self) -> None: # Check the DEFAULT section for option, value in {**self.default_seed_values, **self.default_file1_values}.items(): assert self.config.get(section="DEFAULT", option=option) == value # Check the combined values, including that each section has the default seed values for section, section_values in self.expected_combined_values.items(): for option, value in {**section_values, **self.default_seed_values}.items(): assert self.config.get(section=section, option=option) == value # Check that each section from file1 also has file1's default values for section in self.expected_file1_options: for option, value in self.default_file1_values.items(): assert self.config.get(section=section, option=option) == value def check_defaults(default: str) -> None: assert self.config.get(section='c', option='fast') is None assert self.config.get(section='c', option='preempt', default=None) is None assert self.config.get(section='c', option='jake', default=default) == default check_defaults('') check_defaults('42') def test_empty(self) -> None: config = Config.load([]) assert config.sections() == [] assert config.sources() == [] assert config.has_section("DEFAULT") is False assert config.has_option(section="DEFAULT", option="name") is False
the-stack_0_14259
# Copyright (c) 2017 The Verde Developers. # Distributed under the terms of the BSD 3-Clause License. # SPDX-License-Identifier: BSD-3-Clause # # This code is part of the Fatiando a Terra project (https://www.fatiando.org) # """ Test the utility functions. """ from unittest import mock import numpy as np import numpy.testing as npt import pytest import xarray as xr from scipy.spatial import cKDTree from .. import utils from ..coordinates import grid_coordinates, scatter_points from ..utils import ( dummy_jit, get_ndim_horizontal_coords, grid_to_table, kdtree, make_xarray_grid, meshgrid_from_1d, meshgrid_to_1d, parse_engine, partition_by_sum, ) def test_parse_engine(): "Check that it works for common input" assert parse_engine("numba") == "numba" assert parse_engine("numpy") == "numpy" with mock.patch.object(utils, "numba", None): assert parse_engine("auto") == "numpy" with mock.patch.object(utils, "numba", mock.MagicMock()): assert parse_engine("auto") == "numba" def test_parse_engine_fails(): "Check that the exception is raised for invalid engines" with pytest.raises(ValueError): parse_engine("some invalid engine") def test_dummy_jit(): "Make sure the dummy function raises an exception" @dummy_jit(target="cpt") def function(): "Some random function" return 0 with pytest.raises(RuntimeError): function() def test_kdtree(): "Test that the kdtree returned works for query" coords = grid_coordinates((-10, 0, 0, 20), spacing=1) for use_pykdtree in [True, False]: tree = kdtree(coords, use_pykdtree=use_pykdtree) dist, labels = tree.query(np.array([[-10, 0.1]])) assert labels.size == 1 assert labels[0] == 0 npt.assert_allclose(dist, 0.1) if not use_pykdtree: assert isinstance(tree, cKDTree) def test_grid_to_table_order(): "Check that coordinates are in the right order when converting to tables" lon, lat = grid_coordinates(region=(1, 10, -10, -1), shape=(3, 4)) data = lon ** 2 # If the DataArray is created with coords in an order that doesn't match # the dims (which is valid), we were getting it wrong because we were # relying on the order of the coords instead of dims. This test would have # caught that bug. grid = xr.DataArray( data=data, coords={"longitude": lon[0, :], "latitude": lat[:, 0]}, dims=("latitude", "longitude"), ).to_dataset(name="field") table = grid_to_table(grid) true_lat = [-10, -10, -10, -10, -5.5, -5.5, -5.5, -5.5, -1, -1, -1, -1] true_lon = [1, 4, 7, 10, 1, 4, 7, 10, 1, 4, 7, 10] true_field = [1, 16, 49, 100, 1, 16, 49, 100, 1, 16, 49, 100] npt.assert_allclose(true_lat, table.latitude) npt.assert_allclose(true_lon, table.longitude) npt.assert_allclose(true_field, table.field) def test_partition_by_sum_fails_size(): "Should raise an exception if given more parts than elements." 
with pytest.raises(ValueError) as error: partition_by_sum(np.arange(10), 11) assert "array of size 10 into 11 parts" in str(error) def test_partition_by_sum_fails_no_partitions(): "Should raise an exception if could not find unique partition points" with pytest.raises(ValueError) as error: partition_by_sum(np.arange(10), 8) assert "Could not find partition points" in str(error) def test_make_xarray_grid(): """ Check if xarray.Dataset is correctly created """ region = (-10, -5, 6, 10) spacing = 1 coordinates = grid_coordinates(region, spacing=spacing) data = np.ones_like(coordinates[0]) grid = make_xarray_grid(coordinates, data, data_names="dummy") npt.assert_allclose(grid.easting, [-10, -9, -8, -7, -6, -5]) npt.assert_allclose(grid.northing, [6, 7, 8, 9, 10]) npt.assert_allclose(grid.dummy, 1) assert grid.dummy.shape == (5, 6) # Change dims grid = make_xarray_grid( coordinates, data, data_names="dummy", dims=("latitude", "longitude") ) npt.assert_allclose(grid.longitude, [-10, -9, -8, -7, -6, -5]) npt.assert_allclose(grid.latitude, [6, 7, 8, 9, 10]) npt.assert_allclose(grid.dummy, 1) assert grid.dummy.shape == (5, 6) def test_make_xarray_grid_multiple_data(): """ Check if xarray.Dataset with multiple data is correctly created """ region = (-10, -5, 6, 10) spacing = 1 coordinates = grid_coordinates(region, spacing=spacing) data_arrays = tuple(i * np.ones_like(coordinates[0]) for i in range(1, 4)) data_names = list("data_{}".format(i) for i in range(1, 4)) dataset = make_xarray_grid(coordinates, data_arrays, data_names=data_names) npt.assert_allclose(dataset.easting, [-10, -9, -8, -7, -6, -5]) npt.assert_allclose(dataset.northing, [6, 7, 8, 9, 10]) for i in range(1, 4): npt.assert_allclose(dataset["data_{}".format(i)], i) assert dataset["data_{}".format(i)].shape == (5, 6) def test_make_xarray_grid_no_data(): """ Check if the function creates a xarray.Dataset with no data """ region = (-10, -5, 6, 10) spacing = 1 coordinates = grid_coordinates(region, spacing=spacing) dataset = make_xarray_grid(coordinates, data=None, data_names=None) # Check if no data is present in the grid assert len(dataset.data_vars) == 0 # Check if coordinates are in the grid npt.assert_allclose(dataset.easting, [-10, -9, -8, -7, -6, -5]) npt.assert_allclose(dataset.northing, [6, 7, 8, 9, 10]) def test_make_xarray_grid_extra_coords(): """ Check if xarray.Dataset with extra coords is correctly created """ region = (-10, -5, 6, 10) spacing = 1 extra_coords = [1, 2] coordinates = grid_coordinates(region, spacing=spacing, extra_coords=extra_coords) data = np.ones_like(coordinates[0]) dataset = make_xarray_grid( coordinates, data, data_names="dummy", extra_coords_names=["upward", "time"], ) npt.assert_allclose(dataset.easting, [-10, -9, -8, -7, -6, -5]) npt.assert_allclose(dataset.northing, [6, 7, 8, 9, 10]) npt.assert_allclose(dataset.upward, 1) npt.assert_allclose(dataset.time, 2) npt.assert_allclose(dataset.dummy, 1) assert dataset.dummy.shape == (5, 6) assert dataset.upward.shape == (5, 6) assert dataset.time.shape == (5, 6) def test_make_xarray_grid_invalid_names(): """ Check if errors are raise after invalid data names """ region = (-10, -5, 6, 10) spacing = 1 coordinates = grid_coordinates(region, spacing=spacing) # Single data, multiple data_name data = np.ones_like(coordinates[0]) with pytest.raises(ValueError): make_xarray_grid(coordinates, data, data_names=["bla_1", "bla_2"]) # data_names equal to None with pytest.raises(ValueError): make_xarray_grid(coordinates, data, data_names=None) # Multiple data, single 
data_name data = tuple(i * np.ones_like(coordinates[0]) for i in (1, 2)) with pytest.raises(ValueError): make_xarray_grid(coordinates, data, data_names="blabla") def test_make_xarray_grid_invalid_extra_coords(): """ Check if errors are raise after invalid extra coords """ region = (-10, -5, 6, 10) spacing = 1 # No extra coords, extra_coords_name should be ignored coordinates = grid_coordinates(region, spacing=spacing) data = np.ones_like(coordinates[0]) make_xarray_grid(coordinates, data, data_names="dummy", extra_coords_names="upward") # Single extra coords, extra_coords_name equal to None coordinates = grid_coordinates(region, spacing=spacing, extra_coords=1) data = np.ones_like(coordinates[0]) with pytest.raises(ValueError): make_xarray_grid(coordinates, data, data_names="dummy", extra_coords_names=None) # Multiple extra coords, single extra_coords_name as a str coordinates = grid_coordinates(region, spacing=spacing, extra_coords=[1, 2]) data = np.ones_like(coordinates[0]) with pytest.raises(ValueError): make_xarray_grid( coordinates, data, data_names="dummy", extra_coords_names="upward" ) # Multiple extra coords, multiple extra_coords_name but not equal coordinates = grid_coordinates(region, spacing=spacing, extra_coords=[1, 2, 3]) data = np.ones_like(coordinates[0]) with pytest.raises(ValueError): make_xarray_grid( coordinates, data, data_names="dummy", extra_coords_names=["upward", "time"] ) def test_make_xarray_grid_invalid_2d_coordinates(): """ Check if error is raised if invaild 2d coordinates array are passed """ region = (-10, -5, 6, 10) spacing = 1 easting, northing = grid_coordinates(region, spacing=spacing) # Change only one element of the easting array easting[2, 2] = -1000 data = np.ones_like(easting) with pytest.raises(ValueError): make_xarray_grid((easting, northing), data, data_names="dummy") def test_make_xarray_grid_coordinates_as_1d_arrays(): """ Check if it can handle coordinates as 1d-arrays """ region = (-10, -5, 6, 10) easting = np.linspace(*region[:2], 6, dtype=float) northing = np.linspace(*region[2:], 5, dtype=float) data = np.ones((northing.size, easting.size)) grid = make_xarray_grid((easting, northing), data, data_names="dummy") npt.assert_allclose(grid.easting, [-10, -9, -8, -7, -6, -5]) npt.assert_allclose(grid.northing, [6, 7, 8, 9, 10]) npt.assert_allclose(grid.dummy, 1) assert grid.dummy.shape == (5, 6) def test_make_xarray_grid_invalid_mixed_coordinates(): """ Check if error is raised when horizontal coordinates have mixed dimensions """ region = (-10, -5, 6, 10) spacing = 1 easting, northing = grid_coordinates(region, spacing=spacing) data = np.ones_like(easting) # easting is 1d, but northing is 2d with pytest.raises(ValueError): make_xarray_grid((easting[0, :], northing), data, data_names="dummy") # northing is 1d, but easting is 2d with pytest.raises(ValueError): make_xarray_grid((easting, northing[:, 0]), data, data_names="dummy") def test_meshgrid_to_1d_invalid(): """ Check if error is raised after invalid meshgrid """ region = (-10, -5, 6, 10) # Modify one element of easting easting, northing = grid_coordinates(region=region, spacing=1) easting[2, 2] = -9999 with pytest.raises(ValueError): meshgrid_to_1d((easting, northing)) # Modify one element of northing easting, northing = grid_coordinates(region=region, spacing=1) northing[2, 3] = -9999 with pytest.raises(ValueError): meshgrid_to_1d((easting, northing)) # Pass invalid shapes easting = np.arange(16).reshape(4, 4) northing = np.arange(9).reshape(3, 3) with pytest.raises(ValueError): 
meshgrid_to_1d((easting, northing)) # Pass 1d arrays easting = np.linspace(0, 10, 11) northing = np.linspace(-4, -4, 9) with pytest.raises(ValueError): meshgrid_to_1d((easting, northing)) def test_meshgrid_from_1d_invalid(): """ Check if error is raised after non 1d arrays passed to meshgrid_from_1d """ coordinates = grid_coordinates(region=(0, 10, -5, 5), shape=(11, 11)) with pytest.raises(ValueError): meshgrid_from_1d(coordinates) def test_check_ndim_easting_northing(): """ Test if check_ndim_easting_northing works as expected """ # Easting and northing as 1d arrays easting, northing = scatter_points((-5, 5, 0, 4), 50, random_state=42) assert get_ndim_horizontal_coords(easting, northing) == 1 # Easting and northing as 2d arrays easting, northing = grid_coordinates((-5, 5, 0, 4), spacing=1) assert get_ndim_horizontal_coords(easting, northing) == 2 # Check if error is raised after easting and northing with different ndims easting = np.linspace(0, 5, 6) northing = np.linspace(-5, 5, 16).reshape(4, 4) with pytest.raises(ValueError): get_ndim_horizontal_coords(easting, northing)
the-stack_0_14260
import warnings import pytest import numpy as np from numpy.testing import ( assert_, assert_equal, assert_raises, assert_warns, assert_array_equal, temppath, ) from numpy.core.tests._locales import CommaDecimalPointLocale LD_INFO = np.finfo(np.longdouble) longdouble_longer_than_double = (LD_INFO.eps < np.finfo(np.double).eps) _o = 1 + LD_INFO.eps string_to_longdouble_inaccurate = (_o != np.longdouble(repr(_o))) del _o def test_scalar_extraction(): """Confirm that extracting a value doesn't convert to python float""" o = 1 + LD_INFO.eps a = np.array([o, o, o]) assert_equal(a[1], o) # Conversions string -> long double # 0.1 not exactly representable in base 2 floating point. repr_precision = len(repr(np.longdouble(0.1))) # +2 from macro block starting around line 842 in scalartypes.c.src. @pytest.mark.skipif(LD_INFO.precision + 2 >= repr_precision, reason="repr precision not enough to show eps") def test_repr_roundtrip(): # We will only see eps in repr if within printing precision. o = 1 + LD_INFO.eps assert_equal(np.longdouble(repr(o)), o, "repr was %s" % repr(o)) @pytest.mark.skipif(string_to_longdouble_inaccurate, reason="Need strtold_l") def test_repr_roundtrip_bytes(): o = 1 + LD_INFO.eps assert_equal(np.longdouble(repr(o).encode("ascii")), o) @pytest.mark.skipif(string_to_longdouble_inaccurate, reason="Need strtold_l") @pytest.mark.parametrize("strtype", (np.str_, np.bytes_, str, bytes)) def test_array_and_stringlike_roundtrip(strtype): """ Test that string representations of long-double roundtrip both for array casting and scalar coercion, see also gh-15608. """ o = 1 + LD_INFO.eps if strtype in (np.bytes_, bytes): o_str = strtype(repr(o).encode("ascii")) else: o_str = strtype(repr(o)) # Test that `o` is correctly coerced from the string-like assert o == np.longdouble(o_str) # Test that arrays also roundtrip correctly: o_strarr = np.asarray([o] * 3, dtype=strtype) assert (o == o_strarr.astype(np.longdouble)).all() # And array coercion and casting to string give the same as scalar repr: assert (o_strarr == o_str).all() assert (np.asarray([o] * 3).astype(strtype) == o_str).all() def test_bogus_string(): assert_raises(ValueError, np.longdouble, "spam") assert_raises(ValueError, np.longdouble, "1.0 flub") @pytest.mark.skipif(string_to_longdouble_inaccurate, reason="Need strtold_l") def test_fromstring(): o = 1 + LD_INFO.eps s = (" " + repr(o))*5 a = np.array([o]*5) assert_equal(np.fromstring(s, sep=" ", dtype=np.longdouble), a, err_msg="reading '%s'" % s) def test_fromstring_complex(): for ctype in ["complex", "cdouble", "cfloat"]: # Check spacing between separator assert_equal(np.fromstring("1, 2 , 3 ,4", sep=",", dtype=ctype), np.array([1., 2., 3., 4.])) # Real component not specified assert_equal(np.fromstring("1j, -2j, 3j, 4e1j", sep=",", dtype=ctype), np.array([1.j, -2.j, 3.j, 40.j])) # Both components specified assert_equal(np.fromstring("1+1j,2-2j, -3+3j, -4e1+4j", sep=",", dtype=ctype), np.array([1. + 1.j, 2. - 2.j, - 3. + 3.j, - 40. 
+ 4j])) # Spaces at wrong places with assert_warns(DeprecationWarning): assert_equal(np.fromstring("1+2 j,3", dtype=ctype, sep=","), np.array([1.])) with assert_warns(DeprecationWarning): assert_equal(np.fromstring("1+ 2j,3", dtype=ctype, sep=","), np.array([1.])) with assert_warns(DeprecationWarning): assert_equal(np.fromstring("1 +2j,3", dtype=ctype, sep=","), np.array([1.])) with assert_warns(DeprecationWarning): assert_equal(np.fromstring("1+j", dtype=ctype, sep=","), np.array([1.])) with assert_warns(DeprecationWarning): assert_equal(np.fromstring("1+", dtype=ctype, sep=","), np.array([1.])) with assert_warns(DeprecationWarning): assert_equal(np.fromstring("1j+1", dtype=ctype, sep=","), np.array([1j])) def test_fromstring_bogus(): with assert_warns(DeprecationWarning): assert_equal(np.fromstring("1. 2. 3. flop 4.", dtype=float, sep=" "), np.array([1., 2., 3.])) def test_fromstring_empty(): with assert_warns(DeprecationWarning): assert_equal(np.fromstring("xxxxx", sep="x"), np.array([])) def test_fromstring_missing(): with assert_warns(DeprecationWarning): assert_equal(np.fromstring("1xx3x4x5x6", sep="x"), np.array([1])) class TestFileBased: ldbl = 1 + LD_INFO.eps tgt = np.array([ldbl]*5) out = ''.join([repr(t) + '\n' for t in tgt]) def test_fromfile_bogus(self): with temppath() as path: with open(path, 'wt') as f: f.write("1. 2. 3. flop 4.\n") with assert_warns(DeprecationWarning): res = np.fromfile(path, dtype=float, sep=" ") assert_equal(res, np.array([1., 2., 3.])) def test_fromfile_complex(self): for ctype in ["complex", "cdouble", "cfloat"]: # Check spacing between separator and only real component specified with temppath() as path: with open(path, 'wt') as f: f.write("1, 2 , 3 ,4\n") res = np.fromfile(path, dtype=ctype, sep=",") assert_equal(res, np.array([1., 2., 3., 4.])) # Real component not specified with temppath() as path: with open(path, 'wt') as f: f.write("1j, -2j, 3j, 4e1j\n") res = np.fromfile(path, dtype=ctype, sep=",") assert_equal(res, np.array([1.j, -2.j, 3.j, 40.j])) # Both components specified with temppath() as path: with open(path, 'wt') as f: f.write("1+1j,2-2j, -3+3j, -4e1+4j\n") res = np.fromfile(path, dtype=ctype, sep=",") assert_equal(res, np.array([1. + 1.j, 2. - 2.j, - 3. + 3.j, - 40. 
+ 4j])) # Spaces at wrong places with temppath() as path: with open(path, 'wt') as f: f.write("1+2 j,3\n") with assert_warns(DeprecationWarning): res = np.fromfile(path, dtype=ctype, sep=",") assert_equal(res, np.array([1.])) # Spaces at wrong places with temppath() as path: with open(path, 'wt') as f: f.write("1+ 2j,3\n") with assert_warns(DeprecationWarning): res = np.fromfile(path, dtype=ctype, sep=",") assert_equal(res, np.array([1.])) # Spaces at wrong places with temppath() as path: with open(path, 'wt') as f: f.write("1 +2j,3\n") with assert_warns(DeprecationWarning): res = np.fromfile(path, dtype=ctype, sep=",") assert_equal(res, np.array([1.])) # Spaces at wrong places with temppath() as path: with open(path, 'wt') as f: f.write("1+j\n") with assert_warns(DeprecationWarning): res = np.fromfile(path, dtype=ctype, sep=",") assert_equal(res, np.array([1.])) # Spaces at wrong places with temppath() as path: with open(path, 'wt') as f: f.write("1+\n") with assert_warns(DeprecationWarning): res = np.fromfile(path, dtype=ctype, sep=",") assert_equal(res, np.array([1.])) # Spaces at wrong places with temppath() as path: with open(path, 'wt') as f: f.write("1j+1\n") with assert_warns(DeprecationWarning): res = np.fromfile(path, dtype=ctype, sep=",") assert_equal(res, np.array([1.j])) @pytest.mark.skipif(string_to_longdouble_inaccurate, reason="Need strtold_l") def test_fromfile(self): with temppath() as path: with open(path, 'wt') as f: f.write(self.out) res = np.fromfile(path, dtype=np.longdouble, sep="\n") assert_equal(res, self.tgt) @pytest.mark.skipif(string_to_longdouble_inaccurate, reason="Need strtold_l") def test_genfromtxt(self): with temppath() as path: with open(path, 'wt') as f: f.write(self.out) res = np.genfromtxt(path, dtype=np.longdouble) assert_equal(res, self.tgt) @pytest.mark.skipif(string_to_longdouble_inaccurate, reason="Need strtold_l") def test_loadtxt(self): with temppath() as path: with open(path, 'wt') as f: f.write(self.out) res = np.loadtxt(path, dtype=np.longdouble) assert_equal(res, self.tgt) @pytest.mark.skipif(string_to_longdouble_inaccurate, reason="Need strtold_l") def test_tofile_roundtrip(self): with temppath() as path: self.tgt.tofile(path, sep=" ") res = np.fromfile(path, dtype=np.longdouble, sep=" ") assert_equal(res, self.tgt) # Conversions long double -> string def test_repr_exact(): o = 1 + LD_INFO.eps assert_(repr(o) != '1') @pytest.mark.skipif(longdouble_longer_than_double, reason="BUG #2376") @pytest.mark.skipif(string_to_longdouble_inaccurate, reason="Need strtold_l") def test_format(): o = 1 + LD_INFO.eps assert_("{0:.40g}".format(o) != '1') @pytest.mark.skipif(longdouble_longer_than_double, reason="BUG #2376") @pytest.mark.skipif(string_to_longdouble_inaccurate, reason="Need strtold_l") def test_percent(): o = 1 + LD_INFO.eps assert_("%.40g" % o != '1') @pytest.mark.skipif(longdouble_longer_than_double, reason="array repr problem") @pytest.mark.skipif(string_to_longdouble_inaccurate, reason="Need strtold_l") def test_array_repr(): o = 1 + LD_INFO.eps a = np.array([o]) b = np.array([1], dtype=np.longdouble) if not np.all(a != b): raise ValueError("precision loss creating arrays") assert_(repr(a) != repr(b)) # # Locale tests: scalar types formatting should be independent of the locale # class TestCommaDecimalPointLocale(CommaDecimalPointLocale): def test_repr_roundtrip_foreign(self): o = 1.5 assert_equal(o, np.longdouble(repr(o))) def test_fromstring_foreign_repr(self): f = 1.234 a = np.fromstring(repr(f), dtype=float, sep=" ") 
assert_equal(a[0], f) def test_fromstring_best_effort_float(self): with assert_warns(DeprecationWarning): assert_equal(np.fromstring("1,234", dtype=float, sep=" "), np.array([1.])) def test_fromstring_best_effort(self): with assert_warns(DeprecationWarning): assert_equal(np.fromstring("1,234", dtype=np.longdouble, sep=" "), np.array([1.])) def test_fromstring_foreign(self): s = "1.234" a = np.fromstring(s, dtype=np.longdouble, sep=" ") assert_equal(a[0], np.longdouble(s)) def test_fromstring_foreign_sep(self): a = np.array([1, 2, 3, 4]) b = np.fromstring("1,2,3,4,", dtype=np.longdouble, sep=",") assert_array_equal(a, b) def test_fromstring_foreign_value(self): with assert_warns(DeprecationWarning): b = np.fromstring("1,234", dtype=np.longdouble, sep=" ") assert_array_equal(b[0], 1) @pytest.mark.parametrize("int_val", [ # cases discussed in gh-10723 # and gh-9968 2 ** 1024, 0]) def test_longdouble_from_int(int_val): # for issue gh-9968 str_val = str(int_val) # we'll expect a RuntimeWarning on platforms # with np.longdouble equivalent to np.double # for large integer input with warnings.catch_warnings(record=True) as w: warnings.filterwarnings('always', '', RuntimeWarning) # can be inf==inf on some platforms assert np.longdouble(int_val) == np.longdouble(str_val) # we can't directly compare the int and # max longdouble value on all platforms if np.allclose(np.finfo(np.longdouble).max, np.finfo(np.double).max) and w: assert w[0].category is RuntimeWarning @pytest.mark.parametrize("bool_val", [ True, False]) def test_longdouble_from_bool(bool_val): assert np.longdouble(bool_val) == np.longdouble(int(bool_val))
the-stack_0_14261
from datetime import datetime from telegram import base, messageentity, userprofilephotos import imageText from io import BytesIO import random userKey = "userDict" ''' This is the key needed to access the user dictionary on the cotext.bot_data dictionary. ''' randomKey = "randomMsg" ''' This is the key needed to access the random # of messages on the cotext.chat_data dictionary. ''' rndLowerBound = 1 ''' The smallest possible number the random number generation will generate when called. ''' rndUpperBound = 7 ''' The biggest possible number the random number generation will generate when called. ''' def printTime(textToPrint): now = datetime.now() current_time = now.strftime("[%Y/%m/%d - %r]") print(current_time, textToPrint) def isMessageFromAGroup(typeOfMessage): return "group" in typeOfMessage or "channel" in typeOfMessage def DictHasElems(pDict): """checks if a dictionary is not empty""" return not not pDict def getMentions(entitiesDict: dict[str, str], typeToSearch: messageentity): for entity, text in entitiesDict.items(): if(entity.type == typeToSearch): return text return None def validMessageLength(message: str, mention: str): message = removeMention(message, mention) msgLen = len(message) return (0 < msgLen) and (msgLen < 500) def userIDFromUsername(username: str, userDict: dict): validUsername = username[1:] #The username on the dictionary does not contain #the "@" at the begining. It needs to be removed #to be a valid key for the dictionary. if(validUsername in userDict): return userDict[validUsername] else: return None def generateRandom(): return random.randint(rndLowerBound, rndUpperBound) def getUserIdFromBotData(mention: str, bot_data:dict): if userKey in bot_data: return userIDFromUsername(mention, bot_data[userKey]) else: return None def shouldProcessImage(mention, bot_data, chat_data): msgsToNextPicture = 0 if (randomKey not in chat_data): msgsToNextPicture = generateRandom() else: msgsToNextPicture = chat_data[randomKey] - 1 if (msgsToNextPicture < 1 and userKey in bot_data): userId = userIDFromUsername(mention, bot_data[userKey]) if (userId): chat_data[randomKey] = generateRandom() return userId else: chat_data[randomKey] = msgsToNextPicture return None def addUserIDToDict(messageUser, userDict): userDict[messageUser.username] = messageUser.id return userDict def processUser(messageUser, bot_data): if(not messageUser.is_bot): if(userKey not in bot_data): newUserDict = {} bot_data[userKey] = addUserIDToDict(messageUser, newUserDict) elif(messageUser.username not in bot_data[userKey]): bot_data[userKey] = addUserIDToDict(messageUser, bot_data[userKey]) def removeMention(textMessage: str, mention: str): baseText = textMessage.replace(mention, "").replace("\n", "").strip() return baseText.replace(" ", " ") #This makes sure no extra whitespaces are in the message def processImage(userProfilePic: userprofilephotos, textMessage: str, mention: str, invert=False, name=""): if(userProfilePic.total_count > 0): profilePicture = userProfilePic.photos[0][-1].get_file() #This is the Highest resolution of the users profile picture. 
photoByteArr = profilePicture.download_as_bytearray() oldImageBArr = BytesIO(photoByteArr) img = imageText.createImage(oldImageBArr) if not invert: imageText.addTextToProfilePicture(img, removeMention(textMessage, mention)) else: img = imageText.addTextToInverseProfilePicture(img, textMessage, name) newImageBArr = BytesIO() newImageBArr.name = "response.jpg" img.save(newImageBArr, "PNG") newImageBArr.seek(0) return newImageBArr return None if __name__ == "__main__": pass
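
# An illustrative sketch of the counting logic in shouldProcessImage(): with a username
# already cached in bot_data, each call decrements the per-chat counter stored under
# randomKey until it reaches zero, at which point the user's id is returned and a fresh
# random count is drawn. The username and id below are made up.
def _demo_should_process_image():
    bot_data = {userKey: {"alice": 1111}}
    chat_data = {randomKey: 2}  # pretend 2 messages remain before the next picture

    first = shouldProcessImage("@alice", bot_data, chat_data)   # counter drops to 1 -> None
    second = shouldProcessImage("@alice", bot_data, chat_data)  # counter hits 0 -> 1111
    print(first, second, chat_data[randomKey])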
the-stack_0_14262
import math, random import numpy as np import torch import torch.nn as nn import torch.optim as optim import torch.autograd as autograd import torch.nn.functional as F USE_CUDA = torch.cuda.is_available() Variable = lambda *args, **kwargs: autograd.Variable(*args, **kwargs).cuda() if USE_CUDA else autograd.Variable(*args, **kwargs) class Encoder(nn.Module): def __init__(self, din=32, hidden_dim=128): super(Encoder, self).__init__() self.fc = nn.Linear(din, hidden_dim) def forward(self, x): embedding = F.relu(self.fc(x)) return embedding class AttModel(nn.Module): def __init__(self, n_node, din, hidden_dim, dout): super(AttModel, self).__init__() self.fcv = nn.Linear(din, hidden_dim) self.fck = nn.Linear(din, hidden_dim) self.fcq = nn.Linear(din, hidden_dim) self.fcout = nn.Linear(hidden_dim, dout) def forward(self, x, mask): v = F.relu(self.fcv(x)) q = F.relu(self.fcq(x)) k = F.relu(self.fck(x)).permute(0,2,1) att = F.softmax(torch.mul(torch.bmm(q,k), mask) - 9e15*(1 - mask),dim=2) out = torch.bmm(att,v) #out = torch.add(out,v) out = F.relu(self.fcout(out)) return out class Q_Net(nn.Module): def __init__(self, hidden_dim, dout): super(Q_Net, self).__init__() self.fc = nn.Linear(hidden_dim, dout) def forward(self, x): q = self.fc(x) return q class DGN(nn.Module): def __init__(self,n_agent,num_inputs,hidden_dim,num_actions): super(DGN, self).__init__() self.encoder = Encoder(num_inputs,hidden_dim) self.att_1 = AttModel(n_agent,hidden_dim,hidden_dim,hidden_dim) self.att_2 = AttModel(n_agent,hidden_dim,hidden_dim,hidden_dim) self.q_net = Q_Net(hidden_dim,num_actions) def forward(self, x, mask): h1 = self.encoder(x) h2 = self.att_1(h1, mask) h3 = self.att_2(h2, mask) q = self.q_net(h3) return q
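
# A quick shape check for the model above, assuming observations are batched as
# (batch, n_agent, num_inputs) and the adjacency mask as (batch, n_agent, n_agent);
# all sizes below are arbitrary.
if __name__ == "__main__":
    batch, n_agent, num_inputs, hidden_dim, num_actions = 4, 5, 32, 128, 9

    model = DGN(n_agent, num_inputs, hidden_dim, num_actions)
    obs = torch.rand(batch, n_agent, num_inputs)
    # A fully-connected communication graph; zero entries would mask out attention
    # between the corresponding pair of agents.
    mask = torch.ones(batch, n_agent, n_agent)

    q_values = model(obs, mask)
    print(q_values.shape)  # expected: torch.Size([4, 5, 9])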
the-stack_0_14263
import os import blend_pproc_render """ SET THESE AS ARGUMENTS """ """ SET THESE AS ARGUMENTS """ # where to look for the meshes. Use multiple paths if there are more folders to look into meshes_prefixes = ["C:/Users/benatti/codes/synchrono/src/data/vehicle/sedan/", 'C:/Users/benatti/codes/synchrono/src/data/simpleRural/' ] # directory where the generated images are saved out_dir = os.path.dirname(os.path.realpath(__file__)) + '/rendered_images/' # paths where the post-process outputs (.dat, .chpf) are looked for. There might be multiple files in different directories # defining different bodies in the same timestep (cosimulation) but the prefixes must be the same # e.g. : path1/001.dat, path1/001.chpf, path2/001.dat will be processed together. # Please, in follow and lookat modes the tracked object must be in the first element of the list #datadir = os.path.dirname(os.path.realpath(__file__)) datadir = ['./', 'C:/Users/benatti/codes/blender/NADS/dat_files/'] # resolution: 'HIGH', 'MID', 'LOW': divide by 1,4,16 the default HD resolution of 3840x2160 res = 'MID'#'LOW' # Camera options: 'Follow', 'Fixed', 'Lookat' camera_mode = "Fixed" # 'Fixed' 'Lookat' # If true, sky is added use_sky = True # camera position (unused in follow mode) camera_pos = (200,0,200) # Camera target data: some keys might be unused depending on the type target = dict([ # ID of the body to target (lookat and follow modes). ('bodyid' , 0), # ID of the shape on the body to target (lookat and follow modes). The body might have many shapes ('shapetypeid' , 5), # name of the mesh on the body to target (lookat and follow modes). The body might have many vis meshes ('name' , 'sedan_chassis_vis'), # Point to look at. Used only by Fixed ('position', (0,0,-10)), # Distance, relative to the target, from which the camera is looking at. Only in Follow mode ('distfrom', (-15.5,-5,1.5)) ]) # point light origin light_loc=(10, 50, 50) # light intensity light_energy=53000 # 'up' axis axis_up = 'Z' blend_pproc_render.bl_render(meshes_prefixes, out_dir, datadir, res, camera_mode, use_sky, camera_pos, target, axis_up, light_loc, light_energy)
the-stack_0_14264
from mermer.consensus.constants import ConsensusConstants from mermer.consensus.pos_quality import _expected_plot_size from mermer.types.blockchain_format.sized_bytes import bytes32 from mermer.util.hash import std_hash from mermer.util.ints import uint8, uint64, uint128 def is_overflow_block(constants: ConsensusConstants, signage_point_index: uint8) -> bool: if signage_point_index >= constants.NUM_SPS_SUB_SLOT: raise ValueError("SP index too high") return signage_point_index >= constants.NUM_SPS_SUB_SLOT - constants.NUM_SP_INTERVALS_EXTRA def calculate_sp_interval_iters(constants: ConsensusConstants, sub_slot_iters: uint64) -> uint64: assert sub_slot_iters % constants.NUM_SPS_SUB_SLOT == 0 return uint64(sub_slot_iters // constants.NUM_SPS_SUB_SLOT) def calculate_sp_iters(constants: ConsensusConstants, sub_slot_iters: uint64, signage_point_index: uint8) -> uint64: if signage_point_index >= constants.NUM_SPS_SUB_SLOT: raise ValueError("SP index too high") return uint64(calculate_sp_interval_iters(constants, sub_slot_iters) * signage_point_index) def calculate_ip_iters( constants: ConsensusConstants, sub_slot_iters: uint64, signage_point_index: uint8, required_iters: uint64, ) -> uint64: # Note that the SSI is for the block passed in, which might be in the previous epoch sp_iters = calculate_sp_iters(constants, sub_slot_iters, signage_point_index) sp_interval_iters: uint64 = calculate_sp_interval_iters(constants, sub_slot_iters) if sp_iters % sp_interval_iters != 0 or sp_iters >= sub_slot_iters: raise ValueError(f"Invalid sp iters {sp_iters} for this ssi {sub_slot_iters}") if required_iters >= sp_interval_iters or required_iters == 0: raise ValueError( f"Required iters {required_iters} is not below the sp interval iters {sp_interval_iters} " f"{sub_slot_iters} or not >0." ) return uint64((sp_iters + constants.NUM_SP_INTERVALS_EXTRA * sp_interval_iters + required_iters) % sub_slot_iters) def calculate_iterations_quality( difficulty_constant_factor: uint128, quality_string: bytes32, size: int, difficulty: uint64, cc_sp_output_hash: bytes32, ) -> uint64: """ Calculates the number of iterations from the quality. This is derives as the difficulty times the constant factor times a random number between 0 and 1 (based on quality string), divided by plot size. """ sp_quality_string: bytes32 = std_hash(quality_string + cc_sp_output_hash) iters = uint64( int(difficulty) * int(difficulty_constant_factor) * int.from_bytes(sp_quality_string, "big", signed=False) // (int(pow(2, 256)) * int(_expected_plot_size(size))) ) return max(iters, uint64(1))
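
# A rough standalone sketch of the scaling inside calculate_iterations_quality(): the
# sp_quality_string hash is treated as a uniform number in [0, 2**256), so the result
# grows linearly with difficulty and shrinks with plot size. The k-to-plot-size formula
# below mirrors the convention of the imported _expected_plot_size helper
# ((2k + 1) * 2**(k - 1)) and is an assumption here, as are all concrete numbers.
if __name__ == "__main__":
    import hashlib

    def expected_plot_size(k: int) -> int:
        return ((2 * k) + 1) * (2 ** (k - 1))  # assumed plot-size convention

    difficulty = 100
    difficulty_constant_factor = 2 ** 67
    k = 32
    quality_string = hashlib.sha256(b"quality").digest()
    cc_sp_output_hash = hashlib.sha256(b"sp output").digest()

    sp_quality_string = hashlib.sha256(quality_string + cc_sp_output_hash).digest()
    iters = (
        difficulty
        * difficulty_constant_factor
        * int.from_bytes(sp_quality_string, "big")
        // (2 ** 256 * expected_plot_size(k))
    )
    print(max(iters, 1))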
the-stack_0_14266
#!/usr/bin/env python3 import wellmap from pytest_unordered import unordered from .param_helpers import * class MockPathManager: def check_named_plates(self, names): pass def get_index_for_only_plate(self): return {'path': '/path/to/data'} def get_index_for_named_plate(self, name): return {'plate': name, 'path': f'/path/to/{name.lower()}'} @parametrize_from_file( schema=Schema({ 'config': with_py.eval, **with_wellmap.error_or({ 'expected': with_nan.eval, }), }), ) def test_table_from_config(config, expected, error): with error: df = wellmap.table_from_config(config, MockPathManager()) assert df.to_dict('records') == unordered(expected)
the-stack_0_14267
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # # Copyright 2020 Alibaba Group Holding Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import hashlib import json import logging import os import shutil import subprocess import zipfile from pathlib import Path import yaml from graphscope.analytical.udf.utils import InMemoryZip from graphscope.framework.app import AppAssets from graphscope.framework.app import AppDAGNode from graphscope.framework.app import check_argument from graphscope.framework.context import create_context_node from graphscope.framework.dag import DAGNode from graphscope.framework.dag_utils import bind_app from graphscope.framework.errors import InvalidArgumentError from graphscope.framework.graph import Graph from graphscope.framework.utils import get_tempdir from graphscope.proto import graph_def_pb2 __all__ = ["JavaApp"] logger = logging.getLogger("graphscope") # runtime workspace try: WORKSPACE = os.environ["GRAPHSCOPE_RUNTIME"] except KeyError: WORKSPACE = os.path.join(get_tempdir(), "gs") DEFAULT_GS_CONFIG_FILE = ".gs_conf.yaml" POSSIBLE_APP_TYPES = [ "default_property", "parallel_property", "default_simple", "parallel_simple", ] def _parse_user_app(java_app_class: str, java_jar_full_path: str): _java_app_type = "" _frag_param_str = "" _java_inner_context_type = "" _java_executable = "java" if shutil.which("java") is None: if os.environ.get("JAVA_HOME", None) is not None: _java_executable = os.path.join(os.environ.get("JAVA_HOME"), "bin", "java") if not os.path.isfile(_java_executable) or not os.access( _java_executable, os.X_OK ): raise RuntimeError( "Java executable not found, you shall install a java runtime." 
            )
    parse_user_app_cmd = [
        _java_executable,
        "-cp",
        "{}".format(java_jar_full_path),
        "com.alibaba.graphscope.utils.AppBaseParser",
        java_app_class,
    ]
    logger.info(" ".join(parse_user_app_cmd))

    parse_user_app_process = subprocess.Popen(
        parse_user_app_cmd,
        env=os.environ.copy(),
        encoding="utf-8",
        errors="replace",
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        universal_newlines=True,
        bufsize=1,
    )
    out, err = parse_user_app_process.communicate()
    logger.info(err)
    for line in out.split("\n"):
        logger.info(line)
        if len(line) == 0:
            continue
        if line.find("DefaultPropertyApp") != -1:
            _java_app_type = "default_property"
        elif line.find("ParallelPropertyApp") != -1:
            _java_app_type = "parallel_property"
        elif line.find("DefaultAppBase") != -1:
            _java_app_type = "default_simple"
        elif line.find("ParallelAppBase") != -1:
            _java_app_type = "parallel_simple"
        elif line.find("Error") != -1:
            raise Exception("Error occurred in verifying user app")
        elif line.find("TypeParams") != -1:
            _frag_param_str = line.split(":")[-1].strip()
        elif line.find("ContextType") != -1:
            _java_inner_context_type = line.split(":")[-1].strip()
    logger.info(
        "Java app type: {}, frag type str: {}, ctx type: {}".format(
            _java_app_type, _frag_param_str, _java_inner_context_type
        )
    )

    parse_user_app_process.wait()
    return _java_app_type, _frag_param_str, _java_inner_context_type


def _type_param_consistent(graph_actual_type_param, java_app_type_param):
    if java_app_type_param == "java.lang.Long":
        if graph_actual_type_param in {"uint64_t", "int64_t"}:
            return True
        return False
    if java_app_type_param == "java.lang.Double":
        if graph_actual_type_param in {"double"}:
            return True
        return False
    if java_app_type_param == "java.lang.Integer":
        if graph_actual_type_param in {"int32_t", "uint32_t"}:
            return True
        return False
    return False


class JavaApp(AppAssets):
    """A class that represents a Java app asset node in a DAG, holding the jar file.

    It holds the necessary resources to run a Java app, including the Java class
    path, the gar file which consists of the jar and a configuration yaml, and the
    specified Java class. On creating a JavaApp, graphscope will try to load the
    specified Java class and parse the base class of your app, as well as the base
    class of your context class. This operation requires a Java runtime environment
    installed on the client machine where your graphscope session is created.

    To run your app, provide `JavaApp` with a property or projected graph and your
    querying args.
    """

    def __init__(self, full_jar_path: str, java_app_class: str):
        """Init JavaApp with the full path of your `jar` file and the
        fully-qualified name of your app class.

        Args:
            full_jar_path (str): The path where the jar file exists.
            java_app_class (str): The fully-qualified name of your app class.
        """
        self._java_app_class = java_app_class
        self._full_jar_path = full_jar_path
        self._jar_name = Path(self._full_jar_path).name
        gar = self._pack_jar(self._full_jar_path)
        gs_config = {
            "app": [
                {
                    "algo": "java_app",
                    "type": "java_pie",
                    "java_jar_path": self._full_jar_path,
                    "java_app_class": self.java_app_class,
                }
            ]
        }
        # Extract the java app type with the help of the java class.
        self._java_app_type, self._frag_param_str, _java_ctx_type = _parse_user_app(
            java_app_class, full_jar_path
        )
        # For the four different java app types, we use two different driver classes.
        if self._java_app_type not in POSSIBLE_APP_TYPES:
            raise RuntimeError("Unexpected app type: {}".format(self._java_app_type))
        if self._java_app_type.find("property") != -1:
            gs_config["app"][0]["compatible_graph"] = ["vineyard::ArrowFragment"]
        else:
            gs_config["app"][0]["compatible_graph"] = ["gs::ArrowProjectedFragment"]

        gs_config["app"][0]["context_type"] = _java_ctx_type
        if self._java_app_type == "default_property":
            gs_config["app"][0][
                "driver_header"
            ] = "apps/java_pie/java_pie_property_default_app.h"
            gs_config["app"][0]["class_name"] = "gs::JavaPIEPropertyDefaultApp"
        elif self._java_app_type == "parallel_property":
            gs_config["app"][0][
                "driver_header"
            ] = "apps/java_pie/java_pie_property_parallel_app.h"
            gs_config["app"][0]["class_name"] = "gs::JavaPIEPropertyParallelApp"
        elif self._java_app_type == "default_simple":
            gs_config["app"][0][
                "driver_header"
            ] = "apps/java_pie/java_pie_projected_default_app.h"
            gs_config["app"][0]["class_name"] = "gs::JavaPIEProjectedDefaultApp"
        elif self._java_app_type == "parallel_simple":
            gs_config["app"][0][
                "driver_header"
            ] = "apps/java_pie/java_pie_projected_parallel_app.h"
            gs_config["app"][0]["class_name"] = "gs::JavaPIEProjectedParallelApp"
        else:
            raise Exception(
                "Unrecognizable java app type: {}".format(self._java_app_type)
            )

        gar.append(DEFAULT_GS_CONFIG_FILE, yaml.dump(gs_config))
        super().__init__("java_app", _java_ctx_type, gar.read_bytes())

    # Override is_compatible to make sure the type params of the graph are
    # consistent with the java app.
    def is_compatible(self, graph):
        splited = graph.template_str.split("<")
        java_app_type_params = self.frag_param_str.split(",")
        num_type_params = 0
        if len(splited) != 2:
            raise Exception(
                "Unrecognizable graph template str: {}".format(graph.template_str)
            )
        if splited[0] == "vineyard::ArrowFragment":
            if self.java_app_type.find("property") == -1:
                logger.error("Expected property app")
                return False
            if len(java_app_type_params) != 1:
                logger.error("Expected one type param.")
                return False
            num_type_params = 1
        if splited[0] == "gs::ArrowProjectedFragment":
            if self.java_app_type.find("simple") == -1:
                logger.error("Expected simple app")
                return False
            if len(java_app_type_params) != 4:
                logger.error("Expected 4 type params")
                return False
            num_type_params = 4
        graph_actual_type_params = splited[1][:-1].split(",")
        for i in range(0, num_type_params):
            graph_actual_type_param = graph_actual_type_params[i]
            java_app_type_param = java_app_type_params[i]
            if not _type_param_consistent(
                graph_actual_type_param, java_app_type_param
            ):
                return False
        return True

    def _pack_jar(self, full_jar_path: str):
        garfile = InMemoryZip()
        if not os.path.exists(full_jar_path):
            raise FileNotFoundError("Jar file not found in {}.".format(full_jar_path))
        if not full_jar_path.endswith(".jar") or not zipfile.is_zipfile(full_jar_path):
            raise KeyError(
                "{} is not a jar file, please feed your packed jar file to JavaApp.".format(
                    full_jar_path
                )
            )
        tmp_jar_file = open(full_jar_path, "rb")
        jar_bytes = tmp_jar_file.read()
        if len(jar_bytes) <= 0:
            raise KeyError("Expect a non-empty Jar.")
        garfile.append("{}".format(full_jar_path.split("/")[-1]), jar_bytes)
        return garfile

    def signature(self):
        s = hashlib.sha256()
        s.update(
            f"{self.type}.{self._full_jar_path}.{self.java_app_class}".encode("utf-8")
        )
        s.update(self.gar)
        return s.hexdigest()

    @property
    def java_app_class(self):
        return self._java_app_class

    @property
    def jar_name(self):
        return self._jar_name

    @property
    def java_app_type(self):
        return self._java_app_type

    @property
    def frag_param_str(self):
        return self._frag_param_str

    def __call__(self, graph: Graph, *args, **kwargs):
        kwargs_extend = dict(app_class=self.java_app_class, **kwargs)
        if not hasattr(graph, "graph_type"):
            raise InvalidArgumentError("Missing graph_type attribute in graph object.")
        if (
            self.java_app_type.find("simple") != -1
            and graph.graph_type == graph_def_pb2.ARROW_PROPERTY
        ):
            graph = graph._project_to_simple()
        app_ = graph.session._wrapper(JavaAppDagNode(graph, self))
        return app_(*args, **kwargs_extend)


class JavaAppDagNode(AppDAGNode):
    """Restrict app assets to JavaApp assets."""

    def __init__(self, graph: Graph, app_assets: JavaApp):
        self._graph = graph
        self._app_assets = app_assets
        self._session = graph.session
        if not self._app_assets.is_compatible(self._graph):
            raise Exception(
                "No compatible app and graph: {} and {}".format(
                    self._app_assets.java_app_type, self._graph.template_str
                )
            )

        self._op = bind_app(graph, self._app_assets)
        # Add the ops to the dag.
        self._session.dag.add_op(self._app_assets.op)
        self._session.dag.add_op(self._op)

    def _convert_arrow_frag_for_java(self, cpp_frag_str: str):
        """Convert vineyard::ArrowFragment<OID,VID> to gs::ArrowFragmentDefault<OID>."""
        res = cpp_frag_str.split(",")[0] + ">"
        return res.replace("<", "Default<", 1).replace("vineyard", "gs")

    def __call__(self, *args, **kwargs):
        """When called, check arguments based on the app type, then do build and query.

        Raises:
            InvalidArgumentError: If app_type is None, or a positional argument is
                found when app_type is not `cpp_pie`.

        Returns:
            :class:`Context`: Query context, including the running results of the app.
        """
        check_argument(self._app_assets.type == "java_pie", "expect java_pie app")
        if not isinstance(self._graph, DAGNode) and not self._graph.loaded():
            raise RuntimeError("The graph is not loaded")
        check_argument(not args, "Only support using keyword arguments in cython app.")

        if self._app_assets.java_app_type.find("property") != -1:
            frag_name_for_java = self._convert_arrow_frag_for_java(
                self._graph.template_str
            )
            logger.info(
                "Set frag name to {}, {}".format(
                    self._graph.template_str, frag_name_for_java
                )
            )
        else:
            frag_name_for_java = self._graph.template_str

        # Get the number of workers on each host, so we can determine the java
        # memory settings.
        kwargs_extend = dict(
            frag_name=frag_name_for_java,
            jar_name=self._app_assets.jar_name,
            **kwargs,
        )

        logger.info("dumping to json {}".format(json.dumps(kwargs_extend)))
        return create_context_node(
            self._app_assets.context_type, self, self._graph, json.dumps(kwargs_extend)
        )
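
# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module). It assumes an already
# created GraphScope session and an already loaded graph; the jar path, the
# fully-qualified app class, and the `src` query argument below are hypothetical
# placeholders. Only `JavaApp` itself comes from this file.
# ---------------------------------------------------------------------------
def _java_app_usage_sketch(graph):
    """Illustrative only: run a hypothetical Java PIE app against `graph`."""
    app = JavaApp(
        full_jar_path="/tmp/my-java-app.jar",  # hypothetical jar location
        java_app_class="com.example.MyJavaApp",  # hypothetical app class
    )
    # Query arguments are passed as keyword arguments; when a "simple" app is
    # given a property graph, __call__ projects it before running.
    ctx = app(graph, src=6)  # `src` is an example argument of the hypothetical app
    return ctx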
the-stack_0_14268
from datasette.plugins import DEFAULT_PLUGINS from datasette.utils import detect_json1 from .fixtures import ( # noqa app_client, app_client_no_files, app_client_with_hash, app_client_shorter_time_limit, app_client_larger_cache_size, app_client_returned_rows_matches_page_size, app_client_two_attached_databases, app_client_two_attached_databases_one_immutable, app_client_conflicting_database_names, app_client_with_cors, app_client_with_dot, app_client_immutable_and_inspect_file, generate_compound_rows, generate_sortable_rows, make_app_client, EXPECTED_PLUGINS, METADATA, ) import json import pytest import sys import urllib def test_homepage(app_client): response = app_client.get("/.json") assert response.status == 200 assert "application/json; charset=utf-8" == response.headers["content-type"] assert response.json.keys() == {"fixtures": 0}.keys() d = response.json["fixtures"] assert d["name"] == "fixtures" assert d["tables_count"] == 24 assert len(d["tables_and_views_truncated"]) == 5 assert d["tables_and_views_more"] is True # 4 hidden FTS tables + no_primary_key (hidden in metadata) assert d["hidden_tables_count"] == 5 # 201 in no_primary_key, plus 5 in other hidden tables: assert d["hidden_table_rows_sum"] == 206 assert d["views_count"] == 4 def test_homepage_sort_by_relationships(app_client): response = app_client.get("/.json?_sort=relationships") assert response.status == 200 tables = [ t["name"] for t in response.json["fixtures"]["tables_and_views_truncated"] ] assert [ "simple_primary_key", "complex_foreign_keys", "roadside_attraction_characteristics", "searchable_tags", "foreign_key_references", ] == tables def test_database_page(app_client): response = app_client.get("/fixtures.json") data = response.json assert "fixtures" == data["database"] assert [ { "name": "123_starts_with_digits", "columns": ["content"], "primary_keys": [], "count": 0, "hidden": False, "fts_table": None, "foreign_keys": {"incoming": [], "outgoing": []}, "private": False, }, { "name": "Table With Space In Name", "columns": ["pk", "content"], "primary_keys": ["pk"], "count": 0, "hidden": False, "fts_table": None, "foreign_keys": {"incoming": [], "outgoing": []}, "private": False, }, { "name": "attraction_characteristic", "columns": ["pk", "name"], "primary_keys": ["pk"], "count": 2, "hidden": False, "fts_table": None, "foreign_keys": { "incoming": [ { "other_table": "roadside_attraction_characteristics", "column": "pk", "other_column": "characteristic_id", } ], "outgoing": [], }, "private": False, }, { "name": "binary_data", "columns": ["data"], "primary_keys": [], "count": 1, "hidden": False, "fts_table": None, "foreign_keys": {"incoming": [], "outgoing": []}, "private": False, }, { "name": "complex_foreign_keys", "columns": ["pk", "f1", "f2", "f3"], "primary_keys": ["pk"], "count": 1, "hidden": False, "fts_table": None, "foreign_keys": { "incoming": [], "outgoing": [ { "other_table": "simple_primary_key", "column": "f3", "other_column": "id", }, { "other_table": "simple_primary_key", "column": "f2", "other_column": "id", }, { "other_table": "simple_primary_key", "column": "f1", "other_column": "id", }, ], }, "private": False, }, { "name": "compound_primary_key", "columns": ["pk1", "pk2", "content"], "primary_keys": ["pk1", "pk2"], "count": 1, "hidden": False, "fts_table": None, "foreign_keys": {"incoming": [], "outgoing": []}, "private": False, }, { "name": "compound_three_primary_keys", "columns": ["pk1", "pk2", "pk3", "content"], "primary_keys": ["pk1", "pk2", "pk3"], "count": 1001, "hidden": False, 
"fts_table": None, "foreign_keys": {"incoming": [], "outgoing": []}, "private": False, }, { "name": "custom_foreign_key_label", "columns": ["pk", "foreign_key_with_custom_label"], "primary_keys": ["pk"], "count": 1, "hidden": False, "fts_table": None, "foreign_keys": { "incoming": [], "outgoing": [ { "other_table": "primary_key_multiple_columns_explicit_label", "column": "foreign_key_with_custom_label", "other_column": "id", } ], }, "private": False, }, { "name": "facet_cities", "columns": ["id", "name"], "primary_keys": ["id"], "count": 4, "hidden": False, "fts_table": None, "foreign_keys": { "incoming": [ { "other_table": "facetable", "column": "id", "other_column": "city_id", } ], "outgoing": [], }, "private": False, }, { "name": "facetable", "columns": [ "pk", "created", "planet_int", "on_earth", "state", "city_id", "neighborhood", "tags", "complex_array", "distinct_some_null", ], "primary_keys": ["pk"], "count": 15, "hidden": False, "fts_table": None, "foreign_keys": { "incoming": [], "outgoing": [ { "other_table": "facet_cities", "column": "city_id", "other_column": "id", } ], }, "private": False, }, { "name": "foreign_key_references", "columns": ["pk", "foreign_key_with_label", "foreign_key_with_no_label"], "primary_keys": ["pk"], "count": 2, "hidden": False, "fts_table": None, "foreign_keys": { "incoming": [], "outgoing": [ { "other_table": "primary_key_multiple_columns", "column": "foreign_key_with_no_label", "other_column": "id", }, { "other_table": "simple_primary_key", "column": "foreign_key_with_label", "other_column": "id", }, ], }, "private": False, }, { "name": "infinity", "columns": ["value"], "primary_keys": [], "count": 3, "hidden": False, "fts_table": None, "foreign_keys": {"incoming": [], "outgoing": []}, "private": False, }, { "name": "primary_key_multiple_columns", "columns": ["id", "content", "content2"], "primary_keys": ["id"], "count": 1, "hidden": False, "fts_table": None, "foreign_keys": { "incoming": [ { "other_table": "foreign_key_references", "column": "id", "other_column": "foreign_key_with_no_label", } ], "outgoing": [], }, "private": False, }, { "name": "primary_key_multiple_columns_explicit_label", "columns": ["id", "content", "content2"], "primary_keys": ["id"], "count": 1, "hidden": False, "fts_table": None, "foreign_keys": { "incoming": [ { "other_table": "custom_foreign_key_label", "column": "id", "other_column": "foreign_key_with_custom_label", } ], "outgoing": [], }, "private": False, }, { "name": "roadside_attraction_characteristics", "columns": ["attraction_id", "characteristic_id"], "primary_keys": [], "count": 5, "hidden": False, "fts_table": None, "foreign_keys": { "incoming": [], "outgoing": [ { "other_table": "attraction_characteristic", "column": "characteristic_id", "other_column": "pk", }, { "other_table": "roadside_attractions", "column": "attraction_id", "other_column": "pk", }, ], }, "private": False, }, { "name": "roadside_attractions", "columns": ["pk", "name", "address", "latitude", "longitude"], "primary_keys": ["pk"], "count": 4, "hidden": False, "fts_table": None, "foreign_keys": { "incoming": [ { "other_table": "roadside_attraction_characteristics", "column": "pk", "other_column": "attraction_id", } ], "outgoing": [], }, "private": False, }, { "name": "searchable", "columns": ["pk", "text1", "text2", "name with . 
and spaces"], "primary_keys": ["pk"], "count": 2, "hidden": False, "fts_table": "searchable_fts", "foreign_keys": { "incoming": [ { "other_table": "searchable_tags", "column": "pk", "other_column": "searchable_id", } ], "outgoing": [], }, "private": False, }, { "name": "searchable_tags", "columns": ["searchable_id", "tag"], "primary_keys": ["searchable_id", "tag"], "count": 2, "hidden": False, "fts_table": None, "foreign_keys": { "incoming": [], "outgoing": [ {"other_table": "tags", "column": "tag", "other_column": "tag"}, { "other_table": "searchable", "column": "searchable_id", "other_column": "pk", }, ], }, "private": False, }, { "name": "select", "columns": ["group", "having", "and", "json"], "primary_keys": [], "count": 1, "hidden": False, "fts_table": None, "foreign_keys": {"incoming": [], "outgoing": []}, "private": False, }, { "name": "simple_primary_key", "columns": ["id", "content"], "primary_keys": ["id"], "count": 4, "hidden": False, "fts_table": None, "foreign_keys": { "incoming": [ { "other_table": "foreign_key_references", "column": "id", "other_column": "foreign_key_with_label", }, { "other_table": "complex_foreign_keys", "column": "id", "other_column": "f3", }, { "other_table": "complex_foreign_keys", "column": "id", "other_column": "f2", }, { "other_table": "complex_foreign_keys", "column": "id", "other_column": "f1", }, ], "outgoing": [], }, "private": False, }, { "name": "sortable", "columns": [ "pk1", "pk2", "content", "sortable", "sortable_with_nulls", "sortable_with_nulls_2", "text", ], "primary_keys": ["pk1", "pk2"], "count": 201, "hidden": False, "fts_table": None, "foreign_keys": {"incoming": [], "outgoing": []}, "private": False, }, { "name": "table/with/slashes.csv", "columns": ["pk", "content"], "primary_keys": ["pk"], "count": 1, "hidden": False, "fts_table": None, "foreign_keys": {"incoming": [], "outgoing": []}, "private": False, }, { "name": "tags", "columns": ["tag"], "primary_keys": ["tag"], "count": 2, "hidden": False, "fts_table": None, "foreign_keys": { "incoming": [ { "other_table": "searchable_tags", "column": "tag", "other_column": "tag", } ], "outgoing": [], }, "private": False, }, { "name": "units", "columns": ["pk", "distance", "frequency"], "primary_keys": ["pk"], "count": 3, "hidden": False, "fts_table": None, "foreign_keys": {"incoming": [], "outgoing": []}, "private": False, }, { "name": "no_primary_key", "columns": ["content", "a", "b", "c"], "primary_keys": [], "count": 201, "hidden": True, "fts_table": None, "foreign_keys": {"incoming": [], "outgoing": []}, "private": False, }, { "name": "searchable_fts", "columns": ["text1", "text2", "name with . and spaces", "content"], "primary_keys": [], "count": 2, "hidden": True, "fts_table": "searchable_fts", "foreign_keys": {"incoming": [], "outgoing": []}, "private": False, }, { "name": "searchable_fts_content", "columns": [ "docid", "c0text1", "c1text2", "c2name with . 
and spaces", "c3content", ], "primary_keys": ["docid"], "count": 2, "hidden": True, "fts_table": None, "foreign_keys": {"incoming": [], "outgoing": []}, "private": False, }, { "name": "searchable_fts_segdir", "columns": [ "level", "idx", "start_block", "leaves_end_block", "end_block", "root", ], "primary_keys": ["level", "idx"], "count": 1, "hidden": True, "fts_table": None, "foreign_keys": {"incoming": [], "outgoing": []}, "private": False, }, { "name": "searchable_fts_segments", "columns": ["blockid", "block"], "primary_keys": ["blockid"], "count": 0, "hidden": True, "fts_table": None, "foreign_keys": {"incoming": [], "outgoing": []}, "private": False, }, ] == data["tables"] def test_no_files_uses_memory_database(app_client_no_files): response = app_client_no_files.get("/.json") assert response.status == 200 assert { ":memory:": { "hash": None, "color": "f7935d", "hidden_table_rows_sum": 0, "hidden_tables_count": 0, "name": ":memory:", "show_table_row_counts": False, "path": "/:memory:", "table_rows_sum": 0, "tables_count": 0, "tables_and_views_more": False, "tables_and_views_truncated": [], "views_count": 0, "private": False, } } == response.json # Try that SQL query response = app_client_no_files.get( "/:memory:.json?sql=select+sqlite_version()&_shape=array" ) assert 1 == len(response.json) assert ["sqlite_version()"] == list(response.json[0].keys()) def test_database_page_for_database_with_dot_in_name(app_client_with_dot): response = app_client_with_dot.get("/fixtures.dot.json") assert 200 == response.status def test_custom_sql(app_client): response = app_client.get( "/fixtures.json?sql=select+content+from+simple_primary_key&_shape=objects" ) data = response.json assert {"sql": "select content from simple_primary_key", "params": {}} == data[ "query" ] assert [ {"content": "hello"}, {"content": "world"}, {"content": ""}, {"content": "RENDER_CELL_DEMO"}, ] == data["rows"] assert ["content"] == data["columns"] assert "fixtures" == data["database"] assert not data["truncated"] def test_canned_query_with_named_parameter(app_client): response = app_client.get("/fixtures/neighborhood_search.json?text=town") assert [ ["Corktown", "Detroit", "MI"], ["Downtown", "Los Angeles", "CA"], ["Downtown", "Detroit", "MI"], ["Greektown", "Detroit", "MI"], ["Koreatown", "Los Angeles", "CA"], ["Mexicantown", "Detroit", "MI"], ] == response.json["rows"] def test_sql_time_limit(app_client_shorter_time_limit): response = app_client_shorter_time_limit.get("/fixtures.json?sql=select+sleep(0.5)") assert 400 == response.status assert "SQL Interrupted" == response.json["title"] def test_custom_sql_time_limit(app_client): response = app_client.get("/fixtures.json?sql=select+sleep(0.01)") assert 200 == response.status response = app_client.get("/fixtures.json?sql=select+sleep(0.01)&_timelimit=5") assert 400 == response.status assert "SQL Interrupted" == response.json["title"] def test_invalid_custom_sql(app_client): response = app_client.get("/fixtures.json?sql=.schema") assert response.status == 400 assert response.json["ok"] is False assert "Statement must be a SELECT" == response.json["error"] def test_table_json(app_client): response = app_client.get("/fixtures/simple_primary_key.json?_shape=objects") assert response.status == 200 data = response.json assert ( data["query"]["sql"] == "select id, content from simple_primary_key order by id limit 51" ) assert data["query"]["params"] == {} assert data["rows"] == [ {"id": "1", "content": "hello"}, {"id": "2", "content": "world"}, {"id": "3", "content": ""}, {"id": 
"4", "content": "RENDER_CELL_DEMO"}, ] def test_table_not_exists_json(app_client): assert { "ok": False, "error": "Table not found: blah", "status": 404, "title": None, } == app_client.get("/fixtures/blah.json").json def test_jsono_redirects_to_shape_objects(app_client_with_hash): response_1 = app_client_with_hash.get( "/fixtures/simple_primary_key.jsono", allow_redirects=False ) response = app_client_with_hash.get( response_1.headers["Location"], allow_redirects=False ) assert response.status == 302 assert response.headers["Location"].endswith("?_shape=objects") def test_table_shape_arrays(app_client): response = app_client.get("/fixtures/simple_primary_key.json?_shape=arrays") assert [ ["1", "hello"], ["2", "world"], ["3", ""], ["4", "RENDER_CELL_DEMO"], ] == response.json["rows"] def test_table_shape_arrayfirst(app_client): response = app_client.get( "/fixtures.json?" + urllib.parse.urlencode( { "sql": "select content from simple_primary_key order by id", "_shape": "arrayfirst", } ) ) assert ["hello", "world", "", "RENDER_CELL_DEMO"] == response.json def test_table_shape_objects(app_client): response = app_client.get("/fixtures/simple_primary_key.json?_shape=objects") assert [ {"id": "1", "content": "hello"}, {"id": "2", "content": "world"}, {"id": "3", "content": ""}, {"id": "4", "content": "RENDER_CELL_DEMO"}, ] == response.json["rows"] def test_table_shape_array(app_client): response = app_client.get("/fixtures/simple_primary_key.json?_shape=array") assert [ {"id": "1", "content": "hello"}, {"id": "2", "content": "world"}, {"id": "3", "content": ""}, {"id": "4", "content": "RENDER_CELL_DEMO"}, ] == response.json def test_table_shape_array_nl(app_client): response = app_client.get("/fixtures/simple_primary_key.json?_shape=array&_nl=on") lines = response.text.split("\n") results = [json.loads(line) for line in lines] assert [ {"id": "1", "content": "hello"}, {"id": "2", "content": "world"}, {"id": "3", "content": ""}, {"id": "4", "content": "RENDER_CELL_DEMO"}, ] == results def test_table_shape_invalid(app_client): response = app_client.get("/fixtures/simple_primary_key.json?_shape=invalid") assert { "ok": False, "error": "Invalid _shape: invalid", "status": 400, "title": None, } == response.json def test_table_shape_object(app_client): response = app_client.get("/fixtures/simple_primary_key.json?_shape=object") assert { "1": {"id": "1", "content": "hello"}, "2": {"id": "2", "content": "world"}, "3": {"id": "3", "content": ""}, "4": {"id": "4", "content": "RENDER_CELL_DEMO"}, } == response.json def test_table_shape_object_compound_primary_Key(app_client): response = app_client.get("/fixtures/compound_primary_key.json?_shape=object") assert {"a,b": {"pk1": "a", "pk2": "b", "content": "c"}} == response.json def test_table_with_slashes_in_name(app_client): response = app_client.get( "/fixtures/table%2Fwith%2Fslashes.csv?_shape=objects&_format=json" ) assert response.status == 200 data = response.json assert data["rows"] == [{"pk": "3", "content": "hey"}] def test_table_with_reserved_word_name(app_client): response = app_client.get("/fixtures/select.json?_shape=objects") assert response.status == 200 data = response.json assert data["rows"] == [ { "rowid": 1, "group": "group", "having": "having", "and": "and", "json": '{"href": "http://example.com/", "label":"Example"}', } ] @pytest.mark.parametrize( "path,expected_rows,expected_pages", [ ("/fixtures/no_primary_key.json", 201, 5), ("/fixtures/paginated_view.json", 201, 9), ("/fixtures/no_primary_key.json?_size=25", 201, 9), 
("/fixtures/paginated_view.json?_size=50", 201, 5), ("/fixtures/paginated_view.json?_size=max", 201, 3), ("/fixtures/123_starts_with_digits.json", 0, 1), # Ensure faceting doesn't break pagination: ("/fixtures/compound_three_primary_keys.json?_facet=pk1", 1001, 21), # Paginating while sorted by an expanded foreign key should work ( "/fixtures/roadside_attraction_characteristics.json?_size=2&_sort=attraction_id&_labels=on", 5, 3, ), ], ) def test_paginate_tables_and_views(app_client, path, expected_rows, expected_pages): fetched = [] count = 0 while path: response = app_client.get(path) assert 200 == response.status count += 1 fetched.extend(response.json["rows"]) path = response.json["next_url"] if path: assert urllib.parse.urlencode({"_next": response.json["next"]}) in path path = path.replace("http://localhost", "") assert count < 30, "Possible infinite loop detected" assert expected_rows == len(fetched) assert expected_pages == count @pytest.mark.parametrize( "path,expected_error", [ ("/fixtures/no_primary_key.json?_size=-4", "_size must be a positive integer"), ("/fixtures/no_primary_key.json?_size=dog", "_size must be a positive integer"), ("/fixtures/no_primary_key.json?_size=1001", "_size must be <= 100"), ], ) def test_validate_page_size(app_client, path, expected_error): response = app_client.get(path) assert expected_error == response.json["error"] assert 400 == response.status def test_page_size_zero(app_client): "For _size=0 we return the counts, empty rows and no continuation token" response = app_client.get("/fixtures/no_primary_key.json?_size=0") assert 200 == response.status assert [] == response.json["rows"] assert 201 == response.json["filtered_table_rows_count"] assert None is response.json["next"] assert None is response.json["next_url"] def test_paginate_compound_keys(app_client): fetched = [] path = "/fixtures/compound_three_primary_keys.json?_shape=objects" page = 0 while path: page += 1 response = app_client.get(path) fetched.extend(response.json["rows"]) path = response.json["next_url"] if path: path = path.replace("http://localhost", "") assert page < 100 assert 1001 == len(fetched) assert 21 == page # Should be correctly ordered contents = [f["content"] for f in fetched] expected = [r[3] for r in generate_compound_rows(1001)] assert expected == contents def test_paginate_compound_keys_with_extra_filters(app_client): fetched = [] path = ( "/fixtures/compound_three_primary_keys.json?content__contains=d&_shape=objects" ) page = 0 while path: page += 1 assert page < 100 response = app_client.get(path) fetched.extend(response.json["rows"]) path = response.json["next_url"] if path: path = path.replace("http://localhost", "") assert 2 == page expected = [r[3] for r in generate_compound_rows(1001) if "d" in r[3]] assert expected == [f["content"] for f in fetched] @pytest.mark.parametrize( "query_string,sort_key,human_description_en", [ ("_sort=sortable", lambda row: row["sortable"], "sorted by sortable"), ( "_sort_desc=sortable", lambda row: -row["sortable"], "sorted by sortable descending", ), ( "_sort=sortable_with_nulls", lambda row: ( 1 if row["sortable_with_nulls"] is not None else 0, row["sortable_with_nulls"], ), "sorted by sortable_with_nulls", ), ( "_sort_desc=sortable_with_nulls", lambda row: ( 1 if row["sortable_with_nulls"] is None else 0, -row["sortable_with_nulls"] if row["sortable_with_nulls"] is not None else 0, row["content"], ), "sorted by sortable_with_nulls descending", ), # text column contains '$null' - ensure it doesn't confuse pagination: 
("_sort=text", lambda row: row["text"], "sorted by text"), ], ) def test_sortable(app_client, query_string, sort_key, human_description_en): path = "/fixtures/sortable.json?_shape=objects&{}".format(query_string) fetched = [] page = 0 while path: page += 1 assert page < 100 response = app_client.get(path) assert human_description_en == response.json["human_description_en"] fetched.extend(response.json["rows"]) path = response.json["next_url"] if path: path = path.replace("http://localhost", "") assert 5 == page expected = list(generate_sortable_rows(201)) expected.sort(key=sort_key) assert [r["content"] for r in expected] == [r["content"] for r in fetched] def test_sortable_and_filtered(app_client): path = ( "/fixtures/sortable.json" "?content__contains=d&_sort_desc=sortable&_shape=objects" ) response = app_client.get(path) fetched = response.json["rows"] assert ( 'where content contains "d" sorted by sortable descending' == response.json["human_description_en"] ) expected = [row for row in generate_sortable_rows(201) if "d" in row["content"]] assert len(expected) == response.json["filtered_table_rows_count"] expected.sort(key=lambda row: -row["sortable"]) assert [r["content"] for r in expected] == [r["content"] for r in fetched] def test_sortable_argument_errors(app_client): response = app_client.get("/fixtures/sortable.json?_sort=badcolumn") assert "Cannot sort table by badcolumn" == response.json["error"] response = app_client.get("/fixtures/sortable.json?_sort_desc=badcolumn2") assert "Cannot sort table by badcolumn2" == response.json["error"] response = app_client.get( "/fixtures/sortable.json?_sort=sortable_with_nulls&_sort_desc=sortable" ) assert "Cannot use _sort and _sort_desc at the same time" == response.json["error"] def test_sortable_columns_metadata(app_client): response = app_client.get("/fixtures/sortable.json?_sort=content") assert "Cannot sort table by content" == response.json["error"] # no_primary_key has ALL sort options disabled for column in ("content", "a", "b", "c"): response = app_client.get("/fixtures/sortable.json?_sort={}".format(column)) assert "Cannot sort table by {}".format(column) == response.json["error"] @pytest.mark.parametrize( "path,expected_rows", [ ( "/fixtures/searchable.json?_search=dog", [ [1, "barry cat", "terry dog", "panther"], [2, "terry dog", "sara weasel", "puma"], ], ), ( # Special keyword shouldn't break FTS query "/fixtures/searchable.json?_search=AND", [], ), ( # Without _searchmode=raw this should return no results "/fixtures/searchable.json?_search=te*+AND+do*", [], ), ( # _searchmode=raw "/fixtures/searchable.json?_search=te*+AND+do*&_searchmode=raw", [ [1, "barry cat", "terry dog", "panther"], [2, "terry dog", "sara weasel", "puma"], ], ), ( "/fixtures/searchable.json?_search=weasel", [[2, "terry dog", "sara weasel", "puma"]], ), ( "/fixtures/searchable.json?_search_text2=dog", [[1, "barry cat", "terry dog", "panther"]], ), ( "/fixtures/searchable.json?_search_name%20with%20.%20and%20spaces=panther", [[1, "barry cat", "terry dog", "panther"]], ), ], ) def test_searchable(app_client, path, expected_rows): response = app_client.get(path) assert expected_rows == response.json["rows"] @pytest.mark.parametrize( "path,expected_rows", [ ( "/fixtures/searchable_view_configured_by_metadata.json?_search=weasel", [[2, "terry dog", "sara weasel", "puma"]], ), # This should return all results because search is not configured: ( "/fixtures/searchable_view.json?_search=weasel", [ [1, "barry cat", "terry dog", "panther"], [2, "terry dog", "sara 
weasel", "puma"], ], ), ( "/fixtures/searchable_view.json?_search=weasel&_fts_table=searchable_fts&_fts_pk=pk", [[2, "terry dog", "sara weasel", "puma"]], ), ], ) def test_searchable_views(app_client, path, expected_rows): response = app_client.get(path) assert expected_rows == response.json["rows"] def test_searchable_invalid_column(app_client): response = app_client.get("/fixtures/searchable.json?_search_invalid=x") assert 400 == response.status assert { "ok": False, "error": "Cannot search by that column", "status": 400, "title": None, } == response.json @pytest.mark.parametrize( "path,expected_rows", [ ("/fixtures/simple_primary_key.json?content=hello", [["1", "hello"]]), ( "/fixtures/simple_primary_key.json?content__contains=o", [["1", "hello"], ["2", "world"], ["4", "RENDER_CELL_DEMO"]], ), ("/fixtures/simple_primary_key.json?content__exact=", [["3", ""]]), ( "/fixtures/simple_primary_key.json?content__not=world", [["1", "hello"], ["3", ""], ["4", "RENDER_CELL_DEMO"]], ), ], ) def test_table_filter_queries(app_client, path, expected_rows): response = app_client.get(path) assert expected_rows == response.json["rows"] def test_table_filter_queries_multiple_of_same_type(app_client): response = app_client.get( "/fixtures/simple_primary_key.json?content__not=world&content__not=hello" ) assert [["3", ""], ["4", "RENDER_CELL_DEMO"]] == response.json["rows"] @pytest.mark.skipif(not detect_json1(), reason="Requires the SQLite json1 module") def test_table_filter_json_arraycontains(app_client): response = app_client.get("/fixtures/facetable.json?tags__arraycontains=tag1") assert [ [ 1, "2019-01-14 08:00:00", 1, 1, "CA", 1, "Mission", '["tag1", "tag2"]', '[{"foo": "bar"}]', "one", ], [ 2, "2019-01-14 08:00:00", 1, 1, "CA", 1, "Dogpatch", '["tag1", "tag3"]', "[]", "two", ], ] == response.json["rows"] def test_table_filter_extra_where(app_client): response = app_client.get("/fixtures/facetable.json?_where=neighborhood='Dogpatch'") assert [ [ 2, "2019-01-14 08:00:00", 1, 1, "CA", 1, "Dogpatch", '["tag1", "tag3"]', "[]", "two", ] ] == response.json["rows"] def test_table_filter_extra_where_invalid(app_client): response = app_client.get("/fixtures/facetable.json?_where=neighborhood=Dogpatch'") assert 400 == response.status assert "Invalid SQL" == response.json["title"] def test_table_filter_extra_where_disabled_if_no_sql_allowed(): with make_app_client(metadata={"allow_sql": {}}) as client: response = client.get("/fixtures/facetable.json?_where=neighborhood='Dogpatch'") assert 403 == response.status assert "_where= is not allowed" == response.json["error"] def test_table_through(app_client): # Just the museums: response = app_client.get( '/fixtures/roadside_attractions.json?_through={"table":"roadside_attraction_characteristics","column":"characteristic_id","value":"1"}' ) assert [ [ 3, "Burlingame Museum of PEZ Memorabilia", "214 California Drive, Burlingame, CA 94010", 37.5793, -122.3442, ], [ 4, "Bigfoot Discovery Museum", "5497 Highway 9, Felton, CA 95018", 37.0414, -122.0725, ], ] == response.json["rows"] assert ( 'where roadside_attraction_characteristics.characteristic_id = "1"' == response.json["human_description_en"] ) def test_max_returned_rows(app_client): response = app_client.get("/fixtures.json?sql=select+content+from+no_primary_key") data = response.json assert {"sql": "select content from no_primary_key", "params": {}} == data["query"] assert data["truncated"] assert 100 == len(data["rows"]) def test_view(app_client): response = 
app_client.get("/fixtures/simple_view.json?_shape=objects") assert response.status == 200 data = response.json assert data["rows"] == [ {"upper_content": "HELLO", "content": "hello"}, {"upper_content": "WORLD", "content": "world"}, {"upper_content": "", "content": ""}, {"upper_content": "RENDER_CELL_DEMO", "content": "RENDER_CELL_DEMO"}, ] def test_row(app_client): response = app_client.get("/fixtures/simple_primary_key/1.json?_shape=objects") assert response.status == 200 assert [{"id": "1", "content": "hello"}] == response.json["rows"] def test_row_format_in_querystring(app_client): # regression test for https://github.com/simonw/datasette/issues/563 response = app_client.get( "/fixtures/simple_primary_key/1?_format=json&_shape=objects" ) assert response.status == 200 assert [{"id": "1", "content": "hello"}] == response.json["rows"] def test_row_strange_table_name(app_client): response = app_client.get( "/fixtures/table%2Fwith%2Fslashes.csv/3.json?_shape=objects" ) assert response.status == 200 assert [{"pk": "3", "content": "hey"}] == response.json["rows"] def test_row_foreign_key_tables(app_client): response = app_client.get( "/fixtures/simple_primary_key/1.json?_extras=foreign_key_tables" ) assert response.status == 200 assert [ { "column": "id", "count": 1, "other_column": "foreign_key_with_label", "other_table": "foreign_key_references", }, { "column": "id", "count": 1, "other_column": "f3", "other_table": "complex_foreign_keys", }, { "column": "id", "count": 0, "other_column": "f2", "other_table": "complex_foreign_keys", }, { "column": "id", "count": 1, "other_column": "f1", "other_table": "complex_foreign_keys", }, ] == response.json["foreign_key_tables"] def test_unit_filters(app_client): response = app_client.get( "/fixtures/units.json?distance__lt=75km&frequency__gt=1kHz" ) assert response.status == 200 data = response.json assert data["units"]["distance"] == "m" assert data["units"]["frequency"] == "Hz" assert len(data["rows"]) == 1 assert data["rows"][0][0] == 2 def test_databases_json(app_client_two_attached_databases_one_immutable): response = app_client_two_attached_databases_one_immutable.get("/-/databases.json") databases = response.json assert 2 == len(databases) extra_database, fixtures_database = databases assert "extra database" == extra_database["name"] assert None == extra_database["hash"] assert True == extra_database["is_mutable"] assert False == extra_database["is_memory"] assert "fixtures" == fixtures_database["name"] assert fixtures_database["hash"] is not None assert False == fixtures_database["is_mutable"] assert False == fixtures_database["is_memory"] def test_metadata_json(app_client): response = app_client.get("/-/metadata.json") assert METADATA == response.json def test_threads_json(app_client): response = app_client.get("/-/threads.json") expected_keys = {"threads", "num_threads"} if sys.version_info >= (3, 7, 0): expected_keys.update({"tasks", "num_tasks"}) assert expected_keys == set(response.json.keys()) def test_plugins_json(app_client): response = app_client.get("/-/plugins.json") assert EXPECTED_PLUGINS == sorted(response.json, key=lambda p: p["name"]) # Try with ?all=1 response = app_client.get("/-/plugins.json?all=1") names = {p["name"] for p in response.json} assert names.issuperset(p["name"] for p in EXPECTED_PLUGINS) assert names.issuperset(DEFAULT_PLUGINS) def test_versions_json(app_client): response = app_client.get("/-/versions.json") assert "python" in response.json assert "3.0" == response.json.get("asgi") assert "version" in 
response.json["python"] assert "full" in response.json["python"] assert "datasette" in response.json assert "version" in response.json["datasette"] assert "sqlite" in response.json assert "version" in response.json["sqlite"] assert "fts_versions" in response.json["sqlite"] assert "compile_options" in response.json["sqlite"] def test_config_json(app_client): response = app_client.get("/-/config.json") assert { "default_page_size": 50, "default_facet_size": 30, "facet_suggest_time_limit_ms": 50, "facet_time_limit_ms": 200, "max_returned_rows": 100, "sql_time_limit_ms": 200, "allow_download": True, "allow_facet": True, "suggest_facets": True, "default_cache_ttl": 5, "default_cache_ttl_hashed": 365 * 24 * 60 * 60, "num_sql_threads": 3, "cache_size_kb": 0, "allow_csv_stream": True, "max_csv_mb": 100, "truncate_cells_html": 2048, "force_https_urls": False, "hash_urls": False, "template_debug": False, "base_url": "/", } == response.json def test_page_size_matching_max_returned_rows( app_client_returned_rows_matches_page_size, ): fetched = [] path = "/fixtures/no_primary_key.json" while path: response = app_client_returned_rows_matches_page_size.get(path) fetched.extend(response.json["rows"]) assert len(response.json["rows"]) in (1, 50) path = response.json["next_url"] if path: path = path.replace("http://localhost", "") assert 201 == len(fetched) @pytest.mark.parametrize( "path,expected_facet_results", [ ( "/fixtures/facetable.json?_facet=state&_facet=city_id", { "state": { "name": "state", "hideable": True, "type": "column", "toggle_url": "/fixtures/facetable.json?_facet=city_id", "results": [ { "value": "CA", "label": "CA", "count": 10, "toggle_url": "_facet=state&_facet=city_id&state=CA", "selected": False, }, { "value": "MI", "label": "MI", "count": 4, "toggle_url": "_facet=state&_facet=city_id&state=MI", "selected": False, }, { "value": "MC", "label": "MC", "count": 1, "toggle_url": "_facet=state&_facet=city_id&state=MC", "selected": False, }, ], "truncated": False, }, "city_id": { "name": "city_id", "hideable": True, "type": "column", "toggle_url": "/fixtures/facetable.json?_facet=state", "results": [ { "value": 1, "label": "San Francisco", "count": 6, "toggle_url": "_facet=state&_facet=city_id&city_id=1", "selected": False, }, { "value": 2, "label": "Los Angeles", "count": 4, "toggle_url": "_facet=state&_facet=city_id&city_id=2", "selected": False, }, { "value": 3, "label": "Detroit", "count": 4, "toggle_url": "_facet=state&_facet=city_id&city_id=3", "selected": False, }, { "value": 4, "label": "Memnonia", "count": 1, "toggle_url": "_facet=state&_facet=city_id&city_id=4", "selected": False, }, ], "truncated": False, }, }, ), ( "/fixtures/facetable.json?_facet=state&_facet=city_id&state=MI", { "state": { "name": "state", "hideable": True, "type": "column", "toggle_url": "/fixtures/facetable.json?_facet=city_id&state=MI", "results": [ { "value": "MI", "label": "MI", "count": 4, "selected": True, "toggle_url": "_facet=state&_facet=city_id", } ], "truncated": False, }, "city_id": { "name": "city_id", "hideable": True, "type": "column", "toggle_url": "/fixtures/facetable.json?_facet=state&state=MI", "results": [ { "value": 3, "label": "Detroit", "count": 4, "selected": False, "toggle_url": "_facet=state&_facet=city_id&state=MI&city_id=3", } ], "truncated": False, }, }, ), ( "/fixtures/facetable.json?_facet=planet_int", { "planet_int": { "name": "planet_int", "hideable": True, "type": "column", "toggle_url": "/fixtures/facetable.json", "results": [ { "value": 1, "label": 1, "count": 14, 
"selected": False, "toggle_url": "_facet=planet_int&planet_int=1", }, { "value": 2, "label": 2, "count": 1, "selected": False, "toggle_url": "_facet=planet_int&planet_int=2", }, ], "truncated": False, } }, ), ( # planet_int is an integer field: "/fixtures/facetable.json?_facet=planet_int&planet_int=1", { "planet_int": { "name": "planet_int", "hideable": True, "type": "column", "toggle_url": "/fixtures/facetable.json?planet_int=1", "results": [ { "value": 1, "label": 1, "count": 14, "selected": True, "toggle_url": "_facet=planet_int", } ], "truncated": False, } }, ), ], ) def test_facets(app_client, path, expected_facet_results): response = app_client.get(path) facet_results = response.json["facet_results"] # We only compare the querystring portion of the taggle_url for facet_name, facet_info in facet_results.items(): assert facet_name == facet_info["name"] assert False is facet_info["truncated"] for facet_value in facet_info["results"]: facet_value["toggle_url"] = facet_value["toggle_url"].split("?")[1] assert expected_facet_results == facet_results def test_suggested_facets(app_client): suggestions = [ { "name": suggestion["name"], "querystring": suggestion["toggle_url"].split("?")[-1], } for suggestion in app_client.get("/fixtures/facetable.json").json[ "suggested_facets" ] ] expected = [ {"name": "created", "querystring": "_facet=created"}, {"name": "planet_int", "querystring": "_facet=planet_int"}, {"name": "on_earth", "querystring": "_facet=on_earth"}, {"name": "state", "querystring": "_facet=state"}, {"name": "city_id", "querystring": "_facet=city_id"}, {"name": "neighborhood", "querystring": "_facet=neighborhood"}, {"name": "tags", "querystring": "_facet=tags"}, {"name": "complex_array", "querystring": "_facet=complex_array"}, {"name": "created", "querystring": "_facet_date=created"}, ] if detect_json1(): expected.append({"name": "tags", "querystring": "_facet_array=tags"}) assert expected == suggestions def test_allow_facet_off(): with make_app_client(config={"allow_facet": False}) as client: assert 400 == client.get("/fixtures/facetable.json?_facet=planet_int").status # Should not suggest any facets either: assert [] == client.get("/fixtures/facetable.json").json["suggested_facets"] def test_suggest_facets_off(): with make_app_client(config={"suggest_facets": False}) as client: # Now suggested_facets should be [] assert [] == client.get("/fixtures/facetable.json").json["suggested_facets"] def test_expand_labels(app_client): response = app_client.get( "/fixtures/facetable.json?_shape=object&_labels=1&_size=2" "&neighborhood__contains=c" ) assert { "2": { "pk": 2, "created": "2019-01-14 08:00:00", "planet_int": 1, "on_earth": 1, "state": "CA", "city_id": {"value": 1, "label": "San Francisco"}, "neighborhood": "Dogpatch", "tags": '["tag1", "tag3"]', "complex_array": "[]", "distinct_some_null": "two", }, "13": { "pk": 13, "created": "2019-01-17 08:00:00", "planet_int": 1, "on_earth": 1, "state": "MI", "city_id": {"value": 3, "label": "Detroit"}, "neighborhood": "Corktown", "tags": "[]", "complex_array": "[]", "distinct_some_null": None, }, } == response.json def test_expand_label(app_client): response = app_client.get( "/fixtures/foreign_key_references.json?_shape=object" "&_label=foreign_key_with_label&_size=1" ) assert { "1": { "pk": "1", "foreign_key_with_label": {"value": "1", "label": "hello"}, "foreign_key_with_no_label": "1", } } == response.json @pytest.mark.parametrize( "path,expected_cache_control", [ ("/fixtures/facetable.json", "max-age=5"), 
("/fixtures/facetable.json?_ttl=invalid", "max-age=5"), ("/fixtures/facetable.json?_ttl=10", "max-age=10"), ("/fixtures/facetable.json?_ttl=0", "no-cache"), ], ) def test_ttl_parameter(app_client, path, expected_cache_control): response = app_client.get(path) assert expected_cache_control == response.headers["Cache-Control"] @pytest.mark.parametrize( "path,expected_redirect", [ ("/fixtures/facetable.json?_hash=1", "/fixtures-HASH/facetable.json"), ( "/fixtures/facetable.json?city_id=1&_hash=1", "/fixtures-HASH/facetable.json?city_id=1", ), ], ) def test_hash_parameter( app_client_two_attached_databases_one_immutable, path, expected_redirect ): # First get the current hash for the fixtures database current_hash = app_client_two_attached_databases_one_immutable.ds.databases[ "fixtures" ].hash[:7] response = app_client_two_attached_databases_one_immutable.get( path, allow_redirects=False ) assert response.status == 302 location = response.headers["Location"] assert expected_redirect.replace("HASH", current_hash) == location def test_hash_parameter_ignored_for_mutable_databases(app_client): path = "/fixtures/facetable.json?_hash=1" response = app_client.get(path, allow_redirects=False) assert response.status == 200 test_json_columns_default_expected = [ {"intval": 1, "strval": "s", "floatval": 0.5, "jsonval": '{"foo": "bar"}'} ] @pytest.mark.parametrize( "extra_args,expected", [ ("", test_json_columns_default_expected), ("&_json=intval", test_json_columns_default_expected), ("&_json=strval", test_json_columns_default_expected), ("&_json=floatval", test_json_columns_default_expected), ( "&_json=jsonval", [{"intval": 1, "strval": "s", "floatval": 0.5, "jsonval": {"foo": "bar"}}], ), ], ) def test_json_columns(app_client, extra_args, expected): sql = """ select 1 as intval, "s" as strval, 0.5 as floatval, '{"foo": "bar"}' as jsonval """ path = "/fixtures.json?" 
+ urllib.parse.urlencode({"sql": sql, "_shape": "array"}) path += extra_args response = app_client.get(path) assert expected == response.json def test_config_cache_size(app_client_larger_cache_size): response = app_client_larger_cache_size.get("/fixtures/pragma_cache_size.json") assert [[-2500]] == response.json["rows"] def test_config_force_https_urls(): with make_app_client(config={"force_https_urls": True}) as client: response = client.get("/fixtures/facetable.json?_size=3&_facet=state") assert response.json["next_url"].startswith("https://") assert response.json["facet_results"]["state"]["results"][0][ "toggle_url" ].startswith("https://") assert response.json["suggested_facets"][0]["toggle_url"].startswith("https://") # Also confirm that request.url and request.scheme are set correctly response = client.get("/") assert client.ds._last_request.url.startswith("https://") assert client.ds._last_request.scheme == "https" def test_infinity_returned_as_null(app_client): response = app_client.get("/fixtures/infinity.json?_shape=array") assert [ {"rowid": 1, "value": None}, {"rowid": 2, "value": None}, {"rowid": 3, "value": 1.5}, ] == response.json def test_infinity_returned_as_invalid_json_if_requested(app_client): response = app_client.get("/fixtures/infinity.json?_shape=array&_json_infinity=1") assert [ {"rowid": 1, "value": float("inf")}, {"rowid": 2, "value": float("-inf")}, {"rowid": 3, "value": 1.5}, ] == response.json def test_custom_query_with_unicode_characters(app_client): response = app_client.get("/fixtures/𝐜𝐢𝐭𝐢𝐞𝐬.json?_shape=array") assert [{"id": 1, "name": "San Francisco"}] == response.json def test_trace(app_client): response = app_client.get("/fixtures/simple_primary_key.json?_trace=1") data = response.json assert "_trace" in data trace_info = data["_trace"] assert isinstance(trace_info["request_duration_ms"], float) assert isinstance(trace_info["sum_trace_duration_ms"], float) assert isinstance(trace_info["num_traces"], int) assert isinstance(trace_info["traces"], list) assert len(trace_info["traces"]) == trace_info["num_traces"] for trace in trace_info["traces"]: assert isinstance(trace["type"], str) assert isinstance(trace["start"], float) assert isinstance(trace["end"], float) assert trace["duration_ms"] == (trace["end"] - trace["start"]) * 1000 assert isinstance(trace["traceback"], list) assert isinstance(trace["database"], str) assert isinstance(trace["sql"], str) assert isinstance(trace["params"], (list, dict, None.__class__)) @pytest.mark.parametrize( "path,status_code", [ ("/fixtures.json", 200), ("/fixtures/no_primary_key.json", 200), # A 400 invalid SQL query should still have the header: ("/fixtures.json?sql=select+blah", 400), ], ) def test_cors(app_client_with_cors, path, status_code): response = app_client_with_cors.get(path) assert response.status == status_code assert "*" == response.headers["Access-Control-Allow-Origin"] @pytest.mark.parametrize( "path", ( "/", ".json", "/searchable", "/searchable.json", "/searchable_view", "/searchable_view.json", ), ) def test_database_with_space_in_name(app_client_two_attached_databases, path): response = app_client_two_attached_databases.get("/extra database" + path) assert response.status == 200 def test_common_prefix_database_names(app_client_conflicting_database_names): # https://github.com/simonw/datasette/issues/597 assert ["fixtures", "foo", "foo-bar"] == [ d["name"] for d in app_client_conflicting_database_names.get("/-/databases.json").json ] for db_name, path in (("foo", "/foo.json"), ("foo-bar", 
"/foo-bar.json")): data = app_client_conflicting_database_names.get(path).json assert db_name == data["database"] def test_null_foreign_keys_are_not_expanded(app_client): response = app_client.get( "/fixtures/foreign_key_references.json?_shape=array&_labels=on" ) assert [ { "pk": "1", "foreign_key_with_label": {"value": "1", "label": "hello"}, "foreign_key_with_no_label": {"value": "1", "label": "1"}, }, {"pk": "2", "foreign_key_with_label": None, "foreign_key_with_no_label": None,}, ] == response.json def test_inspect_file_used_for_count(app_client_immutable_and_inspect_file): response = app_client_immutable_and_inspect_file.get("/fixtures/sortable.json") assert response.json["filtered_table_rows_count"] == 100
the-stack_0_14269
""" Background Music Example If Python and Arcade are installed, this example can be run from the command line with: python -m arcade.examples.background_music """ import time import arcade SCREEN_WIDTH = 600 SCREEN_HEIGHT = 300 SCREEN_TITLE = "Starting Template Simple" MUSIC_VOLUME = 0.5 class MyGame(arcade.Window): """Main application class.""" def __init__(self, width, height, title): super().__init__(width, height, title) arcade.set_background_color(arcade.color.WHITE) # Variables used to manage our music. See setup() for giving them # values. self.music_list = [] self.current_song_index = 0 self.current_player = None self.music = None def advance_song(self): """Advance our pointer to the next song. This does NOT start the song.""" self.current_song_index += 1 if self.current_song_index >= len(self.music_list): self.current_song_index = 0 print(f"Advancing song to {self.current_song_index}.") def play_song(self): """Play the song.""" # Stop what is currently playing. if self.music: self.music.stop(self.current_player) # Play the next song print(f"Playing {self.music_list[self.current_song_index]}") self.music = arcade.Sound( self.music_list[self.current_song_index], streaming=True ) self.current_player = self.music.play(MUSIC_VOLUME) # This is a quick delay. If we don't do this, our elapsed time is 0.0 # and on_update will think the music is over and advance us to the next # song before starting this one. time.sleep(0.03) def setup(self): """Set up the game here. Call this function to restart the game.""" # List of music self.music_list = [ ":resources:music/funkyrobot.mp3", ":resources:music/1918.mp3", ] # Array index of what to play self.current_song_index = 0 # Play the song self.play_song() def on_draw(self): """Render the screen.""" arcade.start_render() position = self.music.get_stream_position(self.current_player) length = self.music.get_length() size = 20 margin = size * 0.5 # Print time elapsed and total y = SCREEN_HEIGHT - (size + margin) text = f"{int(position) // 60}:{int(position) % 60:02} of {int(length) // 60}:{int(length) % 60:02}" arcade.draw_text(text, 0, y, arcade.csscolor.BLACK, size) # Print current song y -= size + margin text = f"Currently playing: {self.music_list[self.current_song_index]}" arcade.draw_text(text, 0, y, arcade.csscolor.BLACK, size) def on_update(self, dt): position = self.music.get_stream_position(self.current_player) # The position pointer is reset to 0 right after we finish the song. # This makes it very difficult to figure out if we just started playing # or if we are doing playing. if position == 0.0: self.advance_song() self.play_song() def main(): """Main method""" window = MyGame(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE) window.setup() arcade.run() if __name__ == "__main__": main()
the-stack_0_14270
from enum import Enum


class KlineInterval(str, Enum):
    ONE_MINUTE = '1m'
    THREE_MINUTES = '3m'
    FIVE_MINUTES = '5m'
    FIFTEEN_MINUTES = '15m'
    THIRTY_MINUTES = '30m'
    ONE_HOUR = '1h'
    TWO_HOURS = '2h'
    FOUR_HOURS = '4h'
    SIX_HOURS = '6h'
    EIGHT_HOURS = '8h'
    TWELVE_HOURS = '12h'
    ONE_DAY = '1d'
    THREE_DAYS = '3d'
    ONE_WEEK = '1w'
    ONE_MONTH = '1M'


class OrderStatus(str, Enum):
    ACK = 'Ack'
    PARTIAL_FILL = 'PartialFill'
    IOC_NO_FILL = 'IocNoFill'
    FULLY_FILL = 'FullyFill'
    CANCELED = 'Canceled'
    EXPIRED = 'Expired'
    FAILED_BLOCKING = 'FailedBlocking'
    FAILED_MATCHING = 'FailedMatching'


class OrderSide(str, Enum):
    BUY = 'buy'
    SELL = 'sell'


class TimeInForce(str, Enum):
    GOOD_TILL_EXPIRE = "GTE"
    IMMEDIATE_OR_CANCEL = "IOC"


class TransactionSide(str, Enum):
    RECEIVE = 'RECEIVE'
    SEND = 'SEND'


class TransactionType(str, Enum):
    NEW_ORDER = 'NEW_ORDER'
    ISSUE_TOKEN = 'ISSUE_TOKEN'
    BURN_TOKEN = 'BURN_TOKEN'
    LIST_TOKEN = 'LIST_TOKEN'
    CANCEL_ORDER = 'CANCEL_ORDER'
    FREEZE_TOKEN = 'FREEZE_TOKEN'
    UN_FREEZE_TOKEN = 'UN_FREEZE_TOKEN'
    TRANSFER = 'TRANSFER'
    PROPOSAL = 'PROPOSAL'
    VOTE = 'VOTE'


class OrderType(str, Enum):
    LIMIT = "LIMIT"


class PeerType(str, Enum):
    NODE = 'node'
    WEBSOCKET = 'ws'


class RpcBroadcastRequestType(int, Enum):
    SYNC = 1
    ASYNC = 2
    COMMIT = 3
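
# Illustrative usage sketch (not part of the original module). Because each enum
# mixes in str (or int), members compare equal to their underlying values and can
# be dropped directly into request parameters; the params dict below is a
# hypothetical example rather than a real client call.
if __name__ == "__main__":
    assert KlineInterval.ONE_HOUR == "1h"
    assert OrderSide.BUY.value == "buy"
    assert RpcBroadcastRequestType.COMMIT == 3

    params = {
        "symbol": "EXAMPLE-PAIR",  # hypothetical trading pair
        "interval": KlineInterval.ONE_HOUR,
        "side": OrderSide.BUY,
        "timeInForce": TimeInForce.GOOD_TILL_EXPIRE,
    }
    print(params)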
the-stack_0_14273
# Copyright 2014-2020 Chris Cummins <[email protected]>. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Utility code for working with sqlalchemy.""" import contextlib import os import pathlib import queue import sqlite3 import sys import threading import time import typing from typing import Callable from typing import List from typing import Optional import sqlalchemy as sql from absl import flags as absl_flags from sqlalchemy import func from sqlalchemy import orm from sqlalchemy.dialects import mysql from sqlalchemy.ext import declarative from labm8.py import humanize from labm8.py import labdate from labm8.py import pbutil from labm8.py import progress from labm8.py import text from labm8.py.internal import labm8_logging as logging FLAGS = absl_flags.FLAGS absl_flags.DEFINE_boolean( "sqlutil_echo", False, "If True, the Engine will log all statements as well as a repr() of their " "parameter lists to the engines logger, which defaults to sys.stdout.", ) absl_flags.DEFINE_boolean( "sqlutil_pool_pre_ping", True, "Enable pessimistic pre-ping to check that database connections are " "alive. This adds some overhead, but reduces the risk of " '"server has gone away" errors. See:' "<https://docs.sqlalchemy.org/en/13/core/pooling.html#disconnect-handling-pessimistic>", ) absl_flags.DEFINE_integer( "mysql_engine_pool_size", 5, "The number of connections to keep open inside the connection pool. A " "--mysql_engine_pool_size of 0 indicates no limit", ) absl_flags.DEFINE_integer( "mysql_engine_max_overflow", 10, "The number of connections to allow in connection pool “overflow”, that " "is connections that can be opened above and beyond the " "--mysql_engine_pool_size setting", ) absl_flags.DEFINE_boolean( "mysql_assume_utf8_charset", True, "Default to adding the '?charset=utf8' suffix to MySQL database URLs.", ) absl_flags.DEFINE_boolean( "sqlite_enable_foreign_keys", True, "Enable foreign key support for SQLite. This enforces foreign key " "constraints, and enables cascaded update/delete statements. See: " "https://docs.sqlalchemy.org/en/13/dialects/sqlite.html#foreign-key-support", ) # The Query type is returned by Session.query(). This is a convenience for type # annotations. Query = orm.query.Query class DatabaseNotFound(FileNotFoundError): """An error that is raised if the requested database cannot be found.""" def __init__(self, url: str): self._url = url @property def url(self): return self._url def __repr__(self) -> str: return f"Database not found: '{self.url}'" def __str__(self) -> str: return repr(self) def Base(*args, **kwargs) -> sql.ext.declarative.DeclarativeMeta: """Construct a base class for declarative class definitions.""" return sql.ext.declarative.declarative_base(*args, **kwargs) def GetOrAdd( session: sql.orm.session.Session, model, defaults: typing.Dict[str, object] = None, **kwargs, ): """Instantiate a mapped database object. If the object is not in the database, add it. Note that no change is written to disk until commit() is called on the session. Args: session: The database session. 
model: The database table class. defaults: Default values for mapped objects. kwargs: The values for the table row. Returns: An instance of the model class, with the values specified. """ instance = session.query(model).filter_by(**kwargs).first() if not instance: params = { k: v for k, v in kwargs.items() if not isinstance(v, sql.sql.expression.ClauseElement) } params.update(defaults or {}) instance = model(**params) session.add(instance) logging.Log( logging.GetCallingModuleName(), 5, "New record: %s(%s)", model.__name__, params, ) return instance def Get( session: sql.orm.session.Session, model, defaults: typing.Dict[str, object] = None, **kwargs, ): """Determine if a database object exists. Args: session: The database session. model: The database table class. defaults: Default values for mapped objects. kwargs: The values for the table row. Returns: An instance of the model class with the values specified, or None if the object is not in the database. """ del defaults return session.query(model).filter_by(**kwargs).first() def CreateEngine(url: str, must_exist: bool = False) -> sql.engine.Engine: """Create an sqlalchemy database engine. This is a convenience wrapper for creating an sqlalchemy engine, that also creates the database if required, and checks that the database exists. This means that it is less flexible than SqlAlchemy's create_engine() - only three combination of dialects and drivers are supported: sqlite, mysql, and postgresql. See https://docs.sqlalchemy.org/en/latest/core/engines.html for details. Additionally, this implements a custom 'file://' handler, which reads a URL from a local file, and returns a connection to the database addressed by the URL. Use this if you would like to keep sensitive information such as a MySQL database password out of your .bash_history. Examples: Create in-memory SQLite database: >>> engine = CreateEngine('sqlite://') Connect to an SQLite database at relative.db: >>> engine = CreateEngine('sqlite:///relative.db') Connect to an SQLite database at /absolute/path/to/db: >>> engine = CreateEngine('sqlite:////absolute/path/to/db') Connect to MySQL database: >>> engine = CreateEngine( 'mysql://bob:password@localhost:1234/database?charset=utf8') Connect to PostgreSQL database: >>> engine.CreateEngine( 'postgresql://bob:password@localhost:1234/database') Connect to a URL specified in the file /tmp/url.txt: >>> engine.CreateEngine('file:///tmp/url.txt') Connect to a URL specified in the file /tmp/url.txt, with the suffix '/database?charset=utf8': >>> engine.CreateEngine('file:///tmp/url.txt?/database?charset=utf8') Args: url: The URL of the database to connect to. must_exist: If True, raise DatabaseNotFound if it doesn't exist. Else, database is created if it doesn't exist. Returns: An SQLalchemy Engine instance. Raises: DatabaseNotFound: If the database does not exist and must_exist is set. ValueError: If the datastore backend is not supported. """ engine_args = {} # Read and expand a `file://` prefixed URL. url = ResolveUrl(url) if url.startswith("mysql://"): # Support for MySQL dialect. # We create a throwaway engine that we use to check if the requested # database exists. engine = sql.create_engine("/".join(url.split("/")[:-1])) database = url.split("/")[-1].split("?")[0] query = engine.execute( sql.text( "SELECT SCHEMA_NAME FROM " "INFORMATION_SCHEMA.SCHEMATA WHERE " "SCHEMA_NAME = :database", ), database=database, ) # Engine-specific options. 
engine_args["pool_size"] = FLAGS.mysql_engine_pool_size engine_args["max_overflow"] = FLAGS.mysql_engine_max_overflow if not query.first(): if must_exist: raise DatabaseNotFound(url) else: # We can't use sql.text() escaping here because it uses single quotes # for escaping. MySQL only accepts backticks for quoting database # names. engine.execute(f"CREATE DATABASE `{database}`") engine.dispose() elif url.startswith("sqlite://"): # Support for SQLite dialect. # This project (phd) deliberately disallows relative paths due to Bazel # sandboxing. if url != "sqlite://" and not url.startswith("sqlite:////"): raise ValueError("Relative path to SQLite database is not allowed") if url == "sqlite://": if must_exist: raise ValueError( "must_exist=True not valid for in-memory SQLite database", ) else: path = pathlib.Path(url[len("sqlite:///") :]) if must_exist: if not path.is_file(): raise DatabaseNotFound(url) else: # Make the parent directory for SQLite database if creating a new # database. path.parent.mkdir(parents=True, exist_ok=True) elif url.startswith("postgresql://"): # Support for PostgreSQL dialect. engine = sql.create_engine("/".join(url.split("/")[:-1] + ["postgres"])) conn = engine.connect() database = url.split("/")[-1] query = conn.execute( sql.text("SELECT 1 FROM pg_database WHERE datname = :database"), database=database, ) if not query.first(): if must_exist: raise DatabaseNotFound(url) else: # PostgreSQL does not let you create databases within a transaction, so # manually complete the transaction before creating the database. conn.execute(sql.text("COMMIT")) # PostgreSQL does not allow single quoting of database names. conn.execute(f"CREATE DATABASE {database}") conn.close() engine.dispose() else: raise ValueError(f"Unsupported database URL='{url}'") # Create the engine. engine = sql.create_engine( url, encoding="utf-8", echo=FLAGS.sqlutil_echo, pool_pre_ping=FLAGS.sqlutil_pool_pre_ping, **engine_args, ) # Create and immediately close a connection. This is because SQLAlchemy engine # is lazily instantiated, so for connections such as SQLite, this line # actually creates the file. engine.connect().close() return engine @sql.event.listens_for(sql.engine.Engine, "connect") def EnableSqliteForeignKeysCallback(dbapi_connection, connection_record): """Enable foreign key constraints for SQLite databases. See --sqlite_enable_foreign_keys for details. """ del connection_record # This callback listens for *all* database connections, not just SQLite. Check # the type before trying to run an SQLite-specific pragma. if FLAGS.sqlite_enable_foreign_keys and isinstance( dbapi_connection, sqlite3.Connection ): cursor = dbapi_connection.cursor() cursor.execute("PRAGMA foreign_keys=ON") cursor.close() def ResolveUrl(url: str, use_flags: bool = True): """Resolve the URL of a database. The following modifications are supported: * If the url begins with 'file://', the URL is substituted with the contents of the file. * If --mysql_assume_utf8_charset is set, then '?charset=utf8' suffix is appended to URLs which begin with mysql://. * Shell variables are expanded. Args: url: The URL to expand, e.g. `file://path/to/file.txt?arg' use_flags: Determine whether behaviour is dictated by the FLAGS variables. Set this to False only when resolving database URLs before flags parsing, e.g. in enumerating test fixtures. Returns: The URL as interpreted by reading any URL file. Raises: ValueError: If the file path is invalid. FileNotFoundError: IF the file path does not exist. """ # Substitute shell variables. 
url = os.path.expandvars(url) if url.startswith("file://"): # Split the URL into the file path, and the optional suffix. components = url.split("?") path, suffix = components[0], "?".join(components[1:]) # Strip the file:// prefix from the path. path = pathlib.Path(path[len("file://") :]) if not path.is_absolute(): raise ValueError("Relative path to file:// is not allowed") if not path.is_file(): raise FileNotFoundError(f"File '{path}' not found") # Read the contents of the file, ignoring lines starting with '#'. with open(path) as f: url = "\n".join( x for x in f.read().split("\n") if not x.lstrip().startswith("#") ).strip() # Append the suffix. url += suffix if ( use_flags and url.startswith("mysql://") and FLAGS.mysql_assume_utf8_charset ): url += "?charset=utf8" return url def ColumnNames(model) -> typing.List[str]: """Return the names of all columns in a mapped object. Args: model: A mapped class. Returns: A list of string column names in the order that they are declared. """ try: inst = sql.inspect(model) return [c_attr.key for c_attr in inst.mapper.column_attrs] except sql.exc.NoInspectionAvailable as e: raise TypeError(str(e)) class Session(orm.session.Session): """A subclass of the default SQLAlchemy Session with added functionality. An instance of this class is returned by Database.Session(). """ def GetOrAdd( self, model, defaults: typing.Dict[str, object] = None, **kwargs ): """Instantiate a mapped database object. If the object is not in the database, add it. Note that no change is written to disk until commit() is called on the session. Args: model: The database table class. defaults: Default values for mapped objects. kwargs: The values for the table row. Returns: An instance of the model class, with the values specified. """ return GetOrAdd(self, model, defaults, **kwargs) class Database(object): """A base class for implementing databases.""" SessionType = Session def __init__(self, url: str, declarative_base, must_exist: bool = False): """Instantiate a database object. Example: >>> db = Database('sqlite:////tmp/foo.db', sqlalchemy.ext.declarative.declarative_base()) Args: url: The URL of the database to connect to. declarative_base: The SQLAlchemy declarative base instance. must_exist: If True, raise DatabaseNotFound if it doesn't exist. Else, database is created if it doesn't exist. Raises: DatabaseNotFound: If the database does not exist and must_exist is set. ValueError: If the datastore backend is not supported. """ self._url = url self.engine = CreateEngine(url, must_exist=must_exist) declarative_base.metadata.create_all(self.engine) declarative_base.metadata.bind = self.engine # Bind the Engine to a session maker, which instantiates our own Session # class, which is a subclass of the default SQLAlchemy Session with added # functionality. self.MakeSession = orm.sessionmaker(bind=self.engine, class_=Session) def Close(self) -> None: """Close the connection to the database. Use this to free up the connection to a database, while keeping the database instance around. After calling this method, attempting to run operations on this database will raise an error (like a sqlalchemy.exc.OperationalError). Usage of this method is generally discouraged - connections are automatically closed up when a database instance is garbage collected, so there are rarely cases for leaving a database instance around with the connection closed. Use at your peril! 
""" self.engine.dispose() def Drop(self, are_you_sure_about_this_flag: bool = False): """Drop the database, irreverisbly destroying it. Be careful with this! After calling this method an a Database instance, no further operations can be made on it, and any Sessions should be discarded. Args: are_you_sure_about_this_flag: You should be sure. Raises: ValueError: In case you're not 100% sure. """ if not are_you_sure_about_this_flag: raise ValueError("Let's take a minute to think things over") if self.url.startswith("mysql://"): engine = sql.create_engine("/".join(self.url.split("/")[:-1])) database = self.url.split("/")[-1].split("?")[0] logging.Log(logging.GetCallingModuleName(), 1, "database %s", database) engine.execute(f"DROP DATABASE IF EXISTS `{database}`") elif self.url == "sqlite://": # In-memory databases do not dropping. pass elif self.url.startswith("sqlite:///"): path = pathlib.Path(self.url[len("sqlite:///") :]) assert path.is_file() path.unlink() else: raise NotImplementedError( f"Unsupported operation DROP for database: '{self.url}'", ) @property def url(self) -> str: """Return the URL of the database.""" return self._url @contextlib.contextmanager def Session( self, commit: bool = False, session: Optional[Session] = None ) -> Session: """Provide a transactional scope around a session. The optional session argument may be used for cases where you want to optionally re-use an existing session, rather than always creating a new session, e.g.: class MyDatabase(sqlutil.Database): def DoAThing(self, session=None): with self.Session(session=session, commit=True): # go nuts ... Args: commit: If true, commit session at the end of scope. session: An existing session object to re-use. Returns: A database session. """ session = session or self.MakeSession() try: yield session if commit: session.commit() except: session.rollback() raise finally: session.close() @property def Random(self): """Get the backend-specific random function. This can be used to select a random row from a table, e.g. session.query(Table).order_by(db.Random()).first() """ if self.url.startswith("mysql"): return func.rand else: return func.random # for PostgreSQL, SQLite def __repr__(self) -> str: return self.url class TablenameFromClassNameMixin(object): """A class mixin which derives __tablename__ from the class name. Add this mixin to a mapped table class to automatically set the set the __tablename__ property of a class to the lowercase name of the Python class. """ @declarative.declared_attr def __tablename__(self): return self.__name__.lower() class TablenameFromCamelCapsClassNameMixin(object): """A class mixin which derives __tablename__ from the class name. Add this mixin to a mapped table class to automatically set the set the __tablename__ property of a class to the name of the Python class with camel caps converted to underscores, e.g. class FooBar -> table "foo_bar". """ @declarative.declared_attr def __tablename__(self): return text.CamelCapsToUnderscoreSeparated(self.__name__) class PluralTablenameFromCamelCapsClassNameMixin(object): """A class mixin which derives __tablename__ from the class name. Add this mixin to a mapped table class to automatically set the set the __tablename__ property of a class to the pluralized name of the Python class with camel caps converted to underscores, e.g. class FooBar -> table "foo_bars". 
""" @declarative.declared_attr def __tablename__(self): pluralised = humanize.Plural(2, self.__name__) pluralised = " ".join(pluralised.split()[1:]) return text.CamelCapsToUnderscoreSeparated(pluralised) class ProtoBackedMixin(object): """A database table backed by protocol buffers. This class provides the abstract interface for sqlalchemy table classes which support serialization to and from protocol buffers. This is only an interface - inheriting classes must still inherit from sqlalchemy.ext.declarative.declarative_base(). """ proto_t = None def SetProto(self, proto: pbutil.ProtocolBuffer) -> None: """Set the fields of a protocol buffer with the values from the instance. Args: proto: A protocol buffer. """ raise NotImplementedError( f"{type(self).__name__}.SetProto() not implemented", ) def ToProto(self) -> pbutil.ProtocolBuffer: """Serialize the instance to protocol buffer. Returns: A protocol buffer. """ proto = self.proto_t() self.SetProto(proto) return proto @classmethod def FromProto( cls, proto: pbutil.ProtocolBuffer, ) -> typing.Dict[str, typing.Any]: """Return a dictionary of instance constructor args from proto. Examples: Construct a table instance from proto: >>> table = Table(**Table.FromProto(proto)) Construct a table instance and add to session: >>> session.GetOrAdd(Table, **Table.FromProto(proto)) Args: proto: A protocol buffer. Returns: A dictionary of constructor arguments. """ raise NotImplementedError( f"{type(self).__name__}.FromProto() not implemented", ) @classmethod def FromFile(cls, path: pathlib.Path) -> typing.Dict[str, typing.Any]: """Return a dictionary of instance constructor args from proto file. Examples: Construct a table instance from proto file: >>> table = Table(**Table.FromFile(path)) Construct a table instance and add to session: >>> session.GetOrAdd(Table, **Table.FromFile(path)) Args: path: Path to a proto file. Returns: An instance. """ proto = pbutil.FromFile(path, cls.proto_t()) return cls.FromProto(proto) class OffsetLimitQueryResultsBatch(typing.NamedTuple): """The results of an offset-limit batched query.""" # The current batch number. batch_num: int # Offset into the results set. offset: int # Limit is the last row in the results set. limit: int # The total number of rows in the query if compute_max_rows=True, else None. max_rows: int # The results of the query. rows: typing.List[typing.Any] def OffsetLimitBatchedQuery( query: Query, batch_size: int = 1000, start_at: int = 0, compute_max_rows: bool = False, ) -> typing.Iterator[OffsetLimitQueryResultsBatch]: """Split and return the rows resulting from a query in to batches. This iteratively runs the query `SELECT * FROM * OFFSET i LIMIT batch_size;` with `i` initialized to `start_at` and increasing by `batch_size` per iteration. Iteration terminates when the query returns no rows. This function is useful for returning row sets from enormous tables, where loading the full query results in to memory would take prohibitive time or resources. Args: query: The query to run. batch_size: The number of rows to return per batch. start_at: The initial offset into the table. compute_max_rows: If true Returns: A generator of OffsetLimitQueryResultsBatch tuples, where each tuple contains between 1 <= x <= `batch_size` rows. 
""" max_rows = None if compute_max_rows: max_rows = query.count() batch_num = 0 i = start_at while True: batch_num += 1 batch = query.offset(i).limit(batch_size).all() if batch: yield OffsetLimitQueryResultsBatch( batch_num=batch_num, offset=i, limit=i + batch_size, max_rows=max_rows, rows=batch, ) i += len(batch) else: break class ColumnTypes(object): """Abstract class containing methods for generating column types.""" def __init__(self): raise TypeError("abstract class") @staticmethod def BinaryArray(length: int): """Return a fixed size binary array column type. Args: length: The length of the column. Returns: A column type. """ return sql.Binary(length).with_variant(mysql.BINARY(length), "mysql") @staticmethod def LargeBinary(): """Return a fixed size binary array column type. Returns: A column type. """ return sql.LargeBinary().with_variant(sql.LargeBinary(2 ** 31), "mysql") @staticmethod def UnboundedUnicodeText(): """Return an unbounded unicode text column type. This isn't truly unbounded, but 2^32 chars should be enough! Returns: A column type. """ return sql.UnicodeText().with_variant(sql.UnicodeText(2 ** 31), "mysql") @staticmethod def IndexableString(length: int = None): """Return a string that is short enough that it can be used as an index. Returns: A column type. """ # MySQL InnoDB tables use a default index key prefix length limit of 767. # https://dev.mysql.com/doc/refman/5.6/en/innodb-restrictions.html MAX_LENGTH = 767 if length and length > MAX_LENGTH: raise ValueError( f"IndexableString requested length {length} is greater " f"than maximum allowed {MAX_LENGTH}", ) return sql.String(MAX_LENGTH) @staticmethod def MillisecondDatetime(): """Return a datetime type with millisecond precision. Returns: A column type. """ return sql.DateTime().with_variant(mysql.DATETIME(fsp=3), "mysql") class ColumnFactory(object): """Abstract class containing methods for generating columns.""" @staticmethod def MillisecondDatetime( nullable: bool = False, default=labdate.GetUtcMillisecondsNow, ): """Return a datetime column with millisecond precision. Returns: A column which defaults to UTC now. """ return sql.Column( sql.DateTime().with_variant(mysql.DATETIME(fsp=3), "mysql",), nullable=nullable, default=default, ) def ResilientAddManyAndCommit(db: Database, mapped: typing.Iterable[Base]): """Attempt to commit all mapped objects and return those that fail. This method creates a session and commits the given mapped objects. In case of error, this method will recurse up to O(log(n)) times, committing as many objects that can be as possible. Args: db: The database to add the objects to. mapped: A sequence of objects to commit. Returns: Any items in `mapped` which could not be committed, if any. Relative order of items is preserved. """ failures = [] if not mapped: return failures mapped = list(mapped) try: with db.Session(commit=True) as session: session.add_all(mapped) except sql.exc.SQLAlchemyError as e: logging.Log( logging.GetCallingModuleName(), 1, "Caught error while committing %d mapped objects: %s", len(mapped), e, ) # Divide and conquer. If we're committing only a single object, then a # failure to commit it means that we can do nothing other than return it. # Else, divide the mapped objects in half and attempt to commit as many of # them as possible. 
    if len(mapped) == 1:
      return mapped
    else:
      mid = int(len(mapped) / 2)
      left = mapped[:mid]
      right = mapped[mid:]
      failures += ResilientAddManyAndCommit(db, left)
      failures += ResilientAddManyAndCommit(db, right)

  return failures


def QueryToString(query) -> str:
  """Compile the query to inline literals in place of '?' placeholders.

  See: https://stackoverflow.com/a/23835766
  """
  return str(query.statement.compile(compile_kwargs={"literal_binds": True}))


class BufferedDatabaseWriter(threading.Thread):
  """A buffered writer for adding objects to a database.

  Use this class for cases when you are producing lots of mapped objects that
  you would like to commit to a database, but don't require them to be
  committed immediately. By buffering objects and committing them in batches,
  this class minimises the number of SQL statements that are executed, and is
  faster than creating and committing a session for every object.

  This object spawns a separate thread for asynchronously performing database
  writes. Use AddOne() and AddMany() methods to add objects to the write
  buffer. Note that because this is a multithreaded implementation, in-memory
  SQLite databases are not supported.

  The user is responsible for calling Close() to flush the contents of the
  buffer and terminate the thread. Alternatively, use this class as a context
  manager to automatically flush the buffer and terminate the thread:

    with BufferedDatabaseWriter(db, max_buffer_length=128) as writer:
      for chunk in chunks_to_process:
        objs = ProcessChunk(chunk)
        writer.AddMany(objs)
  """

  def __init__(
    self,
    db: Database,
    max_buffer_size: Optional[int] = None,
    max_buffer_length: Optional[int] = None,
    max_seconds_since_flush: Optional[float] = None,
    log_level: int = 2,
    ctx: progress.ProgressContext = progress.NullContext,
  ):
    """Constructor.

    Args:
      db: The database to write to.
      max_buffer_size: The maximum size of the buffer before flushing, in
        bytes. The buffer size is the sum of the sizes of the elements in the
        write buffer. The size of elements is determined using
        sys.getsizeof(), and has all the caveats of this method.
      max_buffer_length: The maximum number of items in the write buffer
        before flushing.
      max_seconds_since_flush: The maximum number of elapsed seconds between
        flushes.
      log_level: The logging level for logging output.
      ctx: A progress context used to log and profile buffer flushes.
    """
    super(BufferedDatabaseWriter, self).__init__()
    self.db = db
    self.ctx = ctx
    self.log_level = log_level

    self.max_seconds_since_flush = max_seconds_since_flush
    self.max_buffer_size = max_buffer_size
    self.max_buffer_length = max_buffer_length

    # Counters.
    self.flush_count = 0
    self.error_count = 0

    self._buffer = []
    self.buffer_size = 0
    self._last_flush = time.time()

    # Limit the size of the queue so that calls to AddOne() or AddMany() will
    # block if the calling code is too far ahead of the writer.
    queue_size = self.max_buffer_length * 2 if self.max_buffer_length else 1000
    self._queue = queue.Queue(maxsize=queue_size)

    self.start()

  def __enter__(self) -> "BufferedDatabaseWriter":
    """Enter a scoped writer context that is closed on exit."""
    return self

  def __exit__(self, exc_type, exc_val, exc_tb):
    """Exit the scoped writer context, flushing the buffer and closing the thread."""
    del exc_type
    del exc_val
    del exc_tb
    self.Close()

  def AddOne(self, mapped, size: Optional[int] = None) -> None:
    """Add a mapped object.

    Args:
      mapped: The mapped object to write to the database.
      size: The object size to use to update the total buffer size. If not
        provided, sys.getsizeof() is used to determine the size.
""" size = size or sys.getsizeof(mapped) self._queue.put((mapped, size)) def AddMany(self, mappeds, sizes: Optional[List[int]] = None) -> None: """Add many mapped objects. Args: mappeds: The mapped objects to write to the database. sizes: A list of mapped object sizes to use to calculate the buffer size. If not provided, sys.getsizeof() is used to determine the size. """ sizes = sizes or [sys.getsizeof(item) for item in mappeds] for mapped, size in zip(mappeds, sizes): self._queue.put((mapped, size)) def AddLambdaOp(self, callback: Callable[[Database.SessionType], None]): self._queue.put(BufferedDatabaseWriter.LambdaOp(callback)) def Flush(self) -> None: """Flush the buffer. This method blocks until the flush has completed. In normal use, you can rely on the automated flushing mechanisms to flush the write buffer, rather than calling this by hand. """ self._queue.put(BufferedDatabaseWriter.FlushMarker()) self._queue.join() def Close(self): """Close the writer thread. This method blocks until the buffer has been flushed and the thread terminates. """ if not self.is_alive(): raise TypeError("Close() called on dead BufferedDatabaseWriter") self._queue.put(BufferedDatabaseWriter.CloseMarker()) self._queue.join() self.join() @property def buffer_length(self) -> int: """Get the current length of the buffer, in range [0, max_buffer_length].""" return len(self._buffer) @property def seconds_since_last_flush(self) -> float: """Get the number of seconds since the buffer was last flushed.""" return time.time() - self._last_flush ############################################################################## # Private methods. ############################################################################## class CloseMarker(object): """An object to append to _queue to close the thread.""" pass class FlushMarker(object): """An object to append to _queue to flush the buffer.""" pass class LambdaOp(object): def __init__(self, callback): self.callback = callback def __call__(self, session: Database.SessionType): self.callback(session) def run(self): """The thread loop.""" while True: # Block until there is something on the queue. Use max_seconds_since_flush # as a timeout to ensure that flushes still occur when the writer is not # being used. try: item = self._queue.get(timeout=self.max_seconds_since_flush) except queue.Empty: self._Flush() continue if isinstance(item, BufferedDatabaseWriter.CloseMarker): # End of queue. Break out of the loop. break elif isinstance(item, BufferedDatabaseWriter.FlushMarker): # Force a flush. self._Flush() elif isinstance(item, BufferedDatabaseWriter.LambdaOp): # Handle delete op. self._buffer.append(item) self._MaybeFlush() else: # Add the object to the buffer. mapped, size = item self._buffer.append(mapped) self.buffer_size += size self._MaybeFlush() # Register that the item has been processed. This is used by join() to # signal to stop blocking. self._queue.task_done() # Register that the end-of-queue marker has been processed. 
self._Flush() self._queue.task_done() def _MaybeFlush(self) -> None: if ( (self.max_buffer_size and self.buffer_size >= self.max_buffer_size) or ( self.max_buffer_length and self.buffer_length >= self.max_buffer_length ) or ( self.max_seconds_since_flush and self.seconds_since_last_flush >= self.max_seconds_since_flush ) ): self._Flush() def _AddMapped(self, mapped) -> None: """Add and commit a list of mapped objects.""" if not mapped: return failures = ResilientAddManyAndCommit(self.db, mapped) if failures: self.ctx.Error("Logger failed to commit %d objects", len(failures)) self.error_count += len(failures) def _Flush(self): """Flush the buffer.""" if not self._buffer: return with self.ctx.Profile( self.log_level, f"Committed {self.buffer_length} rows " f"({humanize.BinaryPrefix(self.buffer_size, 'B')}) to {self.db.url}", ), self.db.Session() as session: # Iterate through the buffer and handle any lambda ops. start_i, end_i = 0, 0 for end_i, item in enumerate(self._buffer): if isinstance(item, BufferedDatabaseWriter.LambdaOp): # If we have a lambda op, we flush the contents of the current buffer, # then execute the op and continue. self._AddMapped(self._buffer[start_i:end_i]) self._buffer[end_i](session) session.commit() start_i = end_i + 1 # Add any remaining mapped objects from the buffer. self._AddMapped(self._buffer[start_i:]) self._buffer = [] self._last_flush = time.time() self.buffer_size = 0 self.flush_count += 1
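

if __name__ == "__main__":
  # Minimal usage sketch (illustrative only, not part of the library API): it
  # wires together Base(), Database, and Session.GetOrAdd() defined above. The
  # `Person` table, its columns, and the /tmp database path are hypothetical
  # choices for this demo; real code would normally be driven via absl's
  # app.run() rather than parsing flag defaults by hand.
  FLAGS(["sqlutil_demo"])  # Parse flag defaults; CreateEngine() reads FLAGS.

  demo_base = Base()

  class Person(TablenameFromClassNameMixin, demo_base):
    id = sql.Column(sql.Integer, primary_key=True)
    name = sql.Column(sql.String(64), nullable=False)

  db = Database("sqlite:////tmp/sqlutil_demo.db", demo_base)
  with db.Session(commit=True) as session:
    session.GetOrAdd(Person, name="alice")
    print("People in database:", session.query(Person).count())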
the-stack_0_14274
""" In this example we modify the mesh of a shape by moving the points along the normals to the surface and along the radius of a sphere centered at the center of mass. At each step we redefine the actor so that the normals are recalculated for the underlying polydata. """ from __future__ import division, print_function from vtkplotter import * settings.computeNormals = True # on object creation by default vp = Plotter(axes=0, verbose=0, bg="w") s = vp.load(datadir+"290.vtk", c="red") c = s.centerOfMass() vp += [Point(c), Text(__doc__, c="k")] Niter = 4 for t in range(Niter): print("iteration", t) coords = s.coordinates() normals = s.normals() aves = s.averageSize() * 1.5 for i in range(s.N()): n = normals[i] p = coords[i] q = versor(p - c) * aves + c # versor = unit vector dp = mag(q - p) alongn = n * dp alongr = q - p # bias normal newp = p + (alongn + alongr) / 2 / Niter s.setPoint(i, newp) # refresh actor, so polydata normals are recalculated s = s.clone() vp += s.alpha(0.1).color("gold").wireframe(True) #add into Plotter vp.show()
the-stack_0_14275
import typing from datetime import datetime from ParadoxTrading.Indicator.Bar.BarIndicatorAbstract import BarIndicatorAbstract from ParadoxTrading.Utils import DataStruct class OpenBar(BarIndicatorAbstract): def __init__( self, _use_key: str, _idx_key: str = 'time', _ret_key: str = 'open' ): super().__init__() self.use_key = _use_key self.idx_key = _idx_key self.ret_key = _ret_key self.data = DataStruct( [self.idx_key, self.ret_key], self.idx_key ) def _addOne( self, _data_struct: DataStruct, _idx: typing.Union[str, datetime] = None ): self.data.addDict({ self.idx_key: _idx, self.ret_key: _data_struct[self.use_key][0] })
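

if __name__ == '__main__':
    # Minimal usage sketch (not part of the library): build a one-row bar of
    # raw data, feed it to the indicator, and inspect the stored open price.
    # The field name 'openprice' and the sample values are hypothetical, and
    # the DataStruct construction simply mirrors the pattern used in __init__
    # above; callers normally go through the BarIndicatorAbstract interface
    # rather than calling _addOne directly.
    bar = DataStruct(['time', 'openprice'], 'time')
    bar.addDict({'time': datetime(2018, 1, 2, 9, 30), 'openprice': 3300.0})

    indicator = OpenBar(_use_key='openprice')
    indicator._addOne(bar, _idx=datetime(2018, 1, 2, 9, 30))
    print(indicator.data)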
the-stack_0_14276
# Copyright (c) 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import mock import netaddr from webob import exc from nova.api.openstack.compute.contrib import hypervisors as hypervisors_v2 from nova.api.openstack.compute.plugins.v3 import hypervisors \ as hypervisors_v21 from nova.api.openstack import extensions from nova import context from nova import db from nova import exception from nova import objects from nova import test from nova.tests.unit.api.openstack import fakes TEST_HYPERS = [ dict(id=1, service_id=1, host="compute1", vcpus=4, memory_mb=10 * 1024, local_gb=250, vcpus_used=2, memory_mb_used=5 * 1024, local_gb_used=125, hypervisor_type="xen", hypervisor_version=3, hypervisor_hostname="hyper1", free_ram_mb=5 * 1024, free_disk_gb=125, current_workload=2, running_vms=2, cpu_info='cpu_info', disk_available_least=100, host_ip=netaddr.IPAddress('1.1.1.1')), dict(id=2, service_id=2, host="compute2", vcpus=4, memory_mb=10 * 1024, local_gb=250, vcpus_used=2, memory_mb_used=5 * 1024, local_gb_used=125, hypervisor_type="xen", hypervisor_version=3, hypervisor_hostname="hyper2", free_ram_mb=5 * 1024, free_disk_gb=125, current_workload=2, running_vms=2, cpu_info='cpu_info', disk_available_least=100, host_ip=netaddr.IPAddress('2.2.2.2'))] TEST_SERVICES = [ objects.Service(id=1, host="compute1", binary="nova-compute", topic="compute_topic", report_count=5, disabled=False, disabled_reason=None, availability_zone="nova"), objects.Service(id=2, host="compute2", binary="nova-compute", topic="compute_topic", report_count=5, disabled=False, disabled_reason=None, availability_zone="nova"), ] TEST_HYPERS_OBJ = [objects.ComputeNode(**hyper_dct) for hyper_dct in TEST_HYPERS] TEST_HYPERS[0].update({'service': TEST_SERVICES[0]}) TEST_HYPERS[1].update({'service': TEST_SERVICES[1]}) TEST_SERVERS = [dict(name="inst1", uuid="uuid1", host="compute1"), dict(name="inst2", uuid="uuid2", host="compute2"), dict(name="inst3", uuid="uuid3", host="compute1"), dict(name="inst4", uuid="uuid4", host="compute2")] def fake_compute_node_get_all(context): return TEST_HYPERS_OBJ def fake_compute_node_search_by_hypervisor(context, hypervisor_re): return TEST_HYPERS_OBJ def fake_compute_node_get(context, compute_id): for hyper in TEST_HYPERS_OBJ: if hyper.id == int(compute_id): return hyper raise exception.ComputeHostNotFound(host=compute_id) @classmethod def fake_service_get_by_host_and_topic(cls, context, host, topic): for service in TEST_SERVICES: if service.host == host: return service def fake_compute_node_statistics(context): result = dict( count=0, vcpus=0, memory_mb=0, local_gb=0, vcpus_used=0, memory_mb_used=0, local_gb_used=0, free_ram_mb=0, free_disk_gb=0, current_workload=0, running_vms=0, disk_available_least=0, ) for hyper in TEST_HYPERS_OBJ: for key in result: if key == 'count': result[key] += 1 else: result[key] += hyper[key] return result def fake_instance_get_all_by_host(context, host): results = [] for inst in TEST_SERVERS: if inst['host'] == host: 
results.append(inst) return results class HypervisorsTestV21(test.NoDBTestCase): DETAIL_HYPERS_DICTS = copy.deepcopy(TEST_HYPERS) del DETAIL_HYPERS_DICTS[0]['service_id'] del DETAIL_HYPERS_DICTS[1]['service_id'] del DETAIL_HYPERS_DICTS[0]['host'] del DETAIL_HYPERS_DICTS[1]['host'] DETAIL_HYPERS_DICTS[0].update({'state': 'up', 'status': 'enabled', 'service': dict(id=1, host='compute1', disabled_reason=None)}) DETAIL_HYPERS_DICTS[1].update({'state': 'up', 'status': 'enabled', 'service': dict(id=2, host='compute2', disabled_reason=None)}) INDEX_HYPER_DICTS = [ dict(id=1, hypervisor_hostname="hyper1", state='up', status='enabled'), dict(id=2, hypervisor_hostname="hyper2", state='up', status='enabled')] NO_SERVER_HYPER_DICTS = copy.deepcopy(INDEX_HYPER_DICTS) NO_SERVER_HYPER_DICTS[0].update({'servers': []}) NO_SERVER_HYPER_DICTS[1].update({'servers': []}) def _get_request(self, use_admin_context): return fakes.HTTPRequest.blank('', use_admin_context=use_admin_context) def _set_up_controller(self): self.controller = hypervisors_v21.HypervisorsController() self.controller.servicegroup_api.service_is_up = mock.MagicMock( return_value=True) def setUp(self): super(HypervisorsTestV21, self).setUp() self._set_up_controller() self.stubs.Set(self.controller.host_api, 'compute_node_get_all', fake_compute_node_get_all) self.stubs.Set(objects.Service, 'get_by_host_and_topic', fake_service_get_by_host_and_topic) self.stubs.Set(self.controller.host_api, 'compute_node_search_by_hypervisor', fake_compute_node_search_by_hypervisor) self.stubs.Set(self.controller.host_api, 'compute_node_get', fake_compute_node_get) self.stubs.Set(db, 'compute_node_statistics', fake_compute_node_statistics) self.stubs.Set(db, 'instance_get_all_by_host', fake_instance_get_all_by_host) def test_view_hypervisor_nodetail_noservers(self): result = self.controller._view_hypervisor( TEST_HYPERS_OBJ[0], TEST_SERVICES[0], False) self.assertEqual(result, self.INDEX_HYPER_DICTS[0]) def test_view_hypervisor_detail_noservers(self): result = self.controller._view_hypervisor( TEST_HYPERS_OBJ[0], TEST_SERVICES[0], True) self.assertEqual(result, self.DETAIL_HYPERS_DICTS[0]) def test_view_hypervisor_servers(self): result = self.controller._view_hypervisor(TEST_HYPERS_OBJ[0], TEST_SERVICES[0], False, TEST_SERVERS) expected_dict = copy.deepcopy(self.INDEX_HYPER_DICTS[0]) expected_dict.update({'servers': [ dict(name="inst1", uuid="uuid1"), dict(name="inst2", uuid="uuid2"), dict(name="inst3", uuid="uuid3"), dict(name="inst4", uuid="uuid4")]}) self.assertEqual(result, expected_dict) def test_index(self): req = self._get_request(True) result = self.controller.index(req) self.assertEqual(result, dict(hypervisors=self.INDEX_HYPER_DICTS)) def test_index_non_admin(self): req = self._get_request(False) self.assertRaises(exception.PolicyNotAuthorized, self.controller.index, req) def test_detail(self): req = self._get_request(True) result = self.controller.detail(req) self.assertEqual(result, dict(hypervisors=self.DETAIL_HYPERS_DICTS)) def test_detail_non_admin(self): req = self._get_request(False) self.assertRaises(exception.PolicyNotAuthorized, self.controller.detail, req) def test_show_noid(self): req = self._get_request(True) self.assertRaises(exc.HTTPNotFound, self.controller.show, req, '3') def test_show_non_integer_id(self): req = self._get_request(True) self.assertRaises(exc.HTTPNotFound, self.controller.show, req, 'abc') def test_show_withid(self): req = self._get_request(True) result = self.controller.show(req, '1') self.assertEqual(result, 
dict(hypervisor=self.DETAIL_HYPERS_DICTS[0])) def test_show_non_admin(self): req = self._get_request(False) self.assertRaises(exception.PolicyNotAuthorized, self.controller.show, req, '1') def test_uptime_noid(self): req = self._get_request(True) self.assertRaises(exc.HTTPNotFound, self.controller.uptime, req, '3') def test_uptime_notimplemented(self): def fake_get_host_uptime(context, hyp): raise exc.HTTPNotImplemented() self.stubs.Set(self.controller.host_api, 'get_host_uptime', fake_get_host_uptime) req = self._get_request(True) self.assertRaises(exc.HTTPNotImplemented, self.controller.uptime, req, '1') def test_uptime_implemented(self): def fake_get_host_uptime(context, hyp): return "fake uptime" self.stubs.Set(self.controller.host_api, 'get_host_uptime', fake_get_host_uptime) req = self._get_request(True) result = self.controller.uptime(req, '1') expected_dict = copy.deepcopy(self.INDEX_HYPER_DICTS[0]) expected_dict.update({'uptime': "fake uptime"}) self.assertEqual(result, dict(hypervisor=expected_dict)) def test_uptime_non_integer_id(self): req = self._get_request(True) self.assertRaises(exc.HTTPNotFound, self.controller.uptime, req, 'abc') def test_uptime_non_admin(self): req = self._get_request(False) self.assertRaises(exception.PolicyNotAuthorized, self.controller.uptime, req, '1') def test_search(self): req = self._get_request(True) result = self.controller.search(req, 'hyper') self.assertEqual(result, dict(hypervisors=self.INDEX_HYPER_DICTS)) def test_search_non_admin(self): req = self._get_request(False) self.assertRaises(exception.PolicyNotAuthorized, self.controller.search, req, '1') def test_search_non_exist(self): def fake_compute_node_search_by_hypervisor_return_empty(context, hypervisor_re): return [] self.stubs.Set(self.controller.host_api, 'compute_node_search_by_hypervisor', fake_compute_node_search_by_hypervisor_return_empty) req = self._get_request(True) self.assertRaises(exc.HTTPNotFound, self.controller.search, req, 'a') def test_servers(self): req = self._get_request(True) result = self.controller.servers(req, 'hyper') expected_dict = copy.deepcopy(self.INDEX_HYPER_DICTS) expected_dict[0].update({'servers': [ dict(name="inst1", uuid="uuid1"), dict(name="inst3", uuid="uuid3")]}) expected_dict[1].update({'servers': [ dict(name="inst2", uuid="uuid2"), dict(name="inst4", uuid="uuid4")]}) self.assertEqual(result, dict(hypervisors=expected_dict)) def test_servers_non_id(self): def fake_compute_node_search_by_hypervisor_return_empty(context, hypervisor_re): return [] self.stubs.Set(self.controller.host_api, 'compute_node_search_by_hypervisor', fake_compute_node_search_by_hypervisor_return_empty) req = self._get_request(True) self.assertRaises(exc.HTTPNotFound, self.controller.servers, req, '115') def test_servers_non_admin(self): req = self._get_request(False) self.assertRaises(exception.PolicyNotAuthorized, self.controller.servers, req, '1') def test_servers_with_non_integer_hypervisor_id(self): def fake_compute_node_search_by_hypervisor_return_empty(context, hypervisor_re): return [] self.stubs.Set(self.controller.host_api, 'compute_node_search_by_hypervisor', fake_compute_node_search_by_hypervisor_return_empty) req = self._get_request(True) self.assertRaises(exc.HTTPNotFound, self.controller.servers, req, 'abc') def test_servers_with_no_server(self): def fake_instance_get_all_by_host_return_empty(context, hypervisor_re): return [] self.stubs.Set(db, 'instance_get_all_by_host', fake_instance_get_all_by_host_return_empty) req = self._get_request(True) result = 
self.controller.servers(req, '1') self.assertEqual(result, dict(hypervisors=self.NO_SERVER_HYPER_DICTS)) def test_statistics(self): req = self._get_request(True) result = self.controller.statistics(req) self.assertEqual(result, dict(hypervisor_statistics=dict( count=2, vcpus=8, memory_mb=20 * 1024, local_gb=500, vcpus_used=4, memory_mb_used=10 * 1024, local_gb_used=250, free_ram_mb=10 * 1024, free_disk_gb=250, current_workload=4, running_vms=4, disk_available_least=200))) def test_statistics_non_admin(self): req = self._get_request(False) self.assertRaises(exception.PolicyNotAuthorized, self.controller.statistics, req) class HypervisorsTestV2(HypervisorsTestV21): DETAIL_HYPERS_DICTS = copy.deepcopy( HypervisorsTestV21.DETAIL_HYPERS_DICTS) del DETAIL_HYPERS_DICTS[0]['state'] del DETAIL_HYPERS_DICTS[1]['state'] del DETAIL_HYPERS_DICTS[0]['status'] del DETAIL_HYPERS_DICTS[1]['status'] del DETAIL_HYPERS_DICTS[0]['service']['disabled_reason'] del DETAIL_HYPERS_DICTS[1]['service']['disabled_reason'] del DETAIL_HYPERS_DICTS[0]['host_ip'] del DETAIL_HYPERS_DICTS[1]['host_ip'] INDEX_HYPER_DICTS = copy.deepcopy(HypervisorsTestV21.INDEX_HYPER_DICTS) del INDEX_HYPER_DICTS[0]['state'] del INDEX_HYPER_DICTS[1]['state'] del INDEX_HYPER_DICTS[0]['status'] del INDEX_HYPER_DICTS[1]['status'] NO_SERVER_HYPER_DICTS = copy.deepcopy( HypervisorsTestV21.NO_SERVER_HYPER_DICTS) del NO_SERVER_HYPER_DICTS[0]['state'] del NO_SERVER_HYPER_DICTS[1]['state'] del NO_SERVER_HYPER_DICTS[0]['status'] del NO_SERVER_HYPER_DICTS[1]['status'] del NO_SERVER_HYPER_DICTS[0]['servers'] del NO_SERVER_HYPER_DICTS[1]['servers'] def _set_up_controller(self): self.context = context.get_admin_context() self.ext_mgr = extensions.ExtensionManager() self.ext_mgr.extensions = {} self.controller = hypervisors_v2.HypervisorsController(self.ext_mgr)
the-stack_0_14278
# coding: utf-8 # pew in unshortener-venv python ~/wm-dist-tmp/Unshortener/unshortener/unshortener.py import requests from datatools.url import * from urllib.request import urlopen from systemtools.basics import * from systemtools.location import * from systemtools.logger import * import requests.auth from datastructuretools.hashmap import * from hjwebbrowser.httpbrowser import * from hjwebbrowser.browser import * from hjwebbrowser.utils import * from threading import Thread try: from systemtools.hayj import * except: pass import random from unshortener import config as unsConfig class Unshortener(): """ See the README """ def __init__ \ ( self, logger=None, verbose=True, serializableDictParams=\ { "limit": 10000000, "name": "unshortenedurls", "cacheCheckRatio": 0.0, "mongoIndex": "url", }, httpBrowserParams=\ { "maxRetryWithoutProxy": 0, "maxRetryIfTimeout": 1, "maxRetryIf407": 1, }, user=None, password=None, host=None, useMongodb=None, hostname=None, shortenersDomainsFilePath=None, retryFailedRatio=0.5, useProxy=True, randomProxyFunct=None, timeout=25, maxRetry=2, nextTriesTimeoutRatio=0.3, readOnly=False, proxy=None, ): self.useMongodb = useMongodb if self.useMongodb is None: self.useMongodb = unsConfig.useMongodb # We store some params: self.retryFailedRatio = retryFailedRatio self.verbose = verbose self.logger = logger self.timeout = timeout self.maxRetries = maxRetry self.nextTriesTimeoutRatio = nextTriesTimeoutRatio self.readOnly = readOnly self.proxy = proxy # We create the url parser: self.urlParser = URLParser() # We get the default randomProxyFunct: self.useProxy = useProxy self.randomProxyFunct = randomProxyFunct if self.randomProxyFunct is None: try: self.randomProxyFunct = getRandomProxy except: pass if self.randomProxyFunct is None: self.useProxy = False # We init the mongo collection through SerializableDict: self.serializableDictParams = serializableDictParams if hostname is None: hostname = unsConfig.hostname if host is None: host = unsConfig.host if user is None: user = unsConfig.user if password is None: password = unsConfig.password if user == "hayj": try: (user, password, host) = getMongoAuth(user=user, hostname=hostname) except: pass self.serializableDictParams["user"] = user self.serializableDictParams["password"] = password self.serializableDictParams["host"] = host self.serializableDictParams["logger"] = self.logger self.serializableDictParams["verbose"] = self.verbose self.serializableDictParams["useMongodb"] = self.useMongodb self.data = SerializableDict(**self.serializableDictParams) # We get shorteners domains: self.shortenersDomainsFilePath = shortenersDomainsFilePath if self.shortenersDomainsFilePath is None: self.shortenersDomainsFilePath = getDataDir() + "/Misc/crawling/shorteners.txt" self.shortenersDomains = None self.initShortenersDomains() # We create the http browser: self.httpBrowserParams = httpBrowserParams self.httpBrowser = HTTPBrowser(logger=self.logger, verbose=self.verbose, **self.httpBrowserParams) def initShortenersDomains(self): if self.shortenersDomains is None: if not isFile(self.shortenersDomainsFilePath): raise Exception("File " + str(self.shortenersDomainsFilePath) + " not found.") shorteners = fileToStrList(self.shortenersDomainsFilePath, removeDuplicates=True) newShorteners = [] for current in shorteners: current = current.lower() newShorteners.append(current) shorteners = newShorteners self.shortenersDomains = set() for current in shorteners: newCurrent = self.urlParser.getDomain(current) self.shortenersDomains.add(newCurrent) 
self.shortenersDomains = list(self.shortenersDomains) # We filter all by presence of a point: newShortenersDomains= [] for current in self.shortenersDomains: if "." in current: newShortenersDomains.append(current) self.shortenersDomains = newShortenersDomains def reduceIrrelevantUrls(self, isRelevantUrlFunct): """ If some last urls are not enough relevant to keep the html content You can delete it by call this method You have to give a funct in params This method can take a long time and will update all row so you will loose old/new read/write sort. """ for theHash, current in self.data.items(): if isRelevantUrlFunct(current["lastUrl"]): if dictContains(current, "relevant") and not current["relevant"]: logError("You previously set this row as irrelevant but now you set it as relevant, so you lost the html data, you can re-set the html data using hjwebbrowser.httpbrowser.HTTPBrowser", self) logError(reduceDictStr(current), self) self.data.updateRow(theHash, "relevant", True) else: self.data.updateRow(theHash, "html", None) self.data.updateRow(theHash, "relevant", False) def getUnshortenersDomains(self): return self.shortenersDomains def close(self): self.data.close() def isShortened(self, *args, **kwargs): return self.isShortener(*args, **kwargs) def isShortener(self, url): """ Use this method to test if an url come from an unshortener service """ smartDomain = self.urlParser.getDomain(url) return smartDomain in self.shortenersDomains def has(self, *args, **kwargs): return self.hasKey(*args, **kwargs) def isAlreadyUnshortened(self, *args, **kwargs): return self.hasKey(*args, **kwargs) def hasKey(self, url): """ This method test if an url was already unshortened before """ url = self.urlParser.normalize(url) return self.data.hasKey(url) def unshort\ ( self, *args, **kwargs ): """ This method will call request but give the last url (unshortened) instead of all data """ result = self.request(*args, **kwargs) if result is None: return None else: if dictContains(result, "lastUrl"): return result["lastUrl"] else: return None def add(self, result, onlyHttpBrowser=True): # We check readOnly: if self.readOnly: logError("The unshortener is set as read only!", self) return False # We check None: if result is None or not isinstance(result, dict): logError("No data found to add in unshortener!", self) return False resultStr = lts(reduceDictStr(result)) # We check keys: for key in \ [ "lastUrl", "browser", "lastUrlDomain", "historyCount", "html", "title", "status", ]: if key not in result: logError(key + " is not in:\n" + resultStr, self) return False # We check keys not None: for key in ["url", "domain"]: if not dictContains(result, key): logError(key + " is not in:\n" + resultStr, self) return False # We check the browser: if onlyHttpBrowser and result["browser"] != "http": logError("The browser must be an http browser!", self) return False # We delete and add some elements: if "crawlingElement" in result: del result["crawlingElement"] if "relevant" not in result: result["relevant"] = True # We check the status: if result["httpStatus"] == 200 or result["httpStatus"] == 404: # We add the data: self.data[result["url"]] = result return True else: logError("Cant't add this data to unshortener because of the http status:\n"\ + resultStr, self) return False return False def request\ ( self, url, force=False, retriesCount=0, ): """ This method will request the given url You can read the last url (unshortened) in the field "lastUrl" of the returned dict If the request failed, this method return None force as 
True will give the last url for the request, even if it is not a shortener... """ # We set the timeout: timeout = self.timeout if retriesCount >= 1: timeout = int(self.nextTriesTimeoutRatio * timeout) # We parse the url: url = self.urlParser.normalize(url) smartDomain = self.urlParser.getDomain(url) # We return None if we don't have to request it: thisIsAShortener = smartDomain in self.shortenersDomains if not force and not thisIsAShortener: return None # We check if we already have the url: if self.data.hasKey(url): # log(url + " was in the Unshortener database!", self) return self.data.get(url) # If we read only, we don't request the url: elif self.readOnly: # log(url + " is not in the database and the unshortener was set as read only!", self) return None # Else we can request it: else: # We get a random proxy: proxy = None if self.useProxy: proxy = self.proxy if proxy is None: proxy = self.randomProxyFunct() # We set the proxy and the timeout: self.httpBrowser.setProxy(proxy) self.httpBrowser.setTimeout(timeout) # We request the url: result = self.httpBrowser.get(url) # We add some params to the result: result["url"] = url # result["isShortener"] = thisIsAShortener result["relevant"] = True # And if the request succeded: # if result["status"] == REQUEST_STATUS.duplicate or \ # result["status"] == REQUEST_STATUS.success or \ # result["status"] == REQUEST_STATUS.error404 or \ # result["status"] == REQUEST_STATUS.timeoutWithContent: if result["httpStatus"] == 200 or \ result["httpStatus"] == 404: # We add the row: self.data[url] = result # We log it: log("Unshort succedded: " + url, self) log(getRequestInfos(result), self) # And finally we return the result: return result # Else we retry: else: # We log the error: log("Unshort failed: " + url, self) log(getRequestInfos(result), self) # log(listToStr(reduceDictStr(result, replaceNewLine=True)), self) # If we can retry: if retriesCount < self.maxRetries: # We recursively call the method: log("We retry to unshort: " + url, self) return self.request(url, force=force, retriesCount=retriesCount+1) # If we failed, we just return None: else: return None def getRequestInfos(result): return str(result["proxy"]) + " " + str(result["status"].name) + " (" + str(result["httpStatus"]) + ")" def test1(): uns = Unshortener(host="localhost") url = "https://api.ipify.org/?format=json" # url = "http://httpbin.org/redirect/3" printLTS(uns.unshort(url, force=True)) def test2(): uns = Unshortener(host="localhost") printLTS(uns.getUnshortenersDomains()) def test3(): def getShares(crawlOrScrap): if dictContains(crawlOrScrap, "scrap"): scrap = crawlOrScrap["scrap"] else: scrap = crawlOrScrap if dictContains(scrap, "tweets"): tweets = scrap["tweets"] for tweet in tweets: if dictContains(tweet, "shares"): for share in tweet["shares"]: yield share uns = Unshortener(host="localhost", useProxy=False) (user, password, host) = getStudentMongoAuth() collection = MongoCollection("twitter", "usercrawl", user=user, password=password, host=host) i = 0 for current in collection.find(): urls = list(getShares(current)) for url in urls: url = url["url"] if getRandomFloat() > 0.8 and (uns.isShortener(url) or getRandomFloat() > 0.95): print(url) print("isShortener: " + str(uns.isShortener(url))) print(uns.unshort(url, force=True)) print() print() print() print() # input() i += 1 if i > 100: exit() def test4(): urls = \ [ "http://ow.ly/DIFx30hfmsE", "http://bit.ly/2jBKQoh", ] uns = Unshortener(host="localhost") print() print() print() print() for url in urls: print(url) 
print("isShortener: " + str(uns.isShortener(url))) print(uns.unshort(url, force=True)) print() print() print() print() def testAlexis(): uns = Unshortener\ ( shortenersDomainsFilePath="/tmp", useProxy=False, randomProxyFunct=None, proxy=None, serializableDictParams=\ { "limit": 10000000, "useMongodb": False, "name": "unshortenedurls", "cacheCheckRatio": 0.0, "mongoIndex": "url", "serializeEachNAction": 1, } ) print(uns.unshort("https://bit.ly/2Hor6PN")) if __name__ == '__main__': # test1() # test2() # test3() # test4() testAlexis()
the-stack_0_14279
import hashlib import json from time import time from uuid import uuid4 from flask import Flask, jsonify, request import sys class Blockchain(object): def __init__(self): self.chain = [] self.current_transactions = [] self.nodes = set() self.new_block(previous_hash=1, proof=99) def new_block(self, proof, previous_hash=None): """ Create a new Block in the Blockchain :param proof: <int> The proof given by the Proof of Work algorithm :param previous_hash: (Optional) <str> Hash of previous Block :return: <dict> New Block """ block = { 'index': len(self.chain) + 1, 'timestamp': time(), 'transactions': self.current_transactions, 'proof': proof, 'previous_hash': previous_hash or self.hash(self.chain[-1]), } # Reset the current list of transactions self.current_transactions = [] self.chain.append(block) return block def new_transaction(self, sender, recipient, amount): """ Creates a new transaction to go into the next mined Block :param sender: <str> Address of the Recipient :param recipient: <str> Address of the Recipient :param amount: <int> Amount :return: <int> The index of the BLock that will hold this transaction """ self.current_transactions.append({ 'sender': sender, 'recipient': recipient, 'amount': amount, }) return self.last_block['index'] + 1 @staticmethod def hash(block): """ Creates a SHA-256 hash of a Block :param block": <dict> Block "return": <str> """ # We must make sure that the Dictionary is Ordered, # or we'll have inconsistent hashes block_string = json.dumps(block, sort_keys=True).encode() return hashlib.sha256(block_string).hexdigest() @property def last_block(self): return self.chain[-1] @staticmethod def valid_proof(last_proof, proof): """ Validates the Proof: Does hash(last_proof, proof) contain 6 leading zeroes? """ guess = f'{last_proof}{proof}'.encode() guess_hash = hashlib.sha256(guess).hexdigest() return guess_hash[:6] == "000000" def valid_chain(self, chain): """ Determine if a given blockchain is valid :param chain: <list> A blockchain :return: <bool> True if valid, False if not """ last_block = chain[0] current_index = 1 while current_index < len(chain): block = chain[current_index] print(f'{last_block}') print(f'{block}') print("\n-------------------\n") # Check that the hash of the block is correct if block['previous_hash'] != self.hash(last_block): return False # Check that the Proof of Work is correct if not self.valid_proof(last_block['proof'], block['proof']): return False last_block = block current_index += 1 return True # Instantiate our Node app = Flask(__name__) # Generate a globally unique address for this node node_identifier = str(uuid4()).replace('-', '') # Instantiate the Blockchain blockchain = Blockchain() @app.route('/mine', methods=['POST']) # changed from GET to POST def mine(): values = request.get_json() print("VALUES", values) # Check that the required fields are in the POST'ed data required = ['proof'] if not all(k in values for k in required): return 'Missing Values', 400 # use valid_proof to validate the client's proof validated = blockchain.valid_proof(blockchain.last_block['proof'], values['proof']) if validated: # create transaction for the reward blockchain.new_transaction( sender='0', recipient=node_identifier, amount=1, ) # add block to chain previous_hash = blockchain.hash(blockchain.last_block) block = blockchain.new_block(values['proof'], previous_hash) # send a success message return jsonify(message='New Block Forged'), 200 # else send a message that it's not validated return jsonify(message='Error, proof not validated or proof 
has changed'), 200 @app.route('/transactions/new', methods=['POST']) def new_transaction(): values = request.get_json() # Check that the required fields are in the POST'ed data required = ['sender', 'recipient', 'amount'] if not all(k in values for k in required): return 'Missing Values', 400 # Create a new Transaction index = blockchain.new_transaction(values['sender'], values['recipient'], values['amount']) response = {'message': f'Transaction will be added to Block {index}'} return jsonify(response), 201 @app.route('/chain', methods=['GET']) def full_chain(): response = { 'chain': blockchain.chain, 'length': len(blockchain.chain), } return jsonify(response), 200 @app.route('/last_proof', methods=['GET']) def last_proof(): # get the last index of blockchain.chain response = { 'proof': blockchain.last_block['proof'] } return jsonify(response), 200 # Note, when demoing, start with this, then change to the below # if __name__ == '__main__': # app.run(host='0.0.0.0', port=5000) if __name__ == '__main__': if len(sys.argv) > 1: port = int(sys.argv[1]) else: port = 5000 app.run(host='0.0.0.0', port=port)
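
# Client-side sketch (hypothetical; assumes the node above is running on
# localhost:5000): fetch the last proof, brute-force a proof whose combined
# hash has six leading zeroes (matching valid_proof above), then POST it to
# /mine to forge a new block.
#
#   import requests, hashlib
#   last_proof = requests.get('http://localhost:5000/last_proof').json()['proof']
#   proof = 0
#   while hashlib.sha256(f'{last_proof}{proof}'.encode()).hexdigest()[:6] != '000000':
#       proof += 1
#   print(requests.post('http://localhost:5000/mine', json={'proof': proof}).json())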
the-stack_0_14280
from Products.CMFCore.utils import getToolByName from Products.Five.browser import BrowserView from isaw.bibitems.browser.view import BibItemView class PublicationView(BibItemView): """view class""" @property def authors(self): members = self._get_members(self.context.authors) return ', '.join(members) @property def contributors(self): members = self._get_members(self.context.contributors) return ', '.join(members) @property def editors(self): members = self._get_members(self.context.editors) return ', '.join(members) def _get_members(self, member_list): mt = getToolByName(self.context, 'portal_membership') members = [] for author in member_list: author = author.strip() if not author: continue info = mt.getMemberInfo(author) if info: members.append('<a href="%s">%s</a>' % (info.get('home_page'), info.get('fullname', author))) else: members.append(author) return members @property def images(self): return self.context.objectValues() class PublicationImagesView(BrowserView): """ images overlay """ @property def images(self): return self.context.objectValues() class PublicationListingView(BrowserView): """view class""" batch_size = 0 page = 1 def __init__(self, request, context): super(PublicationListingView, self).__init__(request, context) self.page = int(self.request.get('page', 1)) def _query(self, query=None, exclude=None, b_start=None, b_size=None): if b_size is None: b_size = self.batch_size if b_start is None: b_start = (getattr(self, 'page', 1) - 1) * b_size if query is None: query = {'portal_type': 'isaw.policy.publication'} if exclude is not None: uuid = getattr(exclude, 'UID') if callable(uuid): uuid = uuid() if uuid: query['UID'] = {'not': uuid} if self.context.portal_type == 'Folder': self.request['b_start'] = b_start self.request['b_size'] = b_size query['b_start'] = b_start query['b_size'] = b_size items = self.context.getFolderContents(contentFilter=query, batch=True, b_size=b_size) elif self.context.portal_type == 'Topic': if b_start and not self.request.get('b_start'): self.request['b_start'] = b_start items = self.context.queryCatalog(self.request, True, b_size, **query) elif self.context.portal_type == 'Collection': items = self.context.results(True, b_start, b_size, custom_query=query) else: items = [] return items def listings(self, b_start=None, b_size=None): """get a page of listings""" return self._query(b_start=b_start, b_size=b_size)
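
# Batching note (for reference): listings() delegates to _query(), which turns
# the ?page=N request parameter into an offset, i.e. b_start = (page - 1) *
# b_size, with b_size defaulting to the class-level batch_size. For example, a
# hypothetical batch_size of 20 with ?page=3 asks the catalog for items 40-59.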
the-stack_0_14281
""" Copyright (c) 2016-present, Facebook, Inc. All rights reserved. This source code is licensed under the BSD-style license found in the LICENSE file in the root directory of this source tree. An additional grant of patent rights can be found in the PATENTS file in the same directory. """ import asyncio import unittest import unittest.mock from orc8r.protos.common_pb2 import Void from orc8r.protos.service303_pb2 import ServiceInfo from magma.common.service_registry import ServiceRegistry from magma.magmad.service_poller import ServicePoller class MockFuture(object): def __init__(self, is_error): self._is_error = is_error def exception(self): if self._is_error: return self.MockException() return None def result(self): return ServiceInfo() class MockException(object): def details(self): return '' def code(self): return 0 class ServicePollerTests(unittest.TestCase): """ Tests for the ServicePoller """ def setUp(self): ServiceRegistry.add_service('test1', '0.0.0.0', 0) ServiceRegistry.add_service('test2', '0.0.0.0', 0) config = { 'magma_services': ['test1', 'test2'], 'non_service303_services': ['test2'] } self._loop = asyncio.new_event_loop() self._service_poller = ServicePoller(self._loop, config) @unittest.mock.patch('magma.magmad.service_poller.Service303Stub') @unittest.mock.patch('magma.configuration.service_configs') def test_poll(self, _service_configs_mock, service303_mock): """ Test if the query to Service303 succeeds. """ # Mock out GetServiceInfo.future mock = unittest.mock.Mock() mock.GetServiceInfo.future.side_effect = [unittest.mock.Mock()] service303_mock.side_effect = [mock] self._service_poller.start() mock.GetServiceInfo.future.assert_called_once_with( Void(), self._service_poller.GET_STATUS_TIMEOUT) # pylint: disable=protected-access self._service_poller._get_service_info_done('test1', MockFuture(False)) @unittest.mock.patch('magma.magmad.service_poller.Service303Stub') @unittest.mock.patch('magma.configuration.service_configs') def test_poll_exception(self, _service_configs_mock, service303_mock): """ Test if the query to Service303 fails and handled gracefully. """ # Mock out GetServiceInfo.future mock = unittest.mock.Mock() mock.GetServiceInfo.future.side_effect = [unittest.mock.Mock()] service303_mock.side_effect = [mock] self._service_poller.start() mock.GetServiceInfo.future.assert_called_once_with( Void(), self._service_poller.GET_STATUS_TIMEOUT) # pylint: disable=protected-access self._service_poller._get_service_info_done('test1', MockFuture(True)) if __name__ == "__main__": unittest.main()
the-stack_0_14283
# -*- coding: utf-8 -*-

name = u'simplejson'

version = '3.15.0'

description = \
    """
    Simple, fast, extensible JSON encoder/decoder for Python
    """

variants = []

requires = ['boost']


def commands():
    import os

    libs_path = os.path.join(getenv("PYTHON_LIBS_PATH"), "simplejson",
                             "%s" % version)
    env.PYTHONPATH.append(os.path.join(libs_path, "lib").replace("/", os.sep))
the-stack_0_14284
import requests import petl from parsons.etl.table import Table from parsons.utilities import check_env URI = 'https://api.targetsmart.com/' class TargetSmartConnector(object): def __init__(self, api_key): self.uri = URI self.api_key = check_env.check('TS_API_KEY', api_key) self.headers = {'x-api-key': self.api_key} def request(self, url, args=None, raw=False): r = requests.get(url, headers=self.headers, params=args) # This allows me to deal with data that needs to be munged. if raw: return r.json() return Table(r.json()['output']) class Person(object): def __init__(self): return None def data_enhance(self, search_id, search_id_type='voterbase', state=None): """ Searches for a record based on an id or phone or email address `Args:` search_id: str The primary key or email address or phone number search_id_type: str One of ``voterbase``, ``exacttrack``, ``abilitec_consumer_link``, ``phone``, ``email``, ``smartvan``, ``votebuilder``, ``voter``, ``household``. state: str Two character state code. Required if ``search_id_type`` of ``smartvan``, ``votebuilder`` or ``voter``. `Returns` Parsons Table See :ref:`parsons-table` for output options. """ if search_id_type in ['smartvan', 'votebuilder', 'voter'] and state is None: raise KeyError("Search ID type '{}' requires state kwarg".format(search_id_type)) if search_id_type not in ('voterbase', 'exacttrack', 'abilitec_consumer_link', 'phone', 'email', 'smartvan', 'votebuilder', 'voter', 'household'): raise ValueError('Search_id_type is not valid') url = self.connection.uri + 'person/data-enhance' args = {'search_id': search_id, 'search_id_type': search_id_type, 'state': state } return self.connection.request(url, args=args) def radius_search(self, first_name, last_name, middle_name=None, name_suffix=None, latitude=None, longitude=None, address=None, radius_size=10, radius_unit='miles', max_results=10, gender='a', age_min=None, age_max=None, composite_score_min=1, composite_score_max=100, last_name_exact=True, last_name_is_prefix=False, last_name_prefix_length=10): """ Search for a person based on a specified radius `Args`: first_name: str One or more alpha characters last_name: str One or more alpha characters middle_name: str One or more alpha characters name_suffix: str One or more alpha characters latitude: float Floating point number (e.g. 33.738987255507) longitude: float Floating point number (e.g. -116.40833849559) address: str Any geocode-able address address_type: str ``reg`` for registration (default) or ``tsmart`` for TargetSmart radius_unit: str One of ``meters``, ``feet``, ``miles`` (default), or ``kilometers``. max_results: int Default of ``10``. An integer in range [0 - 100] gender: str Default of ``a``. One of ``m``, ``f``, ``u``, ``a``. age_min: int A positive integer age_max: int A positive integer composite_score_min: int An integer in range [1 - 100]. Filter out results with composite score less than this value. composite_score_max: int An integer in range [1 - 100]. Filter out results with composite score greater than this value. last_name_exact: boolean By default, the full last name is used for finding matches if the length of the last name is not longer than 10 characters. As an example, “anders” is less likely to match to “anderson” with this enabled. Disable this option if you are using either ``last_name_is_prefix`` or ``last_name_prefix_length``. last_name_is_prefix: boolean By default, the full last name is used for finding matches. Enable this parameter if your search last name is truncated. 
This can be common for some client applications that for various reasons do not have full last names. Use this parameter along with ``last_name_prefix_length`` to configure the length of the last name prefix. This parameter is ignored if ``last_name_exact`` is enabled. last_name_prefix_length: int By default, up to the first 10 characters of the search last name are used for finding relative matches. This value must be between 3 and 10. This parameter is ignored if last_name_exact is enabled. `Returns` Parsons Table See :ref:`parsons-table` for output options. """ if (latitude is None or longitude is None) and address is None: raise ValueError('Lat/Long or Address required') # Convert booleans for a in [last_name_exact, last_name_is_prefix]: a = str(a) url = self.connection.uri + 'person/radius-search' args = {'first_name': first_name, 'last_name': last_name, 'middle_name': middle_name, 'name_suffix': name_suffix, 'latitude': latitude, 'longitude': longitude, 'address': address, 'radius_size': radius_size, 'radius_unit': radius_unit, 'max_results': max_results, 'gender': gender, 'age_min': age_min, 'age_max': age_max, 'composite_score_min': composite_score_min, 'composite_score_max': composite_score_max, 'last_name_exact': last_name_exact, 'last_name_is_prefix': last_name_is_prefix, 'last_name_prefix_length': last_name_prefix_length } r = self.connection.request(url, args=args, raw=True) return Table([itm for itm in r['output']]).unpack_dict('data_fields', prepend=False) def phone(self, table): """ Match based on a list of 500 phones numbers. Table can contain up to 500 phone numbers to match `Args:` table: parsons table See :ref:`parsons-table`. One row per phone number, up to 500 phone numbers. `Returns:` See :ref:`parsons-table` for output options. """ url = self.connection.uri + 'person/phone-search' args = {'phones': list(petl.values(table.table, 0))} return Table(self.connection.request(url, args=args, raw=True)['result']) class Service(object): def __init__(self): return None def district(self, search_type='zip', address=None, zip5=None, zip4=None, state=None, latitude=None, longitude=None): """ Return district information based on a geographic point. The method allows you to search based on the following: .. list-table:: :widths: 30 30 30 :header-rows: 1 * - Search Type - Search Type Name - Required kwarg(s) * - Zip Code - ``zip`` - ``zip5``, ``zip4`` * - Address - ``address`` - ``address`` * - Point - point - ``latitude``, ``longitude`` `Args`: search_type: str The type of district search to perform. One of ``zip``, ``address`` or ``point``. address: str An uparsed full address zip5: str The USPS Zip5 code zip4: str The USPS Zip4 code state: str The two character state code latitude: float or str Valid latitude floating point lontitude: float or str Valid longitude floating point `Returns`: Parsons Table See :ref:`parsons-table` for output options. """ if search_type == 'zip' and None in [zip5, zip4]: raise ValueError("Search type 'zip' requires 'zip5' and 'zip4' arguments") elif search_type == 'point' and None in [latitude, longitude]: raise ValueError("Search type 'point' requires 'latitude' and 'longitude' arguments") elif search_type == 'address' and None in [address]: raise ValueError("Search type 'address' requires 'address' argument") elif search_type not in ['zip', 'point', 'address']: raise KeyError("Invalid 'search_type' provided. 
") else: pass url = self.connection.uri + 'service/district' args = {'search_type': search_type, 'address': address, 'zip5': zip5, 'zip4': zip4, 'state': state, 'latitude': latitude, 'longitude': longitude } return Table([self.connection.request(url, args=args, raw=True)['match_data']]) class Voter(object): def __init__(self, connection): self.connection = connection def voter_registration_check(self, first_name=None, last_name=None, state=None, street_number=None, street_name=None, city=None, zip_code=None, age=None, dob=None, phone=None, email=None, unparsed_full_address=None): """ Searches for a registered individual, returns matches. A search must include the at minimum first name, last name and state. `Args:` first_name: str Required; One or more alpha characters. Trailing wildcard allowed last_name: str Required; One or more alpha characters. Trailing wildcard allowed state: str Required; Two character state code (e.g. ``NY``) street_number: str Optional; One or more alpha characters. Trailing wildcard allowed street_name: str Optional; One or more alpha characters. Trailing wildcard allowed city: str Optional; The person's home city zip_code: str Optional; Numeric characters. Trailing wildcard allowed age; int Optional; One or more integers. Trailing wildcard allowed dob; str Numeric characters in YYYYMMDD format. Trailing wildcard allowed phone; str Integer followed by 0 or more * or integers email: str Alphanumeric character followed by 0 or more * or legal characters (alphanumeric, @, -, .) unparsed_full_address: str One or more alphanumeric characters. No wildcards. `Returns` Parsons Table See :ref:`parsons-table` for output options. """ url = self.connection.uri + 'voter/voter-registration-check' if None in [first_name, last_name, state]: raise ValueError("""Function must include at least first_name, last_name, and state.""") args = {'first_name': first_name, 'last_name': last_name, 'state': state, 'street_number': street_number, 'street_name': street_name, 'city': city, 'zip_code': zip_code, 'age': age, 'dob': dob, 'phone': phone, 'email': email, 'unparsed_full_address': unparsed_full_address } return self.connection.request(url, args=args, raw=True) class TargetSmartAPI(Voter, Person, Service): def __init__(self, api_key=None): self.connection = TargetSmartConnector(api_key=api_key)
the-stack_0_14285
import datetime import glob import gzip import json import os import pickle import random import time from hashlib import sha256 import cv2 import numpy as np from tqdm import tqdm from pcs.augmentations import PCSDefaultAugmentor from pcs.utils import ( convert_min_area_rect, grayscale_to_float, grayscale_to_uint ) class CocoExporter: def __init__(self, output_dir="", dataset_name=""): self.output_dir = output_dir self.dataset_name = dataset_name if not os.path.isdir(self.output_dir): os.mkdir(self.output_dir) self.dataset_dir = os.path.join(self.output_dir, self.dataset_name) if not os.path.isdir(self.dataset_dir): os.mkdir(self.dataset_dir) self.number_images = 0 self.number_annotations = 0 date = str(datetime.date.today()) self.coco_dataset = { "info": { "description": "Protein crystals in suspension (PCS) dataset for automated crystal detection", "url": "", "version": "1.0", "year": 2021, "contributor": "Daniel Bischoff, Sebastian Franz", "date_created": date, }, "licenses": [ { "url": "https://opensource.org/licenses/MIT", "id": 1, "name": "MIT License", }, ], "categories": [ {"supercategory": "Crystal", "id": 1, "name": "Crystal"}, ], "images": [], "annotations": [], } self.images_template = { "license": 1, "file_name": "", "width": -1, "height": -1, "date_captured": date, "id": 0, } self.annotations_template = { "segmentation": [ [] ], "area": 0, "iscrowd": 0, "image_id": 0, "bbox": [0, 0, 0, 0], "category_id": 1, "id": 0, } def add_image(self, image_path, height, width): self.number_images += 1 image_id = self.number_images image_name = f"{str(image_id).zfill(10)}" _, ext = os.path.splitext(image_path) image_dict = self.images_template.copy() image_dict["file_name"] = image_name + ext image_dict["width"] = width image_dict["height"] = height image_dict["id"] = image_id self.coco_dataset["images"].append(image_dict) return image_dict def add_annotation(self, image_id=1, segmentation=None, bbox=None, area=0): self.number_annotations += 1 annotation_id = self.number_annotations if segmentation is None: segmentation = [[]] if bbox is None: bbox = [] # Annotation annotation_dict = self.annotations_template.copy() annotation_dict["segmentation"] = segmentation annotation_dict["bbox"] = bbox annotation_dict["image_id"] = image_id annotation_dict["id"] = annotation_id annotation_dict["area"] = area self.coco_dataset["annotations"].append(annotation_dict) return annotation_id def write(self): dataset_annotations_file = os.path.join( self.output_dir, self.dataset_name + ".json" ) with open(dataset_annotations_file, "w") as f: json.dump(self.coco_dataset, f, indent=None) class Indexer: def __init__(self, root_dirs, labels={"train": 80, "validation": 20}): if isinstance(root_dirs, str): root_dirs = [root_dirs] for root_dir in root_dirs: assert os.path.isdir(root_dir), f"Not a directory: {root_dir}" self.root_dirs = root_dirs sum_weights = sum(labels.values()) self.labels = { label: weight / sum_weights for label, weight in labels.items() } assert sum(self.labels.values()) == 1 def _index_iopairs(self, reindex): iopairs = {} for root_dir in self.root_dirs: glob_str = os.path.join(root_dir, "**", "*.png") inputs = glob.glob(glob_str, recursive=True) for image_file in tqdm( inputs, desc=f"Indexing io pairs from directory {root_dir}", total=len(inputs) ): index_file = image_file + ".idx.json" annotation_file = image_file + ".json" if not reindex and os.path.exists(index_file): with open(index_file, "r") as f: d_iopair = json.load(f) else: d_iopair = {} d_iopair["valid"] = 
os.path.exists(annotation_file) image_data = cv2.imread(image_file, cv2.IMREAD_GRAYSCALE) d_iopair["height"], d_iopair["width"] = image_data.shape d_iopair["key"] = sha256(image_data.data.tobytes()).hexdigest() if d_iopair["key"] in iopairs: d_iopair["valid"] = False print(f"warning: invalidating {image_file} (duplicate)") # validate annotations try: with open(annotation_file, "r") as f: annotations = json.load(f) except json.JSONDecodeError: d_iopair["valid"] = False print(f"warning: invalidating {image_file} (JSON decode error)") if not "segmentation" in annotations: d_iopair["valid"] = False print(f"warning: invalidating {image_file} (missing segmentation field)") # shape check arr = np.array(annotations["segmentation"]) if len(arr.shape) < 3: d_iopair["valid"] = False print(f"warning: invalidating {image_file} (wrong segmentation shape)") if arr.shape[2] != 3: d_iopair["valid"] = False print(f"warning: invalidating {image_file} (wrong segmentation shape)") # coordinated check y_min = arr[:, :, 1].min() y_max = arr[:, :, 1].max() if y_min < 0 or y_max >= d_iopair["height"]: d_iopair["valid"] = False print(f"warning: invalidating {image_file} (coordinate out of image bounds)") x_min = arr[:, :, 1].min() x_max = arr[:, :, 1].max() if x_min < 0 or x_max >= d_iopair["width"]: d_iopair["valid"] = False print(f"warning: invalidating {image_file} (coordinate out of image bounds)") d_iopair["label"] = "" with open(index_file, "w") as f: json.dump(d_iopair, f) iopairs[d_iopair["key"]] = ( image_file, annotation_file, index_file, d_iopair["height"], d_iopair["width"], d_iopair["label"], d_iopair["valid"] ) return iopairs def _load_only(self): iopairs = {} for root_dir in self.root_dirs: glob_str = os.path.join(root_dir, "**", "*.png") inputs = glob.glob(glob_str, recursive=True) for image_file in inputs: index_file = image_file + ".idx.json" annotation_file = image_file + ".json" with open(index_file, "r") as f: d_iopair = json.load(f) iopairs[d_iopair["key"]] = ( image_file, annotation_file, index_file, d_iopair["height"], d_iopair["width"], d_iopair["label"], d_iopair["valid"] ) return iopairs def _resample_iopairs(self, iopairs): keys = list(iopairs.keys()) random.shuffle(keys) offset = 0 for label, fraction in self.labels.items(): size = int(round(fraction * len(iopairs))) label_keys = keys[offset:offset+size] offset += size for key in label_keys: _, _, index_file, height, width, _, valid = iopairs[key] d_iopair = { "key": key, "height": height, "width": width, "label": label, "valid": valid } with open(index_file, "w") as f: json.dump(d_iopair, f) def load_iopairs(self, reindex=False): iopairs = self._index_iopairs(reindex) filtered_iopairs = {key: iopair for key, iopair in iopairs.items() if iopair[6]} self._resample_iopairs(filtered_iopairs) updated_iopairs = self._load_only() label_count = {label: 0 for label, _ in self.labels.items()} for _, iopair in updated_iopairs.items(): label_count[iopair[5]] += 1 print("after indexing:") for root_dir in self.root_dirs: print(f"\t{root_dir}") for label, count in label_count.items(): print(f"\t{label}: {count} ({round(100 * self.labels[label], 2)}%)") return updated_iopairs def to_coco(self, output_dir, iopairs=None, box_mode="xywha", reindex=False, flip_y=True, digits=2): assert box_mode in ("xywha", "coco") exporters = { label: CocoExporter( output_dir=output_dir, dataset_name=f"pcs_{label}") for label, _ in self.labels.items() } label_dirs = { label: os.path.join(output_dir, f"pcs_{label}") for label, _ in self.labels.items() } if iopairs is 
None: valid_labeled_iopairs = self.load_iopairs(reindex=reindex) else: valid_labeled_iopairs = iopairs for _, iopair in tqdm( valid_labeled_iopairs.items(), desc=f"Exporting dataset to coco format", total=len(valid_labeled_iopairs), ): image_file, annotation_file, _, height, width, label, _ = iopair exporter = exporters[label] # Adding image to dataset while ensuring that only grayscale images are stored image_dict = exporter.add_image(image_file, height, width) image_store_path = os.path.join(label_dirs[label], image_dict["file_name"]) image = cv2.imread(image_file, cv2.IMREAD_GRAYSCALE) cv2.imwrite(image_store_path, image) # Adding annotations to image with open(annotation_file, "r") as f: annotations = json.load(f) segmentations = annotations["segmentation"] for verts in segmentations: # x, y, z device coordinates scaled and shifted to fit image dimesions of every crystal vertex verts = np.array(verts) assert verts.shape[-1] == 3 # depth information is currently not used. # the array is copied during np.delete which prevents a CV2 error. verts = np.delete(verts, 2, axis=1) if flip_y: verts[:, 1] = (image_dict["height"] - 1) - verts[:, 1] # let CV2 figure out the correct ordering of the vertices hull = cv2.convexHull(np.float32(verts)) # rounding to make the resulting JSON files smaller. area = round(cv2.contourArea(hull), digits) segmentation = [ [round(v, digits) for v in hull.flatten().tolist()] ] if box_mode == "coco": x0 = verts[:, 0].min() y0 = verts[:, 1].min() w = verts[:, 0].max() - x0 h = verts[:, 1].max() - y0 bbox = [round(v, digits) for v in [x0, y0, w, h]] elif box_mode == "xywha": min_area_rect = cv2.minAreaRect(hull) bbox = convert_min_area_rect(min_area_rect) bbox = [round(v, digits) for v in bbox] exporter.add_annotation( image_id=image_dict["id"], segmentation=segmentation, bbox=bbox, area=area ) for _, exporter in exporters.items(): exporter.write() class PCSDataset: def __init__(self, dataset_file, image_dir=None, use_augmentations=False, intermediates=False): assert os.path.exists(dataset_file) self.dataset_file = dataset_file assert self.dataset_file.lower().endswith(".json") or self.dataset_file.lower().endswith(".gzip") self.compressed = True if self.dataset_file.lower().endswith(".gzip") else False if not image_dir: dirname = os.path.dirname(self.dataset_file) basename = os.path.basename(self.dataset_file) name, _ = os.path.splitext(basename) if self.compressed and name.endswith(".json"): # support for .json.gzip name = name[:-len(".json")] image_dir = os.path.join(dirname, name) assert os.path.isdir(image_dir), f"Image directory not found: {image_dir}" self.image_dir = image_dir self.stats_file = self.image_dir + "_stats.json" self.aug_stats_file = self.image_dir + "_aug_stats.json" if self.compressed: print("Reading compressed PCSCocoDataset:", dataset_file, "...") start = time.time() with gzip.open(dataset_file, "r") as file: self.data = json.loads(file.read().decode('utf-8')) end = time.time() print("finished reading in", f"{round(end-start, 3)} seconds") else: print("Reading PCSCocoDataset:", dataset_file, "...") start = time.time() with open(dataset_file, "r") as file: self.data = json.load(file) end = time.time() print("finished reading in", f"{round(end-start, 3)} seconds") self.image_annotations = dict() for annotation in self.data["annotations"]: image_id = annotation["image_id"] if not image_id in self.image_annotations: self.image_annotations[image_id] = list() self.image_annotations[image_id].append(annotation) self.augmentations_active = 
use_augmentations self.intermediates = intermediates def use_augmentations(self, flag=True, intermediates=False): self.augmentations_active = flag self.intermediates = intermediates def write_statistics(self, num_images=20000, digits=2): dataset_statistics = { "pixel_mean": -1, "pixel_std": -1, "num_images": -1, "num_annotations": -1, "augmentations_active": self.augmentations_active, "images": { "image_id": [], "height": [], "width": [], "instances_mean_area": [], "instances_mean_ratio": [], "annotation_ids": [] }, "annotations": { "image_id": [], "annotation_id": [], "x": [], "y": [], "width": [], "height": [], "angle": [], "area": [], "ratio": [] } } dataset_statistics["num_images"] = len(self) num_annotations = 0 image_stats_num_images = min(len(self), num_images) image_stats_indices = set(random.sample(range(image_stats_num_images), image_stats_num_images)) image_flat_store = [] image_stats = dataset_statistics["images"] annotation_stats = dataset_statistics["annotations"] def rndf(x): return round(float(x), digits) for index, img_data in enumerate(tqdm( self, total=len(self), desc="calculate image stats" )): if self.augmentations_active: image = img_data["aug_img"] else: image = img_data["img"] if index in image_stats_indices: image_flat_store.append(image.flatten().astype(np.float64)) image_stats["image_id"].append(img_data["meta"]["img_dict"]["id"]) image_shape = image.shape image_stats["height"].append(image_shape[0]) image_stats["width"].append(image_shape[1]) image_instance_areas = [] image_instance_ratios = [] image_stats["annotation_ids"] = img_data["anno_ids"] if self.augmentations_active: segms = img_data["aug_segms"] bboxs = img_data["aug_rbboxs"] else: segms = img_data["segms"] bboxs = img_data["rbboxs"] for segmentation, rotated_box in zip(segms, bboxs): num_annotations += 1 annotation_stats["image_id"].append( img_data["meta"]["img_dict"]["id"] ) x_ctr, y_ctr, width, height, angle = rotated_box annotation_stats["x"].append(rndf(x_ctr)) annotation_stats["y"].append(rndf(y_ctr)) annotation_stats["width"].append(rndf(width)) annotation_stats["height"].append(rndf(height)) annotation_stats["angle"].append(rndf(angle)) ratio = width / (height + 1e-4) image_instance_ratios.append(rndf(ratio)) annotation_stats["ratio"].append(rndf(ratio)) area = cv2.contourArea(np.float32(segmentation)) image_instance_areas.append(rndf(area)) annotation_stats["area"].append(rndf(area)) image_stats["instances_mean_area"].append( rndf(np.mean(image_instance_areas)) ) image_stats["instances_mean_ratio"].append( rndf(np.mean(image_instance_ratios)) ) image_flat_store = np.concatenate(image_flat_store) dataset_statistics["pixel_mean"] = rndf(np.mean(image_flat_store)) dataset_statistics["pixel_std"] = rndf(np.std(image_flat_store)) dataset_statistics["num_annotations"] = num_annotations output_file = self.aug_stats_file if self.augmentations_active else self.stats_file with open(output_file, "w") as f: json.dump(dataset_statistics, f) def write_augmented_dataset(self, output_dataset, digits=2): dirname = os.path.dirname(self.dataset_file) coco_exporter = CocoExporter( output_dir=dirname, dataset_name=output_dataset ) self.use_augmentations() def rndf(x): return round(float(x), digits) for img_data in tqdm( self, total=len(self), desc="augmenting dataset" ): img_path = img_data["meta"]["img_path"] height, width = img_data["img"].shape image_dict = coco_exporter.add_image( img_path, int(height), int(width) ) cv2.imwrite( os.path.join( dirname, output_dataset, img_data["meta"]["img_dict"]["file_name"] 
), grayscale_to_uint(img_data["aug_img"]) ) image_id = image_dict["id"] for segmentation, rotated_box in zip(img_data["aug_segms"], img_data["aug_rbboxs"]): area = rndf(cv2.contourArea(np.float32(segmentation))) segmentation = segmentation.flatten().tolist() segmentation = [rndf(v) for v in segmentation] if isinstance(rotated_box, np.ndarray): rotated_box = rotated_box.flatten().tolist() rotated_box = [rndf(v) for v in rotated_box] coco_exporter.add_annotation( image_id=image_id, segmentation=[segmentation], bbox=rotated_box, area=area ) coco_exporter.write() def write_trimmed_dataset(self, output_dataset, digits=2, num=20, augmented=False): dirname = os.path.dirname(self.dataset_file) coco_exporter = CocoExporter( output_dir=dirname, dataset_name=output_dataset ) if augmented: self.use_augmentations() def rndf(x): return round(float(x), digits) for idx, img_data in enumerate(tqdm( self, total=num, desc="trimming dataset" )): if idx == num: break img_path = img_data["meta"]["img_path"] height, width = img_data["img"].shape image_dict = coco_exporter.add_image( img_path, int(height), int(width) ) cv2.imwrite( os.path.join( dirname, output_dataset, img_data["meta"]["img_dict"]["file_name"] ), grayscale_to_uint(img_data["aug_img" if augmented else "img"]) ) image_id = image_dict["id"] for segmentation, rotated_box in zip(img_data["aug_segms" if augmented else "segms"], img_data["aug_rbboxs" if augmented else "rbboxs"]): area = rndf(cv2.contourArea(np.float32(segmentation))) segmentation = segmentation.flatten().tolist() segmentation = [rndf(v) for v in segmentation] if isinstance(rotated_box, np.ndarray): rotated_box = rotated_box.flatten().tolist() rotated_box = [rndf(v) for v in rotated_box] coco_exporter.add_annotation( image_id=image_id, segmentation=[segmentation], bbox=rotated_box, area=area ) coco_exporter.write() def write_pickled_dataset(self, output_dataset): dirname = os.path.dirname(self.dataset_file) outpath = os.path.join(dirname, output_dataset) assert outpath.endswith(".pkl") data = [] for idx, img_data in enumerate(tqdm( self, total=len(self), desc="writing segmented dataset" )): _, _, _, segmentations, rotated_boxes = img_data["meta"]["img_dict"], img_data["img"], None, img_data["segms"], img_data["rbboxs"] segmentations=[np.float32(segm) for segm in segmentations], rotated_boxes=np.float32(rotated_boxes) data.append((segmentations, rotated_boxes)) with open(outpath, "wb") as f: pickle.dump(data, f) def load_aug_stats(self): with open(self.aug_stats_file, "r") as f: aug_stats = json.load(f) return aug_stats def load_stats(self): with open(self.stats_file, "r") as f: aug_stats = json.load(f) return aug_stats @staticmethod def get_segmentations(image_annotations): return [ np.array(annotation["segmentation"], dtype=np.float32).flatten().reshape(-1, 2) for annotation in image_annotations ] @staticmethod def get_rotated_boxes(image_annotations, segmentations): # use bbox field if angle information is present, otherwise infer from segmentations assert len(image_annotations) > 0 has_angle = len(image_annotations[0]["bbox"]) == 5 if has_angle: return [ np.array(annotation["bbox"], dtype=np.float32).flatten() for annotation in image_annotations ] else: min_area_rects = [ cv2.minAreaRect(segmentation) for segmentation in segmentations ] return [ np.array(convert_min_area_rect(min_area_rect), dtype=np.float32) for min_area_rect in min_area_rects ] @staticmethod def get_annotation_ids(image_annotations): return [annotation["id"] for annotation in image_annotations] def get_meta(self, 
idx): image_dict = self.data["images"][idx] image_annotations = self.image_annotations[image_dict["id"]] image_path = os.path.join(self.image_dir, image_dict["file_name"]) assert os.path.exists(image_path) return dict( img_dict=image_dict, img_path=image_path, img_annos=image_annotations ) def __getitem__(self, idx): meta = self.get_meta(idx) image = grayscale_to_float( cv2.imread(meta["img_path"], cv2.IMREAD_GRAYSCALE) ) segmentations = PCSDataset.get_segmentations(meta["img_annos"]) rotated_boxes = PCSDataset.get_rotated_boxes(meta["img_annos"], segmentations) annotation_ids = PCSDataset.get_annotation_ids(meta["img_annos"]) return dict( meta=meta, img=image, anno_ids=annotation_ids, segms=segmentations, rbboxs=rotated_boxes ) def __iter__(self): if self.augmentations_active: return PCSDatasetAugmentedIterator(self) else: return PCSDatasetIterator(self) def __len__(self): return len(self.data["images"]) class PCSDatasetIterator: def __init__(self, pcs_coco_dataset): self.dataset = pcs_coco_dataset self.num_images = len(self.dataset.data["images"]) self.index = 0 def __next__(self): if self.index < self.num_images: img_data = self.dataset[self.index] self.index += 1 return img_data else: raise StopIteration class PCSDatasetAugmentedIterator: def __init__(self, dataset): self.dataset = dataset self.dataset_iter = PCSDatasetIterator( self.dataset ) self.augmentor = PCSDefaultAugmentor() def __next__(self): img_data = next( self.dataset_iter ) aug_result = self.augmentor( img_data["img"].copy(), [x.copy() for x in img_data["segms"]], [x.copy() for x in img_data["rbboxs"]] ) img_data.update(aug_result) return img_data def __iter__(self): return self
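# A minimal end-to-end sketch of the classes above, assuming raw images plus
# per-image JSON annotations live under /data/pcs_raw and that the exported
# COCO-style dataset should go to /data/pcs_coco; both paths are placeholders.
if __name__ == '__main__':
    # Index image/annotation pairs and split them 80/20 into train/validation.
    indexer = Indexer('/data/pcs_raw', labels={'train': 80, 'validation': 20})
    indexer.to_coco('/data/pcs_coco')

    # Load the exported training split and iterate with augmentations enabled.
    dataset = PCSDataset('/data/pcs_coco/pcs_train.json')
    dataset.use_augmentations(True)
    for img_data in dataset:
        image = img_data['aug_img']      # augmented float grayscale image
        boxes = img_data['aug_rbboxs']   # rotated boxes (x, y, w, h, angle)
        break

    # Persist per-image and per-instance statistics next to the image folder.
    dataset.write_statistics()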
the-stack_0_14286
""" Copyright 2020, The Regents of the University of California. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE REGENTS OF THE UNIVERSITY OF CALIFORNIA ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OF THE UNIVERSITY OF CALIFORNIA OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. The views and conclusions contained in the software and documentation are those of the authors and should not be interpreted as representing official policies, either expressed or implied, of The Regents of the University of California. """ import logging import os import sys import scapy.utils from scapy.layers.l2 import Ether from scapy.layers.inet import IP, UDP import cocotb_test.simulator import cocotb from cocotb.log import SimLog from cocotb.clock import Clock from cocotb.triggers import RisingEdge, FallingEdge, Timer from cocotbext.axi import AxiStreamBus from cocotbext.eth import XgmiiSource, XgmiiSink from cocotbext.pcie.core import RootComplex from cocotbext.pcie.xilinx.us import UltraScalePlusPcieDevice try: import mqnic except ImportError: # attempt import from current directory sys.path.insert(0, os.path.join(os.path.dirname(__file__))) try: import mqnic finally: del sys.path[0] class TB(object): def __init__(self, dut): self.dut = dut self.BAR0_APERTURE = int(os.getenv("PARAM_BAR0_APERTURE")) self.log = SimLog("cocotb.tb") self.log.setLevel(logging.DEBUG) # PCIe self.rc = RootComplex() self.rc.max_payload_size = 0x1 # 256 bytes self.rc.max_read_request_size = 0x2 # 512 bytes self.dev = UltraScalePlusPcieDevice( # configuration options pcie_generation=3, pcie_link_width=16, user_clk_frequency=250e6, alignment="dword", cq_cc_straddle=False, rq_rc_straddle=False, rc_4tlp_straddle=False, enable_pf1=False, enable_client_tag=True, enable_extended_tag=True, enable_parity=False, enable_rx_msg_interface=False, enable_sriov=False, enable_extended_configuration=False, enable_pf0_msi=True, enable_pf1_msi=False, # signals # Clock and Reset Interface user_clk=dut.clk_250mhz, user_reset=dut.rst_250mhz, # user_lnk_up # sys_clk # sys_clk_gt # sys_reset # phy_rdy_out # Requester reQuest Interface rq_bus=AxiStreamBus.from_prefix(dut, "m_axis_rq"), pcie_rq_seq_num0=dut.s_axis_rq_seq_num_0, pcie_rq_seq_num_vld0=dut.s_axis_rq_seq_num_valid_0, pcie_rq_seq_num1=dut.s_axis_rq_seq_num_1, pcie_rq_seq_num_vld1=dut.s_axis_rq_seq_num_valid_1, # pcie_rq_tag0 # pcie_rq_tag1 # pcie_rq_tag_av # pcie_rq_tag_vld0 # pcie_rq_tag_vld1 # Requester Completion Interface 
rc_bus=AxiStreamBus.from_prefix(dut, "s_axis_rc"), # Completer reQuest Interface cq_bus=AxiStreamBus.from_prefix(dut, "s_axis_cq"), # pcie_cq_np_req # pcie_cq_np_req_count # Completer Completion Interface cc_bus=AxiStreamBus.from_prefix(dut, "m_axis_cc"), # Transmit Flow Control Interface # pcie_tfc_nph_av=dut.pcie_tfc_nph_av, # pcie_tfc_npd_av=dut.pcie_tfc_npd_av, # Configuration Management Interface cfg_mgmt_addr=dut.cfg_mgmt_addr, cfg_mgmt_function_number=dut.cfg_mgmt_function_number, cfg_mgmt_write=dut.cfg_mgmt_write, cfg_mgmt_write_data=dut.cfg_mgmt_write_data, cfg_mgmt_byte_enable=dut.cfg_mgmt_byte_enable, cfg_mgmt_read=dut.cfg_mgmt_read, cfg_mgmt_read_data=dut.cfg_mgmt_read_data, cfg_mgmt_read_write_done=dut.cfg_mgmt_read_write_done, # cfg_mgmt_debug_access # Configuration Status Interface # cfg_phy_link_down # cfg_phy_link_status # cfg_negotiated_width # cfg_current_speed cfg_max_payload=dut.cfg_max_payload, cfg_max_read_req=dut.cfg_max_read_req, # cfg_function_status # cfg_vf_status # cfg_function_power_state # cfg_vf_power_state # cfg_link_power_state # cfg_err_cor_out # cfg_err_nonfatal_out # cfg_err_fatal_out # cfg_local_error_out # cfg_local_error_valid # cfg_rx_pm_state # cfg_tx_pm_state # cfg_ltssm_state # cfg_rcb_status # cfg_obff_enable # cfg_pl_status_change # cfg_tph_requester_enable # cfg_tph_st_mode # cfg_vf_tph_requester_enable # cfg_vf_tph_st_mode # Configuration Received Message Interface # cfg_msg_received # cfg_msg_received_data # cfg_msg_received_type # Configuration Transmit Message Interface # cfg_msg_transmit # cfg_msg_transmit_type # cfg_msg_transmit_data # cfg_msg_transmit_done # Configuration Flow Control Interface cfg_fc_ph=dut.cfg_fc_ph, cfg_fc_pd=dut.cfg_fc_pd, cfg_fc_nph=dut.cfg_fc_nph, cfg_fc_npd=dut.cfg_fc_npd, cfg_fc_cplh=dut.cfg_fc_cplh, cfg_fc_cpld=dut.cfg_fc_cpld, cfg_fc_sel=dut.cfg_fc_sel, # Configuration Control Interface # cfg_hot_reset_in # cfg_hot_reset_out # cfg_config_space_enable # cfg_dsn # cfg_bus_number # cfg_ds_port_number # cfg_ds_bus_number # cfg_ds_device_number # cfg_ds_function_number # cfg_power_state_change_ack # cfg_power_state_change_interrupt cfg_err_cor_in=dut.status_error_cor, cfg_err_uncor_in=dut.status_error_uncor, # cfg_flr_in_process # cfg_flr_done # cfg_vf_flr_in_process # cfg_vf_flr_func_num # cfg_vf_flr_done # cfg_pm_aspm_l1_entry_reject # cfg_pm_aspm_tx_l0s_entry_disable # cfg_req_pm_transition_l23_ready # cfg_link_training_enable # Configuration Interrupt Controller Interface # cfg_interrupt_int # cfg_interrupt_sent # cfg_interrupt_pending cfg_interrupt_msi_enable=dut.cfg_interrupt_msi_enable, cfg_interrupt_msi_mmenable=dut.cfg_interrupt_msi_mmenable, cfg_interrupt_msi_mask_update=dut.cfg_interrupt_msi_mask_update, cfg_interrupt_msi_data=dut.cfg_interrupt_msi_data, # cfg_interrupt_msi_select=dut.cfg_interrupt_msi_select, cfg_interrupt_msi_int=dut.cfg_interrupt_msi_int, cfg_interrupt_msi_pending_status=dut.cfg_interrupt_msi_pending_status, cfg_interrupt_msi_pending_status_data_enable=dut.cfg_interrupt_msi_pending_status_data_enable, # cfg_interrupt_msi_pending_status_function_num=dut.cfg_interrupt_msi_pending_status_function_num, cfg_interrupt_msi_sent=dut.cfg_interrupt_msi_sent, cfg_interrupt_msi_fail=dut.cfg_interrupt_msi_fail, # cfg_interrupt_msix_enable # cfg_interrupt_msix_mask # cfg_interrupt_msix_vf_enable # cfg_interrupt_msix_vf_mask # cfg_interrupt_msix_address # cfg_interrupt_msix_data # cfg_interrupt_msix_int # cfg_interrupt_msix_vec_pending # cfg_interrupt_msix_vec_pending_status 
cfg_interrupt_msi_attr=dut.cfg_interrupt_msi_attr, cfg_interrupt_msi_tph_present=dut.cfg_interrupt_msi_tph_present, cfg_interrupt_msi_tph_type=dut.cfg_interrupt_msi_tph_type, # cfg_interrupt_msi_tph_st_tag=dut.cfg_interrupt_msi_tph_st_tag, # cfg_interrupt_msi_function_number=dut.cfg_interrupt_msi_function_number, # Configuration Extend Interface # cfg_ext_read_received # cfg_ext_write_received # cfg_ext_register_number # cfg_ext_function_number # cfg_ext_write_data # cfg_ext_write_byte_enable # cfg_ext_read_data # cfg_ext_read_data_valid ) # self.dev.log.setLevel(logging.DEBUG) self.rc.make_port().connect(self.dev) self.driver = mqnic.Driver(self.rc) self.dev.functions[0].msi_multiple_message_capable = 5 self.dev.functions[0].configure_bar(0, 2**self.BAR0_APERTURE, ext=True, prefetch=True) # Ethernet cocotb.fork(Clock(dut.qsfp_0_rx_clk_0, 6.4, units="ns").start()) self.qsfp_0_0_source = XgmiiSource(dut.qsfp_0_rxd_0, dut.qsfp_0_rxc_0, dut.qsfp_0_rx_clk_0, dut.qsfp_0_rx_rst_0) cocotb.fork(Clock(dut.qsfp_0_tx_clk_0, 6.4, units="ns").start()) self.qsfp_0_0_sink = XgmiiSink(dut.qsfp_0_txd_0, dut.qsfp_0_txc_0, dut.qsfp_0_tx_clk_0, dut.qsfp_0_tx_rst_0) cocotb.fork(Clock(dut.qsfp_0_rx_clk_1, 6.4, units="ns").start()) self.qsfp_0_1_source = XgmiiSource(dut.qsfp_0_rxd_1, dut.qsfp_0_rxc_1, dut.qsfp_0_rx_clk_1, dut.qsfp_0_rx_rst_1) cocotb.fork(Clock(dut.qsfp_0_tx_clk_1, 6.4, units="ns").start()) self.qsfp_0_1_sink = XgmiiSink(dut.qsfp_0_txd_1, dut.qsfp_0_txc_1, dut.qsfp_0_tx_clk_1, dut.qsfp_0_tx_rst_1) cocotb.fork(Clock(dut.qsfp_0_rx_clk_2, 6.4, units="ns").start()) self.qsfp_0_2_source = XgmiiSource(dut.qsfp_0_rxd_2, dut.qsfp_0_rxc_2, dut.qsfp_0_rx_clk_2, dut.qsfp_0_rx_rst_2) cocotb.fork(Clock(dut.qsfp_0_tx_clk_2, 6.4, units="ns").start()) self.qsfp_0_2_sink = XgmiiSink(dut.qsfp_0_txd_2, dut.qsfp_0_txc_2, dut.qsfp_0_tx_clk_2, dut.qsfp_0_tx_rst_2) cocotb.fork(Clock(dut.qsfp_0_rx_clk_3, 6.4, units="ns").start()) self.qsfp_0_3_source = XgmiiSource(dut.qsfp_0_rxd_3, dut.qsfp_0_rxc_3, dut.qsfp_0_rx_clk_3, dut.qsfp_0_rx_rst_3) cocotb.fork(Clock(dut.qsfp_0_tx_clk_3, 6.4, units="ns").start()) self.qsfp_0_3_sink = XgmiiSink(dut.qsfp_0_txd_3, dut.qsfp_0_txc_3, dut.qsfp_0_tx_clk_3, dut.qsfp_0_tx_rst_3) cocotb.fork(Clock(dut.qsfp_1_rx_clk_0, 6.4, units="ns").start()) self.qsfp_1_0_source = XgmiiSource(dut.qsfp_1_rxd_0, dut.qsfp_1_rxc_0, dut.qsfp_1_rx_clk_0, dut.qsfp_1_rx_rst_0) cocotb.fork(Clock(dut.qsfp_1_tx_clk_0, 6.4, units="ns").start()) self.qsfp_1_0_sink = XgmiiSink(dut.qsfp_1_txd_0, dut.qsfp_1_txc_0, dut.qsfp_1_tx_clk_0, dut.qsfp_1_tx_rst_0) cocotb.fork(Clock(dut.qsfp_1_rx_clk_1, 6.4, units="ns").start()) self.qsfp_1_1_source = XgmiiSource(dut.qsfp_1_rxd_1, dut.qsfp_1_rxc_1, dut.qsfp_1_rx_clk_1, dut.qsfp_1_rx_rst_1) cocotb.fork(Clock(dut.qsfp_1_tx_clk_1, 6.4, units="ns").start()) self.qsfp_1_1_sink = XgmiiSink(dut.qsfp_1_txd_1, dut.qsfp_1_txc_1, dut.qsfp_1_tx_clk_1, dut.qsfp_1_tx_rst_1) cocotb.fork(Clock(dut.qsfp_1_rx_clk_2, 6.4, units="ns").start()) self.qsfp_1_2_source = XgmiiSource(dut.qsfp_1_rxd_2, dut.qsfp_1_rxc_2, dut.qsfp_1_rx_clk_2, dut.qsfp_1_rx_rst_2) cocotb.fork(Clock(dut.qsfp_1_tx_clk_2, 6.4, units="ns").start()) self.qsfp_1_2_sink = XgmiiSink(dut.qsfp_1_txd_2, dut.qsfp_1_txc_2, dut.qsfp_1_tx_clk_2, dut.qsfp_1_tx_rst_2) cocotb.fork(Clock(dut.qsfp_1_rx_clk_3, 6.4, units="ns").start()) self.qsfp_1_3_source = XgmiiSource(dut.qsfp_1_rxd_3, dut.qsfp_1_rxc_3, dut.qsfp_1_rx_clk_3, dut.qsfp_1_rx_rst_3) cocotb.fork(Clock(dut.qsfp_1_tx_clk_3, 6.4, units="ns").start()) self.qsfp_1_3_sink = 
XgmiiSink(dut.qsfp_1_txd_3, dut.qsfp_1_txc_3, dut.qsfp_1_tx_clk_3, dut.qsfp_1_tx_rst_3) dut.qsfp_0_i2c_scl_i.setimmediatevalue(1) dut.qsfp_0_i2c_sda_i.setimmediatevalue(1) dut.qsfp_0_intr_n.setimmediatevalue(1) dut.qsfp_0_mod_prsnt_n.setimmediatevalue(0) dut.qsfp_0_rx_error_count_0.setimmediatevalue(0) dut.qsfp_0_rx_error_count_1.setimmediatevalue(0) dut.qsfp_0_rx_error_count_2.setimmediatevalue(0) dut.qsfp_0_rx_error_count_3.setimmediatevalue(0) dut.qsfp_1_i2c_scl_i.setimmediatevalue(1) dut.qsfp_1_i2c_sda_i.setimmediatevalue(1) dut.qsfp_1_intr_n.setimmediatevalue(1) dut.qsfp_1_mod_prsnt_n.setimmediatevalue(0) dut.qsfp_1_rx_error_count_0.setimmediatevalue(0) dut.qsfp_1_rx_error_count_1.setimmediatevalue(0) dut.qsfp_1_rx_error_count_2.setimmediatevalue(0) dut.qsfp_1_rx_error_count_3.setimmediatevalue(0) dut.qspi_dq_i.setimmediatevalue(0) dut.pps_in.setimmediatevalue(0) dut.bmc_miso.setimmediatevalue(0) dut.bmc_int.setimmediatevalue(0) self.loopback_enable = False cocotb.fork(self._run_loopback()) async def init(self): self.dut.qsfp_0_rx_rst_0.setimmediatevalue(0) self.dut.qsfp_0_tx_rst_0.setimmediatevalue(0) self.dut.qsfp_0_rx_rst_1.setimmediatevalue(0) self.dut.qsfp_0_tx_rst_1.setimmediatevalue(0) self.dut.qsfp_0_rx_rst_2.setimmediatevalue(0) self.dut.qsfp_0_tx_rst_2.setimmediatevalue(0) self.dut.qsfp_0_rx_rst_3.setimmediatevalue(0) self.dut.qsfp_0_tx_rst_3.setimmediatevalue(0) self.dut.qsfp_1_rx_rst_0.setimmediatevalue(0) self.dut.qsfp_1_tx_rst_0.setimmediatevalue(0) self.dut.qsfp_1_rx_rst_1.setimmediatevalue(0) self.dut.qsfp_1_tx_rst_1.setimmediatevalue(0) self.dut.qsfp_1_rx_rst_2.setimmediatevalue(0) self.dut.qsfp_1_tx_rst_2.setimmediatevalue(0) self.dut.qsfp_1_rx_rst_3.setimmediatevalue(0) self.dut.qsfp_1_tx_rst_3.setimmediatevalue(0) await RisingEdge(self.dut.clk_250mhz) await RisingEdge(self.dut.clk_250mhz) self.dut.qsfp_0_rx_rst_0.setimmediatevalue(1) self.dut.qsfp_0_tx_rst_0.setimmediatevalue(1) self.dut.qsfp_0_rx_rst_1.setimmediatevalue(1) self.dut.qsfp_0_tx_rst_1.setimmediatevalue(1) self.dut.qsfp_0_rx_rst_2.setimmediatevalue(1) self.dut.qsfp_0_tx_rst_2.setimmediatevalue(1) self.dut.qsfp_0_rx_rst_3.setimmediatevalue(1) self.dut.qsfp_0_tx_rst_3.setimmediatevalue(1) self.dut.qsfp_1_rx_rst_0.setimmediatevalue(1) self.dut.qsfp_1_tx_rst_0.setimmediatevalue(1) self.dut.qsfp_1_rx_rst_1.setimmediatevalue(1) self.dut.qsfp_1_tx_rst_1.setimmediatevalue(1) self.dut.qsfp_1_rx_rst_2.setimmediatevalue(1) self.dut.qsfp_1_tx_rst_2.setimmediatevalue(1) self.dut.qsfp_1_rx_rst_3.setimmediatevalue(1) self.dut.qsfp_1_tx_rst_3.setimmediatevalue(1) await FallingEdge(self.dut.rst_250mhz) await Timer(100, 'ns') await RisingEdge(self.dut.clk_250mhz) await RisingEdge(self.dut.clk_250mhz) self.dut.qsfp_0_rx_rst_0.setimmediatevalue(0) self.dut.qsfp_0_tx_rst_0.setimmediatevalue(0) self.dut.qsfp_0_rx_rst_1.setimmediatevalue(0) self.dut.qsfp_0_tx_rst_1.setimmediatevalue(0) self.dut.qsfp_0_rx_rst_2.setimmediatevalue(0) self.dut.qsfp_0_tx_rst_2.setimmediatevalue(0) self.dut.qsfp_0_rx_rst_3.setimmediatevalue(0) self.dut.qsfp_0_tx_rst_3.setimmediatevalue(0) self.dut.qsfp_1_rx_rst_0.setimmediatevalue(0) self.dut.qsfp_1_tx_rst_0.setimmediatevalue(0) self.dut.qsfp_1_rx_rst_1.setimmediatevalue(0) self.dut.qsfp_1_tx_rst_1.setimmediatevalue(0) self.dut.qsfp_1_rx_rst_2.setimmediatevalue(0) self.dut.qsfp_1_tx_rst_2.setimmediatevalue(0) self.dut.qsfp_1_rx_rst_3.setimmediatevalue(0) self.dut.qsfp_1_tx_rst_3.setimmediatevalue(0) await self.rc.enumerate(enable_bus_mastering=True, configure_msi=True) async def 
_run_loopback(self): while True: await RisingEdge(self.dut.clk_250mhz) if self.loopback_enable: if not self.qsfp_0_0_sink.empty(): await self.qsfp_0_0_source.send(await self.qsfp_0_0_sink.recv()) if not self.qsfp_0_1_sink.empty(): await self.qsfp_0_1_source.send(await self.qsfp_0_1_sink.recv()) if not self.qsfp_0_2_sink.empty(): await self.qsfp_0_2_source.send(await self.qsfp_0_2_sink.recv()) if not self.qsfp_0_3_sink.empty(): await self.qsfp_0_3_source.send(await self.qsfp_0_3_sink.recv()) if not self.qsfp_1_0_sink.empty(): await self.qsfp_1_0_source.send(await self.qsfp_1_0_sink.recv()) if not self.qsfp_1_1_sink.empty(): await self.qsfp_1_1_source.send(await self.qsfp_1_1_sink.recv()) if not self.qsfp_1_2_sink.empty(): await self.qsfp_1_2_source.send(await self.qsfp_1_2_sink.recv()) if not self.qsfp_1_3_sink.empty(): await self.qsfp_1_3_source.send(await self.qsfp_1_3_sink.recv()) @cocotb.test() async def run_test_nic(dut): tb = TB(dut) await tb.init() tb.log.info("Init driver") await tb.driver.init_dev(tb.dev.functions[0].pcie_id) await tb.driver.interfaces[0].open() # await driver.interfaces[1].open() # enable queues tb.log.info("Enable queues") await tb.rc.mem_write_dword(tb.driver.interfaces[0].ports[0].hw_addr+mqnic.MQNIC_PORT_REG_SCHED_ENABLE, 0x00000001) for k in range(tb.driver.interfaces[0].tx_queue_count): await tb.rc.mem_write_dword(tb.driver.interfaces[0].ports[0].schedulers[0].hw_addr+4*k, 0x00000003) # wait for all writes to complete await tb.rc.mem_read(tb.driver.hw_addr, 4) tb.log.info("Init complete") tb.log.info("Send and receive single packet") data = bytearray([x % 256 for x in range(1024)]) await tb.driver.interfaces[0].start_xmit(data, 0) pkt = await tb.qsfp_0_0_sink.recv() tb.log.info("Packet: %s", pkt) await tb.qsfp_0_0_source.send(pkt) pkt = await tb.driver.interfaces[0].recv() tb.log.info("Packet: %s", pkt) assert pkt.rx_checksum == ~scapy.utils.checksum(bytes(pkt.data[14:])) & 0xffff # await tb.driver.interfaces[1].start_xmit(data, 0) # pkt = await tb.qsfp_1_0_sink.recv() # tb.log.info("Packet: %s", pkt) # await tb.qsfp_1_0_source.send(pkt) # pkt = await tb.driver.interfaces[1].recv() # tb.log.info("Packet: %s", pkt) # assert pkt.rx_checksum == ~scapy.utils.checksum(bytes(pkt.data[14:])) & 0xffff tb.log.info("RX and TX checksum tests") payload = bytes([x % 256 for x in range(256)]) eth = Ether(src='5A:51:52:53:54:55', dst='DA:D1:D2:D3:D4:D5') ip = IP(src='192.168.1.100', dst='192.168.1.101') udp = UDP(sport=1, dport=2) test_pkt = eth / ip / udp / payload test_pkt2 = test_pkt.copy() test_pkt2[UDP].chksum = scapy.utils.checksum(bytes(test_pkt2[UDP])) await tb.driver.interfaces[0].start_xmit(test_pkt2.build(), 0, 34, 6) pkt = await tb.qsfp_0_0_sink.recv() tb.log.info("Packet: %s", pkt) await tb.qsfp_0_0_source.send(pkt) pkt = await tb.driver.interfaces[0].recv() tb.log.info("Packet: %s", pkt) assert pkt.rx_checksum == ~scapy.utils.checksum(bytes(pkt.data[14:])) & 0xffff assert Ether(pkt.data).build() == test_pkt.build() tb.log.info("Multiple small packets") count = 64 pkts = [bytearray([(x+k) % 256 for x in range(60)]) for k in range(count)] tb.loopback_enable = True for p in pkts: await tb.driver.interfaces[0].start_xmit(p, 0) for k in range(count): pkt = await tb.driver.interfaces[0].recv() tb.log.info("Packet: %s", pkt) assert pkt.data == pkts[k] assert pkt.rx_checksum == ~scapy.utils.checksum(bytes(pkt.data[14:])) & 0xffff tb.loopback_enable = False tb.log.info("Multiple large packets") count = 64 pkts = [bytearray([(x+k) % 256 for x in range(1514)]) for k in 
range(count)] tb.loopback_enable = True for p in pkts: await tb.driver.interfaces[0].start_xmit(p, 0) for k in range(count): pkt = await tb.driver.interfaces[0].recv() tb.log.info("Packet: %s", pkt) assert pkt.data == pkts[k] assert pkt.rx_checksum == ~scapy.utils.checksum(bytes(pkt.data[14:])) & 0xffff tb.loopback_enable = False await RisingEdge(dut.clk_250mhz) await RisingEdge(dut.clk_250mhz) # cocotb-test tests_dir = os.path.dirname(__file__) rtl_dir = os.path.abspath(os.path.join(tests_dir, '..', '..', 'rtl')) lib_dir = os.path.abspath(os.path.join(rtl_dir, '..', 'lib')) axi_rtl_dir = os.path.abspath(os.path.join(lib_dir, 'axi', 'rtl')) axis_rtl_dir = os.path.abspath(os.path.join(lib_dir, 'axis', 'rtl')) eth_rtl_dir = os.path.abspath(os.path.join(lib_dir, 'eth', 'rtl')) pcie_rtl_dir = os.path.abspath(os.path.join(lib_dir, 'pcie', 'rtl')) def test_fpga_core(request): dut = "fpga_core" module = os.path.splitext(os.path.basename(__file__))[0] toplevel = dut verilog_sources = [ os.path.join(rtl_dir, f"{dut}.v"), os.path.join(rtl_dir, "bmc_spi.v"), os.path.join(rtl_dir, "common", "mqnic_interface.v"), os.path.join(rtl_dir, "common", "mqnic_port.v"), os.path.join(rtl_dir, "common", "cpl_write.v"), os.path.join(rtl_dir, "common", "cpl_op_mux.v"), os.path.join(rtl_dir, "common", "desc_fetch.v"), os.path.join(rtl_dir, "common", "desc_op_mux.v"), os.path.join(rtl_dir, "common", "queue_manager.v"), os.path.join(rtl_dir, "common", "cpl_queue_manager.v"), os.path.join(rtl_dir, "common", "tx_engine.v"), os.path.join(rtl_dir, "common", "rx_engine.v"), os.path.join(rtl_dir, "common", "tx_checksum.v"), os.path.join(rtl_dir, "common", "rx_hash.v"), os.path.join(rtl_dir, "common", "rx_checksum.v"), os.path.join(rtl_dir, "common", "tx_scheduler_rr.v"), os.path.join(rtl_dir, "common", "event_mux.v"), os.path.join(rtl_dir, "common", "tdma_scheduler.v"), os.path.join(rtl_dir, "common", "tdma_ber.v"), os.path.join(rtl_dir, "common", "tdma_ber_ch.v"), os.path.join(eth_rtl_dir, "eth_mac_10g_fifo.v"), os.path.join(eth_rtl_dir, "eth_mac_10g.v"), os.path.join(eth_rtl_dir, "axis_xgmii_rx_64.v"), os.path.join(eth_rtl_dir, "axis_xgmii_tx_64.v"), os.path.join(eth_rtl_dir, "lfsr.v"), os.path.join(eth_rtl_dir, "ptp_clock.v"), os.path.join(eth_rtl_dir, "ptp_clock_cdc.v"), os.path.join(eth_rtl_dir, "ptp_perout.v"), os.path.join(eth_rtl_dir, "ptp_ts_extract.v"), os.path.join(axi_rtl_dir, "axil_interconnect.v"), os.path.join(axi_rtl_dir, "arbiter.v"), os.path.join(axi_rtl_dir, "priority_encoder.v"), os.path.join(axis_rtl_dir, "axis_adapter.v"), os.path.join(axis_rtl_dir, "axis_arb_mux.v"), os.path.join(axis_rtl_dir, "axis_async_fifo.v"), os.path.join(axis_rtl_dir, "axis_async_fifo_adapter.v"), os.path.join(axis_rtl_dir, "axis_fifo.v"), os.path.join(axis_rtl_dir, "axis_register.v"), os.path.join(pcie_rtl_dir, "pcie_us_axil_master.v"), os.path.join(pcie_rtl_dir, "dma_if_pcie_us.v"), os.path.join(pcie_rtl_dir, "dma_if_pcie_us_rd.v"), os.path.join(pcie_rtl_dir, "dma_if_pcie_us_wr.v"), os.path.join(pcie_rtl_dir, "dma_if_mux.v"), os.path.join(pcie_rtl_dir, "dma_if_mux_rd.v"), os.path.join(pcie_rtl_dir, "dma_if_mux_wr.v"), os.path.join(pcie_rtl_dir, "dma_psdpram.v"), os.path.join(pcie_rtl_dir, "dma_client_axis_sink.v"), os.path.join(pcie_rtl_dir, "dma_client_axis_source.v"), os.path.join(pcie_rtl_dir, "pcie_us_cfg.v"), os.path.join(pcie_rtl_dir, "pcie_us_msi.v"), os.path.join(pcie_rtl_dir, "pulse_merge.v"), ] parameters = {} parameters['AXIS_PCIE_DATA_WIDTH'] = 512 parameters['AXIS_PCIE_KEEP_WIDTH'] = 
parameters['AXIS_PCIE_DATA_WIDTH'] // 32 parameters['AXIS_PCIE_RQ_USER_WIDTH'] = 62 if parameters['AXIS_PCIE_DATA_WIDTH'] < 512 else 137 parameters['AXIS_PCIE_RC_USER_WIDTH'] = 75 if parameters['AXIS_PCIE_DATA_WIDTH'] < 512 else 161 parameters['AXIS_PCIE_CQ_USER_WIDTH'] = 88 if parameters['AXIS_PCIE_DATA_WIDTH'] < 512 else 183 parameters['AXIS_PCIE_CC_USER_WIDTH'] = 33 if parameters['AXIS_PCIE_DATA_WIDTH'] < 512 else 81 parameters['RQ_SEQ_NUM_WIDTH'] = 6 parameters['BAR0_APERTURE'] = 24 extra_env = {f'PARAM_{k}': str(v) for k, v in parameters.items()} sim_build = os.path.join(tests_dir, "sim_build", request.node.name.replace('[', '-').replace(']', '')) cocotb_test.simulator.run( python_search=[tests_dir], verilog_sources=verilog_sources, toplevel=toplevel, module=module, parameters=parameters, sim_build=sim_build, extra_env=extra_env, )
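# A short usage sketch for the harness above: cocotb-test discovers
# test_fpga_core() through pytest and selects the simulator from the SIM
# environment variable, e.g. `SIM=verilator pytest -k test_fpga_core`.
# Every entry of `parameters` is applied to the Verilog toplevel and is also
# exported to the cocotb process with a PARAM_ prefix, which is why the TB
# class reads os.getenv("PARAM_BAR0_APERTURE").  The helper below is a
# hypothetical illustration of that naming convention, not part of the
# original harness.
def param_env(parameters):
    # Map {'BAR0_APERTURE': 24} to {'PARAM_BAR0_APERTURE': '24'}, mirroring
    # the extra_env construction inside test_fpga_core().
    return {f'PARAM_{k}': str(v) for k, v in parameters.items()}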
the-stack_0_14287
# -*- coding: utf-8 -*-
#
# ResNet152 model definition is based on TensorFlow Slim implementation.
#
# Author: Gencer Sumbul, http://www.user.tu-berlin.de/gencersumbul/
# Email: [email protected]
# Date: 23 Dec 2019
# Version: 1.0.1

import tensorflow as tf
from nets.resnet_utils import resnet_arg_scope
from nets.resnet_v1 import resnet_v1_152
from models.main_model import Model


class DNN_model(Model):
    def create_network(self):
        with tf.contrib.slim.arg_scope(resnet_arg_scope()):
            logits, end_points = resnet_v1_152(
                self.img,
                num_classes=self.nb_class,
                is_training=self.is_training,
                global_pool=True,
                spatial_squeeze=True
            )
        self.logits = logits
        self.probabilities = tf.nn.sigmoid(self.logits)
        self.predictions = tf.cast(
            self.probabilities >= self.prediction_threshold, tf.float32)
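# A small, self-contained illustration of the sigmoid + threshold step above
# (multi-label prediction), written with NumPy so it runs outside the TF
# graph; the 0.5 threshold is an assumed example value, the real one is
# supplied by the base Model class as self.prediction_threshold.
import numpy as np

logits = np.array([[2.0, -1.0, 0.3]])
probabilities = 1.0 / (1.0 + np.exp(-logits))        # same as tf.nn.sigmoid
predictions = (probabilities >= 0.5).astype(np.float32)
print(predictions)                                    # -> [[1. 0. 1.]]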
the-stack_0_14293
# Copyright 2020 Tier IV, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from ament_index_python.packages import get_package_share_directory import launch from launch.actions import DeclareLaunchArgument from launch.actions import OpaqueFunction from launch.substitutions import LaunchConfiguration from launch_ros.actions import ComposableNodeContainer from launch_ros.descriptions import ComposableNode import yaml def launch_setup(context, *args, **kwargs): with open(LaunchConfiguration("cpu_monitor_config_file").perform(context), "r") as f: cpu_monitor_config = yaml.safe_load(f)["/**"]["ros__parameters"] cpu_monitor = ComposableNode( package="system_monitor", plugin="CPUMonitor", name="cpu_monitor", parameters=[ cpu_monitor_config, ], ) with open(LaunchConfiguration("hdd_monitor_config_file").perform(context), "r") as f: hdd_monitor_config = yaml.safe_load(f)["/**"]["ros__parameters"] hdd_monitor = ComposableNode( package="system_monitor", plugin="HDDMonitor", name="hdd_monitor", parameters=[ hdd_monitor_config, ], ) with open(LaunchConfiguration("mem_monitor_config_file").perform(context), "r") as f: mem_monitor_config = yaml.safe_load(f)["/**"]["ros__parameters"] mem_monitor = ComposableNode( package="system_monitor", plugin="MemMonitor", name="mem_monitor", parameters=[ mem_monitor_config, ], ) with open(LaunchConfiguration("net_monitor_config_file").perform(context), "r") as f: net_monitor_config = yaml.safe_load(f)["/**"]["ros__parameters"] net_monitor = ComposableNode( package="system_monitor", plugin="NetMonitor", name="net_monitor", parameters=[ net_monitor_config, ], ) with open(LaunchConfiguration("ntp_monitor_config_file").perform(context), "r") as f: ntp_monitor_config = yaml.safe_load(f)["/**"]["ros__parameters"] ntp_monitor = ComposableNode( package="system_monitor", plugin="NTPMonitor", name="ntp_monitor", parameters=[ ntp_monitor_config, ], ) with open(LaunchConfiguration("process_monitor_config_file").perform(context), "r") as f: process_monitor_config = yaml.safe_load(f)["/**"]["ros__parameters"] process_monitor = ComposableNode( package="system_monitor", plugin="ProcessMonitor", name="process_monitor", parameters=[ process_monitor_config, ], ) with open(LaunchConfiguration("gpu_monitor_config_file").perform(context), "r") as f: gpu_monitor_config = yaml.safe_load(f)["/**"]["ros__parameters"] gpu_monitor = ComposableNode( package="system_monitor", plugin="GPUMonitor", name="gpu_monitor", parameters=[ gpu_monitor_config, ], ) # set container to run all required components in the same process container = ComposableNodeContainer( name="system_monitor_container", namespace="system_monitor", package="rclcpp_components", executable="component_container_mt", composable_node_descriptions=[ cpu_monitor, hdd_monitor, mem_monitor, net_monitor, ntp_monitor, process_monitor, gpu_monitor, ], output="screen", ) return [container] def generate_launch_description(): system_monitor_path = os.path.join( get_package_share_directory("system_launch"), "config", 
"system_monitor" ) return launch.LaunchDescription( [ DeclareLaunchArgument( "cpu_monitor_config_file", default_value=os.path.join(system_monitor_path, "cpu_monitor.param.yaml"), ), DeclareLaunchArgument( "hdd_monitor_config_file", default_value=os.path.join(system_monitor_path, "hdd_monitor.param.yaml"), ), DeclareLaunchArgument( "mem_monitor_config_file", default_value=os.path.join(system_monitor_path, "mem_monitor.param.yaml"), ), DeclareLaunchArgument( "net_monitor_config_file", default_value=os.path.join(system_monitor_path, "net_monitor.param.yaml"), ), DeclareLaunchArgument( "ntp_monitor_config_file", default_value=os.path.join(system_monitor_path, "ntp_monitor.param.yaml"), ), DeclareLaunchArgument( "process_monitor_config_file", default_value=os.path.join(system_monitor_path, "process_monitor.param.yaml"), ), DeclareLaunchArgument( "gpu_monitor_config_file", default_value=os.path.join(system_monitor_path, "gpu_monitor.param.yaml"), ), OpaqueFunction(function=launch_setup), ] )
the-stack_0_14295
# Copyright 2019, by the California Institute of Technology. # ALL RIGHTS RESERVED. United States Government Sponsorship acknowledged. # Any commercial use must be negotiated with the Office of Technology # Transfer at the California Institute of Technology. # # This software may be subject to U.S. export control laws. By accepting # this software, the user agrees to comply with all applicable U.S. export # laws and regulations. User has the responsibility to obtain export # licenses, or other export authority as may be required before exporting # such information to foreign countries or providing access to foreign # persons. """ ========= subset.py ========= Functions related to subsetting a NetCDF file. """ import datetime import functools import json import operator import os import geopandas as gpd import importlib_metadata import julian import numpy as np import xarray as xr import netCDF4 as nc import pandas as pd from shapely.geometry import Point from shapely.ops import transform from podaac.subsetter import xarray_enhancements as xre GROUP_DELIM = '__' SERVICE_NAME = 'l2ss-py' def apply_scale_offset(scale, offset, value): """Apply scale and offset to the given value""" return (value + offset) / scale def remove_scale_offset(value, scale, offset): """Remove scale and offset from the given value""" return (value * scale) - offset def convert_bound(bound, coord_max, coord_var): """ This function will return a converted bound which which matches the range of the given input file. Parameters ---------- bound : np.array 1-dimensional 2-element numpy array which represent the lower and upper bounding box on this coordinate, respectively. coord_max : integer The max value which is possible given this coordinate. For example, the max for longitude is 360. coord_var : xarray.DataArray The xarray variable for some coordinate. Returns ------- np.array 1-dimensional 2-element number array which represents the lower and upper bounding box on this coordinate and has been converted based on the valid bounds coordinate range of the dataset. Notes ----- Assumption that 0 is always on the prime meridian/equator. """ scale = coord_var.attrs.get('scale_factor', 1.0) offset = coord_var.attrs.get('add_offset', 0.0) valid_min = coord_var.attrs.get('valid_min', None) if valid_min is None or valid_min > 0: # If coord var doesn't contain valid min, attempt to find # manually. Note: Given the perfect storm, this could still fail # to find the actual bounds. # Filter out _FillValue from data before calculating min and max fill_value = coord_var.attrs.get('_FillValue', None) var_values = coord_var.values if fill_value: var_values = np.where(var_values != fill_value, var_values, np.nan) var_min = np.nanmin(var_values) var_max = np.nanmax(var_values) if 0 <= var_min <= var_max <= (coord_max / scale): valid_min = 0 # If the file coords are 0 --> max if valid_min == 0: bound = (bound + coord_max) % coord_max # If the right/top bound is 0, set to max. 
if bound[1] == 0: bound[1] = coord_max # If edges are the same, assume it wraps and return all if bound[0] == bound[1]: bound = np.array([0, coord_max]) # If the file longitude is -coord_max/2 --> coord_max/2 if valid_min != 0: # If edges are the same, assume it wraps and return all if bound[0] == bound[1]: bound = np.array([-(coord_max / 2), coord_max / 2]) # Calculate scale and offset so the bounds match the coord data return apply_scale_offset(scale, offset, bound) def convert_bbox(bbox, dataset, lat_var_name, lon_var_name): """ This function will return a converted bbox which matches the range of the given input file. This will convert both the latitude and longitude range. For example, an input dataset can have a valid longitude range of -180 --> 180 or of 0 --> 360. Parameters ---------- bbox : np.array The bounding box dataset : xarray.Dataset The dataset which is being subset. lat_var_name : str Name of the lat variable in the given dataset lon_var_name : str Name of the lon variable in the given dataset Returns ------- bbox : np.array The new bbox which matches latitude and longitude ranges of the input file. Notes ----- Assumption that the provided bounding box is always between -180 --> 180 for longitude and -90, 90 for latitude. """ return np.array([convert_bound(bbox[0], 360, dataset[lon_var_name]), convert_bound(bbox[1], 180, dataset[lat_var_name])]) def set_json_history(dataset, cut, file_to_subset, bbox=None, shapefile=None, origin_source=None): """ Set the 'json_history' metadata header of the granule to reflect the current version of the subsetter, as well as the parameters used to call the subsetter. This will append an json array to the json_history of the following format: Parameters ---------- dataset : xarray.Dataset The dataset to change the header of bbox : np.ndarray The requested bounding box file_to_subset : string The filepath of the file which was used to subset cut : boolean True to cut the scanline shapefile : str Name of the shapefile to include in the version history """ params = f'cut={cut}' if bbox is not None: params = f'bbox={bbox.tolist()} {params}' elif shapefile is not None: params = f'shapefile={shapefile} {params}' history_json = dataset.attrs.get('history_json', []) if history_json: history_json = json.loads(history_json) if origin_source: derived_from = origin_source else: derived_from = os.path.basename(file_to_subset) new_history_json = { "date_time": datetime.datetime.now(tz=datetime.timezone.utc).isoformat(), "derived_from": derived_from, "program": SERVICE_NAME, "version": importlib_metadata.distribution(SERVICE_NAME).version, "parameters": params, "program_ref": "https://cmr.earthdata.nasa.gov:443/search/concepts/S1962070864-POCLOUD", "$schema": "https://harmony.earthdata.nasa.gov/schemas/history/0.1.0/history-v0.1.0.json" } history_json.append(new_history_json) dataset.attrs['history_json'] = json.dumps(history_json) def set_version_history(dataset, cut, bbox=None, shapefile=None): """ Set the 'history' metadata header of the granule to reflect the current version of the subsetter, as well as the parameters used to call the subsetter. 
This will append a line to the history of the following format: TIMESTAMP podaac.subsetter VERSION (PARAMS) Parameters ---------- dataset : xarray.Dataset The dataset to change the header of bbox : np.ndarray The requested bounding box cut : boolean True to cut the scanline shapefile : str Name of the shapefile to include in the version history """ version = importlib_metadata.distribution(SERVICE_NAME).version history = dataset.attrs.get('history', "") timestamp = datetime.datetime.utcnow() params = f'cut={cut}' if bbox is not None: params = f'bbox={bbox.tolist()} {params}' elif shapefile is not None: params = f'shapefile={shapefile} {params}' history += f"\n{timestamp} {SERVICE_NAME} v{version} ({params})" dataset.attrs['history'] = history.strip() def calculate_chunks(dataset): """ For the given dataset, calculate if the size on any dimension is worth chunking. Any dimension larger than 4000 will be chunked. This is done to ensure that the variable can fit in memory. Parameters ---------- dataset : xarray.Dataset The dataset to calculate chunks for. Returns ------- dict The chunk dictionary, where the key is the dimension and the value is 4000. """ chunk_dict = {dim: 4000 for dim in dataset.dims if dataset.dims[dim] > 4000 and len(dataset.dims) > 1} return chunk_dict def find_matching_coords(dataset, match_list): """ As a backup for finding a coordinate var, look at the 'coordinates' metadata attribute of all data vars in the granule. Return any coordinate vars that have name matches with the provided 'match_list' Parameters ---------- dataset : xr.Dataset Dataset to search data variable coordinate metadata attribute match_list : list (str) List of possible matches to search for. For example, ['lat', 'latitude'] would search for variables in the 'coordinates' metadata attribute containing either 'lat' or 'latitude' Returns ------- list (str) List of matching coordinate variables names """ coord_attrs = [ var.attrs['coordinates'] for var_name, var in dataset.data_vars.items() if 'coordinates' in var.attrs ] coord_attrs = list(set(coord_attrs)) match_coord_vars = [] for coord_attr in coord_attrs: coords = coord_attr.split(' ') match_vars = [ coord for coord in coords if any(coord_cand in coord for coord_cand in match_list) ] if match_vars and match_vars[0] in dataset: # Check if the var actually exists in the dataset match_coord_vars.append(match_vars[0]) return match_coord_vars def get_coord_variable_names(dataset): """ Given a dataset, determine the coordinate variable from a list of options Parameters ---------- dataset: xr.Dataset The dataset to find the coordinate variables for Returns ------- tuple, str Tuple of strings, where the first element is the lat coordinate name and the second element is the lon coordinate name """ possible_lat_coord_names = ['lat', 'latitude', 'y'] possible_lon_coord_names = ['lon', 'longitude', 'x'] def var_is_coord(var_name, possible_coord_names): var_name = var_name.strip(GROUP_DELIM).split(GROUP_DELIM)[-1] return var_name in possible_coord_names lat_coord_names = list(filter( lambda var_name: var_is_coord(var_name, possible_lat_coord_names), dataset.variables)) lon_coord_names = list(filter( lambda var_name: var_is_coord(var_name, possible_lon_coord_names), dataset.variables)) if len(lat_coord_names) < 1 or len(lon_coord_names) < 1: lat_coord_names = find_matching_coords(dataset, possible_lat_coord_names) lon_coord_names = find_matching_coords(dataset, possible_lon_coord_names) if len(lat_coord_names) < 1 or len(lon_coord_names) < 1: raise 
ValueError('Could not determine coordinate variables') return lat_coord_names, lon_coord_names def is_360(lon_var, scale, offset): """ Determine if given dataset is a '360' dataset or not. Parameters ---------- lon_var : xr.DataArray The lon variable from the xarray Dataset scale : float Used to remove scale and offset for easier calculation offset : float Used to remove scale and offset for easier calculation Returns ------- bool True if dataset is 360, False if not. Defaults to False. """ valid_min = lon_var.attrs.get('valid_min', None) if valid_min is None or valid_min > 0: var_min = remove_scale_offset(np.amin(lon_var.values), scale, offset) var_max = remove_scale_offset(np.amax(lon_var.values), scale, offset) if var_min < 0: return False if var_max > 180: return True if valid_min == 0: return True if valid_min < 0: return False return False def get_spatial_bounds(dataset, lat_var_names, lon_var_names): """ Get the spatial bounds for this dataset. These values are masked and scaled. Parameters ---------- dataset : xr.Dataset Dataset to retrieve spatial bounds for lat_var_name : str Name of the lat variable lon_var_name : str Name of the lon variable Returns ------- np.array [[lon min, lon max], [lat min, lat max]] """ lat_var_name = lat_var_names[0] if len(lat_var_names) == 1 else [ lat_name for lat_name in lat_var_names if lat_name in dataset.data_vars.keys() ][0] lon_var_name = lon_var_names[0] if len(lon_var_names) == 1 else [ lon_name for lon_name in lon_var_names if lon_name in dataset.data_vars.keys() ][0] # Get scale from coordinate variable metadata attributes lat_scale = dataset[lat_var_name].attrs.get('scale_factor', 1.0) lon_scale = dataset[lon_var_name].attrs.get('scale_factor', 1.0) lat_offset = dataset[lat_var_name].attrs.get('add_offset', 0.0) lon_offset = dataset[lon_var_name].attrs.get('add_offset', 0.0) lon_valid_min = dataset[lon_var_name].attrs.get('valid_min', None) lat_fill_value = dataset[lat_var_name].attrs.get('_FillValue', None) lon_fill_value = dataset[lon_var_name].attrs.get('_FillValue', None) # Apply mask and scale to min/max coordinate variables to get # spatial bounds # Remove fill value. Might cause errors when getting min and max lats = dataset[lat_var_name].values.flatten() lons = dataset[lon_var_name].values.flatten() if lat_fill_value: lats = list(filter(lambda a: not a == lat_fill_value, lats)) if lon_fill_value: lons = list(filter(lambda a: not a == lon_fill_value, lons)) if len(lats) == 0 or len(lons) == 0: return None min_lat = remove_scale_offset(np.nanmin(lats), lat_scale, lat_offset) max_lat = remove_scale_offset(np.nanmax(lats), lat_scale, lat_offset) min_lon = remove_scale_offset(np.nanmin(lons), lon_scale, lon_offset) max_lon = remove_scale_offset(np.nanmax(lons), lon_scale, lon_offset) min_lat = round(min_lat, 1) max_lat = round(max_lat, 1) min_lon = round(min_lon, 1) max_lon = round(max_lon, 1) # Convert longitude to [-180,180] format if lon_valid_min == 0 or 0 <= min_lon <= max_lon <= 360: if min_lon > 180: min_lon -= 360 if max_lon > 180: max_lon -= 360 if min_lon == max_lon: min_lon = -180 max_lon = 180 return np.array([[min_lon, max_lon], [min_lat, max_lat]]) def get_time_variable_name(dataset, lat_var): """ Try to determine the name of the 'time' variable. 
This is done as follows: - The variable name contains 'time' - The variable dimensions match the dimensions of the given lat var Parameters ---------- dataset : xr.Dataset: xarray dataset to find time variable from lat_var : xr.Variable Lat variable for this dataset Returns ------- str The name of the variable Raises ------ ValueError If the time variable could not be determined """ time_vars = find_matching_coords(dataset, ['time']) if time_vars: # There should only be one time var match (this is called once # per lat var) return time_vars[0] for var_name in list(dataset.dims.keys()): if "time" in var_name and dataset[var_name].squeeze().dims == lat_var.squeeze().dims: return var_name for var_name in list(dataset.data_vars.keys()): if "time" in var_name and dataset[var_name].squeeze().dims == lat_var.squeeze().dims: return var_name raise ValueError('Unable to determine time variable') def get_time_epoch_var(dataset, time_var_name): """ Get the name of the epoch time var. This is only needed in the case where there is a single time var (of size 1) that contains the time epoch used by the actual time var. Parameters ---------- dataset : xr.Dataset Dataset that contains time var time_var_name : str The name of the actual time var (with matching dims to the coord vars) Returns ------- str The name of the epoch time variable """ time_var = dataset[time_var_name] if 'comment' in time_var.attrs: epoch_var_name = time_var.attrs['comment'].split('plus')[0].strip() elif 'time' in dataset.variables.keys() and time_var_name != 'time': epoch_var_name = 'time' else: raise ValueError('Unable to determine time variables') return epoch_var_name def is_time_mjd(dataset, time_var_name): """ Check to see if the time format is a time delta from a modified julian date. Parameters ---------- dataset : xr.Dataset Dataset that contains time var time_var_name : str The name of the actual time var (with matching dims to the coord vars) Returns ------- boolean is time delta format in modified julian date """ time_var = dataset[time_var_name] if 'comment' in time_var.attrs: if 'Modified Julian Day' in time_var.attrs['comment']: return True return False def translate_timestamp(str_timestamp): """ Translate timestamp to datetime object Parameters ---------- str_timestamp : str Timestamp string. ISO or RFC Returns ------- datetime Constructed Datetime object """ allowed_ts_formats = [ '%Y-%m-%dT%H:%M:%SZ', '%Y-%m-%dT%H:%M:%S%Z', '%Y-%m-%dT%H:%M:%S.%fZ', '%Y-%m-%dT%H:%M:%S.%f%Z' ] for timestamp_format in allowed_ts_formats: try: return datetime.datetime.strptime(str_timestamp, timestamp_format) except ValueError: pass return datetime.datetime.fromisoformat(str_timestamp) def datetime_from_mjd(dataset, time_var_name): """ Translate the modified julian date from the long name in the time attribute. 
Parameters ---------- dataset : xr.Dataset Dataset that contains time var time_var_name : str The name of the actual time var (with matching dims to the coord vars) Returns ------- datetime the datetime of the modified julian date """ time_var = dataset[time_var_name] if 'long_name' in time_var.attrs: mdj_string = time_var.attrs['long_name'] mjd = mdj_string[mdj_string.find("(")+1:mdj_string.find(")")].split("= ")[1] try: mjd_float = float(mjd) except ValueError: return None mjd_datetime = julian.from_jd(mjd_float, fmt='mjd') return mjd_datetime return None def build_temporal_cond(min_time, max_time, dataset, time_var_name): """ Build the temporal condition used in the xarray 'where' call which drops data not in the given bounds. If the data in the time var is of type 'datetime', assume this is a normal case where the time var uses the epoch from the 'units' metadata attribute to get epoch. If the data in the time var is of type 'timedelta', the epoch var is needed to calculate the datetime. Parameters ---------- min_time : str ISO timestamp representing the lower temporal bound max_time : str ISO timestamp representing the upper temporal bound dataset : xr.Dataset Dataset to build the condition off of time_var_name : str Name of the time variable Returns ------- np.array or boolean If temporally subsetted, returns a boolean ND-array the shape of which matches the dimensions of the coordinate vars. 'True' is essentially a noop. """ def build_cond(str_timestamp, compare): timestamp = translate_timestamp(str_timestamp) if np.issubdtype(dataset[time_var_name].dtype, np.dtype(np.datetime64)): timestamp = pd.to_datetime(timestamp) if np.issubdtype(dataset[time_var_name].dtype, np.dtype(np.timedelta64)): if is_time_mjd(dataset, time_var_name): # mjd when timedelta based on mjd_datetime = datetime_from_mjd(dataset, time_var_name) if mjd_datetime is None: raise ValueError('Unable to get datetime from dataset to calculate time delta') # timedelta between timestamp and mjd timestamp = np.datetime64(timestamp) - np.datetime64(mjd_datetime) else: epoch_time_var_name = get_time_epoch_var(dataset, time_var_name) epoch_datetime = dataset[epoch_time_var_name].values[0] timestamp = np.datetime64(timestamp) - epoch_datetime return compare(dataset[time_var_name], timestamp) temporal_conds = [] if min_time: comparison_op = operator.ge temporal_conds.append(build_cond(min_time, comparison_op)) if max_time: comparison_op = operator.le temporal_conds.append(build_cond(max_time, comparison_op)) temporal_cond = True if min_time or max_time: temporal_cond = functools.reduce(lambda cond_a, cond_b: cond_a & cond_b, temporal_conds) return temporal_cond def subset_with_bbox(dataset, lat_var_names, lon_var_names, time_var_names, bbox=None, cut=True, min_time=None, max_time=None): """ Subset an xarray Dataset using a spatial bounding box. Parameters ---------- dataset : xr.Dataset Dataset to subset lat_var_names : list Name of the latitude variables in the given dataset lon_var_names : list Name of the longitude variables in the given dataset time_var_names : list Name of the time variables in the given dataset bbox : np.array Spatial bounding box to subset Dataset with. cut : bool True if scanline should be cut. 
min_time : str ISO timestamp of min temporal bound max_time : str ISO timestamp of max temporal bound Returns ------- np.array Spatial bounds of Dataset after subset operation """ lon_bounds, lat_bounds = convert_bbox(bbox, dataset, lat_var_names[0], lon_var_names[0]) # condition should be 'or' instead of 'and' when bbox lon_min > lon_max oper = operator.and_ if lon_bounds[0] > lon_bounds[1]: oper = operator.or_ datasets = [] for lat_var_name, lon_var_name, time_var_name in zip( lat_var_names, lon_var_names, time_var_names ): if GROUP_DELIM in lat_var_name: var_prefix = GROUP_DELIM.join(lat_var_name.strip(GROUP_DELIM).split(GROUP_DELIM)[:-1]) group_vars = [ var for var in dataset.data_vars.keys() if var.startswith(f'{GROUP_DELIM}{var_prefix}') ] else: group_vars = list(dataset.keys()) group_dataset = dataset[group_vars] # Calculate temporal conditions temporal_cond = build_temporal_cond(min_time, max_time, group_dataset, time_var_name) group_dataset = xre.where( group_dataset, oper( (group_dataset[lon_var_name] >= lon_bounds[0]), (group_dataset[lon_var_name] <= lon_bounds[1]) ) & (group_dataset[lat_var_name] >= lat_bounds[0]) & (group_dataset[lat_var_name] <= lat_bounds[1]) & temporal_cond, cut ) datasets.append(group_dataset) return datasets def subset_with_shapefile(dataset, lat_var_name, lon_var_name, shapefile, cut): """ Subset an xarray Dataset using a shapefile Parameters ---------- dataset : xr.Dataset Dataset to subset lat_var_name : str Name of the latitude variable in the given dataset lon_var_name : str Name of the longitude variable in the given dataset shapefile : np.array Absolute path to the shapefile used to subset the given dataset cut : bool True if scanline should be cut. Returns ------- np.array Spatial bounds of Dataset after shapefile subset operation """ shapefile_df = gpd.read_file(shapefile) lat_scale = dataset[lat_var_name].attrs.get('scale_factor', 1.0) lon_scale = dataset[lon_var_name].attrs.get('scale_factor', 1.0) lat_offset = dataset[lat_var_name].attrs.get('add_offset', 0.0) lon_offset = dataset[lon_var_name].attrs.get('add_offset', 0.0) # If data is '360', convert shapefile to '360' as well. There is an # assumption that the shapefile is -180,180. if is_360(dataset[lon_var_name], lon_scale, lon_offset): # Transform def convert_180_to_360(lon, lat): return tuple(map(lambda value: value + 360 if value < 0 else value, lon)), lat geometries = [transform(convert_180_to_360, geometry) for geometry in shapefile_df.geometry] shapefile_df.geometry = geometries # Mask and scale shapefile def scale(lon, lat): lon = tuple(map(functools.partial(apply_scale_offset, lon_scale, lon_offset), lon)) lat = tuple(map(functools.partial(apply_scale_offset, lat_scale, lat_offset), lat)) return lon, lat geometries = [transform(scale, geometry) for geometry in shapefile_df.geometry] shapefile_df.geometry = geometries def in_shape(lon, lat): point = Point(lon, lat) point_in_shapefile = shapefile_df.contains(point) return point_in_shapefile.array[0] in_shape_vec = np.vectorize(in_shape) boolean_mask = xr.apply_ufunc(in_shape_vec, dataset[lon_var_name], dataset[lat_var_name]) return xre.where(dataset, boolean_mask, cut) def transform_grouped_dataset(nc_dataset, file_to_subset): """ Transform a netCDF4 Dataset that has groups to an xarray compatible dataset. xarray does not work with groups, so this transformation will flatten the variables in the dataset and use the group path as the new variable name. 
For example, data_01 > km > sst would become 'data_01__km__sst', where GROUP_DELIM is __. This same pattern is applied to dimensions, which are located under the appropriate group. They are renamed and placed in the root group. Parameters ---------- nc_dataset : nc.Dataset netCDF4 Dataset that contains groups Returns ------- nc.Dataset netCDF4 Dataset that does not contain groups and that has been flattened. """ # Close the existing read-only dataset and reopen in append mode nc_dataset.close() nc_dataset = nc.Dataset(file_to_subset, 'r+') dimensions = {} def walk(group_node, path): for key, item in group_node.items(): group_path = f'{path}{GROUP_DELIM}{key}' # If there are variables in this group, copy to root group # and then delete from current group if item.variables: # Copy variables to root group with new name for var_name, var in item.variables.items(): var_group_name = f'{group_path}{GROUP_DELIM}{var_name}' nc_dataset.variables[var_group_name] = var # Delete variables var_names = list(item.variables.keys()) for var_name in var_names: del item.variables[var_name] if item.dimensions: dims = list(item.dimensions.keys()) for dim_name in dims: new_dim_name = f'{group_path.replace("/", GROUP_DELIM)}{GROUP_DELIM}{dim_name}' item.dimensions[new_dim_name] = item.dimensions[dim_name] dimensions[new_dim_name] = item.dimensions[dim_name] item.renameDimension(dim_name, new_dim_name) # If there are subgroups in this group, call this function # again on that group. if item.groups: walk(item.groups, group_path) # Delete non-root groups group_names = list(group_node.keys()) for group_name in group_names: del group_node[group_name] for var_name in list(nc_dataset.variables.keys()): new_var_name = f'{GROUP_DELIM}{var_name}' nc_dataset.variables[new_var_name] = nc_dataset.variables[var_name] del nc_dataset.variables[var_name] walk(nc_dataset.groups, '') # Update the dimensions of the dataset in the root group nc_dataset.dimensions.update(dimensions) return nc_dataset def recombine_grouped_datasets(datasets, output_file): """ Given a list of xarray datasets, combine those datasets into a single netCDF4 Dataset and write to the disk. Each dataset has been transformed using its group path and needs to be un-transformed and placed in the appropriate group. Parameters ---------- datasets : list (xr.Dataset) List of xarray datasets to be combined output_file : str Name of the output file to write the resulting NetCDF file to. 
""" def get_nested_group(dataset, group_path): nested_group = dataset for group in group_path.strip(GROUP_DELIM).split(GROUP_DELIM)[:-1]: nested_group = nested_group.groups[group] return nested_group base_dataset = nc.Dataset(output_file, mode='w') for dataset in datasets: group_lst = [] for var_name in dataset.variables.keys(): # need logic if there is data in the top level not in a group group_lst.append('/'.join(var_name.split(GROUP_DELIM)[:-1])) group_lst = ['/' if group == '' else group for group in group_lst] groups = set(group_lst) for group in groups: base_dataset.createGroup(group) for dim_name in list(dataset.dims.keys()): new_dim_name = dim_name.split(GROUP_DELIM)[-1] dim_group = get_nested_group(base_dataset, dim_name) dim_group.createDimension(new_dim_name, dataset.dims[dim_name]) # Rename variables for var_name in list(dataset.variables.keys()): new_var_name = var_name.split(GROUP_DELIM)[-1] var_group = get_nested_group(base_dataset, var_name) var_dims = list(var_group.dimensions.keys()) variable = dataset.variables[var_name] if not var_dims: var_group_parent = var_group # This group doesn't contain dimensions. Look at parent group to find dimensions. while not var_dims: var_group_parent = var_group_parent.parent var_dims = list(var_group_parent.dimensions.keys()) if np.issubdtype( dataset.variables[var_name].dtype, np.dtype(np.datetime64) ) or np.issubdtype( dataset.variables[var_name].dtype, np.dtype(np.timedelta64) ): # Use xarray datetime encoder cf_dt_coder = xr.coding.times.CFDatetimeCoder() encoded_var = cf_dt_coder.encode(dataset.variables[var_name]) variable = encoded_var var_group.createVariable(new_var_name, variable.dtype, var_dims) # Copy attributes var_attrs = variable.attrs var_group.variables[new_var_name].setncatts(var_attrs) # Copy data var_group.variables[new_var_name].set_auto_maskandscale(False) var_group.variables[new_var_name][:] = variable.data # Remove group vars from base dataset for var_name in list(base_dataset.variables.keys()): if GROUP_DELIM in var_name: del base_dataset.variables[var_name] # Remove group dims from base dataset for dim_name in list(base_dataset.dimensions.keys()): if GROUP_DELIM in dim_name: del base_dataset.dimensions[dim_name] # Copy global attributes base_dataset.setncatts(datasets[0].attrs) # Write and close base_dataset.close() def subset(file_to_subset, bbox, output_file, variables=None, # pylint: disable=too-many-branches cut=True, shapefile=None, min_time=None, max_time=None, origin_source=None): """ Subset a given NetCDF file given a bounding box Parameters ---------- file_to_subset : string The location of the file which will be subset output_file : string The file path for the output of the subsetting operation. bbox : np.ndarray The chosen bounding box. This is a tuple of tuples formatted as such: ((west, east), (south, north)). The assumption is that the valid range is ((-180, 180), (-90, 90)). This will be transformed as appropriate if the actual longitude range is 0-360. shapefile : str Name of local shapefile used to subset given file. variables : list, str, optional List of variables to include in the resulting data file. NOTE: This will remove ALL variables which are not included in this list, including coordinate variables! cut : boolean True if the scanline should be cut, False if the scanline should not be cut. Defaults to True. min_time : str ISO timestamp representing the lower bound of the temporal subset to be performed. 
If this value is not provided, the granule will not be subset temporally on the lower bound. max_time : str ISO timestamp representing the upper bound of the temporal subset to be performed. If this value is not provided, the granule will not be subset temporally on the upper bound. """ # Open dataset with netCDF4 first, so we can get group info nc_dataset = nc.Dataset(file_to_subset, mode='r') has_groups = bool(nc_dataset.groups) # If dataset has groups, transform to work with xarray if has_groups: nc_dataset = transform_grouped_dataset(nc_dataset, file_to_subset) args = { 'decode_coords': False, 'mask_and_scale': False, 'decode_times': False } if min_time or max_time: args['decode_times'] = True with xr.open_dataset( xr.backends.NetCDF4DataStore(nc_dataset), **args ) as dataset: lat_var_names, lon_var_names = get_coord_variable_names(dataset) time_var_names = [ get_time_variable_name( dataset, dataset[lat_var_name] ) for lat_var_name in lat_var_names ] chunks_dict = calculate_chunks(dataset) if chunks_dict: dataset = dataset.chunk(chunks_dict) if variables: # Drop variables that aren't explicitly requested, except lat_var_name and # lon_var_name which are needed for subsetting variables = [variable.upper() for variable in variables] vars_to_drop = [ var_name for var_name, var in dataset.data_vars.items() if var_name.upper() not in variables and var_name not in lat_var_names and var_name not in lon_var_names and var_name not in time_var_names ] dataset = dataset.drop_vars(vars_to_drop) if bbox is not None: datasets = subset_with_bbox( dataset=dataset, lat_var_names=lat_var_names, lon_var_names=lon_var_names, time_var_names=time_var_names, bbox=bbox, cut=cut, min_time=min_time, max_time=max_time ) elif shapefile: datasets = [ subset_with_shapefile(dataset, lat_var_names[0], lon_var_names[0], shapefile, cut) ] else: raise ValueError('Either bbox or shapefile must be provided') spatial_bounds = [] for dataset in datasets: set_version_history(dataset, cut, bbox, shapefile) set_json_history(dataset, cut, file_to_subset, bbox, shapefile, origin_source) if has_groups: spatial_bounds.append(get_spatial_bounds( dataset=dataset, lat_var_names=lat_var_names, lon_var_names=lon_var_names )) else: encoding = {} compression = dict(zlib=True, complevel=5, _FillValue=None) if (min_time or max_time) and any(dataset.dims.values()): encoding = { var_name: { 'units': nc_dataset.variables[var_name].__dict__['units'], 'zlib': True, "complevel": 5, "_FillValue": None } for var_name in time_var_names if 'units' in nc_dataset.variables[var_name].__dict__ } for var in dataset.data_vars: if var not in encoding: encoding[var] = compression dataset.load().to_netcdf(output_file, 'w', encoding=encoding) if has_groups: recombine_grouped_datasets(datasets, output_file) return np.array([[ min(lon[0][0][0] for lon in zip(spatial_bounds)), max(lon[0][0][1] for lon in zip(spatial_bounds)) ], [ min(lat[0][1][0] for lat in zip(spatial_bounds)), max(lat[0][1][1] for lat in zip(spatial_bounds)) ]]) return get_spatial_bounds( dataset=dataset, lat_var_names=lat_var_names, lon_var_names=lon_var_names )
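A hedged usage sketch for the subset() entry point above: the bounding box is ((west, east), (south, north)) in the conventional -180..180 / -90..90 ranges (converted internally when the granule uses 0..360 longitudes), the subset granule is written to output_file, and the resulting spatial bounds are returned. The file names, the variable name, and the import alias below are placeholders/assumptions, not artifacts shipped with the package.

import numpy as np

from podaac.subsetter import subset as l2ss  # assumed import path, mirroring the module's own package layout

bbox = np.array([[-74.5, -73.5], [40.0, 41.0]])  # ((west, east), (south, north))

spatial_bounds = l2ss.subset(
    file_to_subset="granule.nc",            # placeholder input granule
    bbox=bbox,
    output_file="granule_subset.nc",        # placeholder output path
    variables=["sea_surface_temperature"],  # optional; coordinate and time vars are always kept
    cut=True,                               # drop whole scanlines that fall outside the box
    min_time="2020-01-01T00:00:00Z",        # optional temporal bounds (ISO timestamps)
    max_time="2020-01-02T00:00:00Z",
)
print(spatial_bounds)  # [[lon_min, lon_max], [lat_min, lat_max]] of the written subset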
the-stack_0_14296
import time import logging from datetime import datetime import pytest from tests.common.fixtures.ptfhost_utils import change_mac_addresses # lgtm[py/unused-import] from tests.common.fixtures.ptfhost_utils import remove_ip_addresses # lgtm[py/unused-import] from tests.common.fixtures.ptfhost_utils import copy_ptftests_directory # lgtm[py/unused-import] from tests.common.fixtures.ptfhost_utils import set_ptf_port_mapping_mode # lgtm[py/unused-import] from tests.common.fixtures.ptfhost_utils import ptf_test_port_map from tests.ptf_runner import ptf_runner from tests.common.dualtor.mux_simulator_control import mux_server_url from tests.common.dualtor.mux_simulator_control import toggle_all_simulator_ports_to_rand_selected_tor from tests.common.dualtor.mux_simulator_control import toggle_all_simulator_ports_to_random_side from tests.common.utilities import is_ipv4_address from tests.common.fixtures.fib_utils import fib_info_files_per_function from tests.common.utilities import wait logger = logging.getLogger(__name__) pytestmark = [ pytest.mark.topology('any') ] # Usually src-mac, dst-mac, vlan-id are optional hash keys. Not all the platform supports these optional hash keys. Not enable these three by default. # The 'ingress-port' key is not used in hash by design. We are doing negative test for 'ingress-port'. # When 'ingress-port' is included in HASH_KEYS, the PTF test will try to inject same packet to different ingress ports # and expect that they are forwarded from same egress port. # HASH_KEYS = ['src-ip', 'dst-ip', 'src-port', 'dst-port', 'ingress-port', 'src-mac', 'dst-mac', 'ip-proto', 'vlan-id'] HASH_KEYS = ['src-ip', 'dst-ip', 'src-port', 'dst-port', 'ingress-port', 'ip-proto'] SRC_IP_RANGE = ['8.0.0.0', '8.255.255.255'] DST_IP_RANGE = ['9.0.0.0', '9.255.255.255'] SRC_IPV6_RANGE = ['20D0:A800:0:00::', '20D0:FFFF:0:00::FFFF'] DST_IPV6_RANGE = ['20D0:A800:0:01::', '20D0:FFFF:0:01::FFFF'] VLANIDS = range(1032, 1279) VLANIP = '192.168.{}.1/24' PTF_QLEN = 2000 DEFAULT_MUX_SERVER_PORT = 8080 PTF_TEST_PORT_MAP = '/root/ptf_test_port_map.json' @pytest.fixture(scope='module') def router_macs(duthosts): return [duthost.facts['router_mac'] for duthost in duthosts] @pytest.fixture(scope="module") def ignore_ttl(duthosts): # on the multi asic devices, the packet can have different ttl based on how the packet is routed # within in the device. So set this flag to mask the ttl in the ptf test for duthost in duthosts: if duthost.sonichost.is_multi_asic: return True return False @pytest.fixture(scope="module") def single_fib_for_duts(tbinfo): # For a T2 topology, we are generating a single fib file across all asics, but have multiple frontend nodes (DUTS). 
if tbinfo['topo']['type'] == "t2": return True return False @pytest.mark.parametrize("ipv4, ipv6, mtu", [pytest.param(True, True, 1514)]) def test_basic_fib(duthosts, ptfhost, ipv4, ipv6, mtu, toggle_all_simulator_ports_to_random_side, fib_info_files_per_function, tbinfo, mux_server_url, router_macs, ignore_ttl, single_fib_for_duts): if 'dualtor' in tbinfo['topo']['name']: wait(30, 'Wait some time for mux active/standby state to be stable after toggled mux state') timestamp = datetime.now().strftime('%Y-%m-%d-%H:%M:%S') # do not test load balancing for vs platform as kernel 4.9 # can only do load balance base on L3 if duthosts[0].facts['asic_type'] in ["vs"]: test_balancing = False else: test_balancing = True logging.info("run ptf test") log_file = "/tmp/fib_test.FibTest.ipv4.{}.ipv6.{}.{}.log".format(ipv4, ipv6, timestamp) logging.info("PTF log file: %s" % log_file) ptf_runner(ptfhost, "ptftests", "fib_test.FibTest", platform_dir="ptftests", params={"fib_info_files": fib_info_files_per_function[:3], # Test at most 3 DUTs "ptf_test_port_map": ptf_test_port_map(ptfhost, tbinfo, duthosts, mux_server_url), "router_macs": router_macs, "ipv4": ipv4, "ipv6": ipv6, "testbed_mtu": mtu, "test_balancing": test_balancing, "ignore_ttl": ignore_ttl, "single_fib_for_duts": single_fib_for_duts}, log_file=log_file, qlen=PTF_QLEN, socket_recv_size=16384) def get_vlan_untag_ports(duthosts, duts_running_config_facts): """Get vlan untagged ports. Args: duthosts (DutHosts): Instance of DutHosts for interacting with DUT hosts. duts_running_config_facts (dict): Running config facts of all DUT hosts. Returns: [type]: [description] """ vlan_untag_ports = {} for duthost in duthosts: if duthost.is_multi_asic: continue ports = [] for asic_cfg_facts in duts_running_config_facts[duthost.hostname]: vlans = asic_cfg_facts.get('VLAN_INTERFACE', {}).keys() for vlan in vlans: vlan_member_info = asic_cfg_facts.get('VLAN_MEMBER', {}).get(vlan, {}) if vlan_member_info: for port_name, tag_mode in vlan_member_info.items(): if tag_mode['tagging_mode'] == 'untagged': ports.append(port_name) vlan_untag_ports[duthost.hostname] = ports return vlan_untag_ports @pytest.fixture(scope="module") def hash_keys(duthost): hash_keys = HASH_KEYS[:] # Copy from global var to avoid side effects of multiple iterations if 'dst-mac' in hash_keys: hash_keys.remove('dst-mac') # do not test load balancing on L4 port on vs platform as kernel 4.9 # can only do load balance base on L3 if duthost.facts['asic_type'] in ["vs"]: if 'src-port' in hash_keys: hash_keys.remove('src-port') if 'dst-port' in hash_keys: hash_keys.remove('dst-port') if duthost.facts['asic_type'] in ["mellanox"]: if 'ip-proto' in hash_keys: hash_keys.remove('ip-proto') if duthost.facts['asic_type'] in ["barefoot"]: if 'ingress-port' in hash_keys: hash_keys.remove('ingress-port') # removing ingress-port and ip-proto from hash_keys not supported by Marvell SAI if duthost.facts['platform'] in ['armhf-nokia_ixs7215_52x-r0']: if 'ip-proto' in hash_keys: hash_keys.remove('ip-proto') if 'ingress-port' in hash_keys: hash_keys.remove('ingress-port') # remove the ingress port from multi asic platform # In multi asic platform each asic has different hash seed, # the same packet coming in different asic # could egress out of different port # the hash_test condition for hash_key == ingress_port will fail if duthost.sonichost.is_multi_asic: hash_keys.remove('ingress-port') return hash_keys def configure_vlan(duthost, ports): for vlan in VLANIDS: duthost.shell('config vlan add {}'.format(vlan)) for 
port in ports: duthost.shell('config vlan member add {} {}'.format(vlan, port)) duthost.shell('config interface ip add Vlan{} '.format(vlan) + VLANIP.format(vlan%256)) time.sleep(5) def unconfigure_vlan(duthost, ports): for vlan in VLANIDS: for port in ports: duthost.shell('config vlan member del {} {}'.format(vlan, port)) duthost.shell('config interface ip remove Vlan{} '.format(vlan) + VLANIP.format(vlan%256)) duthost.shell('config vlan del {}'.format(vlan)) time.sleep(5) @pytest.fixture def setup_vlan(tbinfo, duthosts, duts_running_config_facts, hash_keys): vlan_untag_ports = get_vlan_untag_ports(duthosts, duts_running_config_facts) need_to_clean_vlan = False # add some vlan for hash_key vlan-id test if tbinfo['topo']['type'] == 't0' and 'dualtor' not in tbinfo['topo']['name'] and 'vlan-id' in hash_keys: for duthost in duthosts: configure_vlan(duthost, vlan_untag_ports[duthost.hostname]) need_to_clean_vlan = True yield # remove added vlan if need_to_clean_vlan: for duthost in duthosts: unconfigure_vlan(duthost, vlan_untag_ports[duthost.hostname]) @pytest.fixture(params=["ipv4", "ipv6"]) def ipver(request): return request.param @pytest.fixture def add_default_route_to_dut(duts_running_config_facts, duthosts, tbinfo): """ Add a default route to the device for storage backend testbed. This is to ensure the IO packets could be successfully directed. """ if "backend" in tbinfo["topo"]["name"]: logging.info("Add default route on the DUT.") try: for duthost in duthosts: cfg_facts = duts_running_config_facts[duthost.hostname] for asic_index, asic_cfg_facts in enumerate(cfg_facts): asic = duthost.asic_instance(asic_index) bgp_neighbors = asic_cfg_facts["BGP_NEIGHBOR"] ipv4_cmd_parts = ["ip route add default"] ipv6_cmd_parts = ["ip -6 route add default"] for neighbor in bgp_neighbors.keys(): if is_ipv4_address(neighbor): ipv4_cmd_parts.append("nexthop via %s" % neighbor) else: ipv6_cmd_parts.append("nexthop via %s" % neighbor) ipv4_cmd_parts.sort() ipv6_cmd_parts.sort() # limit to 4 nexthop entries ipv4_cmd = " ".join(ipv4_cmd_parts[:5]) ipv6_cmd = " ".join(ipv6_cmd_parts[:5]) asic.shell(ipv4_cmd) asic.shell(ipv6_cmd) yield finally: logging.info("Remove default route on the DUT.") for duthost in duthosts: for asic in duthost.asics: if asic.is_it_backend(): continue asic.shell("ip route del default", module_ignore_errors=True) asic.shell("ip -6 route del default", module_ignore_errors=True) else: yield def test_hash(add_default_route_to_dut, duthosts, fib_info_files_per_function, setup_vlan, hash_keys, ptfhost, ipver, toggle_all_simulator_ports_to_rand_selected_tor, tbinfo, mux_server_url, router_macs, ignore_ttl, single_fib_for_duts): if 'dualtor' in tbinfo['topo']['name']: wait(30, 'Wait some time for mux active/standby state to be stable after toggled mux state') timestamp = datetime.now().strftime('%Y-%m-%d-%H:%M:%S') log_file = "/tmp/hash_test.HashTest.{}.{}.log".format(ipver, timestamp) logging.info("PTF log file: %s" % log_file) if ipver == "ipv4": src_ip_range = SRC_IP_RANGE dst_ip_range = DST_IP_RANGE else: src_ip_range = SRC_IPV6_RANGE dst_ip_range = DST_IPV6_RANGE ptf_runner(ptfhost, "ptftests", "hash_test.HashTest", platform_dir="ptftests", params={"fib_info_files": fib_info_files_per_function[:3], # Test at most 3 DUTs "ptf_test_port_map": ptf_test_port_map(ptfhost, tbinfo, duthosts, mux_server_url), "hash_keys": hash_keys, "src_ip_range": ",".join(src_ip_range), "dst_ip_range": ",".join(dst_ip_range), "router_macs": router_macs, "vlan_ids": VLANIDS, "ignore_ttl":ignore_ttl, 
"single_fib_for_duts": single_fib_for_duts }, log_file=log_file, qlen=PTF_QLEN, socket_recv_size=16384)
the-stack_0_14298
# Given a binary tree # struct TreeLinkNode { # TreeLinkNode *left; # TreeLinkNode *right; # TreeLinkNode *next; # } # Populate each next pointer to point to its next right node. If there is no next right node, the next pointer should be set to NULL. # Initially, all next pointers are set to NULL. # Note: # You may only use constant extra space. # Recursive approach is fine, implicit stack space does not count as extra space for this problem. # Example: # Given the following binary tree, # 1 # / \ # 2 3 # / \ \ # 4 5 7 # After calling your function, the tree should look like: # 1 -> NULL # / \ # 2 -> 3 -> NULL # / \ \ # 4-> 5 -> 7 -> NULL # Definition for binary tree with next pointer. # class TreeLinkNode: # def __init__(self, x): # self.val = x # self.left = None # self.right = None # self.next = None class Solution: # @param root, a tree link node # @return nothing def connect(self, root): # from collections import deque if root: queue = [root] while queue: tem = [] for i in range(len(queue)): if queue[i].left: tem.append(queue[i].left) if queue[i].right: tem.append(queue[i].right) for i in range(len(queue)-1): queue[i].next = queue[i+1] queue[len(queue) - 1].next = None queue = tem # Time: O(n) # Space: O(1) # Difficulty: medium
the-stack_0_14300
# # Copyright 2020--2021 IBM Corp. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # "Format to loader map." import os from pathlib import Path import re from typing import Any, Dict, Mapping, Optional, Union from .._schema import SchemaDict from .._typing import PathLike from ._base import Loader from .audio import WaveLoader from .image import PillowLoader from .text import PlainTextLoader from .table import CSVPandasLoader class FormatLoaderMap: """Manage a map between formats and loaders. This is usually used to determine which loader should be used for a given format. :param m: A dict that maps formats to loaders. """ def __init__(self, m: Optional[Mapping[str, Loader]] = None) -> None: """Constructor method. """ self._map: Dict[str, Loader] = {} if m is not None: for fmt, loader in m.items(): self.register_loader(fmt, loader) def register_loader(self, fmt: str, loader: Loader) -> None: """Register a loader. If the format exists in the table, update it. :param fmt: The format. :param loader: The corresponding loader. :raise TypeError: ``loader`` is not a :class:`Loader` object. """ if not isinstance(loader, Loader): raise TypeError(f'loader "{loader}" must be a Loader instance.') # We may support an overriding check in the future self._map[fmt] = loader def __getitem__(self, fmt: str) -> Loader: """Get the loader of a given format. :param fmt: The format. """ return self._map[fmt] def __contains__(self, fmt: str) -> bool: """Whether a format is covered by this format loader map. :param fmt: Name of the format. """ return fmt in self._map _default_format_loader_map: FormatLoaderMap = FormatLoaderMap({ 'text/plain': PlainTextLoader(), 'table/csv': CSVPandasLoader(), 'image/jpeg': PillowLoader(), 'image/png': PillowLoader(), 'audio/wav': WaveLoader(), }) def load_data_files(fmt: Union[str, SchemaDict], data_dir: PathLike, path: Union[str, SchemaDict], *, format_loader_map: FormatLoaderMap = None) -> Any: """Load data files. :param fmt: The format. :param data_dir: The path to the directory that holds the data files. :param path: If it is a :class:`str`, it is the path to the file. If it is a :class:`dict`, it consists of two keys: ``type`` and ``value``. If ``type`` is ``"regex"``, ``value`` is the regular expression of the paths of the files. :param format_loader_map: The format loader map to use. :raises TypeError: ``fmt`` or ``path`` is neither a string nor a :class:`SchemaDict`. :raises ValueError: If ``path`` is a :class:`SchemaDict`, but ``path[type]`` is not ``"regex"``. :return: Loaded data file objects. """ # We only support path as a plain path for now, but we will extend path to support regex and other types. 
if format_loader_map is None: format_loader_map = _default_format_loader_map if isinstance(fmt, str): fmt_id: str = fmt fmt_options: SchemaDict = {} elif isinstance(fmt, Dict): # In Python 3.8+, this can be done with isinstance(fmt, typing.get_args(SchemaDict)) fmt_id = fmt['id'] fmt_options = fmt.get('options', {}) else: raise TypeError(f'Parameter "fmt" must be a string or a dict, but it is of type "{type(fmt)}".') if fmt_id not in format_loader_map: raise RuntimeError(f'The format loader map does not specify a loader for format "{fmt_id}".') data_dir = Path(data_dir) loader = format_loader_map[fmt_id] if isinstance(path, str): return loader.load(data_dir / path, fmt_options) elif isinstance(path, Dict): # In Python 3.8+, this can be done with isinstance(fmt, typing.get_args(SchemaDict)) path_type = path['type'] if path_type == 'regex': loaded_data = {} path_value = path['value'] # We don't use pathlib to operate the string here because of Windows compatibility and character escaping. path_pattern = re.compile(re.escape(str(data_dir) + os.path.sep) + path_value.replace('/', re.escape(os.path.sep))) for f in data_dir.rglob('*'): if path_pattern.fullmatch(str(f)): loaded_data[str(f)] = loader.load(data_dir / f, fmt_options) return loaded_data else: raise ValueError(f'Unknown type of path "{path_type}".') else: raise TypeError(f'Unsupported type of the "path" parameter: {type(path)}.')
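A hedged usage sketch for FormatLoaderMap and load_data_files() above, written as if the definitions are already in scope (the package's public import path is not shown in this file, so explicit imports for them are omitted); the directory layout, the regex, and the extra 'table/tsv' format id are illustrative assumptions.

from pathlib import Path

# Start from the built-in CSV loader and expose it under an additional, assumed format id.
custom_map = FormatLoaderMap({'table/csv': CSVPandasLoader()})
custom_map.register_loader('table/tsv', CSVPandasLoader())  # same loader, different format id

# Load every file matching the regex (relative to data_dir); returns {file path: loaded table}.
tables = load_data_files(
    fmt={'id': 'table/csv', 'options': {}},              # dict form: format id plus loader options
    data_dir=Path('data'),                               # placeholder data directory
    path={'type': 'regex', 'value': r'splits/.*\.csv'},  # regex path form handled above
    format_loader_map=custom_map,
)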
the-stack_0_14305
import datetime from pycspr import crypto from pycspr.serialisation.json.encoder.cl import encode_cl_value from pycspr.types import Deploy from pycspr.types import DeployApproval from pycspr.types import DeployHeader from pycspr.types import ExecutionArgument from pycspr.types import ExecutableDeployItem from pycspr.types import ExecutableDeployItem_ModuleBytes from pycspr.types import ExecutableDeployItem_StoredContractByHash from pycspr.types import ExecutableDeployItem_StoredContractByHashVersioned from pycspr.types import ExecutableDeployItem_StoredContractByName from pycspr.types import ExecutableDeployItem_StoredContractByNameVersioned from pycspr.types import ExecutableDeployItem_Transfer from pycspr.types import PublicKey from pycspr.types import Timestamp def encode_deploy(entity: Deploy) -> dict: """Encodes a deploy. """ return { "approvals": [encode_deploy_approval(i) for i in entity.approvals], "hash": entity.hash.hex(), "header": encode_deploy_header(entity.header), "payment": encode_execution_info(entity.payment), "session": encode_execution_info(entity.session) } def encode_deploy_approval(entity: DeployApproval) -> dict: """Encodes a deploy approval. """ return { "signature": entity.signature.hex(), "signer": entity.signer.hex() } def encode_deploy_header(entity: DeployHeader) -> dict: """Encodes a deploy header. """ return { "account": encode_public_key(entity.account_public_key), "body_hash": entity.body_hash.hex(), "chain_name": entity.chain_name, "dependencies": entity.dependencies, "gas_price": entity.gas_price, "timestamp": encode_timestamp(entity.timestamp), "ttl": entity.ttl.humanized } def encode_execution_argument(entity: ExecutionArgument) -> dict: """Encodes an execution argument. """ return [ entity.name, encode_cl_value(entity.value) ] def encode_execution_info(entity: ExecutableDeployItem) -> dict: """Encodes execution information to be interpreted at a node. 
""" def _encode_module_bytes() -> dict: return { "ModuleBytes": { "args": [encode_execution_argument(i) for i in entity.args], "module_bytes": entity.module_bytes.hex() } } def _encode_stored_contract_by_hash() -> dict: return { "StoredContractByHash": { "args": [encode_execution_argument(i) for i in entity.args], "entry_point": entity.entry_point, "hash": entity.hash.hex() } } def _encode_stored_contract_by_hash_versioned() -> dict: return { "StoredContractByHashVersioned": { "args": [encode_execution_argument(i) for i in entity.args], "entry_point": entity.entry_point, "hash": entity.hash.hex(), "version": entity.version } } def _encode_stored_contract_by_name() -> dict: return { "StoredContractByName": { "args": [encode_execution_argument(i) for i in entity.args], "entry_point": entity.entry_point, "name": entity.name } } def _encode_stored_contract_by_name_versioned() -> dict: return { "StoredContractByNameVersioned": { "args": [encode_execution_argument(i) for i in entity.args], "entry_point": entity.entry_point, "name": entity.name, "version": entity.version } } def _encode_session_for_transfer() -> dict: return { "Transfer": { "args": [encode_execution_argument(i) for i in entity.args] } } _ENCODERS = { ExecutableDeployItem_ModuleBytes: _encode_module_bytes, ExecutableDeployItem_StoredContractByHash: _encode_stored_contract_by_hash, ExecutableDeployItem_StoredContractByHashVersioned: _encode_stored_contract_by_hash_versioned, ExecutableDeployItem_StoredContractByName: _encode_stored_contract_by_name, ExecutableDeployItem_StoredContractByNameVersioned: _encode_stored_contract_by_name_versioned, ExecutableDeployItem_Transfer: _encode_session_for_transfer, } return _ENCODERS[type(entity)]() def encode_public_key(entity: PublicKey) -> str: """Encodes a public key. """ return entity.account_key.hex() def encode_timestamp(entity: Timestamp) -> str: """Encodes a millisecond precise timestamp. """ # Node understands ISO millisecond precise timestamps. as_ts_3_decimal_places = round(entity, 3) as_datetime = datetime.datetime.fromtimestamp(as_ts_3_decimal_places, tz=datetime.timezone.utc) as_iso = as_datetime.isoformat() return f"{as_iso[:-9]}Z"